/* bsd/vfs/kpi_vfs.c — from xnu-792.25.20 (git-blame scrape header removed) */
/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kpi_vfs.c
 */
62
63/*
64 * External virtual filesystem routines
65 */
66
67#undef DIAGNOSTIC
68#define DIAGNOSTIC 1
69
70#include <sys/param.h>
71#include <sys/systm.h>
72#include <sys/proc_internal.h>
73#include <sys/kauth.h>
74#include <sys/mount.h>
75#include <sys/mount_internal.h>
76#include <sys/time.h>
77#include <sys/vnode_internal.h>
78#include <sys/stat.h>
79#include <sys/namei.h>
80#include <sys/ucred.h>
81#include <sys/buf.h>
82#include <sys/errno.h>
83#include <sys/malloc.h>
84#include <sys/domain.h>
85#include <sys/mbuf.h>
86#include <sys/syslog.h>
87#include <sys/ubc.h>
88#include <sys/vm.h>
89#include <sys/sysctl.h>
90#include <sys/filedesc.h>
91#include <sys/fsevents.h>
92#include <sys/user.h>
93#include <sys/lockf.h>
94#include <sys/xattr.h>
95
96#include <kern/assert.h>
97#include <kern/kalloc.h>
98
0c530ab8
A
99#include <libkern/OSByteOrder.h>
100
91447636
A
101#include <miscfs/specfs/specdev.h>
102
103#include <mach/mach_types.h>
104#include <mach/memory_object_types.h>
105
106#define ESUCCESS 0
107#undef mount_t
108#undef vnode_t
109
110#define COMPAT_ONLY
111
112
113#define THREAD_SAFE_FS(VP) \
114 ((VP)->v_unsafefs ? 0 : 1)
115
116#define NATIVE_XATTR(VP) \
117 ((VP)->v_mount ? (VP)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR : 0)
118
119static void xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context,
120 int thread_safe, int force);
121static void xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
122 vfs_context_t context, int thread_safe);
123
124
/*
 * Flag a vnode so that VNOP_INACTIVE will run when its last iocount is
 * dropped; also purges its name cache entries immediately.
 */
static void
vnode_setneedinactive(vnode_t vp)
{
	cache_purge(vp);

	vnode_lock(vp);
	vp->v_lflag |= VL_NEEDINACTIVE;
	vnode_unlock(vp);
}


/*
 * Acquire the per-filesystem serialization for a vnode that belongs to
 * a non-thread-safe ("unsafe") filesystem.
 *
 * If 'funnel_state' is non-NULL the kernel funnel is entered first and
 * the prior funnel state is returned through it; the caller must pass
 * the same pointer to unlock_fsnode() to restore it.
 *
 * Returns 0 with the fsnode lock held (recursively for the owning
 * thread, tracked in fsnode_count), or ENOENT -- funnel state already
 * restored -- if the vnode is dead or being terminated.
 */
int
lock_fsnode(vnode_t vp, int *funnel_state)
{
	if (funnel_state)
		*funnel_state = thread_funnel_set(kernel_flock, TRUE);

	if (vp->v_unsafefs) {
		if (vp->v_unsafefs->fsnodeowner == current_thread()) {
			/* recursive acquisition by the current owner */
			vp->v_unsafefs->fsnode_count++;
		} else {
			lck_mtx_lock(&vp->v_unsafefs->fsnodelock);

			/* re-check liveness now that the mutex is held */
			if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) {
				lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);

				if (funnel_state)
					(void) thread_funnel_set(kernel_flock, *funnel_state);
				return (ENOENT);
			}
			vp->v_unsafefs->fsnodeowner = current_thread();
			vp->v_unsafefs->fsnode_count = 1;
		}
	}
	return (0);
}


/*
 * Release the serialization taken by lock_fsnode() and, if
 * 'funnel_state' is non-NULL, restore the caller's saved funnel state.
 * The mutex is only dropped when the recursion count reaches zero.
 */
void
unlock_fsnode(vnode_t vp, int *funnel_state)
{
	if (vp->v_unsafefs) {
		if (--vp->v_unsafefs->fsnode_count == 0) {
			vp->v_unsafefs->fsnodeowner = NULL;
			lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
		}
	}
	if (funnel_state)
		(void) thread_funnel_set(kernel_flock, *funnel_state);
}
175
176
177
178/* ====================================================================== */
179/* ************ EXTERNAL KERNEL APIS ********************************** */
180/* ====================================================================== */
181
182/*
183 * prototypes for exported VFS operations
184 */
/*
 * Call the filesystem's mount entry point.
 *
 * Returns ENOTSUP if the mount is dead or the FS has no vfs_mount
 * handler.  The kernel funnel is taken around the call for
 * filesystems not marked thread safe.  A 64-bit caller is rejected
 * with ENOTSUP unless the FS advertises 64-bit readiness (its mount
 * argument structure would otherwise be misinterpreted).
 */
int
VFS_MOUNT(struct mount * mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
{
	int error;
	int thread_safe;
	int funnel_state = 0;

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
		return(ENOTSUP);

	thread_safe = mp->mnt_vtable->vfc_threadsafe;


	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}

	if (vfs_context_is64bit(context)) {
		if (vfs_64bitready(mp)) {
			error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context);
		}
		else {
			error = ENOTSUP;
		}
	}
	else {
		error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context);
	}

	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (error);
}
219
220int
221VFS_START(struct mount * mp, int flags, vfs_context_t context)
222{
223 int error;
224 int thread_safe;
225 int funnel_state = 0;
226
227 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
228 return(ENOTSUP);
229
230 thread_safe = mp->mnt_vtable->vfc_threadsafe;
231
232 if (!thread_safe) {
233 funnel_state = thread_funnel_set(kernel_flock, TRUE);
234 }
235 error = (*mp->mnt_op->vfs_start)(mp, flags, context);
236 if (!thread_safe) {
237 (void) thread_funnel_set(kernel_flock, funnel_state);
238 }
239 return (error);
240}
241
242int
243VFS_UNMOUNT(struct mount *mp, int flags, vfs_context_t context)
244{
245 int error;
246 int thread_safe;
247 int funnel_state = 0;
248
249 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
250 return(ENOTSUP);
251
252 thread_safe = mp->mnt_vtable->vfc_threadsafe;
253
254 if (!thread_safe) {
255 funnel_state = thread_funnel_set(kernel_flock, TRUE);
256 }
257 error = (*mp->mnt_op->vfs_unmount)(mp, flags, context);
258 if (!thread_safe) {
259 (void) thread_funnel_set(kernel_flock, funnel_state);
260 }
261 return (error);
262}
263
264int
265VFS_ROOT(struct mount * mp, struct vnode ** vpp, vfs_context_t context)
266{
267 int error;
268 int thread_safe;
269 int funnel_state = 0;
270 struct vfs_context acontext;
271
272 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
273 return(ENOTSUP);
274
275 if (context == NULL) {
276 acontext.vc_proc = current_proc();
277 acontext.vc_ucred = kauth_cred_get();
278 context = &acontext;
279 }
280 thread_safe = mp->mnt_vtable->vfc_threadsafe;
281
282 if (!thread_safe) {
283 funnel_state = thread_funnel_set(kernel_flock, TRUE);
284 }
285 error = (*mp->mnt_op->vfs_root)(mp, vpp, context);
286 if (!thread_safe) {
287 (void) thread_funnel_set(kernel_flock, funnel_state);
288 }
289 return (error);
290}
291
292int
293VFS_QUOTACTL(struct mount *mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t context)
294{
295 int error;
296 int thread_safe;
297 int funnel_state = 0;
298
299 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
300 return(ENOTSUP);
301
302 thread_safe = mp->mnt_vtable->vfc_threadsafe;
303
304 if (!thread_safe) {
305 funnel_state = thread_funnel_set(kernel_flock, TRUE);
306 }
307 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, context);
308 if (!thread_safe) {
309 (void) thread_funnel_set(kernel_flock, funnel_state);
310 }
311 return (error);
312}
313
314int
315VFS_GETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context)
316{
317 int error;
318 int thread_safe;
319 int funnel_state = 0;
320 struct vfs_context acontext;
321
322 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
323 return(ENOTSUP);
324
325 if (context == NULL) {
326 acontext.vc_proc = current_proc();
327 acontext.vc_ucred = kauth_cred_get();
328 context = &acontext;
329 }
330 thread_safe = mp->mnt_vtable->vfc_threadsafe;
331
332 if (!thread_safe) {
333 funnel_state = thread_funnel_set(kernel_flock, TRUE);
334 }
335 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, context);
336 if (!thread_safe) {
337 (void) thread_funnel_set(kernel_flock, funnel_state);
338 }
339 return(error);
340}
341
342int
343VFS_SETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context)
344{
345 int error;
346 int thread_safe;
347 int funnel_state = 0;
348 struct vfs_context acontext;
349
350 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
351 return(ENOTSUP);
352
353 if (context == NULL) {
354 acontext.vc_proc = current_proc();
355 acontext.vc_ucred = kauth_cred_get();
356 context = &acontext;
357 }
358 thread_safe = mp->mnt_vtable->vfc_threadsafe;
359
360 if (!thread_safe) {
361 funnel_state = thread_funnel_set(kernel_flock, TRUE);
362 }
363 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, context);
364 if (!thread_safe) {
365 (void) thread_funnel_set(kernel_flock, funnel_state);
366 }
367 return(error);
368}
369
370int
371VFS_SYNC(struct mount *mp, int flags, vfs_context_t context)
372{
373 int error;
374 int thread_safe;
375 int funnel_state = 0;
376 struct vfs_context acontext;
377
378 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
379 return(ENOTSUP);
380
381 if (context == NULL) {
382 acontext.vc_proc = current_proc();
383 acontext.vc_ucred = kauth_cred_get();
384 context = &acontext;
385 }
386 thread_safe = mp->mnt_vtable->vfc_threadsafe;
387
388 if (!thread_safe) {
389 funnel_state = thread_funnel_set(kernel_flock, TRUE);
390 }
391 error = (*mp->mnt_op->vfs_sync)(mp, flags, context);
392 if (!thread_safe) {
393 (void) thread_funnel_set(kernel_flock, funnel_state);
394 }
395 return(error);
396}
397
398int
399VFS_VGET(struct mount * mp, ino64_t ino, struct vnode **vpp, vfs_context_t context)
400{
401 int error;
402 int thread_safe;
403 int funnel_state = 0;
404 struct vfs_context acontext;
405
406 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
407 return(ENOTSUP);
408
409 if (context == NULL) {
410 acontext.vc_proc = current_proc();
411 acontext.vc_ucred = kauth_cred_get();
412 context = &acontext;
413 }
414 thread_safe = mp->mnt_vtable->vfc_threadsafe;
415
416 if (!thread_safe) {
417 funnel_state = thread_funnel_set(kernel_flock, TRUE);
418 }
419 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, context);
420 if (!thread_safe) {
421 (void) thread_funnel_set(kernel_flock, funnel_state);
422 }
423 return(error);
424}
425
426int
427VFS_FHTOVP(struct mount * mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t context)
428{
429 int error;
430 int thread_safe;
431 int funnel_state = 0;
432 struct vfs_context acontext;
433
434 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
435 return(ENOTSUP);
436
437 if (context == NULL) {
438 acontext.vc_proc = current_proc();
439 acontext.vc_ucred = kauth_cred_get();
440 context = &acontext;
441 }
442 thread_safe = mp->mnt_vtable->vfc_threadsafe;
443
444 if (!thread_safe) {
445 funnel_state = thread_funnel_set(kernel_flock, TRUE);
446 }
447 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, context);
448 if (!thread_safe) {
449 (void) thread_funnel_set(kernel_flock, funnel_state);
450 }
451 return(error);
452}
453
454int
455VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t context)
456{
457 int error;
458 int thread_safe;
459 int funnel_state = 0;
460 struct vfs_context acontext;
461
462 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
463 return(ENOTSUP);
464
465 if (context == NULL) {
466 acontext.vc_proc = current_proc();
467 acontext.vc_ucred = kauth_cred_get();
468 context = &acontext;
469 }
470 thread_safe = THREAD_SAFE_FS(vp);
471
472 if (!thread_safe) {
473 funnel_state = thread_funnel_set(kernel_flock, TRUE);
474 }
475 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, context);
476 if (!thread_safe) {
477 (void) thread_funnel_set(kernel_flock, funnel_state);
478 }
479 return(error);
480}
481
482
483/* returns a copy of vfs type name for the mount_t */
484void
485vfs_name(mount_t mp, char * buffer)
486{
487 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
488}
489
490/* returns vfs type number for the mount_t */
491int
492vfs_typenum(mount_t mp)
493{
494 return(mp->mnt_vtable->vfc_typenum);
495}
496
497
498/* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
499uint64_t
500vfs_flags(mount_t mp)
501{
502 return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
503}
504
505/* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
506void
507vfs_setflags(mount_t mp, uint64_t flags)
508{
509 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
510
511 mp->mnt_flag |= lflags;
512}
513
514/* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
515void
516vfs_clearflags(mount_t mp , uint64_t flags)
517{
518 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
519
520 mp->mnt_flag &= ~lflags;
521}
522
523/* Is the mount_t ronly and upgrade read/write requested? */
524int
525vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
526{
527 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
528}
529
530
531/* Is the mount_t mounted ronly */
532int
533vfs_isrdonly(mount_t mp)
534{
535 return (mp->mnt_flag & MNT_RDONLY);
536}
537
538/* Is the mount_t mounted for filesystem synchronous writes? */
539int
540vfs_issynchronous(mount_t mp)
541{
542 return (mp->mnt_flag & MNT_SYNCHRONOUS);
543}
544
545/* Is the mount_t mounted read/write? */
546int
547vfs_isrdwr(mount_t mp)
548{
549 return ((mp->mnt_flag & MNT_RDONLY) == 0);
550}
551
552
553/* Is mount_t marked for update (ie MNT_UPDATE) */
554int
555vfs_isupdate(mount_t mp)
556{
557 return (mp->mnt_flag & MNT_UPDATE);
558}
559
560
561/* Is mount_t marked for reload (ie MNT_RELOAD) */
562int
563vfs_isreload(mount_t mp)
564{
565 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
566}
567
568/* Is mount_t marked for reload (ie MNT_FORCE) */
569int
570vfs_isforce(mount_t mp)
571{
572 if ((mp->mnt_flag & MNT_FORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT))
573 return(1);
574 else
575 return(0);
576}
577
578int
579vfs_64bitready(mount_t mp)
580{
581 if ((mp->mnt_vtable->vfc_64bitready))
582 return(1);
583 else
584 return(0);
585}
586
587int
588vfs_authopaque(mount_t mp)
589{
590 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
591 return(1);
592 else
593 return(0);
594}
595
596int
597vfs_authopaqueaccess(mount_t mp)
598{
599 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
600 return(1);
601 else
602 return(0);
603}
604
605void
606vfs_setauthopaque(mount_t mp)
607{
608 mount_lock(mp);
609 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
610 mount_unlock(mp);
611}
612
613void
614vfs_setauthopaqueaccess(mount_t mp)
615{
616 mount_lock(mp);
617 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
618 mount_unlock(mp);
619}
620
621void
622vfs_clearauthopaque(mount_t mp)
623{
624 mount_lock(mp);
625 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
626 mount_unlock(mp);
627}
628
629void
630vfs_clearauthopaqueaccess(mount_t mp)
631{
632 mount_lock(mp);
633 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
634 mount_unlock(mp);
635}
636
637void
638vfs_setextendedsecurity(mount_t mp)
639{
640 mount_lock(mp);
641 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
642 mount_unlock(mp);
643}
644
645void
646vfs_clearextendedsecurity(mount_t mp)
647{
648 mount_lock(mp);
649 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
650 mount_unlock(mp);
651}
652
653int
654vfs_extendedsecurity(mount_t mp)
655{
656 return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
657}
658
659/* returns the max size of short symlink in this mount_t */
660uint32_t
661vfs_maxsymlen(mount_t mp)
662{
663 return(mp->mnt_maxsymlinklen);
664}
665
666/* set max size of short symlink on mount_t */
667void
668vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
669{
670 mp->mnt_maxsymlinklen = symlen;
671}
672
673/* return a pointer to the RO vfs_statfs associated with mount_t */
674struct vfsstatfs *
675vfs_statfs(mount_t mp)
676{
677 return(&mp->mnt_vfsstat);
678}
679
680int
681vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
682{
683 int error;
684 char *vname;
685
686 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
687 return(error);
688
689 /*
690 * If we have a filesystem create time, use it to default some others.
691 */
692 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
693 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
694 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
695 }
696
697 return(0);
698}
699
700int
701vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
702{
703 int error;
704
705 if (vfs_isrdonly(mp))
706 return EROFS;
707
708 error = VFS_SETATTR(mp, vfa, ctx);
709
710 /*
711 * If we had alternate ways of setting vfs attributes, we'd
712 * fall back here.
713 */
714
715 return error;
716}
717
718/* return the private data handle stored in mount_t */
719void *
720vfs_fsprivate(mount_t mp)
721{
722 return(mp->mnt_data);
723}
724
725/* set the private data handle in mount_t */
726void
727vfs_setfsprivate(mount_t mp, void *mntdata)
728{
729 mp->mnt_data = mntdata;
730}
731
732
733/*
734 * return the block size of the underlying
735 * device associated with mount_t
736 */
737int
738vfs_devblocksize(mount_t mp) {
739
740 return(mp->mnt_devblocksize);
741}
742
743
744/*
745 * return the io attributes associated with mount_t
746 */
747void
748vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
749{
750 if (mp == NULL) {
751 ioattrp->io_maxreadcnt = MAXPHYS;
752 ioattrp->io_maxwritecnt = MAXPHYS;
753 ioattrp->io_segreadcnt = 32;
754 ioattrp->io_segwritecnt = 32;
755 ioattrp->io_maxsegreadsize = MAXPHYS;
756 ioattrp->io_maxsegwritesize = MAXPHYS;
757 ioattrp->io_devblocksize = DEV_BSIZE;
758 } else {
759 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
760 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
761 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
762 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
763 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
764 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
765 ioattrp->io_devblocksize = mp->mnt_devblocksize;
766 }
767 ioattrp->io_reserved[0] = 0;
768 ioattrp->io_reserved[1] = 0;
769 ioattrp->io_reserved[2] = 0;
770}
771
772
/*
 * Set the I/O attributes for a mount from *ioattrp.
 * A NULL mount is silently ignored.  No locking is taken here; the
 * caller is responsible for any needed serialization.
 */
void
vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
{
	if (mp == NULL)
		return;
	mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
	mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
	mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
	mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
	mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
	mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
	mp->mnt_devblocksize = ioattrp->io_devblocksize;
}
789
790/*
791 * Add a new filesystem into the kernel specified in passed in
792 * vfstable structure. It fills in the vnode
793 * dispatch vector that is to be passed to when vnodes are created.
794 * It returns a handle which is to be used to when the FS is to be removed
795 */
796typedef int (*PFI)(void *);
797extern int vfs_opv_numops;
/*
 * Register the filesystem described by 'vfe' with the kernel.
 *
 * Builds a vfstable entry, allocates and populates the vnode-operation
 * dispatch vector(s) for the FS (unfilled slots default to the FS's
 * vnop_default entry), links the table in via vfstable_add(), and
 * invokes the filesystem's vfs_init hook.  On success *handle receives
 * the value to later pass to vfs_fsremove().
 *
 * Returns EINVAL for a NULL entry, a vector count outside 1..5, or
 * missing vfsops/opvdescs; 0 on success.
 */
errno_t
vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
{
#pragma unused(data)	/* NOTE(review): there is no 'data' parameter -- stale pragma, confirm */
	struct vfstable *newvfstbl = NULL;
	int i,j;
	int (***opv_desc_vector_p)(void *);
	int (**opv_desc_vector)(void *);
	struct vnodeopv_entry_desc *opve_descp;
	int desccount;
	int descsize;
	PFI *descptr;

	/*
	 * This routine is responsible for all the initialization that would
	 * ordinarily be done as part of the system startup;
	 */

	if (vfe == (struct vfs_fsentry *)0)
		return(EINVAL);

	/* validate the vector count and required tables */
	desccount = vfe->vfe_vopcnt;
	if ((desccount <=0) || ((desccount > 5)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
		|| (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
		return(EINVAL);


	MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
	       M_WAITOK);
	bzero(newvfstbl, sizeof(struct vfstable));
	newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
	strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
	/* either take the next dynamic type number or the one the FS asked for */
	if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
		newvfstbl->vfc_typenum = maxvfsconf++;
	else
		newvfstbl->vfc_typenum = vfe->vfe_fstypenum;

	newvfstbl->vfc_refcount = 0;
	newvfstbl->vfc_flags = 0;
	newvfstbl->vfc_mountroot = NULL;
	newvfstbl->vfc_next = NULL;
	newvfstbl->vfc_threadsafe = 0;
	newvfstbl->vfc_vfsflags = 0;
	if (vfe->vfe_flags & VFS_TBL64BITREADY)
		newvfstbl->vfc_64bitready= 1;
	/* both TBLTHREADSAFE and TBLFSNODELOCK register as "thread safe" here */
	if (vfe->vfe_flags & VFS_TBLTHREADSAFE)
		newvfstbl->vfc_threadsafe= 1;
	if (vfe->vfe_flags & VFS_TBLFSNODELOCK)
		newvfstbl->vfc_threadsafe= 1;
	if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
		newvfstbl->vfc_flags |= MNT_LOCAL;
	if (vfe->vfe_flags & VFS_TBLLOCALVOL)
		newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
	else
		newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;


	/*
	 * Allocate and init the vectors.
	 * Also handle backwards compatibility.
	 *
	 * We allocate one large block to hold all <desccount>
	 * vnode operation vectors stored contiguously.
	 */
	/* XXX - shouldn't be M_TEMP */

	descsize = desccount * vfs_opv_numops * sizeof(PFI);
	MALLOC(descptr, PFI *, descsize,
	       M_TEMP, M_WAITOK);
	bzero(descptr, descsize);

	newvfstbl->vfc_descptr = descptr;
	newvfstbl->vfc_descsize = descsize;


	for (i= 0; i< desccount; i++ ) {
		opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
		/*
		 * Fill in the caller's pointer to the start of the i'th vector.
		 * They'll need to supply it when calling vnode_create.
		 */
		opv_desc_vector = descptr + i * vfs_opv_numops;
		*opv_desc_vector_p = opv_desc_vector;

		for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
			opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);

			/*
			 * Sanity check:  is this operation listed
			 * in the list of operations?  We check this
			 * by seeing if its offest is zero.  Since
			 * the default routine should always be listed
			 * first, it should be the only one with a zero
			 * offset.  Any other operation with a zero
			 * offset is probably not listed in
			 * vfs_op_descs, and so is probably an error.
			 *
			 * A panic here means the layer programmer
			 * has committed the all-too common bug
			 * of adding a new operation to the layer's
			 * list of vnode operations but
			 * not adding the operation to the system-wide
			 * list of supported operations.
			 */
			if (opve_descp->opve_op->vdesc_offset == 0 &&
			    opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
				printf("vfs_fsadd: operation %s not listed in %s.\n",
				       opve_descp->opve_op->vdesc_name,
				       "vfs_op_descs");
				panic("vfs_fsadd: bad operation");
			}
			/*
			 * Fill in this entry.
			 */
			opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
			    opve_descp->opve_impl;
		}


		/*
		 * Finally, go back and replace unfilled routines
		 * with their default.  (Sigh, an O(n^3) algorithm.  I
		 * could make it better, but that'd be work, and n is small.)
		 */
		opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;

		/*
		 * Force every operations vector to have a default routine.
		 */
		opv_desc_vector = *opv_desc_vector_p;
		if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
			panic("vfs_fsadd: operation vector without default routine.");
		for (j = 0; j < vfs_opv_numops; j++)
			if (opv_desc_vector[j] == NULL)
				opv_desc_vector[j] =
				    opv_desc_vector[VOFFSET(vnop_default)];

	} /* end of each vnodeopv_desc parsing */



	*handle = vfstable_add(newvfstbl);

	/* keep the dynamic type-number allocator ahead of this entry */
	if (newvfstbl->vfc_typenum <= maxvfsconf )
		maxvfsconf = newvfstbl->vfc_typenum + 1;
	numused_vfsslots++;

	if (newvfstbl->vfc_vfsops->vfs_init)
		(*newvfstbl->vfc_vfsops->vfs_init)((struct vfsconf *)handle);

	/* presumably vfstable_add() copied the entry -- the local copy is freed here */
	FREE(newvfstbl, M_TEMP);

	return(0);
}
952
/*
 * Unregister a filesystem previously added with vfs_fsadd().
 * 'handle' is the value vfs_fsadd() returned.
 *
 * Fails with EBUSY while any mounts of this FS exist (vfc_refcount),
 * or with whatever vfstable_del() returns.  On successful deletion
 * the dispatch-vector memory allocated by vfs_fsadd() is released.
 */
errno_t
vfs_fsremove(vfstable_t handle)
{
	struct vfstable * vfstbl = (struct vfstable *)handle;
	void *old_desc = NULL;
	errno_t err;

	/* Preflight check for any mounts */
	mount_list_lock();
	if ( vfstbl->vfc_refcount != 0 ) {
		mount_list_unlock();
		return EBUSY;
	}
	mount_list_unlock();

	/*
	 * save the old descriptor; the free cannot occur unconditionally,
	 * since vfstable_del() may fail.
	 */
	if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
		old_desc = vfstbl->vfc_descptr;
	}
	err = vfstable_del(vfstbl);

	/* free the descriptor if the delete was successful */
	if (err == 0 && old_desc) {
		FREE(old_desc, M_TEMP);
	}

	return(err);
}
989
990/*
991 * This returns a reference to mount_t
992 * which should be dropped using vfs_mountrele().
993 * Not doing so will leak a mountpoint
994 * and associated data structures.
995 */
996errno_t
997vfs_mountref(__unused mount_t mp ) /* gives a reference */
998{
999 return(0);
1000}
1001
1002/* This drops the reference on mount_t that was acquired */
1003errno_t
1004vfs_mountrele(__unused mount_t mp ) /* drops reference */
1005{
1006 return(0);
1007}
1008
1009int
1010vfs_context_pid(vfs_context_t context)
1011{
1012 return (context->vc_proc->p_pid);
1013}
1014
1015int
1016vfs_context_suser(vfs_context_t context)
1017{
1018 return (suser(context->vc_ucred, 0));
1019}
1020int
1021vfs_context_issignal(vfs_context_t context, sigset_t mask)
1022{
1023 if (context->vc_proc)
1024 return(proc_pendingsignals(context->vc_proc, mask));
1025 return(0);
1026}
1027
1028int
1029vfs_context_is64bit(vfs_context_t context)
1030{
1031 if (context->vc_proc)
1032 return(proc_is64bit(context->vc_proc));
1033 return(0);
1034}
1035
1036proc_t
1037vfs_context_proc(vfs_context_t context)
1038{
1039 return (context->vc_proc);
1040}
1041
/*
 * Allocate and return a new vfs_context.  When 'context' is supplied
 * its process and credential are copied (the credential gains its own
 * reference); otherwise the current process and credential are used.
 * Returns NULL if allocation fails.  Release with vfs_context_rele().
 */
vfs_context_t
vfs_context_create(vfs_context_t context)
{
	struct vfs_context * newcontext;

	newcontext = (struct vfs_context *)kalloc(sizeof(struct vfs_context));

	if (newcontext) {
		kauth_cred_t safecred;
		if (context) {
			newcontext->vc_proc = context->vc_proc;
			safecred = context->vc_ucred;
		} else {
			newcontext->vc_proc = proc_self();
			safecred = kauth_cred_get();
		}
		/* take our own reference before publishing the credential */
		if (IS_VALID_CRED(safecred))
			kauth_cred_ref(safecred);
		newcontext->vc_ucred = safecred;
		return(newcontext);
	}
	return((vfs_context_t)0);
}
1065
/*
 * Release a context allocated by vfs_context_create(): drops the
 * credential reference (when valid) and frees the structure.
 * A NULL context is tolerated.  Always returns 0.
 */
int
vfs_context_rele(vfs_context_t context)
{
	if (context) {
		if (IS_VALID_CRED(context->vc_ucred))
			kauth_cred_unref(&context->vc_ucred);
		kfree(context, sizeof(struct vfs_context));
	}
	return(0);
}
1076
1077
1078ucred_t
1079vfs_context_ucred(vfs_context_t context)
1080{
1081 return (context->vc_ucred);
1082}
1083
1084/*
1085 * Return true if the context is owned by the superuser.
1086 */
1087int
1088vfs_context_issuser(vfs_context_t context)
1089{
1090 return(context->vc_ucred->cr_uid == 0);
1091}
1092
1093
1094/* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1095
1096
1097/*
1098 * Convert between vnode types and inode formats (since POSIX.1
1099 * defines mode word of stat structure in terms of inode formats).
1100 */
1101enum vtype
1102vnode_iftovt(int mode)
1103{
1104 return(iftovt_tab[((mode) & S_IFMT) >> 12]);
1105}
1106
1107int
1108vnode_vttoif(enum vtype indx)
1109{
1110 return(vttoif_tab[(int)(indx)]);
1111}
1112
1113int
1114vnode_makeimode(int indx, int mode)
1115{
1116 return (int)(VTTOIF(indx) | (mode));
1117}
1118
1119
1120/*
1121 * vnode manipulation functions.
1122 */
1123
1124/* returns system root vnode reference; It should be dropped using vrele() */
1125vnode_t
1126vfs_rootvnode(void)
1127{
1128 int error;
1129
1130 error = vnode_get(rootvnode);
1131 if (error)
1132 return ((vnode_t)0);
1133 else
1134 return rootvnode;
1135}
1136
1137
1138uint32_t
1139vnode_vid(vnode_t vp)
1140{
1141 return ((uint32_t)(vp->v_id));
1142}
1143
1144/* returns a mount reference; drop it with vfs_mountrelease() */
1145mount_t
1146vnode_mount(vnode_t vp)
1147{
1148 return (vp->v_mount);
1149}
1150
1151/* returns a mount reference iff vnode_t is a dir and is a mount point */
1152mount_t
1153vnode_mountedhere(vnode_t vp)
1154{
1155 mount_t mp;
1156
1157 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1158 (mp->mnt_vnodecovered == vp))
1159 return (mp);
1160 else
1161 return (mount_t)NULL;
1162}
1163
1164/* returns vnode type of vnode_t */
1165enum vtype
1166vnode_vtype(vnode_t vp)
1167{
1168 return (vp->v_type);
1169}
1170
1171/* returns FS specific node saved in vnode */
1172void *
1173vnode_fsnode(vnode_t vp)
1174{
1175 return (vp->v_data);
1176}
1177
1178void
1179vnode_clearfsnode(vnode_t vp)
1180{
1181 vp->v_data = 0;
1182}
1183
1184dev_t
1185vnode_specrdev(vnode_t vp)
1186{
1187 return(vp->v_rdev);
1188}
1189
1190
1191/* Accessor functions */
1192/* is vnode_t a root vnode */
1193int
1194vnode_isvroot(vnode_t vp)
1195{
1196 return ((vp->v_flag & VROOT)? 1 : 0);
1197}
1198
1199/* is vnode_t a system vnode */
1200int
1201vnode_issystem(vnode_t vp)
1202{
1203 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1204}
1205
1206/* if vnode_t mount operation in progress */
1207int
1208vnode_ismount(vnode_t vp)
1209{
1210 return ((vp->v_flag & VMOUNT)? 1 : 0);
1211}
1212
1213/* is this vnode under recyle now */
1214int
1215vnode_isrecycled(vnode_t vp)
1216{
1217 int ret;
1218
1219 vnode_lock(vp);
1220 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1221 vnode_unlock(vp);
1222 return(ret);
1223}
1224
1225/* is vnode_t marked to not keep data cached once it's been consumed */
1226int
1227vnode_isnocache(vnode_t vp)
1228{
1229 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1230}
1231
1232/*
1233 * has sequential readahead been disabled on this vnode
1234 */
1235int
1236vnode_isnoreadahead(vnode_t vp)
1237{
1238 return ((vp->v_flag & VRAOFF)? 1 : 0);
1239}
1240
1241/* is vnode_t a standard one? */
1242int
1243vnode_isstandard(vnode_t vp)
1244{
1245 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1246}
1247
1248/* don't vflush() if SKIPSYSTEM */
1249int
1250vnode_isnoflush(vnode_t vp)
1251{
1252 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1253}
1254
1255/* is vnode_t a regular file */
1256int
1257vnode_isreg(vnode_t vp)
1258{
1259 return ((vp->v_type == VREG)? 1 : 0);
1260}
1261
1262/* is vnode_t a directory? */
1263int
1264vnode_isdir(vnode_t vp)
1265{
1266 return ((vp->v_type == VDIR)? 1 : 0);
1267}
1268
1269/* is vnode_t a symbolic link ? */
1270int
1271vnode_islnk(vnode_t vp)
1272{
1273 return ((vp->v_type == VLNK)? 1 : 0);
1274}
1275
1276/* is vnode_t a fifo ? */
1277int
1278vnode_isfifo(vnode_t vp)
1279{
1280 return ((vp->v_type == VFIFO)? 1 : 0);
1281}
1282
1283/* is vnode_t a block device? */
1284int
1285vnode_isblk(vnode_t vp)
1286{
1287 return ((vp->v_type == VBLK)? 1 : 0);
1288}
1289
1290/* is vnode_t a char device? */
1291int
1292vnode_ischr(vnode_t vp)
1293{
1294 return ((vp->v_type == VCHR)? 1 : 0);
1295}
1296
1297/* is vnode_t a socket? */
1298int
1299vnode_issock(vnode_t vp)
1300{
1301 return ((vp->v_type == VSOCK)? 1 : 0);
1302}
1303
1304
1305/* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1306void
1307vnode_setnocache(vnode_t vp)
1308{
1309 vnode_lock(vp);
1310 vp->v_flag |= VNOCACHE_DATA;
1311 vnode_unlock(vp);
1312}
1313
1314void
1315vnode_clearnocache(vnode_t vp)
1316{
1317 vnode_lock(vp);
1318 vp->v_flag &= ~VNOCACHE_DATA;
1319 vnode_unlock(vp);
1320}
1321
1322void
1323vnode_setnoreadahead(vnode_t vp)
1324{
1325 vnode_lock(vp);
1326 vp->v_flag |= VRAOFF;
1327 vnode_unlock(vp);
1328}
1329
1330void
1331vnode_clearnoreadahead(vnode_t vp)
1332{
1333 vnode_lock(vp);
1334 vp->v_flag &= ~VRAOFF;
1335 vnode_unlock(vp);
1336}
1337
1338
1339/* mark vnode_t to skip vflush() is SKIPSYSTEM */
1340void
1341vnode_setnoflush(vnode_t vp)
1342{
1343 vnode_lock(vp);
1344 vp->v_flag |= VNOFLUSH;
1345 vnode_unlock(vp);
1346}
1347
1348void
1349vnode_clearnoflush(vnode_t vp)
1350{
1351 vnode_lock(vp);
1352 vp->v_flag &= ~VNOFLUSH;
1353 vnode_unlock(vp);
1354}
1355
1356
1357/* is vnode_t a blkdevice and has a FS mounted on it */
1358int
1359vnode_ismountedon(vnode_t vp)
1360{
1361 return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
1362}
1363
1364void
1365vnode_setmountedon(vnode_t vp)
1366{
1367 vnode_lock(vp);
1368 vp->v_specflags |= SI_MOUNTEDON;
1369 vnode_unlock(vp);
1370}
1371
1372void
1373vnode_clearmountedon(vnode_t vp)
1374{
1375 vnode_lock(vp);
1376 vp->v_specflags &= ~SI_MOUNTEDON;
1377 vnode_unlock(vp);
1378}
1379
1380
1381void
1382vnode_settag(vnode_t vp, int tag)
1383{
1384 vp->v_tag = tag;
1385
1386}
1387
1388int
1389vnode_tag(vnode_t vp)
1390{
1391 return(vp->v_tag);
1392}
1393
1394vnode_t
1395vnode_parent(vnode_t vp)
1396{
1397
1398 return(vp->v_parent);
1399}
1400
1401void
1402vnode_setparent(vnode_t vp, vnode_t dvp)
1403{
1404 vp->v_parent = dvp;
1405}
1406
1407char *
1408vnode_name(vnode_t vp)
1409{
1410 /* we try to keep v_name a reasonable name for the node */
1411 return(vp->v_name);
1412}
1413
1414void
1415vnode_setname(vnode_t vp, char * name)
1416{
1417 vp->v_name = name;
1418}
1419
1420/* return the registered FS name when adding the FS to kernel */
1421void
1422vnode_vfsname(vnode_t vp, char * buf)
1423{
1424 strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
1425}
1426
1427/* return the FS type number */
1428int
1429vnode_vfstypenum(vnode_t vp)
1430{
1431 return(vp->v_mount->mnt_vtable->vfc_typenum);
1432}
1433
1434int
1435vnode_vfs64bitready(vnode_t vp)
1436{
1437
1438 if ((vp->v_mount->mnt_vtable->vfc_64bitready))
1439 return(1);
1440 else
1441 return(0);
1442}
1443
1444
1445
1446/* return the visible flags on associated mount point of vnode_t */
1447uint32_t
1448vnode_vfsvisflags(vnode_t vp)
1449{
1450 return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
1451}
1452
1453/* return the command modifier flags on associated mount point of vnode_t */
1454uint32_t
1455vnode_vfscmdflags(vnode_t vp)
1456{
1457 return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
1458}
1459
1460/* return the max symlink of short links of vnode_t */
1461uint32_t
1462vnode_vfsmaxsymlen(vnode_t vp)
1463{
1464 return(vp->v_mount->mnt_maxsymlinklen);
1465}
1466
1467/* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1468struct vfsstatfs *
1469vnode_vfsstatfs(vnode_t vp)
1470{
1471 return(&vp->v_mount->mnt_vfsstat);
1472}
1473
1474/* return a handle to the FSs specific private handle associated with vnode_t's mount point */
1475void *
1476vnode_vfsfsprivate(vnode_t vp)
1477{
1478 return(vp->v_mount->mnt_data);
1479}
1480
1481/* is vnode_t in a rdonly mounted FS */
1482int
1483vnode_vfsisrdonly(vnode_t vp)
1484{
1485 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
1486}
1487
1488
1489/* returns vnode ref to current working directory */
1490vnode_t
1491current_workingdir(void)
1492{
1493 struct proc *p = current_proc();
1494 struct vnode * vp ;
1495
1496 if ( (vp = p->p_fd->fd_cdir) ) {
1497 if ( (vnode_getwithref(vp)) )
1498 return (NULL);
1499 }
1500 return vp;
1501}
1502
1503/* returns vnode ref to current root(chroot) directory */
1504vnode_t
1505current_rootdir(void)
1506{
1507 struct proc *p = current_proc();
1508 struct vnode * vp ;
1509
1510 if ( (vp = p->p_fd->fd_rdir) ) {
1511 if ( (vnode_getwithref(vp)) )
1512 return (NULL);
1513 }
1514 return vp;
1515}
1516
0c530ab8
A
1517/*
1518 * Get a filesec and optional acl contents from an extended attribute.
1519 * Function will attempt to retrive ACL, UUID, and GUID information using a
1520 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
1521 *
1522 * Parameters: vp The vnode on which to operate.
1523 * fsecp The filesec (and ACL, if any) being
1524 * retrieved.
1525 * ctx The vnode context in which the
1526 * operation is to be attempted.
1527 *
1528 * Returns: 0 Success
1529 * !0 errno value
1530 *
1531 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
1532 * host byte order, as will be the ACL contents, if any.
1533 * Internally, we will cannonize these values from network (PPC)
1534 * byte order after we retrieve them so that the on-disk contents
1535 * of the extended attribute are identical for both PPC and Intel
1536 * (if we were not being required to provide this service via
1537 * fallback, this would be the job of the filesystem
1538 * 'VNOP_GETATTR' call).
1539 *
1540 * We use ntohl() because it has a transitive property on Intel
1541 * machines and no effect on PPC mancines. This guarantees us
1542 *
1543 * XXX: Deleting rather than ignoreing a corrupt security structure is
1544 * probably the only way to reset it without assistance from an
1545 * file system integrity checking tool. Right now we ignore it.
1546 *
1547 * XXX: We should enummerate the possible errno values here, and where
1548 * in the code they originated.
1549 */
91447636
A
1550static int
1551vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
1552{
1553 kauth_filesec_t fsec;
1554 uio_t fsec_uio;
1555 size_t fsec_size;
1556 size_t xsize, rsize;
1557 int error;
0c530ab8
A
1558 int i;
1559 uint32_t host_fsec_magic;
1560 uint32_t host_acl_entrycount;
91447636
A
1561
1562 fsec = NULL;
1563 fsec_uio = NULL;
1564 error = 0;
1565
1566 /* find out how big the EA is */
1567 if (vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx) != 0) {
1568 /* no EA, no filesec */
1569 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
1570 error = 0;
1571 /* either way, we are done */
1572 goto out;
1573 }
0c530ab8
A
1574
1575 /*
1576 * To be valid, a kauth_filesec_t must be large enough to hold a zero
1577 * ACE entrly ACL, and if it's larger than that, it must have the right
1578 * number of bytes such that it contains an atomic number of ACEs,
1579 * rather than partial entries. Otherwise, we ignore it.
1580 */
1581 if (!KAUTH_FILESEC_VALID(xsize)) {
1582 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize);
1583 error = 0;
1584 goto out;
1585 }
91447636
A
1586
1587 /* how many entries would fit? */
1588 fsec_size = KAUTH_FILESEC_COUNT(xsize);
1589
1590 /* get buffer and uio */
1591 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
1592 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
1593 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
1594 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
1595 error = ENOMEM;
1596 goto out;
1597 }
1598
1599 /* read security attribute */
1600 rsize = xsize;
1601 if ((error = vn_getxattr(vp,
1602 KAUTH_FILESEC_XATTR,
1603 fsec_uio,
1604 &rsize,
1605 XATTR_NOSECURITY,
1606 ctx)) != 0) {
1607
1608 /* no attribute - no security data */
1609 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
1610 error = 0;
1611 /* either way, we are done */
1612 goto out;
1613 }
1614
1615 /*
0c530ab8
A
1616 * Validate security structure; the validation must take place in host
1617 * byte order. If it's corrupt, we will just ignore it.
91447636 1618 */
0c530ab8
A
1619
1620 /* Validate the size before trying to convert it */
91447636
A
1621 if (rsize < KAUTH_FILESEC_SIZE(0)) {
1622 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
1623 goto out;
1624 }
0c530ab8
A
1625
1626 /* Validate the magic number before trying to convert it */
1627 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
1628 if (fsec->fsec_magic != host_fsec_magic) {
1629 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
6601e61a
A
1630 goto out;
1631 }
0c530ab8
A
1632
1633 /* Validate the entry count before trying to convert it. */
1634 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
1635 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
1636 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
1637 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
1638 goto out;
1639 }
1640 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
1641 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
1642 goto out;
1643 }
91447636 1644 }
4452a7af 1645
0c530ab8
A
1646 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
1647
91447636
A
1648 *fsecp = fsec;
1649 fsec = NULL;
1650 error = 0;
1651out:
1652 if (fsec != NULL)
1653 kauth_filesec_free(fsec);
1654 if (fsec_uio != NULL)
1655 uio_free(fsec_uio);
1656 if (error)
1657 *fsecp = NULL;
1658 return(error);
1659}
1660
0c530ab8
A
/*
 * Set a filesec and optional acl contents into an extended attribute.
 * function will attempt to store ACL, UUID, and GUID information using a
 * write to a named extended attribute (KAUTH_FILESEC_XATTR).  The 'acl'
 * may or may not point to the `fsec->fsec_acl`, depending on whether the
 * original caller supplied an acl.
 *
 * Parameters:	vp	The vnode on which to operate.
 *		fsec	The filesec being set.
 *		acl	The acl to be associated with 'fsec'.
 *		ctx	The vnode context in which the
 *			operation is to be attempted.
 *
 * Returns:	0	Success
 *		!0	errno value
 *
 * Notes:	Both the fsec and the acl are always valid.
 *
 *		The kauth_filesec_t in 'fsec', if any, is in host byte order,
 *		as are the acl contents, if they are used.  Internally, we will
 *		cannonize these values into network (PPC) byte order before we
 *		attempt to write them so that the on-disk contents of the
 *		extended attribute are identical for both PPC and Intel (if we
 *		were not being required to provide this service via fallback,
 *		this would be the job of the filesystem 'VNOP_SETATTR' call).
 *		We reverse this process on the way out, so we leave with the
 *		same byte order we started with.
 *
 * XXX:		We should enummerate the possible errno values here, and where
 *		in the code they originated.
 */
static int
vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
{
	uio_t	fsec_uio;
	int	error;
	int	i;	/* NOTE(review): unused */
	uint32_t	saved_acl_copysize;

	fsec_uio = NULL;

	/* two iovecs: the filesec header and the (possibly separate) acl */
	if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
		KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
		error = ENOMEM;
		goto out;
	}
	/*
	 * Save the pre-converted ACL copysize, because it gets swapped too
	 * if we are running with the wrong endianness.
	 */
	saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);

	/* swap fsec and acl into on-disk (network) byte order, in place */
	kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);

	/* write the header minus its embedded kauth_acl, then the acl itself */
	uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), sizeof(struct kauth_filesec) - sizeof(struct kauth_acl));
	uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);

	error = vn_setxattr(vp,
	    KAUTH_FILESEC_XATTR,
	    fsec_uio,
	    XATTR_NOSECURITY, 	 	/* we have auth'ed already */
	    ctx);
	VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);

	/* swap back to host order so the caller's data is left unchanged */
	kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);

out:
	if (fsec_uio != NULL)
		uio_free(fsec_uio);
	return(error);
}
1731
1732
/*
 * Get attributes for a vnode, filling in KPI-level fallbacks for anything
 * the filesystem's VNOP_GETATTR did not supply: extended security data
 * (ACL/UUIDs) via the filesec EA, uid/gid substitution for
 * MNT_IGNORE_OWNERSHIP volumes, and synthesised size/time defaults.
 *
 * Parameters:	vp	The vnode whose attributes to get.
 *		vap	The requested attributes (in/out).
 *		ctx	The vnode context for the operation.
 *
 * Returns:	0	Success
 *		!0	errno value
 */
int
vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	kauth_acl_t facl;
	int	error;
	uid_t	nuid;
	gid_t	ngid;

	/* don't ask for extended security data if the filesystem doesn't support it */
	if (!vfs_extendedsecurity(vnode_mount(vp))) {
		VATTR_CLEAR_ACTIVE(vap, va_acl);
		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
		VATTR_CLEAR_ACTIVE(vap, va_guuid);
	}

	/*
	 * If the caller wants size values we might have to synthesise, give the
	 * filesystem the opportunity to supply better intermediate results.
	 */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		VATTR_SET_ACTIVE(vap, va_data_size);
		VATTR_SET_ACTIVE(vap, va_data_alloc);
		VATTR_SET_ACTIVE(vap, va_total_size);
		VATTR_SET_ACTIVE(vap, va_total_alloc);
	}

	error = VNOP_GETATTR(vp, vap, ctx);
	if (error) {
		KAUTH_DEBUG("ERROR - returning %d", error);
		goto out;
	}

	/*
	 * If extended security data was requested but not returned, try the fallback
	 * path.
	 */
	if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
		fsec = NULL;

		if ((vp->v_type == VDIR) || (vp->v_type == VLNK) || (vp->v_type == VREG)) {
			/* try to get the filesec */
			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
				goto out;
		}
		/* if no filesec, no attributes */
		if (fsec == NULL) {
			VATTR_RETURN(vap, va_acl, NULL);
			VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
			VATTR_RETURN(vap, va_guuid, kauth_null_guid);
		} else {

			/* looks good, try to return what we were asked for */
			VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
			VATTR_RETURN(vap, va_guuid, fsec->fsec_group);

			/* only return the ACL if we were actually asked for it */
			if (VATTR_IS_ACTIVE(vap, va_acl)) {
				if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
					VATTR_RETURN(vap, va_acl, NULL);
				} else {
					/* caller owns the copy; freed via kauth_acl_free() */
					facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
					if (facl == NULL) {
						kauth_filesec_free(fsec);
						error = ENOMEM;
						goto out;
					}
					bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
					VATTR_RETURN(vap, va_acl, facl);
				}
			}
			kauth_filesec_free(fsec);
		}
	}
	/*
	 * If someone gave us an unsolicited filesec, toss it.  We promise that
	 * we're OK with a filesystem giving us anything back, but our callers
	 * only expect what they asked for.
	 */
	if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
		if (vap->va_acl != NULL)
			kauth_acl_free(vap->va_acl);
		VATTR_CLEAR_SUPPORTED(vap, va_acl);
	}

#if 0	/* enable when we have a filesystem only supporting UUIDs */
	/*
	 * Handle the case where we need a UID/GID, but only have extended
	 * security information.
	 */
	if (VATTR_NOT_RETURNED(vap, va_uid) &&
	    VATTR_IS_SUPPORTED(vap, va_uuuid) &&
	    !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
			VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_NOT_RETURNED(vap, va_gid) &&
	    VATTR_IS_SUPPORTED(vap, va_guuid) &&
	    !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
			VATTR_RETURN(vap, va_gid, ngid);
	}
#endif

	/*
	 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
	 */
	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			nuid = vp->v_mount->mnt_fsowner;
			if (nuid == KAUTH_UID_NONE)
				nuid = 99;
		} else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
			nuid = vap->va_uid;
		} else {
			/* this will always be something sensible */
			nuid = vp->v_mount->mnt_fsowner;
		}
		if ((nuid == 99) && !vfs_context_issuser(ctx))
			nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
		VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid)) {
		if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			ngid = vp->v_mount->mnt_fsgroup;
			if (ngid == KAUTH_GID_NONE)
				ngid = 99;
		} else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
			ngid = vap->va_gid;
		} else {
			/* this will always be something sensible */
			ngid = vp->v_mount->mnt_fsgroup;
		}
		if ((ngid == 99) && !vfs_context_issuser(ctx))
			ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
		VATTR_RETURN(vap, va_gid, ngid);
	}

	/*
	 * Synthesise some values that can be reasonably guessed.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_iosize))
		VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);

	if (!VATTR_IS_SUPPORTED(vap, va_flags))
		VATTR_RETURN(vap, va_flags, 0);

	if (!VATTR_IS_SUPPORTED(vap, va_filerev))
		VATTR_RETURN(vap, va_filerev, 0);

	if (!VATTR_IS_SUPPORTED(vap, va_gen))
		VATTR_RETURN(vap, va_gen, 0);

	/*
	 * Default sizes.  Ordering here is important, as later defaults build on earlier ones.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_data_size))
		VATTR_RETURN(vap, va_data_size, 0);

	/* do we want any of the possibly-computed values? */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		/* make sure f_bsize is valid */
		if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
			if ((error = vfs_update_vfsstat(vp->v_mount, ctx)) != 0)
				goto out;
		}

		/* default va_data_alloc from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
			VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));

		/* default va_total_size from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_total_size))
			VATTR_RETURN(vap, va_total_size, vap->va_data_size);

		/* default va_total_alloc from va_total_size which is guaranteed at this point */
		if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
			VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
	}

	/*
	 * If we don't have a change time, pull it from the modtime.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
		VATTR_RETURN(vap, va_change_time, vap->va_modify_time);

	/*
	 * This is really only supported for the creation VNOPs, but since the field is there
	 * we should populate it correctly.
	 */
	VATTR_RETURN(vap, va_type, vp->v_type);

	/*
	 * The fsid can be obtained from the mountpoint directly.
	 */
	VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

out:

	return(error);
}
1938
0c530ab8
A
/*
 * Set the attributes on a vnode in a vnode context.
 *
 * Parameters:	vp	The vnode whose attributes to set.
 *		vap	A pointer to the attributes to set.
 *		ctx	The vnode context in which the
 *			operation is to be attempted.
 *
 * Returns:	0	Success
 *		!0	errno value
 *
 * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order.
 *
 *		The contents of the data area pointed to by 'vap' may be
 *		modified if the vnode is on a filesystem which has been
 *		mounted with ingore ownership flags, or by the underlyng
 *		VFS itself, or by the fallback code, if the underlying VFS
 *		does not support ACL, UUID, or GUUID attributes directly.
 *
 * XXX:		We should enummerate the possible errno values here, and where
 *		in the code they originated.
 */
int
vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	int	error, is_ownership_change=0;

	/*
	 * Make sure the filesystem is mounted R/W.
	 * If not, return an error.
	 */
	if (vfs_isrdonly(vp->v_mount)) {
		error = EROFS;
		goto out;
	}

	/*
	 * If ownership is being ignored on this volume, we silently discard
	 * ownership changes.
	 */
	if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}

	/* remember whether this was a chown, for fsevent selection below */
	if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)) {
		is_ownership_change = 1;
	}

	/*
	 * Make sure that extended security is enabled if we're going to try
	 * to set any.
	 */
	if (!vfs_extendedsecurity(vnode_mount(vp)) &&
	    (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
		KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
		error = ENOTSUP;
		goto out;
	}

	error = VNOP_SETATTR(vp, vap, ctx);

	/* store via the filesec EA anything the filesystem didn't handle */
	if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
		error = vnode_setattr_fallback(vp, vap, ctx);

	/*
	 * If we have changed any of the things about the file that are likely
	 * to result in changes to authorisation results, blow the vnode auth
	 * cache
	 */
	if (VATTR_IS_SUPPORTED(vap, va_mode) ||
	    VATTR_IS_SUPPORTED(vap, va_uid) ||
	    VATTR_IS_SUPPORTED(vap, va_gid) ||
	    VATTR_IS_SUPPORTED(vap, va_flags) ||
	    VATTR_IS_SUPPORTED(vap, va_acl) ||
	    VATTR_IS_SUPPORTED(vap, va_uuuid) ||
	    VATTR_IS_SUPPORTED(vap, va_guuid))
		vnode_uncache_credentials(vp);
	// only send a stat_changed event if this is more than
	// just an access time update
	if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time))) {
		if (need_fsevent(FSE_STAT_CHANGED, vp) || (is_ownership_change && need_fsevent(FSE_CHOWN, vp))) {
			if (is_ownership_change == 0)
				add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
			else
				add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
		}
	}

out:
	return(error);
}
2031
/*
 * Fallback for setting the attributes on a vnode in a vnode context.  This
 * Function will attempt to store ACL, UUID, and GUID information utilizing
 * a read/modify/write operation against an EA used as a backing store for
 * the object.
 *
 * Parameters:	vp	The vnode whose attributes to set.
 *		vap	A pointer to the attributes to set.
 *		ctx	The vnode context in which the
 *			operation is to be attempted.
 *
 * Returns:	0	Success
 *		!0	errno value
 *
 * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order,
 *		as are the fsec and lfsec, if they are used.
 *
 *		The contents of the data area pointed to by 'vap' may be
 *		modified to indicate that the attribute is supported for
 *		any given requested attribute.
 *
 * XXX:		We should enummerate the possible errno values here, and where
 *		in the code they originated.
 */
int
vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	kauth_acl_t facl;
	struct kauth_filesec lfsec;	/* stack-local filesec for full rewrites */
	int	error;

	error = 0;

	/*
	 * Extended security fallback via extended attributes.
	 *
	 * Note that we do not free the filesec; the caller is expected to
	 * do this.
	 */
	if (VATTR_NOT_RETURNED(vap, va_acl) ||
	    VATTR_NOT_RETURNED(vap, va_uuuid) ||
	    VATTR_NOT_RETURNED(vap, va_guuid)) {
		VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");

		/*
		 * Fail for file types that we don't permit extended security
		 * to be set on.
		 */
		if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) {
			VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
			error = EINVAL;
			goto out;
		}

		/*
		 * If we don't have all the extended security items, we need
		 * to fetch the existing data to perform a read-modify-write
		 * operation.
		 */
		fsec = NULL;
		if (!VATTR_IS_ACTIVE(vap, va_acl) ||
		    !VATTR_IS_ACTIVE(vap, va_uuuid) ||
		    !VATTR_IS_ACTIVE(vap, va_guuid)) {
			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
				KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
				goto out;
			}
		}
		/* if we didn't get a filesec, use our local one */
		if (fsec == NULL) {
			KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
			fsec = &lfsec;
		} else {
			KAUTH_DEBUG("SETATTR - updating existing filesec");
		}
		/* find the ACL (may later be redirected to the caller's acl) */
		facl = &fsec->fsec_acl;

		/* if we're using the local filesec, we need to initialise it */
		if (fsec == &lfsec) {
			fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
			fsec->fsec_owner = kauth_null_guid;
			fsec->fsec_group = kauth_null_guid;
			facl->acl_entrycount = KAUTH_FILESEC_NOACL;
			facl->acl_flags = 0;
		}

		/*
		 * Update with the supplied attributes.
		 */
		if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
			KAUTH_DEBUG("SETATTR - updating owner UUID");
			fsec->fsec_owner = vap->va_uuuid;
			VATTR_SET_SUPPORTED(vap, va_uuuid);
		}
		if (VATTR_IS_ACTIVE(vap, va_guuid)) {
			KAUTH_DEBUG("SETATTR - updating group UUID");
			fsec->fsec_group = vap->va_guuid;
			VATTR_SET_SUPPORTED(vap, va_guuid);
		}
		if (VATTR_IS_ACTIVE(vap, va_acl)) {
			if (vap->va_acl == NULL) {
				KAUTH_DEBUG("SETATTR - removing ACL");
				facl->acl_entrycount = KAUTH_FILESEC_NOACL;
			} else {
				KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
				facl = vap->va_acl;
			}
			VATTR_SET_SUPPORTED(vap, va_acl);
		}

		/*
		 * If the filesec data is all invalid, we can just remove
		 * the EA completely.
		 */
		if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
		    kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
		    kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
			error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
			/* no attribute is ok, nothing to delete */
			if (error == ENOATTR)
				error = 0;
			VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
		} else {
			/* write the EA */
			error = vnode_set_filesec(vp, fsec, facl, ctx);
			VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
		}

		/* if we fetched a filesec, dispose of the buffer */
		if (fsec != &lfsec)
			kauth_filesec_free(fsec);
	}
out:

	return(error);
}
2170
2171/*
2172 * Definition of vnode operations.
2173 */
2174
2175#if 0
2176/*
2177 *#
2178 *#% lookup dvp L ? ?
2179 *#% lookup vpp - L -
2180 */
2181struct vnop_lookup_args {
2182 struct vnodeop_desc *a_desc;
2183 vnode_t a_dvp;
2184 vnode_t *a_vpp;
2185 struct componentname *a_cnp;
2186 vfs_context_t a_context;
2187};
2188#endif /* 0*/
2189
/*
 * Dispatch the lookup VNOP on 'dvp' for the component in 'cnp'.  For
 * filesystems that are not marked thread-safe, wraps the call in the
 * funnel/fsnode locking protocol; on the last component with LOCKPARENT,
 * the fsnode lock is deliberately left held (flagged via FSNODELOCKHELD)
 * for the remainder of the syscall's path processing.
 */
errno_t
VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t context)
{
	int _err;
	struct vnop_lookup_args a;
	vnode_t vp;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_lookup_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	vnode_cache_credentials(dvp, context);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	/* indirect through the FS's operation vector */
	_err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);

	vp = *vpp;

	if (!thread_safe) {
		if ( (cnp->cn_flags & ISLASTCN) ) {
			if ( (cnp->cn_flags & LOCKPARENT) ) {
				if ( !(cnp->cn_flags & FSNODELOCKHELD) ) {
					/*
					 * leave the fsnode lock held on
					 * the directory, but restore the funnel...
					 * also indicate that we need to drop the
					 * fsnode_lock when we're done with the
					 * system call processing for this path
					 */
					cnp->cn_flags |= FSNODELOCKHELD;

					(void) thread_funnel_set(kernel_flock, funnel_state);
					return (_err);
				}
			}
		}
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
2239
2240#if 0
2241/*
2242 *#
2243 *#% create dvp L L L
2244 *#% create vpp - L -
2245 *#
2246 */
2247
2248struct vnop_create_args {
2249 struct vnodeop_desc *a_desc;
2250 vnode_t a_dvp;
2251 vnode_t *a_vpp;
2252 struct componentname *a_cnp;
2253 struct vnode_attr *a_vap;
2254 vfs_context_t a_context;
2255};
2256#endif /* 0*/
/*
 * Dispatch the create VNOP on 'dvp'.  For non-thread-safe filesystems the
 * call is bracketed by the funnel/fsnode lock.  On success, for filesystems
 * without native xattr support, any stale AppleDouble sidecar file left over
 * from a previous incarnation of the name is removed.
 */
errno_t
VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context)
{
	int _err;
	struct vnop_create_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_create_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
	}
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
2290
2291#if 0
2292/*
2293 *#
2294 *#% whiteout dvp L L L
2295 *#% whiteout cnp - - -
2296 *#% whiteout flag - - -
2297 *#
2298 */
2299struct vnop_whiteout_args {
2300 struct vnodeop_desc *a_desc;
2301 vnode_t a_dvp;
2302 struct componentname *a_cnp;
2303 int a_flags;
2304 vfs_context_t a_context;
2305};
2306#endif /* 0*/
/*
 * Dispatch the whiteout VNOP on 'dvp' for the component in 'cnp'.
 * Non-thread-safe filesystems get the funnel/fsnode lock bracket.
 */
errno_t
VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t context)
{
	int _err;
	struct vnop_whiteout_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_whiteout_desc;
	a.a_dvp = dvp;
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
2333
2334 #if 0
2335/*
2336 *#
2337 *#% mknod dvp L U U
2338 *#% mknod vpp - X -
2339 *#
2340 */
2341struct vnop_mknod_args {
2342 struct vnodeop_desc *a_desc;
2343 vnode_t a_dvp;
2344 vnode_t *a_vpp;
2345 struct componentname *a_cnp;
2346 struct vnode_attr *a_vap;
2347 vfs_context_t a_context;
2348};
2349#endif /* 0*/
/*
 * VNOP_MKNOD - dispatch a device/special-node create request to the
 * filesystem's vnop_mknod entry point for directory dvp.
 *
 * Non-threadsafe filesystems are serialized via lock_fsnode() on dvp.
 * Returns 0 on success or an errno.
 */
errno_t
VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context)
{

	int _err;
	struct vnop_mknod_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_mknod_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
2378
2379#if 0
2380/*
2381 *#
2382 *#% open vp L L L
2383 *#
2384 */
2385struct vnop_open_args {
2386 struct vnodeop_desc *a_desc;
2387 vnode_t a_vp;
2388 int a_mode;
2389 vfs_context_t a_context;
2390};
2391#endif /* 0*/
/*
 * VNOP_OPEN - dispatch an open request to the filesystem's vnop_open
 * entry point.
 *
 * A NULL context is tolerated: a temporary context is synthesized from
 * the current process and credential.  For non-threadsafe filesystems
 * the funnel is taken; the fsnode lock is additionally taken except
 * for character/fifo/socket vnodes (those types are opened without the
 * per-node lock -- presumably because their open paths re-enter the
 * VFS; NOTE(review): confirm against the device/fifo layers).
 */
errno_t
VNOP_OPEN(vnode_t vp, int mode, vfs_context_t context)
{
	int _err;
	struct vnop_open_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if (context == NULL) {
		/* build a best-effort context from the caller's identity */
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	a.a_desc = &vnop_open_desc;
	a.a_vp = vp;
	a.a_mode = mode;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			if ( (_err = lock_fsnode(vp, NULL)) ) {
				(void) thread_funnel_set(kernel_flock, funnel_state);
				return (_err);
			}
		}
	}
	_err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			unlock_fsnode(vp, NULL);
		}
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
2430
2431#if 0
2432/*
2433 *#
2434 *#% close vp U U U
2435 *#
2436 */
2437struct vnop_close_args {
2438 struct vnodeop_desc *a_desc;
2439 vnode_t a_vp;
2440 int a_fflag;
2441 vfs_context_t a_context;
2442};
2443#endif /* 0*/
/*
 * VNOP_CLOSE - dispatch a close request to the filesystem's vnop_close
 * entry point.
 *
 * Mirrors VNOP_OPEN's locking: a NULL context is replaced with one
 * built from the current process/credential; non-threadsafe
 * filesystems run under the funnel, with the fsnode lock held except
 * for character/fifo/socket vnodes.
 */
errno_t
VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t context)
{
	int _err;
	struct vnop_close_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	a.a_desc = &vnop_close_desc;
	a.a_vp = vp;
	a.a_fflag = fflag;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			if ( (_err = lock_fsnode(vp, NULL)) ) {
				(void) thread_funnel_set(kernel_flock, funnel_state);
				return (_err);
			}
		}
	}
	_err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			unlock_fsnode(vp, NULL);
		}
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
2482
2483#if 0
2484/*
2485 *#
2486 *#% access vp L L L
2487 *#
2488 */
2489struct vnop_access_args {
2490 struct vnodeop_desc *a_desc;
2491 vnode_t a_vp;
2492 int a_action;
2493 vfs_context_t a_context;
2494};
2495#endif /* 0*/
/*
 * VNOP_ACCESS - dispatch an access-check request (kauth action mask)
 * to the filesystem's vnop_access entry point.
 *
 * A NULL context is replaced with one built from the current
 * process/credential.  Non-threadsafe filesystems are serialized via
 * lock_fsnode() on vp.
 */
errno_t
VNOP_ACCESS(vnode_t vp, int action, vfs_context_t context)
{
	int _err;
	struct vnop_access_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	a.a_desc = &vnop_access_desc;
	a.a_vp = vp;
	a.a_action = action;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
2527
2528#if 0
2529/*
2530 *#
2531 *#% getattr vp = = =
2532 *#
2533 */
2534struct vnop_getattr_args {
2535 struct vnodeop_desc *a_desc;
2536 vnode_t a_vp;
2537 struct vnode_attr *a_vap;
2538 vfs_context_t a_context;
2539};
2540#endif /* 0*/
2541errno_t
2542VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context)
2543{
2544 int _err;
2545 struct vnop_getattr_args a;
2546 int thread_safe;
2547 int funnel_state;
2548
2549 a.a_desc = &vnop_getattr_desc;
2550 a.a_vp = vp;
2551 a.a_vap = vap;
2552 a.a_context = context;
2553 thread_safe = THREAD_SAFE_FS(vp);
2554
2555 if (!thread_safe) {
2556 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2557 return (_err);
2558 }
2559 }
2560 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
2561 if (!thread_safe) {
2562 unlock_fsnode(vp, &funnel_state);
2563 }
2564 return (_err);
2565}
2566
2567#if 0
2568/*
2569 *#
2570 *#% setattr vp L L L
2571 *#
2572 */
2573struct vnop_setattr_args {
2574 struct vnodeop_desc *a_desc;
2575 vnode_t a_vp;
2576 struct vnode_attr *a_vap;
2577 vfs_context_t a_context;
2578};
2579#endif /* 0*/
2580errno_t
2581VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context)
2582{
2583 int _err;
2584 struct vnop_setattr_args a;
2585 int thread_safe;
2586 int funnel_state;
2587
2588 a.a_desc = &vnop_setattr_desc;
2589 a.a_vp = vp;
2590 a.a_vap = vap;
2591 a.a_context = context;
2592 thread_safe = THREAD_SAFE_FS(vp);
2593
2594 if (!thread_safe) {
2595 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2596 return (_err);
2597 }
2598 }
2599 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
2600
2601 /*
2602 * Shadow uid/gid/mod change to extended attibute file.
2603 */
2604 if (_err == 0 && !NATIVE_XATTR(vp)) {
2605 struct vnode_attr va;
2606 int change = 0;
2607
2608 VATTR_INIT(&va);
2609 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2610 VATTR_SET(&va, va_uid, vap->va_uid);
2611 change = 1;
2612 }
2613 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2614 VATTR_SET(&va, va_gid, vap->va_gid);
2615 change = 1;
2616 }
2617 if (VATTR_IS_ACTIVE(vap, va_mode)) {
2618 VATTR_SET(&va, va_mode, vap->va_mode);
2619 change = 1;
2620 }
2621 if (change) {
2622 vnode_t dvp;
2623 char *vname;
2624
2625 dvp = vnode_getparent(vp);
2626 vname = vnode_getname(vp);
2627
2628 xattrfile_setattr(dvp, vname, &va, context, thread_safe);
2629 if (dvp != NULLVP)
2630 vnode_put(dvp);
2631 if (vname != NULL)
2632 vnode_putname(vname);
2633 }
2634 }
2635 if (!thread_safe) {
2636 unlock_fsnode(vp, &funnel_state);
2637 }
2638 return (_err);
2639}
2640
2641#if 0
2642/*
2643 *#
2644 *#% getattrlist vp = = =
2645 *#
2646 */
2647struct vnop_getattrlist_args {
2648 struct vnodeop_desc *a_desc;
2649 vnode_t a_vp;
2650 struct attrlist *a_alist;
2651 struct uio *a_uio;
2652 int a_options;
2653 vfs_context_t a_context;
2654};
2655#endif /* 0*/
/*
 * VNOP_GETATTRLIST - dispatch a getattrlist request (attribute list
 * read into the supplied uio) to the filesystem's vnop_getattrlist
 * entry point.
 *
 * Non-threadsafe filesystems are serialized via lock_fsnode() on vp.
 */
errno_t
VNOP_GETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context)
{
	int _err;
	struct vnop_getattrlist_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_getattrlist_desc;
	a.a_vp = vp;
	a.a_alist = alist;
	a.a_uio = uio;
	a.a_options = options;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_getattrlist_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
2683
2684#if 0
2685/*
2686 *#
2687 *#% setattrlist vp L L L
2688 *#
2689 */
2690struct vnop_setattrlist_args {
2691 struct vnodeop_desc *a_desc;
2692 vnode_t a_vp;
2693 struct attrlist *a_alist;
2694 struct uio *a_uio;
2695 int a_options;
2696 vfs_context_t a_context;
2697};
2698#endif /* 0*/
/*
 * VNOP_SETATTRLIST - dispatch a setattrlist request (attribute list
 * written from the supplied uio) to the filesystem's vnop_setattrlist
 * entry point.
 *
 * The cached access credentials on vp are dropped unconditionally
 * after the call -- attributes may have changed in a way that
 * invalidates prior access decisions, even on error.
 *
 * Non-threadsafe filesystems are serialized via lock_fsnode() on vp.
 */
errno_t
VNOP_SETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context)
{
	int _err;
	struct vnop_setattrlist_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_setattrlist_desc;
	a.a_vp = vp;
	a.a_alist = alist;
	a.a_uio = uio;
	a.a_options = options;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_setattrlist_desc.vdesc_offset])(&a);

	vnode_uncache_credentials(vp);

	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
2729
2730
2731#if 0
2732/*
2733 *#
2734 *#% read vp L L L
2735 *#
2736 */
2737struct vnop_read_args {
2738 struct vnodeop_desc *a_desc;
2739 vnode_t a_vp;
2740 struct uio *a_uio;
2741 int a_ioflag;
2742 vfs_context_t a_context;
2743};
2744#endif /* 0*/
/*
 * VNOP_READ - dispatch a read request to the filesystem's vnop_read
 * entry point; data is transferred into the supplied uio.
 *
 * A NULL context is replaced with one built from the current
 * process/credential.  Locking mirrors VNOP_OPEN/VNOP_CLOSE:
 * funnel always (for non-threadsafe FS), fsnode lock except for
 * character/fifo/socket vnodes.
 */
errno_t
VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context)
{
	int _err;
	struct vnop_read_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}

	a.a_desc = &vnop_read_desc;
	a.a_vp = vp;
	a.a_uio = uio;
	a.a_ioflag = ioflag;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			if ( (_err = lock_fsnode(vp, NULL)) ) {
				(void) thread_funnel_set(kernel_flock, funnel_state);
				return (_err);
			}
		}
	}
	_err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);

	if (!thread_safe) {
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			unlock_fsnode(vp, NULL);
		}
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
2786
2787
2788#if 0
2789/*
2790 *#
2791 *#% write vp L L L
2792 *#
2793 */
2794struct vnop_write_args {
2795 struct vnodeop_desc *a_desc;
2796 vnode_t a_vp;
2797 struct uio *a_uio;
2798 int a_ioflag;
2799 vfs_context_t a_context;
2800};
2801#endif /* 0*/
/*
 * VNOP_WRITE - dispatch a write request to the filesystem's vnop_write
 * entry point; data is transferred from the supplied uio.
 *
 * A NULL context is replaced with one built from the current
 * process/credential.  Locking mirrors VNOP_READ: funnel always
 * (for non-threadsafe FS), fsnode lock except for character/fifo/
 * socket vnodes.
 */
errno_t
VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context)
{
	struct vnop_write_args a;
	int _err;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}

	a.a_desc = &vnop_write_desc;
	a.a_vp = vp;
	a.a_uio = uio;
	a.a_ioflag = ioflag;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			if ( (_err = lock_fsnode(vp, NULL)) ) {
				(void) thread_funnel_set(kernel_flock, funnel_state);
				return (_err);
			}
		}
	}
	_err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);

	if (!thread_safe) {
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			unlock_fsnode(vp, NULL);
		}
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
2843
2844
2845#if 0
2846/*
2847 *#
2848 *#% ioctl vp U U U
2849 *#
2850 */
2851struct vnop_ioctl_args {
2852 struct vnodeop_desc *a_desc;
2853 vnode_t a_vp;
2854 u_long a_command;
2855 caddr_t a_data;
2856 int a_fflag;
2857 vfs_context_t a_context;
2858};
2859#endif /* 0*/
/*
 * VNOP_IOCTL - dispatch an ioctl request to the filesystem's
 * vnop_ioctl entry point.
 *
 * A NULL context is replaced with one built from the current
 * process/credential.  A 64-bit caller is rejected with ENOTTY
 * unless the mounted filesystem has declared itself 64-bit ready,
 * since the ioctl payload may contain pointers whose size differs.
 * Locking mirrors VNOP_OPEN: funnel always (for non-threadsafe FS),
 * fsnode lock except for character/fifo/socket vnodes.
 */
errno_t
VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t context)
{
	int _err;
	struct vnop_ioctl_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}

	if (vfs_context_is64bit(context)) {
		if (!vnode_vfs64bitready(vp)) {
			return(ENOTTY);
		}
	}

	a.a_desc = &vnop_ioctl_desc;
	a.a_vp = vp;
	a.a_command = command;
	a.a_data = data;
	a.a_fflag = fflag;
	a.a_context= context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			if ( (_err = lock_fsnode(vp, NULL)) ) {
				(void) thread_funnel_set(kernel_flock, funnel_state);
				return (_err);
			}
		}
	}
	_err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			unlock_fsnode(vp, NULL);
		}
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
2907
2908
2909#if 0
2910/*
2911 *#
2912 *#% select vp U U U
2913 *#
2914 */
2915struct vnop_select_args {
2916 struct vnodeop_desc *a_desc;
2917 vnode_t a_vp;
2918 int a_which;
2919 int a_fflags;
2920 void *a_wql;
2921 vfs_context_t a_context;
2922};
2923#endif /* 0*/
/*
 * VNOP_SELECT - dispatch a select/poll readiness query to the
 * filesystem's vnop_select entry point.
 *
 * A NULL context is replaced with one built from the current
 * process/credential.  Locking mirrors VNOP_OPEN: funnel always
 * (for non-threadsafe FS), fsnode lock except for character/fifo/
 * socket vnodes.
 */
errno_t
VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t context)
{
	int _err;
	struct vnop_select_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	a.a_desc = &vnop_select_desc;
	a.a_vp = vp;
	a.a_which = which;
	a.a_fflags = fflags;
	a.a_context = context;
	a.a_wql = wql;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			if ( (_err = lock_fsnode(vp, NULL)) ) {
				(void) thread_funnel_set(kernel_flock, funnel_state);
				return (_err);
			}
		}
	}
	_err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			unlock_fsnode(vp, NULL);
		}
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
2964
2965
2966#if 0
2967/*
2968 *#
2969 *#% exchange fvp L L L
2970 *#% exchange tvp L L L
2971 *#
2972 */
2973struct vnop_exchange_args {
2974 struct vnodeop_desc *a_desc;
2975 vnode_t a_fvp;
2976 vnode_t a_tvp;
2977 int a_options;
2978 vfs_context_t a_context;
2979};
2980#endif /* 0*/
/*
 * VNOP_EXCHANGE - dispatch an exchangedata request (atomically swap
 * the contents of fvp and tvp) to the filesystem's vnop_exchange
 * entry point.
 *
 * For non-threadsafe filesystems both fsnodes are locked, in vnode
 * address order, to avoid lock-order deadlocks with a concurrent
 * exchange of the same pair in the opposite direction.
 *
 * NOTE(review): only fvp's mount is consulted via THREAD_SAFE_FS --
 * presumably both vnodes are required to be on the same mount for
 * exchange; confirm against the exchangedata() system-call path.
 */
errno_t
VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t context)
{
	int _err;
	struct vnop_exchange_args a;
	int thread_safe;
	int funnel_state = 0;
	vnode_t lock_first = NULL, lock_second = NULL;

	a.a_desc = &vnop_exchange_desc;
	a.a_fvp = fvp;
	a.a_tvp = tvp;
	a.a_options = options;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(fvp);

	if (!thread_safe) {
		/*
		 * Lock in vnode address order to avoid deadlocks
		 */
		if (fvp < tvp) {
			lock_first = fvp;
			lock_second = tvp;
		} else {
			lock_first = tvp;
			lock_second = fvp;
		}
		if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) {
			return (_err);
		}
		if ( (_err = lock_fsnode(lock_second, NULL)) ) {
			unlock_fsnode(lock_first, &funnel_state);
			return (_err);
		}
	}
	_err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		/* release in reverse acquisition order */
		unlock_fsnode(lock_second, NULL);
		unlock_fsnode(lock_first, &funnel_state);
	}
	return (_err);
}
3023
3024
3025#if 0
3026/*
3027 *#
3028 *#% revoke vp U U U
3029 *#
3030 */
3031struct vnop_revoke_args {
3032 struct vnodeop_desc *a_desc;
3033 vnode_t a_vp;
3034 int a_flags;
3035 vfs_context_t a_context;
3036};
3037#endif /* 0*/
3038errno_t
3039VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t context)
3040{
3041 struct vnop_revoke_args a;
3042 int _err;
3043 int thread_safe;
3044 int funnel_state = 0;
3045
3046 a.a_desc = &vnop_revoke_desc;
3047 a.a_vp = vp;
3048 a.a_flags = flags;
3049 a.a_context = context;
3050 thread_safe = THREAD_SAFE_FS(vp);
3051
3052 if (!thread_safe) {
3053 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3054 }
3055 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3056 if (!thread_safe) {
3057 (void) thread_funnel_set(kernel_flock, funnel_state);
3058 }
3059 return (_err);
3060}
3061
3062
3063#if 0
3064/*
3065 *#
3066 *# mmap - vp U U U
3067 *#
3068 */
3069struct vnop_mmap_args {
3070 struct vnodeop_desc *a_desc;
3071 vnode_t a_vp;
3072 int a_fflags;
3073 vfs_context_t a_context;
3074};
3075#endif /* 0*/
3076errno_t
3077VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t context)
3078{
3079 int _err;
3080 struct vnop_mmap_args a;
3081 int thread_safe;
3082 int funnel_state = 0;
3083
3084 a.a_desc = &vnop_mmap_desc;
3085 a.a_vp = vp;
3086 a.a_fflags = fflags;
3087 a.a_context = context;
3088 thread_safe = THREAD_SAFE_FS(vp);
3089
3090 if (!thread_safe) {
3091 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3092 return (_err);
3093 }
3094 }
3095 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3096 if (!thread_safe) {
3097 unlock_fsnode(vp, &funnel_state);
3098 }
3099 return (_err);
3100}
3101
3102
3103#if 0
3104/*
3105 *#
3106 *# mnomap - vp U U U
3107 *#
3108 */
3109struct vnop_mnomap_args {
3110 struct vnodeop_desc *a_desc;
3111 vnode_t a_vp;
3112 vfs_context_t a_context;
3113};
3114#endif /* 0*/
3115errno_t
3116VNOP_MNOMAP(vnode_t vp, vfs_context_t context)
3117{
3118 int _err;
3119 struct vnop_mnomap_args a;
3120 int thread_safe;
3121 int funnel_state = 0;
3122
3123 a.a_desc = &vnop_mnomap_desc;
3124 a.a_vp = vp;
3125 a.a_context = context;
3126 thread_safe = THREAD_SAFE_FS(vp);
3127
3128 if (!thread_safe) {
3129 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3130 return (_err);
3131 }
3132 }
3133 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
3134 if (!thread_safe) {
3135 unlock_fsnode(vp, &funnel_state);
3136 }
3137 return (_err);
3138}
3139
3140
3141#if 0
3142/*
3143 *#
3144 *#% fsync vp L L L
3145 *#
3146 */
3147struct vnop_fsync_args {
3148 struct vnodeop_desc *a_desc;
3149 vnode_t a_vp;
3150 int a_waitfor;
3151 vfs_context_t a_context;
3152};
3153#endif /* 0*/
/*
 * VNOP_FSYNC - dispatch an fsync request (flush dirty data/metadata,
 * with the given waitfor policy) to the filesystem's vnop_fsync
 * entry point.
 *
 * Non-threadsafe filesystems are serialized via lock_fsnode() on vp.
 */
errno_t
VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t context)
{
	struct vnop_fsync_args a;
	int _err;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_fsync_desc;
	a.a_vp = vp;
	a.a_waitfor = waitfor;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
3179
3180
3181#if 0
3182/*
3183 *#
3184 *#% remove dvp L U U
3185 *#% remove vp L U U
3186 *#
3187 */
3188struct vnop_remove_args {
3189 struct vnodeop_desc *a_desc;
3190 vnode_t a_dvp;
3191 vnode_t a_vp;
3192 struct componentname *a_cnp;
3193 int a_flags;
3194 vfs_context_t a_context;
3195};
3196#endif /* 0*/
/*
 * VNOP_REMOVE - dispatch a file-unlink request to the filesystem's
 * vnop_remove entry point (remove vp, named by cnp, from dvp).
 *
 * On success, vp is marked need-inactive so VNOP_INACTIVE runs when
 * the last reference drops, and on filesystems without native
 * extended attributes the associated "._" AppleDouble file is
 * force-removed as well.
 *
 * NOTE(review): thread-safety is tested on dvp but lock_fsnode() is
 * taken on vp -- presumably equivalent since both live on the same
 * mount and the lock serializes the whole fsnode; confirm against
 * lock_fsnode()'s definition earlier in this file.
 */
errno_t
VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t context)
{
	int _err;
	struct vnop_remove_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_remove_desc;
	a.a_dvp = dvp;
	a.a_vp = vp;
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);

	if (_err == 0) {
		vnode_setneedinactive(vp);

		if ( !(NATIVE_XATTR(dvp)) ) {
			/*
			 * Remove any associated extended attibute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1);
		}
	}
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
3235
3236
3237#if 0
3238/*
3239 *#
3240 *#% link vp U U U
3241 *#% link tdvp L U U
3242 *#
3243 */
3244struct vnop_link_args {
3245 struct vnodeop_desc *a_desc;
3246 vnode_t a_vp;
3247 vnode_t a_tdvp;
3248 struct componentname *a_cnp;
3249 vfs_context_t a_context;
3250};
3251#endif /* 0*/
/*
 * VNOP_LINK - dispatch a hard-link request (link vp into directory
 * tdvp under name cnp) to the filesystem's vnop_link entry point.
 *
 * On filesystems without native extended attributes, linking to an
 * existing "._<name>" AppleDouble file is refused with EPERM so the
 * shadow-file namespace cannot be aliased.
 *
 * NOTE(review): thread-safety is tested and lock_fsnode() taken on
 * vp, while the operation is dispatched through tdvp's v_op table --
 * presumably both are on the same mount (hard links cannot cross
 * filesystems); confirm against the link() system-call path.
 */
errno_t
VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t context)
{
	int _err;
	struct vnop_link_args a;
	int thread_safe;
	int funnel_state = 0;

	/*
	 * For file systems with non-native extended attributes,
	 * disallow linking to an existing "._" Apple Double file.
	 */
	if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
		char *vname;

		vname = vnode_getname(vp);
		if (vname != NULL) {
			_err = 0;
			/* "._" prefix with at least one more character */
			if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
				_err = EPERM;
			}
			vnode_putname(vname);
			if (_err)
				return (_err);
		}
	}
	a.a_desc = &vnop_link_desc;
	a.a_vp = vp;
	a.a_tdvp = tdvp;
	a.a_cnp = cnp;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
3296
3297
3298#if 0
3299/*
3300 *#
3301 *#% rename fdvp U U U
3302 *#% rename fvp U U U
3303 *#% rename tdvp L U U
3304 *#% rename tvp X U U
3305 *#
3306 */
3307struct vnop_rename_args {
3308 struct vnodeop_desc *a_desc;
3309 vnode_t a_fdvp;
3310 vnode_t a_fvp;
3311 struct componentname *a_fcnp;
3312 vnode_t a_tdvp;
3313 vnode_t a_tvp;
3314 struct componentname *a_tcnp;
3315 vfs_context_t a_context;
3316};
3317#endif /* 0*/
/*
 * VNOP_RENAME - dispatch a rename request to the filesystem's
 * vnop_rename entry point, then (for filesystems without native
 * extended attributes) rename the associated "._" AppleDouble file
 * to follow the primary file.
 *
 * Locking: each of fdvp/tdvp may independently be on a non-threadsafe
 * mount.  When fdvp is unsafe, the two parents and then the two
 * children are locked in vnode address order to avoid deadlocks with
 * a concurrent rename of the same nodes in the opposite direction.
 * The code is heavily order-dependent; do not reorder lock/unlock
 * calls.
 *
 * Error-path labels: "out" frees the AppleDouble name buffers and
 * falls into "out1", which releases the parent locks taken up front.
 */
errno_t
VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
            struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
            vfs_context_t context)
{
	int _err;
	struct vnop_rename_args a;
	int funnel_state = 0;
	char smallname1[48];
	char smallname2[48];
	char *xfromname = NULL;
	char *xtoname = NULL;
	vnode_t lock_first = NULL, lock_second = NULL;
	vnode_t fdvp_unsafe = NULLVP;
	vnode_t tdvp_unsafe = NULLVP;

	a.a_desc = &vnop_rename_desc;
	a.a_fdvp = fdvp;
	a.a_fvp = fvp;
	a.a_fcnp = fcnp;
	a.a_tdvp = tdvp;
	a.a_tvp = tvp;
	a.a_tcnp = tcnp;
	a.a_context = context;

	if (!THREAD_SAFE_FS(fdvp))
		fdvp_unsafe = fdvp;
	if (!THREAD_SAFE_FS(tdvp))
		tdvp_unsafe = tdvp;

	if (fdvp_unsafe != NULLVP) {
		/*
		 * Lock parents in vnode address order to avoid deadlocks
		 * note that it's possible for the fdvp to be unsafe,
		 * but the tdvp to be safe because tvp could be a directory
		 * in the root of a filesystem... in that case, tdvp is the
		 * in the filesystem that this root is mounted on
		 */
		if (tdvp_unsafe == NULL || fdvp_unsafe == tdvp_unsafe) {
			lock_first = fdvp_unsafe;
			lock_second = NULL;
		} else if (fdvp_unsafe < tdvp_unsafe) {
			lock_first = fdvp_unsafe;
			lock_second = tdvp_unsafe;
		} else {
			lock_first = tdvp_unsafe;
			lock_second = fdvp_unsafe;
		}
		if ( (_err = lock_fsnode(lock_first, &funnel_state)) )
			return (_err);

		if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
			unlock_fsnode(lock_first, &funnel_state);
			return (_err);
		}

		/*
		 * Lock both children in vnode address order to avoid deadlocks
		 */
		if (tvp == NULL || tvp == fvp) {
			lock_first = fvp;
			lock_second = NULL;
		} else if (fvp < tvp) {
			lock_first = fvp;
			lock_second = tvp;
		} else {
			lock_first = tvp;
			lock_second = fvp;
		}
		if ( (_err = lock_fsnode(lock_first, NULL)) )
			goto out1;

		if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
			unlock_fsnode(lock_first, NULL);
			goto out1;
		}
	}
	/*
	 * Save source and destination names (._ AppleDouble files).
	 * Skip if source already has a "._" prefix.
	 */
	if (!NATIVE_XATTR(fdvp) &&
	    !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
		size_t len;

		/* Get source attribute file name. */
		/* namelen + strlen("._") + NUL == namelen + 3 bytes */
		len = fcnp->cn_namelen + 3;
		if (len > sizeof(smallname1)) {
			MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
		} else {
			xfromname = &smallname1[0];
		}
		strcpy(xfromname, "._");
		strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
		xfromname[len-1] = '\0';

		/* Get destination attribute file name. */
		len = tcnp->cn_namelen + 3;
		if (len > sizeof(smallname2)) {
			MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
		} else {
			xtoname = &smallname2[0];
		}
		strcpy(xtoname, "._");
		strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
		xtoname[len-1] = '\0';
	}

	/* primary rename */
	_err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);

	if (fdvp_unsafe != NULLVP) {
		/* drop the child locks; parent locks released at out1 */
		if (lock_second != NULL)
			unlock_fsnode(lock_second, NULL);
		unlock_fsnode(lock_first, NULL);
	}
	if (_err == 0) {
		if (tvp && tvp != fvp)
			vnode_setneedinactive(tvp);
	}

	/*
	 * Rename any associated extended attibute file (._ AppleDouble file).
	 */
	if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
		struct nameidata fromnd, tond;
		int killdest = 0;
		int error;

		/*
		 * Get source attribute file vnode.
		 * Note that fdvp already has an iocount reference and
		 * using DELETE will take an additional reference.
		 */
		NDINIT(&fromnd, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE,
		       CAST_USER_ADDR_T(xfromname), context);
		fromnd.ni_dvp = fdvp;
		error = namei(&fromnd);

		if (error) {
			/* When source doesn't exist there still may be a destination. */
			if (error == ENOENT) {
				killdest = 1;
			} else {
				goto out;
			}
		} else if (fromnd.ni_vp->v_type != VREG) {
			/* not a plain AppleDouble file; just clean up the destination */
			vnode_put(fromnd.ni_vp);
			nameidone(&fromnd);
			killdest = 1;
		}
		if (killdest) {
			struct vnop_remove_args args;

			/*
			 * Get destination attribute file vnode.
			 * Note that tdvp already has an iocount reference.
			 */
			NDINIT(&tond, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE,
			       CAST_USER_ADDR_T(xtoname), context);
			tond.ni_dvp = tdvp;
			error = namei(&tond);
			if (error) {
				goto out;
			}
			if (tond.ni_vp->v_type != VREG) {
				vnode_put(tond.ni_vp);
				nameidone(&tond);
				goto out;
			}
			args.a_desc = &vnop_remove_desc;
			args.a_dvp = tdvp;
			args.a_vp = tond.ni_vp;
			args.a_cnp = &tond.ni_cnd;
			args.a_context = context;

			if (fdvp_unsafe != NULLVP)
				error = lock_fsnode(tond.ni_vp, NULL);
			if (error == 0) {
				error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);

				if (fdvp_unsafe != NULLVP)
					unlock_fsnode(tond.ni_vp, NULL);

				if (error == 0)
					vnode_setneedinactive(tond.ni_vp);
			}
			vnode_put(tond.ni_vp);
			nameidone(&tond);
			goto out;
		}

		/*
		 * Get destination attribute file vnode.
		 */
		NDINIT(&tond, RENAME,
		       NOCACHE | NOFOLLOW | USEDVP, UIO_SYSSPACE,
		       CAST_USER_ADDR_T(xtoname), context);
		tond.ni_dvp = tdvp;
		error = namei(&tond);

		if (error) {
			vnode_put(fromnd.ni_vp);
			nameidone(&fromnd);
			goto out;
		}
		/* reuse the args struct for the AppleDouble rename */
		a.a_desc = &vnop_rename_desc;
		a.a_fdvp = fdvp;
		a.a_fvp = fromnd.ni_vp;
		a.a_fcnp = &fromnd.ni_cnd;
		a.a_tdvp = tdvp;
		a.a_tvp = tond.ni_vp;
		a.a_tcnp = &tond.ni_cnd;
		a.a_context = context;

		if (fdvp_unsafe != NULLVP) {
			/*
			 * Lock in vnode address order to avoid deadlocks
			 */
			if (tond.ni_vp == NULL || tond.ni_vp == fromnd.ni_vp) {
				lock_first = fromnd.ni_vp;
				lock_second = NULL;
			} else if (fromnd.ni_vp < tond.ni_vp) {
				lock_first = fromnd.ni_vp;
				lock_second = tond.ni_vp;
			} else {
				lock_first = tond.ni_vp;
				lock_second = fromnd.ni_vp;
			}
			if ( (error = lock_fsnode(lock_first, NULL)) == 0) {
				if (lock_second != NULL && (error = lock_fsnode(lock_second, NULL)) )
					unlock_fsnode(lock_first, NULL);
			}
		}
		if (error == 0) {
			error = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);

			if (fdvp_unsafe != NULLVP) {
				if (lock_second != NULL)
					unlock_fsnode(lock_second, NULL);
				unlock_fsnode(lock_first, NULL);
			}
			if (error == 0) {
				vnode_setneedinactive(fromnd.ni_vp);

				if (tond.ni_vp && tond.ni_vp != fromnd.ni_vp)
					vnode_setneedinactive(tond.ni_vp);
			}
		}
		vnode_put(fromnd.ni_vp);
		if (tond.ni_vp) {
			vnode_put(tond.ni_vp);
		}
		nameidone(&tond);
		nameidone(&fromnd);
	}
out:
	/* free heap-allocated name buffers (stack buffers need no free) */
	if (xfromname && xfromname != &smallname1[0]) {
		FREE(xfromname, M_TEMP);
	}
	if (xtoname && xtoname != &smallname2[0]) {
		FREE(xtoname, M_TEMP);
	}
out1:
	if (fdvp_unsafe != NULLVP) {
		if (tdvp_unsafe != NULLVP)
			unlock_fsnode(tdvp_unsafe, NULL);
		unlock_fsnode(fdvp_unsafe, &funnel_state);
	}
	return (_err);
}
3588
3589 #if 0
3590/*
3591 *#
3592 *#% mkdir dvp L U U
3593 *#% mkdir vpp - L -
3594 *#
3595 */
3596struct vnop_mkdir_args {
3597 struct vnodeop_desc *a_desc;
3598 vnode_t a_dvp;
3599 vnode_t *a_vpp;
3600 struct componentname *a_cnp;
3601 struct vnode_attr *a_vap;
3602 vfs_context_t a_context;
3603};
3604#endif /* 0*/
/*
 * VNOP_MKDIR - dispatch a directory-create request to the
 * filesystem's vnop_mkdir entry point for directory dvp.
 *
 * Non-threadsafe filesystems are serialized via lock_fsnode() on dvp.
 * On success, filesystems without native extended attributes get any
 * stale "._" AppleDouble file for this name removed.
 */
errno_t
VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
           struct vnode_attr *vap, vfs_context_t context)
{
	int _err;
	struct vnop_mkdir_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_mkdir_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
	}
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
3639
3640
3641#if 0
3642/*
3643 *#
3644 *#% rmdir dvp L U U
3645 *#% rmdir vp L U U
3646 *#
3647 */
3648struct vnop_rmdir_args {
3649 struct vnodeop_desc *a_desc;
3650 vnode_t a_dvp;
3651 vnode_t a_vp;
3652 struct componentname *a_cnp;
3653 vfs_context_t a_context;
3654};
3655
3656#endif /* 0*/
/*
 * VNOP_RMDIR - dispatch a directory-remove to vp's filesystem.
 * For non-threadsafe filesystems the fsnode lock is held on vp around
 * the call.  On success the vnode is marked for inactive processing and
 * any AppleDouble sidecar is removed from non-native-xattr filesystems.
 */
errno_t
VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t context)
{
	int _err;
	struct vnop_rmdir_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_rmdir_desc;
	a.a_dvp = dvp;
	a.a_vp = vp;
	a.a_cnp = cnp;
	a.a_context = context;
	/*
	 * NOTE(review): thread-safety is probed on dvp while the fsnode lock
	 * below is taken on vp.  Both vnodes live on the same mount, so
	 * THREAD_SAFE_FS presumably yields the same answer either way —
	 * confirm this asymmetry is intentional.
	 */
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);

	if (_err == 0) {
		/* Defer final teardown to VNOP_INACTIVE. */
		vnode_setneedinactive(vp);

		if ( !(NATIVE_XATTR(dvp)) ) {
			/*
			 * Remove any associated extended attribute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1);
		}
	}
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
3694
3695/*
3696 * Remove a ._ AppleDouble file
3697 */
3698#define AD_STALE_SECS (180)
static void
xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context, int thread_safe, int force) {
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t len;

	/*
	 * Delete the "._<basename>" AppleDouble sidecar in directory dvp.
	 * With force == 0 the file is only removed when it looks stale
	 * (non-empty and unmodified for more than AD_STALE_SECS); with
	 * force != 0 it is removed unconditionally.
	 * Skip empty names and names that are themselves "._" files.
	 */
	if ((basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	/* Build "._<basename>"; fall back to a heap buffer if it won't fit. */
	filename = &smallname[0];
	len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (len >= sizeof(smallname)) {
		len++; /* snprintf result doesn't include '\0' */
		MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
		len = snprintf(filename, len, "._%s", basename);
	}
	/* Look the sidecar up relative to dvp (USEDVP). */
	NDINIT(&nd, DELETE, LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
	       CAST_USER_ADDR_T(filename), context);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0)
		goto out2;

	xvp = nd.ni_vp;
	nameidone(&nd);
	/* Only a plain file can be an AppleDouble sidecar. */
	if (xvp->v_type != VREG)
		goto out1;

	/*
	 * When creating a new object and a "._" file already
	 * exists, check to see if its a stale "._" file.
	 *
	 */
	if (!force) {
		struct vnode_attr va;

		VATTR_INIT(&va);
		VATTR_WANTED(&va, va_data_size);
		VATTR_WANTED(&va, va_modify_time);
		if (VNOP_GETATTR(xvp, &va, context) == 0 &&
		    VATTR_IS_SUPPORTED(&va, va_data_size) &&
		    VATTR_IS_SUPPORTED(&va, va_modify_time) &&
		    va.va_data_size != 0) {
			struct timeval tv;

			microtime(&tv);
			if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
			    (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
				force = 1; /* must be stale */
			}
		}
	}
	if (force) {
		struct vnop_remove_args a;
		int error;

		/*
		 * NOTE(review): nd.ni_dvp and nd.ni_cnd are referenced here
		 * after nameidone() above — appears intentional, but confirm
		 * their lifetime against the namei()/nameidone() contract.
		 */
		a.a_desc = &vnop_remove_desc;
		a.a_dvp = nd.ni_dvp;
		a.a_vp = xvp;
		a.a_cnp = &nd.ni_cnd;
		a.a_context = context;

		if (!thread_safe) {
			if ( (lock_fsnode(xvp, NULL)) )
				goto out1;
		}
		/* dvp and xvp share a mount, so dvp's op vector is valid here. */
		error = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);

		if (!thread_safe)
			unlock_fsnode(xvp, NULL);

		if (error == 0)
			vnode_setneedinactive(xvp);
	}
out1:
	/* Note: nd.ni_dvp's iocount is dropped by caller of VNOP_XXXX */
	vnode_put(xvp);
out2:
	if (filename && filename != &smallname[0]) {
		FREE(filename, M_TEMP);
	}
}
3783
3784/*
3785 * Shadow uid/gid/mod to a ._ AppleDouble file
3786 */
static void
xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
                  vfs_context_t context, int thread_safe) {
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t len;

	/*
	 * Mirror an attribute change (from vap) onto the "._<basename>"
	 * AppleDouble sidecar in dvp, if one exists.  Best effort: lookup
	 * or setattr failures are silently ignored.
	 * Skip empty names and names that are themselves "._" files.
	 */
	if ((dvp == NULLVP) ||
	    (basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	/* Build "._<basename>"; fall back to a heap buffer if it won't fit. */
	filename = &smallname[0];
	len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (len >= sizeof(smallname)) {
		len++; /* snprintf result doesn't include '\0' */
		MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
		len = snprintf(filename, len, "._%s", basename);
	}
	/* Look the sidecar up relative to dvp (USEDVP). */
	NDINIT(&nd, LOOKUP, NOFOLLOW | USEDVP, UIO_SYSSPACE,
	       CAST_USER_ADDR_T(filename), context);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0)
		goto out2;

	xvp = nd.ni_vp;
	nameidone(&nd);

	/* Only shadow attributes onto a plain-file sidecar. */
	if (xvp->v_type == VREG) {
		struct vnop_setattr_args a;

		a.a_desc = &vnop_setattr_desc;
		a.a_vp = xvp;
		a.a_vap = vap;
		a.a_context = context;

		if (!thread_safe) {
			if ( (lock_fsnode(xvp, NULL)) )
				goto out1;
		}
		/* Result deliberately ignored: shadowing is best effort. */
		(void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
		if (!thread_safe) {
			unlock_fsnode(xvp, NULL);
		}
	}
out1:
	vnode_put(xvp);
out2:
	if (filename && filename != &smallname[0]) {
		FREE(filename, M_TEMP);
	}
}
3841
3842 #if 0
3843/*
3844 *#
3845 *#% symlink dvp L U U
3846 *#% symlink vpp - U -
3847 *#
3848 */
3849struct vnop_symlink_args {
3850 struct vnodeop_desc *a_desc;
3851 vnode_t a_dvp;
3852 vnode_t *a_vpp;
3853 struct componentname *a_cnp;
3854 struct vnode_attr *a_vap;
3855 char *a_target;
3856 vfs_context_t a_context;
3857};
3858
3859#endif /* 0*/
3860errno_t
3861VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
3862 struct vnode_attr *vap, char *target, vfs_context_t context)
3863{
3864 int _err;
3865 struct vnop_symlink_args a;
3866 int thread_safe;
3867 int funnel_state = 0;
3868
3869 a.a_desc = &vnop_symlink_desc;
3870 a.a_dvp = dvp;
3871 a.a_vpp = vpp;
3872 a.a_cnp = cnp;
3873 a.a_vap = vap;
3874 a.a_target = target;
3875 a.a_context = context;
3876 thread_safe = THREAD_SAFE_FS(dvp);
3877
3878 if (!thread_safe) {
3879 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3880 return (_err);
3881 }
3882 }
3883 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
3884 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3885 /*
3886 * Remove stale Apple Double file (if any).
3887 */
3888 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
3889 }
3890 if (!thread_safe) {
3891 unlock_fsnode(dvp, &funnel_state);
3892 }
3893 return (_err);
3894}
3895
3896#if 0
3897/*
3898 *#
3899 *#% readdir vp L L L
3900 *#
3901 */
3902struct vnop_readdir_args {
3903 struct vnodeop_desc *a_desc;
3904 vnode_t a_vp;
3905 struct uio *a_uio;
3906 int a_flags;
3907 int *a_eofflag;
3908 int *a_numdirent;
3909 vfs_context_t a_context;
3910};
3911
3912#endif /* 0*/
3913errno_t
3914VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
3915 int *numdirent, vfs_context_t context)
3916{
3917 int _err;
3918 struct vnop_readdir_args a;
3919 int thread_safe;
3920 int funnel_state = 0;
3921
3922 a.a_desc = &vnop_readdir_desc;
3923 a.a_vp = vp;
3924 a.a_uio = uio;
3925 a.a_flags = flags;
3926 a.a_eofflag = eofflag;
3927 a.a_numdirent = numdirent;
3928 a.a_context = context;
3929 thread_safe = THREAD_SAFE_FS(vp);
3930
3931 if (!thread_safe) {
3932 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3933 return (_err);
3934 }
3935 }
3936 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
3937 if (!thread_safe) {
3938 unlock_fsnode(vp, &funnel_state);
3939 }
3940 return (_err);
3941}
3942
3943#if 0
3944/*
3945 *#
3946 *#% readdirattr vp L L L
3947 *#
3948 */
3949struct vnop_readdirattr_args {
3950 struct vnodeop_desc *a_desc;
3951 vnode_t a_vp;
3952 struct attrlist *a_alist;
3953 struct uio *a_uio;
3954 u_long a_maxcount;
3955 u_long a_options;
3956 u_long *a_newstate;
3957 int *a_eofflag;
3958 u_long *a_actualcount;
3959 vfs_context_t a_context;
3960};
3961
3962#endif /* 0*/
3963errno_t
3964VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, u_long maxcount,
3965 u_long options, u_long *newstate, int *eofflag, u_long *actualcount, vfs_context_t context)
3966{
3967 int _err;
3968 struct vnop_readdirattr_args a;
3969 int thread_safe;
3970 int funnel_state = 0;
3971
3972 a.a_desc = &vnop_readdirattr_desc;
3973 a.a_vp = vp;
3974 a.a_alist = alist;
3975 a.a_uio = uio;
3976 a.a_maxcount = maxcount;
3977 a.a_options = options;
3978 a.a_newstate = newstate;
3979 a.a_eofflag = eofflag;
3980 a.a_actualcount = actualcount;
3981 a.a_context = context;
3982 thread_safe = THREAD_SAFE_FS(vp);
3983
3984 if (!thread_safe) {
3985 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3986 return (_err);
3987 }
3988 }
3989 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
3990 if (!thread_safe) {
3991 unlock_fsnode(vp, &funnel_state);
3992 }
3993 return (_err);
3994}
3995
3996#if 0
3997/*
3998 *#
3999 *#% readlink vp L L L
4000 *#
4001 */
4002struct vnop_readlink_args {
4003 struct vnodeop_desc *a_desc;
4004 vnode_t a_vp;
4005 struct uio *a_uio;
4006 vfs_context_t a_context;
4007};
4008#endif /* 0 */
4009
4010errno_t
4011VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t context)
4012{
4013 int _err;
4014 struct vnop_readlink_args a;
4015 int thread_safe;
4016 int funnel_state = 0;
4017
4018 a.a_desc = &vnop_readlink_desc;
4019 a.a_vp = vp;
4020 a.a_uio = uio;
4021 a.a_context = context;
4022 thread_safe = THREAD_SAFE_FS(vp);
4023
4024 if (!thread_safe) {
4025 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4026 return (_err);
4027 }
4028 }
4029 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
4030 if (!thread_safe) {
4031 unlock_fsnode(vp, &funnel_state);
4032 }
4033 return (_err);
4034}
4035
4036#if 0
4037/*
4038 *#
4039 *#% inactive vp L U U
4040 *#
4041 */
4042struct vnop_inactive_args {
4043 struct vnodeop_desc *a_desc;
4044 vnode_t a_vp;
4045 vfs_context_t a_context;
4046};
4047#endif /* 0*/
4048errno_t
4049VNOP_INACTIVE(struct vnode *vp, vfs_context_t context)
4050{
4051 int _err;
4052 struct vnop_inactive_args a;
4053 int thread_safe;
4054 int funnel_state = 0;
4055
4056 a.a_desc = &vnop_inactive_desc;
4057 a.a_vp = vp;
4058 a.a_context = context;
4059 thread_safe = THREAD_SAFE_FS(vp);
4060
4061 if (!thread_safe) {
4062 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4063 return (_err);
4064 }
4065 }
4066 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
4067 if (!thread_safe) {
4068 unlock_fsnode(vp, &funnel_state);
4069 }
4070 return (_err);
4071}
4072
4073
4074#if 0
4075/*
4076 *#
4077 *#% reclaim vp U U U
4078 *#
4079 */
4080struct vnop_reclaim_args {
4081 struct vnodeop_desc *a_desc;
4082 vnode_t a_vp;
4083 vfs_context_t a_context;
4084};
4085#endif /* 0*/
4086errno_t
4087VNOP_RECLAIM(struct vnode *vp, vfs_context_t context)
4088{
4089 int _err;
4090 struct vnop_reclaim_args a;
4091 int thread_safe;
4092 int funnel_state = 0;
4093
4094 a.a_desc = &vnop_reclaim_desc;
4095 a.a_vp = vp;
4096 a.a_context = context;
4097 thread_safe = THREAD_SAFE_FS(vp);
4098
4099 if (!thread_safe) {
4100 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4101 }
4102 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
4103 if (!thread_safe) {
4104 (void) thread_funnel_set(kernel_flock, funnel_state);
4105 }
4106 return (_err);
4107}
4108
4109
4110#if 0
4111/*
4112 *#
4113 *#% pathconf vp L L L
4114 *#
4115 */
4116struct vnop_pathconf_args {
4117 struct vnodeop_desc *a_desc;
4118 vnode_t a_vp;
4119 int a_name;
4120 register_t *a_retval;
4121 vfs_context_t a_context;
4122};
4123#endif /* 0*/
4124errno_t
4125VNOP_PATHCONF(struct vnode *vp, int name, register_t *retval, vfs_context_t context)
4126{
4127 int _err;
4128 struct vnop_pathconf_args a;
4129 int thread_safe;
4130 int funnel_state = 0;
4131
4132 a.a_desc = &vnop_pathconf_desc;
4133 a.a_vp = vp;
4134 a.a_name = name;
4135 a.a_retval = retval;
4136 a.a_context = context;
4137 thread_safe = THREAD_SAFE_FS(vp);
4138
4139 if (!thread_safe) {
4140 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4141 return (_err);
4142 }
4143 }
4144 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
4145 if (!thread_safe) {
4146 unlock_fsnode(vp, &funnel_state);
4147 }
4148 return (_err);
4149}
4150
4151#if 0
4152/*
4153 *#
4154 *#% advlock vp U U U
4155 *#
4156 */
4157struct vnop_advlock_args {
4158 struct vnodeop_desc *a_desc;
4159 vnode_t a_vp;
4160 caddr_t a_id;
4161 int a_op;
4162 struct flock *a_fl;
4163 int a_flags;
4164 vfs_context_t a_context;
4165};
4166#endif /* 0*/
4167errno_t
4168VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t context)
4169{
4170 int _err;
4171 struct vnop_advlock_args a;
4172 int thread_safe;
4173 int funnel_state = 0;
4174 struct uthread * uth;
4175
4176 a.a_desc = &vnop_advlock_desc;
4177 a.a_vp = vp;
4178 a.a_id = id;
4179 a.a_op = op;
4180 a.a_fl = fl;
4181 a.a_flags = flags;
4182 a.a_context = context;
4183 thread_safe = THREAD_SAFE_FS(vp);
4184
4185 uth = get_bsdthread_info(current_thread());
4186 if (!thread_safe) {
4187 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4188 }
4189 /* Disallow advisory locking on non-seekable vnodes */
4190 if (vnode_isfifo(vp)) {
4191 _err = err_advlock(&a);
4192 } else {
4193 if ((vp->v_flag & VLOCKLOCAL)) {
4194 /* Advisory locking done at this layer */
4195 _err = lf_advlock(&a);
4196 } else {
4197 /* Advisory locking done by underlying filesystem */
4198 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
4199 }
4200 }
4201 if (!thread_safe) {
4202 (void) thread_funnel_set(kernel_flock, funnel_state);
4203 }
4204 return (_err);
4205}
4206
4207
4208
4209#if 0
4210/*
4211 *#
4212 *#% allocate vp L L L
4213 *#
4214 */
4215struct vnop_allocate_args {
4216 struct vnodeop_desc *a_desc;
4217 vnode_t a_vp;
4218 off_t a_length;
4219 u_int32_t a_flags;
4220 off_t *a_bytesallocated;
4221 off_t a_offset;
4222 vfs_context_t a_context;
4223};
4224
4225#endif /* 0*/
4226errno_t
4227VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t context)
4228{
4229 int _err;
4230 struct vnop_allocate_args a;
4231 int thread_safe;
4232 int funnel_state = 0;
4233
4234 a.a_desc = &vnop_allocate_desc;
4235 a.a_vp = vp;
4236 a.a_length = length;
4237 a.a_flags = flags;
4238 a.a_bytesallocated = bytesallocated;
4239 a.a_offset = offset;
4240 a.a_context = context;
4241 thread_safe = THREAD_SAFE_FS(vp);
4242
4243 if (!thread_safe) {
4244 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4245 return (_err);
4246 }
4247 }
4248 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
4249 if (!thread_safe) {
4250 unlock_fsnode(vp, &funnel_state);
4251 }
4252 return (_err);
4253}
4254
4255#if 0
4256/*
4257 *#
4258 *#% pagein vp = = =
4259 *#
4260 */
4261struct vnop_pagein_args {
4262 struct vnodeop_desc *a_desc;
4263 vnode_t a_vp;
4264 upl_t a_pl;
4265 vm_offset_t a_pl_offset;
4266 off_t a_f_offset;
4267 size_t a_size;
4268 int a_flags;
4269 vfs_context_t a_context;
4270};
4271#endif /* 0*/
4272errno_t
4273VNOP_PAGEIN(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context)
4274{
4275 int _err;
4276 struct vnop_pagein_args a;
4277 int thread_safe;
4278 int funnel_state = 0;
4279
4280 a.a_desc = &vnop_pagein_desc;
4281 a.a_vp = vp;
4282 a.a_pl = pl;
4283 a.a_pl_offset = pl_offset;
4284 a.a_f_offset = f_offset;
4285 a.a_size = size;
4286 a.a_flags = flags;
4287 a.a_context = context;
4288 thread_safe = THREAD_SAFE_FS(vp);
4289
4290 if (!thread_safe) {
4291 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4292 }
4293 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
4294 if (!thread_safe) {
4295 (void) thread_funnel_set(kernel_flock, funnel_state);
4296 }
4297 return (_err);
4298}
4299
4300#if 0
4301/*
4302 *#
4303 *#% pageout vp = = =
4304 *#
4305 */
4306struct vnop_pageout_args {
4307 struct vnodeop_desc *a_desc;
4308 vnode_t a_vp;
4309 upl_t a_pl;
4310 vm_offset_t a_pl_offset;
4311 off_t a_f_offset;
4312 size_t a_size;
4313 int a_flags;
4314 vfs_context_t a_context;
4315};
4316
4317#endif /* 0*/
4318errno_t
4319VNOP_PAGEOUT(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context)
4320{
4321 int _err;
4322 struct vnop_pageout_args a;
4323 int thread_safe;
4324 int funnel_state = 0;
4325
4326 a.a_desc = &vnop_pageout_desc;
4327 a.a_vp = vp;
4328 a.a_pl = pl;
4329 a.a_pl_offset = pl_offset;
4330 a.a_f_offset = f_offset;
4331 a.a_size = size;
4332 a.a_flags = flags;
4333 a.a_context = context;
4334 thread_safe = THREAD_SAFE_FS(vp);
4335
4336 if (!thread_safe) {
4337 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4338 }
4339 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
4340 if (!thread_safe) {
4341 (void) thread_funnel_set(kernel_flock, funnel_state);
4342 }
4343 return (_err);
4344}
4345
4346
4347#if 0
4348/*
4349 *#
4350 *#% searchfs vp L L L
4351 *#
4352 */
4353struct vnop_searchfs_args {
4354 struct vnodeop_desc *a_desc;
4355 vnode_t a_vp;
4356 void *a_searchparams1;
4357 void *a_searchparams2;
4358 struct attrlist *a_searchattrs;
4359 u_long a_maxmatches;
4360 struct timeval *a_timelimit;
4361 struct attrlist *a_returnattrs;
4362 u_long *a_nummatches;
4363 u_long a_scriptcode;
4364 u_long a_options;
4365 struct uio *a_uio;
4366 struct searchstate *a_searchstate;
4367 vfs_context_t a_context;
4368};
4369
4370#endif /* 0*/
4371errno_t
4372VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, u_long maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, u_long *nummatches, u_long scriptcode, u_long options, struct uio *uio, struct searchstate *searchstate, vfs_context_t context)
4373{
4374 int _err;
4375 struct vnop_searchfs_args a;
4376 int thread_safe;
4377 int funnel_state = 0;
4378
4379 a.a_desc = &vnop_searchfs_desc;
4380 a.a_vp = vp;
4381 a.a_searchparams1 = searchparams1;
4382 a.a_searchparams2 = searchparams2;
4383 a.a_searchattrs = searchattrs;
4384 a.a_maxmatches = maxmatches;
4385 a.a_timelimit = timelimit;
4386 a.a_returnattrs = returnattrs;
4387 a.a_nummatches = nummatches;
4388 a.a_scriptcode = scriptcode;
4389 a.a_options = options;
4390 a.a_uio = uio;
4391 a.a_searchstate = searchstate;
4392 a.a_context = context;
4393 thread_safe = THREAD_SAFE_FS(vp);
4394
4395 if (!thread_safe) {
4396 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4397 return (_err);
4398 }
4399 }
4400 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
4401 if (!thread_safe) {
4402 unlock_fsnode(vp, &funnel_state);
4403 }
4404 return (_err);
4405}
4406
4407#if 0
4408/*
4409 *#
4410 *#% copyfile fvp U U U
4411 *#% copyfile tdvp L U U
4412 *#% copyfile tvp X U U
4413 *#
4414 */
4415struct vnop_copyfile_args {
4416 struct vnodeop_desc *a_desc;
4417 vnode_t a_fvp;
4418 vnode_t a_tdvp;
4419 vnode_t a_tvp;
4420 struct componentname *a_tcnp;
4421 int a_mode;
4422 int a_flags;
4423 vfs_context_t a_context;
4424};
4425#endif /* 0*/
4426errno_t
4427VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4428 int mode, int flags, vfs_context_t context)
4429{
4430 int _err;
4431 struct vnop_copyfile_args a;
4432 a.a_desc = &vnop_copyfile_desc;
4433 a.a_fvp = fvp;
4434 a.a_tdvp = tdvp;
4435 a.a_tvp = tvp;
4436 a.a_tcnp = tcnp;
4437 a.a_mode = mode;
4438 a.a_flags = flags;
4439 a.a_context = context;
4440 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
4441 return (_err);
4442}
4443
4444
4445errno_t
4446VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t context)
4447{
4448 struct vnop_getxattr_args a;
4449 int error;
4450 int thread_safe;
4451 int funnel_state = 0;
4452
4453 a.a_desc = &vnop_getxattr_desc;
4454 a.a_vp = vp;
4455 a.a_name = name;
4456 a.a_uio = uio;
4457 a.a_size = size;
4458 a.a_options = options;
4459 a.a_context = context;
4460
4461 thread_safe = THREAD_SAFE_FS(vp);
4462 if (!thread_safe) {
4463 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4464 return (error);
4465 }
4466 }
4467 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
4468 if (!thread_safe) {
4469 unlock_fsnode(vp, &funnel_state);
4470 }
4471 return (error);
4472}
4473
4474errno_t
4475VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t context)
4476{
4477 struct vnop_setxattr_args a;
4478 int error;
4479 int thread_safe;
4480 int funnel_state = 0;
4481
4482 a.a_desc = &vnop_setxattr_desc;
4483 a.a_vp = vp;
4484 a.a_name = name;
4485 a.a_uio = uio;
4486 a.a_options = options;
4487 a.a_context = context;
4488
4489 thread_safe = THREAD_SAFE_FS(vp);
4490 if (!thread_safe) {
4491 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4492 return (error);
4493 }
4494 }
4495 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
4496 if (!thread_safe) {
4497 unlock_fsnode(vp, &funnel_state);
4498 }
4499 return (error);
4500}
4501
4502errno_t
4503VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t context)
4504{
4505 struct vnop_removexattr_args a;
4506 int error;
4507 int thread_safe;
4508 int funnel_state = 0;
4509
4510 a.a_desc = &vnop_removexattr_desc;
4511 a.a_vp = vp;
4512 a.a_name = name;
4513 a.a_options = options;
4514 a.a_context = context;
4515
4516 thread_safe = THREAD_SAFE_FS(vp);
4517 if (!thread_safe) {
4518 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4519 return (error);
4520 }
4521 }
4522 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
4523 if (!thread_safe) {
4524 unlock_fsnode(vp, &funnel_state);
4525 }
4526 return (error);
4527}
4528
4529errno_t
4530VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t context)
4531{
4532 struct vnop_listxattr_args a;
4533 int error;
4534 int thread_safe;
4535 int funnel_state = 0;
4536
4537 a.a_desc = &vnop_listxattr_desc;
4538 a.a_vp = vp;
4539 a.a_uio = uio;
4540 a.a_size = size;
4541 a.a_options = options;
4542 a.a_context = context;
4543
4544 thread_safe = THREAD_SAFE_FS(vp);
4545 if (!thread_safe) {
4546 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4547 return (error);
4548 }
4549 }
4550 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
4551 if (!thread_safe) {
4552 unlock_fsnode(vp, &funnel_state);
4553 }
4554 return (error);
4555}
4556
4557
4558#if 0
4559/*
4560 *#
4561 *#% blktooff vp = = =
4562 *#
4563 */
4564struct vnop_blktooff_args {
4565 struct vnodeop_desc *a_desc;
4566 vnode_t a_vp;
4567 daddr64_t a_lblkno;
4568 off_t *a_offset;
4569};
4570#endif /* 0*/
4571errno_t
4572VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
4573{
4574 int _err;
4575 struct vnop_blktooff_args a;
4576 int thread_safe;
4577 int funnel_state = 0;
4578
4579 a.a_desc = &vnop_blktooff_desc;
4580 a.a_vp = vp;
4581 a.a_lblkno = lblkno;
4582 a.a_offset = offset;
4583 thread_safe = THREAD_SAFE_FS(vp);
4584
4585 if (!thread_safe) {
4586 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4587 }
4588 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
4589 if (!thread_safe) {
4590 (void) thread_funnel_set(kernel_flock, funnel_state);
4591 }
4592 return (_err);
4593}
4594
4595#if 0
4596/*
4597 *#
4598 *#% offtoblk vp = = =
4599 *#
4600 */
4601struct vnop_offtoblk_args {
4602 struct vnodeop_desc *a_desc;
4603 vnode_t a_vp;
4604 off_t a_offset;
4605 daddr64_t *a_lblkno;
4606};
4607#endif /* 0*/
4608errno_t
4609VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
4610{
4611 int _err;
4612 struct vnop_offtoblk_args a;
4613 int thread_safe;
4614 int funnel_state = 0;
4615
4616 a.a_desc = &vnop_offtoblk_desc;
4617 a.a_vp = vp;
4618 a.a_offset = offset;
4619 a.a_lblkno = lblkno;
4620 thread_safe = THREAD_SAFE_FS(vp);
4621
4622 if (!thread_safe) {
4623 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4624 }
4625 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
4626 if (!thread_safe) {
4627 (void) thread_funnel_set(kernel_flock, funnel_state);
4628 }
4629 return (_err);
4630}
4631
4632#if 0
4633/*
4634 *#
4635 *#% blockmap vp L L L
4636 *#
4637 */
4638struct vnop_blockmap_args {
4639 struct vnodeop_desc *a_desc;
4640 vnode_t a_vp;
4641 off_t a_foffset;
4642 size_t a_size;
4643 daddr64_t *a_bpn;
4644 size_t *a_run;
4645 void *a_poff;
4646 int a_flags;
4647 vfs_context_t a_context;
4648};
4649#endif /* 0*/
4650errno_t
4651VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t context)
4652{
4653 int _err;
4654 struct vnop_blockmap_args a;
4655 int thread_safe;
4656 int funnel_state = 0;
4657 struct vfs_context acontext;
4658
4659 if (context == NULL) {
4660 acontext.vc_proc = current_proc();
4661 acontext.vc_ucred = kauth_cred_get();
4662 context = &acontext;
4663 }
4664 a.a_desc = &vnop_blockmap_desc;
4665 a.a_vp = vp;
4666 a.a_foffset = foffset;
4667 a.a_size = size;
4668 a.a_bpn = bpn;
4669 a.a_run = run;
4670 a.a_poff = poff;
4671 a.a_flags = flags;
4672 a.a_context = context;
4673 thread_safe = THREAD_SAFE_FS(vp);
4674
4675 if (!thread_safe) {
4676 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4677 }
4678 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
4679 if (!thread_safe) {
4680 (void) thread_funnel_set(kernel_flock, funnel_state);
4681 }
4682 return (_err);
4683}
4684
4685#if 0
4686struct vnop_strategy_args {
4687 struct vnodeop_desc *a_desc;
4688 struct buf *a_bp;
4689};
4690
4691#endif /* 0*/
4692errno_t
4693VNOP_STRATEGY(struct buf *bp)
4694{
4695 int _err;
4696 struct vnop_strategy_args a;
4697 a.a_desc = &vnop_strategy_desc;
4698 a.a_bp = bp;
4699 _err = (*buf_vnode(bp)->v_op[vnop_strategy_desc.vdesc_offset])(&a);
4700 return (_err);
4701}
4702
4703#if 0
4704struct vnop_bwrite_args {
4705 struct vnodeop_desc *a_desc;
4706 buf_t a_bp;
4707};
4708#endif /* 0*/
4709errno_t
4710VNOP_BWRITE(struct buf *bp)
4711{
4712 int _err;
4713 struct vnop_bwrite_args a;
4714 a.a_desc = &vnop_bwrite_desc;
4715 a.a_bp = bp;
4716 _err = (*buf_vnode(bp)->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
4717 return (_err);
4718}
4719
4720#if 0
4721struct vnop_kqfilt_add_args {
4722 struct vnodeop_desc *a_desc;
4723 struct vnode *a_vp;
4724 struct knote *a_kn;
4725 vfs_context_t a_context;
4726};
4727#endif
4728errno_t
4729VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t context)
4730{
4731 int _err;
4732 struct vnop_kqfilt_add_args a;
4733 int thread_safe;
4734 int funnel_state = 0;
4735
4736 a.a_desc = VDESC(vnop_kqfilt_add);
4737 a.a_vp = vp;
4738 a.a_kn = kn;
4739 a.a_context = context;
4740 thread_safe = THREAD_SAFE_FS(vp);
4741
4742 if (!thread_safe) {
4743 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4744 return (_err);
4745 }
4746 }
4747 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
4748 if (!thread_safe) {
4749 unlock_fsnode(vp, &funnel_state);
4750 }
4751 return(_err);
4752}
4753
4754#if 0
4755struct vnop_kqfilt_remove_args {
4756 struct vnodeop_desc *a_desc;
4757 struct vnode *a_vp;
4758 uintptr_t a_ident;
4759 vfs_context_t a_context;
4760};
4761#endif
4762errno_t
4763VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t context)
4764{
4765 int _err;
4766 struct vnop_kqfilt_remove_args a;
4767 int thread_safe;
4768 int funnel_state = 0;
4769
4770 a.a_desc = VDESC(vnop_kqfilt_remove);
4771 a.a_vp = vp;
4772 a.a_ident = ident;
4773 a.a_context = context;
4774 thread_safe = THREAD_SAFE_FS(vp);
4775
4776 if (!thread_safe) {
4777 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4778 return (_err);
4779 }
4780 }
4781 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
4782 if (!thread_safe) {
4783 unlock_fsnode(vp, &funnel_state);
4784 }
4785 return(_err);
4786}
4787