]> git.saurik.com Git - apple/xnu.git/blame - bsd/vfs/kpi_vfs.c
xnu-1504.9.37.tar.gz
[apple/xnu.git] / bsd / vfs / kpi_vfs.c
CommitLineData
91447636 1/*
cf7d32b8 2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
91447636 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
91447636
A
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
2d21ac55
A
68/*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
91447636
A
74
75/*
76 * External virtual filesystem routines
77 */
78
91447636
A
79
80#include <sys/param.h>
81#include <sys/systm.h>
82#include <sys/proc_internal.h>
83#include <sys/kauth.h>
84#include <sys/mount.h>
85#include <sys/mount_internal.h>
86#include <sys/time.h>
87#include <sys/vnode_internal.h>
88#include <sys/stat.h>
89#include <sys/namei.h>
90#include <sys/ucred.h>
91#include <sys/buf.h>
92#include <sys/errno.h>
93#include <sys/malloc.h>
94#include <sys/domain.h>
95#include <sys/mbuf.h>
96#include <sys/syslog.h>
97#include <sys/ubc.h>
98#include <sys/vm.h>
99#include <sys/sysctl.h>
100#include <sys/filedesc.h>
b0d623f7 101#include <sys/event.h>
91447636
A
102#include <sys/fsevents.h>
103#include <sys/user.h>
104#include <sys/lockf.h>
105#include <sys/xattr.h>
106
107#include <kern/assert.h>
108#include <kern/kalloc.h>
2d21ac55 109#include <kern/task.h>
91447636 110
0c530ab8
A
111#include <libkern/OSByteOrder.h>
112
91447636
A
113#include <miscfs/specfs/specdev.h>
114
115#include <mach/mach_types.h>
116#include <mach/memory_object_types.h>
2d21ac55
A
117#include <mach/task.h>
118
119#if CONFIG_MACF
120#include <security/mac_framework.h>
121#endif
91447636
A
122
123#define ESUCCESS 0
124#undef mount_t
125#undef vnode_t
126
127#define COMPAT_ONLY
128
129
b0d623f7 130#ifndef __LP64__
91447636
A
131#define THREAD_SAFE_FS(VP) \
132 ((VP)->v_unsafefs ? 0 : 1)
b0d623f7 133#endif /* __LP64__ */
91447636
A
134
135#define NATIVE_XATTR(VP) \
2d21ac55 136 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
91447636 137
2d21ac55 138static void xattrfile_remove(vnode_t dvp, const char *basename,
b0d623f7 139 vfs_context_t ctx, int force);
2d21ac55 140static void xattrfile_setattr(vnode_t dvp, const char * basename,
b0d623f7 141 struct vnode_attr * vap, vfs_context_t ctx);
91447636 142
b0d623f7
A
143/*
144 * vnode_setneedinactive
145 *
146 * Description: Indicate that when the last iocount on this vnode goes away,
147 * and the usecount is also zero, we should inform the filesystem
148 * via VNOP_INACTIVE.
149 *
150 * Parameters: vnode_t vnode to mark
151 *
152 * Returns: Nothing
153 *
154 * Notes: Notably used when we're deleting a file--we need not have a
155 * usecount, so VNOP_INACTIVE may not get called by anyone. We
156 * want it called when we drop our iocount.
157 */
158void
91447636
A
159vnode_setneedinactive(vnode_t vp)
160{
161 cache_purge(vp);
162
2d21ac55 163 vnode_lock_spin(vp);
91447636
A
164 vp->v_lflag |= VL_NEEDINACTIVE;
165 vnode_unlock(vp);
166}
167
168
#ifndef __LP64__
/*
 * lock_fsnode
 *
 * Take the kernel funnel (recording the previous funnel state in
 * *funnel_state when non-NULL) and, for non-threadsafe filesystems,
 * acquire or recursively re-enter the per-fs node lock.
 *
 * Returns ENOENT if the vnode started terminating while we blocked on
 * the node lock; 0 otherwise.
 */
int
lock_fsnode(vnode_t vp, int *funnel_state)
{
	if (funnel_state)
		*funnel_state = thread_funnel_set(kernel_flock, TRUE);

	if (vp->v_unsafefs) {
		if (vp->v_unsafefs->fsnodeowner == current_thread()) {
			/* recursive entry: just bump the hold count */
			vp->v_unsafefs->fsnode_count++;
		} else {
			lck_mtx_lock(&vp->v_unsafefs->fsnodelock);

			if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) {
				/* vnode is going away; undo everything and bail */
				lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);

				if (funnel_state)
					(void) thread_funnel_set(kernel_flock, *funnel_state);
				return (ENOENT);
			}
			vp->v_unsafefs->fsnodeowner = current_thread();
			vp->v_unsafefs->fsnode_count = 1;
		}
	}
	return (0);
}


/*
 * unlock_fsnode
 *
 * Drop one level of the per-fs node lock (releasing the mutex when the
 * hold count reaches zero) and restore the caller's saved funnel state.
 */
void
unlock_fsnode(vnode_t vp, int *funnel_state)
{
	if (vp->v_unsafefs) {
		if (--vp->v_unsafefs->fsnode_count == 0) {
			vp->v_unsafefs->fsnodeowner = NULL;
			lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
		}
	}
	if (funnel_state)
		(void) thread_funnel_set(kernel_flock, *funnel_state);
}
#endif /* __LP64__ */
91447636
A
210
211
212
213/* ====================================================================== */
214/* ************ EXTERNAL KERNEL APIS ********************************** */
215/* ====================================================================== */
216
217/*
b0d623f7 218 * implementations of exported VFS operations
91447636
A
219 */
220int
2d21ac55 221VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
91447636
A
222{
223 int error;
b0d623f7 224#ifndef __LP64__
91447636
A
225 int thread_safe;
226 int funnel_state = 0;
b0d623f7 227#endif /* __LP64__ */
91447636
A
228
229 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
230 return(ENOTSUP);
231
b0d623f7
A
232#ifndef __LP64__
233 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
91447636
A
234 if (!thread_safe) {
235 funnel_state = thread_funnel_set(kernel_flock, TRUE);
236 }
b0d623f7 237#endif /* __LP64__ */
91447636 238
2d21ac55 239 if (vfs_context_is64bit(ctx)) {
91447636 240 if (vfs_64bitready(mp)) {
2d21ac55 241 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
91447636
A
242 }
243 else {
244 error = ENOTSUP;
245 }
246 }
247 else {
2d21ac55 248 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
91447636
A
249 }
250
b0d623f7 251#ifndef __LP64__
91447636
A
252 if (!thread_safe) {
253 (void) thread_funnel_set(kernel_flock, funnel_state);
254 }
b0d623f7
A
255#endif /* __LP64__ */
256
91447636
A
257 return (error);
258}
259
260int
2d21ac55 261VFS_START(mount_t mp, int flags, vfs_context_t ctx)
91447636
A
262{
263 int error;
b0d623f7 264#ifndef __LP64__
91447636
A
265 int thread_safe;
266 int funnel_state = 0;
b0d623f7 267#endif /* __LP64__ */
91447636
A
268
269 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
270 return(ENOTSUP);
271
b0d623f7
A
272#ifndef __LP64__
273 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
91447636
A
274
275 if (!thread_safe) {
276 funnel_state = thread_funnel_set(kernel_flock, TRUE);
277 }
b0d623f7
A
278#endif /* __LP64__ */
279
2d21ac55 280 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
b0d623f7
A
281
282#ifndef __LP64__
91447636
A
283 if (!thread_safe) {
284 (void) thread_funnel_set(kernel_flock, funnel_state);
285 }
b0d623f7
A
286#endif /* __LP64__ */
287
91447636
A
288 return (error);
289}
290
291int
2d21ac55 292VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
91447636
A
293{
294 int error;
b0d623f7 295#ifndef __LP64__
91447636
A
296 int thread_safe;
297 int funnel_state = 0;
b0d623f7 298#endif /* __LP64__ */
91447636
A
299
300 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
301 return(ENOTSUP);
302
b0d623f7
A
303#ifndef __LP64__
304 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
91447636
A
305
306 if (!thread_safe) {
307 funnel_state = thread_funnel_set(kernel_flock, TRUE);
308 }
b0d623f7
A
309#endif /* __LP64__ */
310
2d21ac55 311 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
b0d623f7
A
312
313#ifndef __LP64__
91447636
A
314 if (!thread_safe) {
315 (void) thread_funnel_set(kernel_flock, funnel_state);
316 }
b0d623f7
A
317#endif /* __LP64__ */
318
91447636
A
319 return (error);
320}
321
2d21ac55
A
322/*
323 * Returns: 0 Success
324 * ENOTSUP Not supported
325 * <vfs_root>:ENOENT
326 * <vfs_root>:???
327 *
328 * Note: The return codes from the underlying VFS's root routine can't
329 * be fully enumerated here, since third party VFS authors may not
330 * limit their error returns to the ones documented here, even
331 * though this may result in some programs functioning incorrectly.
332 *
333 * The return codes documented above are those which may currently
334 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
 335 * for a call to hfs_vget on the volume mount point, not including
336 * additional error codes which may be propagated from underlying
337 * routines called by hfs_vget.
338 */
91447636 339int
2d21ac55 340VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
91447636
A
341{
342 int error;
b0d623f7 343#ifndef __LP64__
91447636
A
344 int thread_safe;
345 int funnel_state = 0;
b0d623f7 346#endif /* __LP64__ */
91447636
A
347
348 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
349 return(ENOTSUP);
350
2d21ac55
A
351 if (ctx == NULL) {
352 ctx = vfs_context_current();
91447636 353 }
91447636 354
b0d623f7
A
355#ifndef __LP64__
356 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
91447636
A
357 if (!thread_safe) {
358 funnel_state = thread_funnel_set(kernel_flock, TRUE);
359 }
b0d623f7
A
360#endif /* __LP64__ */
361
2d21ac55 362 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
b0d623f7
A
363
364#ifndef __LP64__
91447636
A
365 if (!thread_safe) {
366 (void) thread_funnel_set(kernel_flock, funnel_state);
367 }
b0d623f7
A
368#endif /* __LP64__ */
369
91447636
A
370 return (error);
371}
372
373int
2d21ac55 374VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
91447636
A
375{
376 int error;
b0d623f7 377#ifndef __LP64__
91447636
A
378 int thread_safe;
379 int funnel_state = 0;
b0d623f7 380#endif /* __LP64__ */
91447636
A
381
382 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
383 return(ENOTSUP);
384
b0d623f7
A
385#ifndef __LP64__
386 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
91447636
A
387 if (!thread_safe) {
388 funnel_state = thread_funnel_set(kernel_flock, TRUE);
389 }
b0d623f7
A
390#endif /* __LP64__ */
391
2d21ac55 392 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
b0d623f7
A
393
394#ifndef __LP64__
91447636
A
395 if (!thread_safe) {
396 (void) thread_funnel_set(kernel_flock, funnel_state);
397 }
b0d623f7
A
398#endif /* __LP64__ */
399
91447636
A
400 return (error);
401}
402
403int
2d21ac55 404VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
91447636
A
405{
406 int error;
b0d623f7 407#ifndef __LP64__
91447636
A
408 int thread_safe;
409 int funnel_state = 0;
b0d623f7 410#endif /* __LP64__ */
91447636
A
411
412 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
413 return(ENOTSUP);
414
2d21ac55
A
415 if (ctx == NULL) {
416 ctx = vfs_context_current();
91447636 417 }
2d21ac55 418
b0d623f7
A
419#ifndef __LP64__
420 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
91447636
A
421 if (!thread_safe) {
422 funnel_state = thread_funnel_set(kernel_flock, TRUE);
423 }
b0d623f7
A
424#endif /* __LP64__ */
425
2d21ac55 426 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
b0d623f7
A
427
428#ifndef __LP64__
91447636
A
429 if (!thread_safe) {
430 (void) thread_funnel_set(kernel_flock, funnel_state);
431 }
b0d623f7
A
432#endif /* __LP64__ */
433
91447636
A
434 return(error);
435}
436
437int
2d21ac55 438VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
91447636
A
439{
440 int error;
b0d623f7 441#ifndef __LP64__
91447636
A
442 int thread_safe;
443 int funnel_state = 0;
b0d623f7 444#endif /* __LP64__ */
91447636
A
445
446 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
447 return(ENOTSUP);
448
2d21ac55
A
449 if (ctx == NULL) {
450 ctx = vfs_context_current();
91447636 451 }
2d21ac55 452
b0d623f7
A
453#ifndef __LP64__
454 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
91447636
A
455 if (!thread_safe) {
456 funnel_state = thread_funnel_set(kernel_flock, TRUE);
457 }
b0d623f7
A
458#endif /* __LP64__ */
459
2d21ac55 460 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
b0d623f7
A
461
462#ifndef __LP64__
91447636
A
463 if (!thread_safe) {
464 (void) thread_funnel_set(kernel_flock, funnel_state);
465 }
b0d623f7
A
466#endif /* __LP64__ */
467
91447636
A
468 return(error);
469}
470
471int
2d21ac55 472VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
91447636
A
473{
474 int error;
b0d623f7 475#ifndef __LP64__
91447636
A
476 int thread_safe;
477 int funnel_state = 0;
b0d623f7 478#endif /* __LP64__ */
91447636
A
479
480 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
481 return(ENOTSUP);
482
2d21ac55
A
483 if (ctx == NULL) {
484 ctx = vfs_context_current();
91447636 485 }
91447636 486
b0d623f7
A
487#ifndef __LP64__
488 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
91447636
A
489 if (!thread_safe) {
490 funnel_state = thread_funnel_set(kernel_flock, TRUE);
491 }
b0d623f7
A
492#endif /* __LP64__ */
493
2d21ac55 494 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
b0d623f7
A
495
496#ifndef __LP64__
91447636
A
497 if (!thread_safe) {
498 (void) thread_funnel_set(kernel_flock, funnel_state);
499 }
b0d623f7
A
500#endif /* __LP64__ */
501
91447636
A
502 return(error);
503}
504
505int
2d21ac55 506VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
91447636
A
507{
508 int error;
b0d623f7 509#ifndef __LP64__
91447636
A
510 int thread_safe;
511 int funnel_state = 0;
b0d623f7 512#endif /* __LP64__ */
91447636
A
513
514 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
515 return(ENOTSUP);
516
2d21ac55
A
517 if (ctx == NULL) {
518 ctx = vfs_context_current();
91447636 519 }
91447636 520
b0d623f7
A
521#ifndef __LP64__
522 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
91447636
A
523 if (!thread_safe) {
524 funnel_state = thread_funnel_set(kernel_flock, TRUE);
525 }
b0d623f7
A
526#endif /* __LP64__ */
527
2d21ac55 528 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
b0d623f7
A
529
530#ifndef __LP64__
91447636
A
531 if (!thread_safe) {
532 (void) thread_funnel_set(kernel_flock, funnel_state);
533 }
b0d623f7
A
534#endif /* __LP64__ */
535
91447636
A
536 return(error);
537}
538
539int
2d21ac55 540VFS_FHTOVP(mount_t mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t ctx)
91447636
A
541{
542 int error;
b0d623f7 543#ifndef __LP64__
91447636
A
544 int thread_safe;
545 int funnel_state = 0;
b0d623f7 546#endif /* __LP64__ */
91447636
A
547
548 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
549 return(ENOTSUP);
550
2d21ac55
A
551 if (ctx == NULL) {
552 ctx = vfs_context_current();
91447636 553 }
91447636 554
b0d623f7
A
555#ifndef __LP64__
556 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
91447636
A
557 if (!thread_safe) {
558 funnel_state = thread_funnel_set(kernel_flock, TRUE);
559 }
b0d623f7
A
560#endif /* __LP64__ */
561
2d21ac55 562 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
b0d623f7
A
563
564#ifndef __LP64__
91447636
A
565 if (!thread_safe) {
566 (void) thread_funnel_set(kernel_flock, funnel_state);
567 }
b0d623f7
A
568#endif /* __LP64__ */
569
91447636
A
570 return(error);
571}
572
573int
2d21ac55 574VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t ctx)
91447636
A
575{
576 int error;
b0d623f7 577#ifndef __LP64__
91447636
A
578 int thread_safe;
579 int funnel_state = 0;
b0d623f7 580#endif /* __LP64__ */
91447636
A
581
582 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
583 return(ENOTSUP);
584
2d21ac55
A
585 if (ctx == NULL) {
586 ctx = vfs_context_current();
91447636 587 }
91447636 588
b0d623f7
A
589#ifndef __LP64__
590 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
591 if (!thread_safe) {
592 funnel_state = thread_funnel_set(kernel_flock, TRUE);
593 }
b0d623f7
A
594#endif /* __LP64__ */
595
2d21ac55 596 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
b0d623f7
A
597
598#ifndef __LP64__
91447636
A
599 if (!thread_safe) {
600 (void) thread_funnel_set(kernel_flock, funnel_state);
601 }
b0d623f7
A
602#endif /* __LP64__ */
603
91447636
A
604 return(error);
605}
606
607
608/* returns a copy of vfs type name for the mount_t */
609void
610vfs_name(mount_t mp, char * buffer)
611{
612 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
613}
614
615/* returns vfs type number for the mount_t */
616int
617vfs_typenum(mount_t mp)
618{
619 return(mp->mnt_vtable->vfc_typenum);
620}
621
b0d623f7
A
622/* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
623void*
624vfs_mntlabel(mount_t mp)
625{
626 return (void*)mp->mnt_mntlabel;
627}
91447636
A
628
629/* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
630uint64_t
631vfs_flags(mount_t mp)
632{
633 return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
634}
635
636/* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
637void
638vfs_setflags(mount_t mp, uint64_t flags)
639{
640 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
641
2d21ac55 642 mount_lock(mp);
91447636 643 mp->mnt_flag |= lflags;
2d21ac55 644 mount_unlock(mp);
91447636
A
645}
646
647/* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
648void
649vfs_clearflags(mount_t mp , uint64_t flags)
650{
651 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
652
2d21ac55 653 mount_lock(mp);
91447636 654 mp->mnt_flag &= ~lflags;
2d21ac55 655 mount_unlock(mp);
91447636
A
656}
657
658/* Is the mount_t ronly and upgrade read/write requested? */
659int
660vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
661{
662 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
663}
664
665
666/* Is the mount_t mounted ronly */
667int
668vfs_isrdonly(mount_t mp)
669{
670 return (mp->mnt_flag & MNT_RDONLY);
671}
672
673/* Is the mount_t mounted for filesystem synchronous writes? */
674int
675vfs_issynchronous(mount_t mp)
676{
677 return (mp->mnt_flag & MNT_SYNCHRONOUS);
678}
679
680/* Is the mount_t mounted read/write? */
681int
682vfs_isrdwr(mount_t mp)
683{
684 return ((mp->mnt_flag & MNT_RDONLY) == 0);
685}
686
687
688/* Is mount_t marked for update (ie MNT_UPDATE) */
689int
690vfs_isupdate(mount_t mp)
691{
692 return (mp->mnt_flag & MNT_UPDATE);
693}
694
695
696/* Is mount_t marked for reload (ie MNT_RELOAD) */
697int
698vfs_isreload(mount_t mp)
699{
700 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
701}
702
b0d623f7 703/* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
91447636
A
704int
705vfs_isforce(mount_t mp)
706{
2d21ac55 707 if ((mp->mnt_lflag & MNT_LFORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT))
91447636
A
708 return(1);
709 else
710 return(0);
711}
712
b0d623f7
A
713int
714vfs_isunmount(mount_t mp)
715{
716 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
717 return 1;
718 } else {
719 return 0;
720 }
721}
722
91447636
A
723int
724vfs_64bitready(mount_t mp)
725{
b0d623f7 726 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
91447636
A
727 return(1);
728 else
729 return(0);
730}
731
2d21ac55
A
732
733int
734vfs_authcache_ttl(mount_t mp)
735{
736 if ( (mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) )
737 return (mp->mnt_authcache_ttl);
738 else
739 return (CACHED_RIGHT_INFINITE_TTL);
740}
741
742void
743vfs_setauthcache_ttl(mount_t mp, int ttl)
744{
745 mount_lock(mp);
746 mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
747 mp->mnt_authcache_ttl = ttl;
748 mount_unlock(mp);
749}
750
751void
752vfs_clearauthcache_ttl(mount_t mp)
753{
754 mount_lock(mp);
755 mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
756 /*
757 * back to the default TTL value in case
758 * MNTK_AUTH_OPAQUE is set on this mount
759 */
760 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
761 mount_unlock(mp);
762}
763
764void
765vfs_markdependency(mount_t mp)
766{
767 proc_t p = current_proc();
768 mount_lock(mp);
769 mp->mnt_dependent_process = p;
770 mp->mnt_dependent_pid = proc_pid(p);
771 mount_unlock(mp);
772}
773
774
91447636
A
775int
776vfs_authopaque(mount_t mp)
777{
778 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
779 return(1);
780 else
781 return(0);
782}
783
784int
785vfs_authopaqueaccess(mount_t mp)
786{
787 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
788 return(1);
789 else
790 return(0);
791}
792
793void
794vfs_setauthopaque(mount_t mp)
795{
796 mount_lock(mp);
797 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
798 mount_unlock(mp);
799}
800
801void
802vfs_setauthopaqueaccess(mount_t mp)
803{
804 mount_lock(mp);
805 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
806 mount_unlock(mp);
807}
808
809void
810vfs_clearauthopaque(mount_t mp)
811{
812 mount_lock(mp);
813 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
814 mount_unlock(mp);
815}
816
817void
818vfs_clearauthopaqueaccess(mount_t mp)
819{
820 mount_lock(mp);
821 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
822 mount_unlock(mp);
823}
824
825void
826vfs_setextendedsecurity(mount_t mp)
827{
828 mount_lock(mp);
829 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
830 mount_unlock(mp);
831}
832
833void
834vfs_clearextendedsecurity(mount_t mp)
835{
836 mount_lock(mp);
837 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
838 mount_unlock(mp);
839}
840
841int
842vfs_extendedsecurity(mount_t mp)
843{
844 return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
845}
846
847/* returns the max size of short symlink in this mount_t */
848uint32_t
849vfs_maxsymlen(mount_t mp)
850{
851 return(mp->mnt_maxsymlinklen);
852}
853
854/* set max size of short symlink on mount_t */
855void
856vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
857{
858 mp->mnt_maxsymlinklen = symlen;
859}
860
861/* return a pointer to the RO vfs_statfs associated with mount_t */
862struct vfsstatfs *
863vfs_statfs(mount_t mp)
864{
865 return(&mp->mnt_vfsstat);
866}
867
868int
869vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
870{
871 int error;
91447636
A
872
873 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
874 return(error);
875
876 /*
877 * If we have a filesystem create time, use it to default some others.
878 */
879 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
880 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
881 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
882 }
883
884 return(0);
885}
886
887int
888vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
889{
890 int error;
891
892 if (vfs_isrdonly(mp))
893 return EROFS;
894
895 error = VFS_SETATTR(mp, vfa, ctx);
896
897 /*
898 * If we had alternate ways of setting vfs attributes, we'd
899 * fall back here.
900 */
901
902 return error;
903}
904
905/* return the private data handle stored in mount_t */
906void *
907vfs_fsprivate(mount_t mp)
908{
909 return(mp->mnt_data);
910}
911
912/* set the private data handle in mount_t */
913void
914vfs_setfsprivate(mount_t mp, void *mntdata)
915{
2d21ac55 916 mount_lock(mp);
91447636 917 mp->mnt_data = mntdata;
2d21ac55 918 mount_unlock(mp);
91447636
A
919}
920
921
922/*
923 * return the block size of the underlying
924 * device associated with mount_t
925 */
926int
927vfs_devblocksize(mount_t mp) {
928
929 return(mp->mnt_devblocksize);
930}
931
b0d623f7
A
932/*
933 * Returns vnode with an iocount that must be released with vnode_put()
934 */
935vnode_t
936vfs_vnodecovered(mount_t mp)
937{
938 vnode_t vp = mp->mnt_vnodecovered;
939 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
940 return NULL;
941 } else {
942 return vp;
943 }
944}
91447636
A
945
946/*
947 * return the io attributes associated with mount_t
948 */
949void
950vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
951{
952 if (mp == NULL) {
953 ioattrp->io_maxreadcnt = MAXPHYS;
954 ioattrp->io_maxwritecnt = MAXPHYS;
955 ioattrp->io_segreadcnt = 32;
956 ioattrp->io_segwritecnt = 32;
957 ioattrp->io_maxsegreadsize = MAXPHYS;
958 ioattrp->io_maxsegwritesize = MAXPHYS;
959 ioattrp->io_devblocksize = DEV_BSIZE;
2d21ac55 960 ioattrp->io_flags = 0;
91447636
A
961 } else {
962 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
963 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
964 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
965 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
966 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
967 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
968 ioattrp->io_devblocksize = mp->mnt_devblocksize;
2d21ac55 969 ioattrp->io_flags = mp->mnt_ioflags;
91447636 970 }
2d21ac55
A
971 ioattrp->io_reserved[0] = NULL;
972 ioattrp->io_reserved[1] = NULL;
91447636
A
973}
974
975
976/*
977 * set the IO attributes associated with mount_t
978 */
979void
980vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
981{
982 if (mp == NULL)
983 return;
984 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
985 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
986 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
987 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
988 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
989 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
990 mp->mnt_devblocksize = ioattrp->io_devblocksize;
2d21ac55 991 mp->mnt_ioflags = ioattrp->io_flags;
91447636
A
992}
993
994/*
995 * Add a new filesystem into the kernel specified in passed in
996 * vfstable structure. It fills in the vnode
997 * dispatch vector that is to be passed to when vnodes are created.
998 * It returns a handle which is to be used to when the FS is to be removed
999 */
1000typedef int (*PFI)(void *);
1001extern int vfs_opv_numops;
1002errno_t
1003vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
1004{
1005#pragma unused(data)
1006 struct vfstable *newvfstbl = NULL;
1007 int i,j;
1008 int (***opv_desc_vector_p)(void *);
1009 int (**opv_desc_vector)(void *);
1010 struct vnodeopv_entry_desc *opve_descp;
1011 int desccount;
1012 int descsize;
1013 PFI *descptr;
1014
1015 /*
1016 * This routine is responsible for all the initialization that would
1017 * ordinarily be done as part of the system startup;
1018 */
1019
1020 if (vfe == (struct vfs_fsentry *)0)
1021 return(EINVAL);
1022
1023 desccount = vfe->vfe_vopcnt;
b0d623f7 1024 if ((desccount <=0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
91447636
A
1025 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
1026 return(EINVAL);
1027
b0d623f7
A
1028#ifdef __LP64__
1029 /* Non-threadsafe filesystems are not supported for K64 */
1030 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
1031 return (EINVAL);
1032 }
1033#endif /* __LP64__ */
91447636
A
1034
1035 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
1036 M_WAITOK);
1037 bzero(newvfstbl, sizeof(struct vfstable));
1038 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
1039 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
1040 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
1041 newvfstbl->vfc_typenum = maxvfsconf++;
1042 else
1043 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
1044
1045 newvfstbl->vfc_refcount = 0;
1046 newvfstbl->vfc_flags = 0;
1047 newvfstbl->vfc_mountroot = NULL;
1048 newvfstbl->vfc_next = NULL;
91447636
A
1049 newvfstbl->vfc_vfsflags = 0;
1050 if (vfe->vfe_flags & VFS_TBL64BITREADY)
b0d623f7
A
1051 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
1052 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2)
1053 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
1054 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2)
1055 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
1056#ifndef __LP64__
91447636 1057 if (vfe->vfe_flags & VFS_TBLTHREADSAFE)
b0d623f7 1058 newvfstbl->vfc_vfsflags |= VFC_VFSTHREADSAFE;
91447636 1059 if (vfe->vfe_flags & VFS_TBLFSNODELOCK)
b0d623f7
A
1060 newvfstbl->vfc_vfsflags |= VFC_VFSTHREADSAFE;
1061#endif /* __LP64__ */
91447636
A
1062 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
1063 newvfstbl->vfc_flags |= MNT_LOCAL;
2d21ac55 1064 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0)
91447636
A
1065 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
1066 else
1067 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
2d21ac55
A
1068
1069 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR)
1070 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
1071 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT)
1072 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
1073 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED)
1074 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
1075 if (vfe->vfe_flags & VFS_TBLNOMACLABEL)
1076 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
91447636
A
1077
1078 /*
1079 * Allocate and init the vectors.
1080 * Also handle backwards compatibility.
1081 *
1082 * We allocate one large block to hold all <desccount>
1083 * vnode operation vectors stored contiguously.
1084 */
1085 /* XXX - shouldn't be M_TEMP */
1086
1087 descsize = desccount * vfs_opv_numops * sizeof(PFI);
1088 MALLOC(descptr, PFI *, descsize,
1089 M_TEMP, M_WAITOK);
1090 bzero(descptr, descsize);
1091
1092 newvfstbl->vfc_descptr = descptr;
1093 newvfstbl->vfc_descsize = descsize;
1094
1095
1096 for (i= 0; i< desccount; i++ ) {
1097 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1098 /*
1099 * Fill in the caller's pointer to the start of the i'th vector.
1100 * They'll need to supply it when calling vnode_create.
1101 */
1102 opv_desc_vector = descptr + i * vfs_opv_numops;
1103 *opv_desc_vector_p = opv_desc_vector;
1104
1105 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
1106 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
1107
1108 /*
1109 * Sanity check: is this operation listed
1110 * in the list of operations? We check this
b0d623f7 1111 * by seeing if its offset is zero. Since
91447636
A
1112 * the default routine should always be listed
1113 * first, it should be the only one with a zero
1114 * offset. Any other operation with a zero
1115 * offset is probably not listed in
1116 * vfs_op_descs, and so is probably an error.
1117 *
1118 * A panic here means the layer programmer
1119 * has committed the all-too common bug
1120 * of adding a new operation to the layer's
1121 * list of vnode operations but
1122 * not adding the operation to the system-wide
1123 * list of supported operations.
1124 */
1125 if (opve_descp->opve_op->vdesc_offset == 0 &&
1126 opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
1127 printf("vfs_fsadd: operation %s not listed in %s.\n",
1128 opve_descp->opve_op->vdesc_name,
1129 "vfs_op_descs");
1130 panic("vfs_fsadd: bad operation");
1131 }
1132 /*
1133 * Fill in this entry.
1134 */
1135 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
1136 opve_descp->opve_impl;
1137 }
1138
1139
1140 /*
1141 * Finally, go back and replace unfilled routines
1142 * with their default. (Sigh, an O(n^3) algorithm. I
1143 * could make it better, but that'd be work, and n is small.)
1144 */
1145 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1146
1147 /*
1148 * Force every operations vector to have a default routine.
1149 */
1150 opv_desc_vector = *opv_desc_vector_p;
1151 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
1152 panic("vfs_fsadd: operation vector without default routine.");
1153 for (j = 0; j < vfs_opv_numops; j++)
1154 if (opv_desc_vector[j] == NULL)
1155 opv_desc_vector[j] =
1156 opv_desc_vector[VOFFSET(vnop_default)];
1157
1158 } /* end of each vnodeopv_desc parsing */
1159
1160
1161
1162 *handle = vfstable_add(newvfstbl);
1163
1164 if (newvfstbl->vfc_typenum <= maxvfsconf )
1165 maxvfsconf = newvfstbl->vfc_typenum + 1;
91447636 1166
b0d623f7
A
1167 if (newvfstbl->vfc_vfsops->vfs_init) {
1168 struct vfsconf vfsc;
1169 bzero(&vfsc, sizeof(struct vfsconf));
1170 vfsc.vfc_reserved1 = 0;
1171 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1172 vfsc.vfc_typenum = (*handle)->vfc_typenum;
1173 vfsc.vfc_refcount = (*handle)->vfc_refcount;
1174 vfsc.vfc_flags = (*handle)->vfc_flags;
1175 vfsc.vfc_reserved2 = 0;
1176 vfsc.vfc_reserved3 = 0;
1177
1178 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1179 }
91447636
A
1180
1181 FREE(newvfstbl, M_TEMP);
1182
1183 return(0);
1184}
1185
1186/*
1187 * Removes the filesystem from kernel.
1188 * The argument passed in is the handle that was given when
1189 * file system was added
1190 */
errno_t
vfs_fsremove(vfstable_t handle)
{
	struct vfstable * vfstbl = (struct vfstable *)handle;
	void *old_desc = NULL;
	errno_t err;

	/* Preflight check for any mounts */
	mount_list_lock();
	if ( vfstbl->vfc_refcount != 0 ) {
		/* filesystem still mounted somewhere; refuse to unload */
		mount_list_unlock();
		return EBUSY;
	}

	/*
	 * save the old descriptor; the free cannot occur unconditionally,
	 * since vfstable_del() may fail.
	 */
	if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
		old_desc = vfstbl->vfc_descptr;
	}
	/* vfstable_del() must run under mount_list_lock (taken above) */
	err = vfstable_del(vfstbl);

	mount_list_unlock();

	/* free the descriptor if the delete was successful */
	if (err == 0 && old_desc) {
		FREE(old_desc, M_TEMP);
	}

	return(err);
}
1223
91447636 1224int
2d21ac55 1225vfs_context_pid(vfs_context_t ctx)
91447636 1226{
2d21ac55 1227 return (proc_pid(vfs_context_proc(ctx)));
91447636
A
1228}
1229
1230int
2d21ac55 1231vfs_context_suser(vfs_context_t ctx)
91447636 1232{
2d21ac55 1233 return (suser(ctx->vc_ucred, NULL));
91447636 1234}
2d21ac55
A
1235
1236/*
b0d623f7
A
1237 * Return bit field of signals posted to all threads in the context's process.
1238 *
2d21ac55
A
1239 * XXX Signals should be tied to threads, not processes, for most uses of this
1240 * XXX call.
1241 */
91447636 1242int
2d21ac55 1243vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
91447636 1244{
2d21ac55
A
1245 proc_t p = vfs_context_proc(ctx);
1246 if (p)
1247 return(proc_pendingsignals(p, mask));
91447636
A
1248 return(0);
1249}
1250
1251int
2d21ac55 1252vfs_context_is64bit(vfs_context_t ctx)
91447636 1253{
2d21ac55
A
1254 proc_t proc = vfs_context_proc(ctx);
1255
1256 if (proc)
1257 return(proc_is64bit(proc));
91447636
A
1258 return(0);
1259}
1260
2d21ac55
A
1261
1262/*
1263 * vfs_context_proc
1264 *
1265 * Description: Given a vfs_context_t, return the proc_t associated with it.
1266 *
1267 * Parameters: vfs_context_t The context to use
1268 *
1269 * Returns: proc_t The process for this context
1270 *
1271 * Notes: This function will return the current_proc() if any of the
1272 * following conditions are true:
1273 *
1274 * o The supplied context pointer is NULL
1275 * o There is no Mach thread associated with the context
1276 * o There is no Mach task associated with the Mach thread
1277 * o There is no proc_t associated with the Mach task
1278 * o The proc_t has no per process open file table
1279 * o The proc_t is post-vfork()
1280 *
1281 * This causes this function to return a value matching as
1282 * closely as possible the previous behaviour, while at the
1283 * same time avoiding the task lending that results from vfork()
1284 */
91447636 1285proc_t
2d21ac55
A
1286vfs_context_proc(vfs_context_t ctx)
1287{
1288 proc_t proc = NULL;
1289
1290 if (ctx != NULL && ctx->vc_thread != NULL)
1291 proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1292 if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
1293 proc = NULL;
1294
1295 return(proc == NULL ? current_proc() : proc);
1296}
1297
1298/*
1299 * vfs_context_get_special_port
1300 *
1301 * Description: Return the requested special port from the task associated
1302 * with the given context.
1303 *
1304 * Parameters: vfs_context_t The context to use
1305 * int Index of special port
1306 * ipc_port_t * Pointer to returned port
1307 *
1308 * Returns: kern_return_t see task_get_special_port()
1309 */
1310kern_return_t
1311vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1312{
1313 task_t task = NULL;
1314
1315 if (ctx != NULL && ctx->vc_thread != NULL)
1316 task = get_threadtask(ctx->vc_thread);
1317
1318 return task_get_special_port(task, which, portp);
1319}
1320
1321/*
1322 * vfs_context_set_special_port
1323 *
1324 * Description: Set the requested special port in the task associated
1325 * with the given context.
1326 *
1327 * Parameters: vfs_context_t The context to use
1328 * int Index of special port
1329 * ipc_port_t New special port
1330 *
1331 * Returns: kern_return_t see task_set_special_port()
1332 */
1333kern_return_t
1334vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1335{
1336 task_t task = NULL;
1337
1338 if (ctx != NULL && ctx->vc_thread != NULL)
1339 task = get_threadtask(ctx->vc_thread);
1340
1341 return task_set_special_port(task, which, port);
1342}
1343
1344/*
1345 * vfs_context_thread
1346 *
1347 * Description: Return the Mach thread associated with a vfs_context_t
1348 *
1349 * Parameters: vfs_context_t The context to use
1350 *
1351 * Returns: thread_t The thread for this context, or
1352 * NULL, if there is not one.
1353 *
1354 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1355 * as a result of a static vfs_context_t declaration in a function
1356 * and will result in this function returning NULL.
1357 *
1358 * This is intentional; this function should NOT return the
1359 * current_thread() in this case.
1360 */
thread_t
vfs_context_thread(vfs_context_t ctx)
{
	/* may legitimately be NULL for statically-declared contexts */
	return(ctx->vc_thread);
}
1366
1367
1368/*
1369 * vfs_context_cwd
1370 *
1371 * Description: Returns a reference on the vnode for the current working
1372 * directory for the supplied context
1373 *
1374 * Parameters: vfs_context_t The context to use
1375 *
1376 * Returns: vnode_t The current working directory
1377 * for this context
1378 *
1379 * Notes: The function first attempts to obtain the current directory
1380 * from the thread, and if it is not present there, falls back
1381 * to obtaining it from the process instead. If it can't be
1382 * obtained from either place, we return NULLVP.
1383 */
1384vnode_t
1385vfs_context_cwd(vfs_context_t ctx)
1386{
1387 vnode_t cwd = NULLVP;
1388
1389 if(ctx != NULL && ctx->vc_thread != NULL) {
1390 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1391 proc_t proc;
1392
1393 /*
1394 * Get the cwd from the thread; if there isn't one, get it
1395 * from the process, instead.
1396 */
1397 if ((cwd = uth->uu_cdir) == NULLVP &&
1398 (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1399 proc->p_fd != NULL)
1400 cwd = proc->p_fd->fd_cdir;
1401 }
1402
1403 return(cwd);
91447636
A
1404}
1405
b0d623f7
A
1406/*
1407 * vfs_context_create
1408 *
1409 * Description: Allocate and initialize a new context.
1410 *
1411 * Parameters: vfs_context_t: Context to copy, or NULL for new
1412 *
1413 * Returns: Pointer to new context
1414 *
1415 * Notes: Copy cred and thread from argument, if available; else
1416 * initialize with current thread and new cred. Returns
1417 * with a reference held on the credential.
1418 */
91447636 1419vfs_context_t
2d21ac55 1420vfs_context_create(vfs_context_t ctx)
91447636 1421{
2d21ac55 1422 vfs_context_t newcontext;
91447636 1423
2d21ac55 1424 newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context));
91447636
A
1425
1426 if (newcontext) {
0c530ab8 1427 kauth_cred_t safecred;
2d21ac55
A
1428 if (ctx) {
1429 newcontext->vc_thread = ctx->vc_thread;
1430 safecred = ctx->vc_ucred;
91447636 1431 } else {
2d21ac55 1432 newcontext->vc_thread = current_thread();
0c530ab8 1433 safecred = kauth_cred_get();
91447636 1434 }
0c530ab8
A
1435 if (IS_VALID_CRED(safecred))
1436 kauth_cred_ref(safecred);
1437 newcontext->vc_ucred = safecred;
1438 return(newcontext);
91447636 1439 }
2d21ac55
A
1440 return(NULL);
1441}
1442
1443
1444vfs_context_t
1445vfs_context_current(void)
1446{
1447 vfs_context_t ctx = NULL;
1448 volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
1449
1450 if (ut != NULL ) {
1451 if (ut->uu_context.vc_ucred != NULL) {
1452 ctx = &ut->uu_context;
1453 }
1454 }
1455
1456 return(ctx == NULL ? vfs_context_kernel() : ctx);
1457}
1458
1459
1460/*
1461 * XXX Do not ask
1462 *
1463 * Dangerous hack - adopt the first kernel thread as the current thread, to
1464 * get to the vfs_context_t in the uthread associated with a kernel thread.
1465 * This is used by UDF to make the call into IOCDMediaBSDClient,
1466 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1467 * ioctl() is being called from kernel or user space (and all this because
1468 * we do not pass threads into our ioctl()'s, instead of processes).
1469 *
1470 * This is also used by imageboot_setup(), called early from bsd_init() after
1471 * kernproc has been given a credential.
1472 *
1473 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1474 * of many Mach headers to do the reference directly rather than indirectly;
1475 * we will need to forego this convenience when we reture proc_thread().
1476 */
static struct vfs_context kerncontext;
vfs_context_t
vfs_context_kernel(void)
{
	/* lazily bind the kernel context to kernproc's credential and thread */
	if (kerncontext.vc_ucred == NOCRED)
		kerncontext.vc_ucred = kernproc->p_ucred;
	if (kerncontext.vc_thread == NULL)
		kerncontext.vc_thread = proc_thread(kernproc);

	return(&kerncontext);
}
1488
2d21ac55 1489
91447636 1490int
2d21ac55 1491vfs_context_rele(vfs_context_t ctx)
91447636 1492{
2d21ac55
A
1493 if (ctx) {
1494 if (IS_VALID_CRED(ctx->vc_ucred))
1495 kauth_cred_unref(&ctx->vc_ucred);
1496 kfree(ctx, sizeof(struct vfs_context));
0c530ab8 1497 }
91447636
A
1498 return(0);
1499}
1500
1501
b0d623f7 1502kauth_cred_t
2d21ac55 1503vfs_context_ucred(vfs_context_t ctx)
91447636 1504{
2d21ac55 1505 return (ctx->vc_ucred);
91447636
A
1506}
1507
1508/*
1509 * Return true if the context is owned by the superuser.
1510 */
int
vfs_context_issuser(vfs_context_t ctx)
{
	/* credential check only; no process/errno semantics like suser() */
	return(kauth_cred_issuser(vfs_context_ucred(ctx)));
}
1516
b0d623f7
A
1517/*
1518 * Given a context, for all fields of vfs_context_t which
1519 * are not held with a reference, set those fields to the
1520 * values for the current execution context. Currently, this
1521 * just means the vc_thread.
1522 *
1523 * Returns: 0 for success, nonzero for failure
1524 *
1525 * The intended use is:
1526 * 1. vfs_context_create() gets the caller a context
1527 * 2. vfs_context_bind() sets the unrefcounted data
1528 * 3. vfs_context_rele() releases the context
1529 *
1530 */
int
vfs_context_bind(vfs_context_t ctx)
{
	/* vc_thread is not reference-counted; just adopt the calling thread */
	ctx->vc_thread = current_thread();
	return 0;
}
91447636
A
1537
1538/* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1539
1540
1541/*
1542 * Convert between vnode types and inode formats (since POSIX.1
1543 * defines mode word of stat structure in terms of inode formats).
1544 */
1545enum vtype
1546vnode_iftovt(int mode)
1547{
1548 return(iftovt_tab[((mode) & S_IFMT) >> 12]);
1549}
1550
1551int
1552vnode_vttoif(enum vtype indx)
1553{
1554 return(vttoif_tab[(int)(indx)]);
1555}
1556
int
vnode_makeimode(int indx, int mode)
{
	/* merge the format bits derived from the vnode type into 'mode' */
	return ((int)(VTTOIF(indx) | mode));
}
1562
1563
1564/*
1565 * vnode manipulation functions.
1566 */
1567
b0d623f7 1568/* returns system root vnode iocount; It should be released using vnode_put() */
91447636
A
1569vnode_t
1570vfs_rootvnode(void)
1571{
1572 int error;
1573
1574 error = vnode_get(rootvnode);
1575 if (error)
1576 return ((vnode_t)0);
1577 else
1578 return rootvnode;
1579}
1580
1581
uint32_t
vnode_vid(vnode_t vp)
{
	/* generation id; changes when the vnode is recycled */
	return ((uint32_t)(vp->v_id));
}
1587
91447636
A
mount_t
vnode_mount(vnode_t vp)
{
	/* mount point this vnode lives on (borrowed, no reference taken) */
	return (vp->v_mount);
}
1593
91447636
A
1594mount_t
1595vnode_mountedhere(vnode_t vp)
1596{
1597 mount_t mp;
1598
1599 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1600 (mp->mnt_vnodecovered == vp))
1601 return (mp);
1602 else
1603 return (mount_t)NULL;
1604}
1605
1606/* returns vnode type of vnode_t */
enum vtype
vnode_vtype(vnode_t vp)
{
	/* VREG, VDIR, VLNK, ... */
	return (vp->v_type);
}
1612
1613/* returns FS specific node saved in vnode */
void *
vnode_fsnode(vnode_t vp)
{
	/* opaque per-filesystem private data */
	return (vp->v_data);
}
1619
void
vnode_clearfsnode(vnode_t vp)
{
	/* detach the filesystem's private data; does not free it */
	vp->v_data = NULL;
}
1625
dev_t
vnode_specrdev(vnode_t vp)
{
	/* device number; meaningful for special (VBLK/VCHR) vnodes */
	return(vp->v_rdev);
}
1631
1632
1633/* Accessor functions */
1634/* is vnode_t a root vnode */
1635int
1636vnode_isvroot(vnode_t vp)
1637{
1638 return ((vp->v_flag & VROOT)? 1 : 0);
1639}
1640
1641/* is vnode_t a system vnode */
1642int
1643vnode_issystem(vnode_t vp)
1644{
1645 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1646}
1647
2d21ac55
A
1648/* is vnode_t a swap file vnode */
1649int
1650vnode_isswap(vnode_t vp)
1651{
1652 return ((vp->v_flag & VSWAP)? 1 : 0);
1653}
1654
b0d623f7
A
1655/* is vnode_t a tty */
1656int
1657vnode_istty(vnode_t vp)
1658{
1659 return ((vp->v_flag & VISTTY) ? 1 : 0);
1660}
1661
91447636
A
1662/* if vnode_t mount operation in progress */
1663int
1664vnode_ismount(vnode_t vp)
1665{
1666 return ((vp->v_flag & VMOUNT)? 1 : 0);
1667}
1668
1669/* is this vnode under recyle now */
1670int
1671vnode_isrecycled(vnode_t vp)
1672{
1673 int ret;
1674
2d21ac55 1675 vnode_lock_spin(vp);
91447636
A
1676 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1677 vnode_unlock(vp);
1678 return(ret);
1679}
1680
b0d623f7
A
1681/* vnode was created by background task requesting rapid aging
1682 and has not since been referenced by a normal task */
1683int
1684vnode_israge(vnode_t vp)
1685{
1686 return ((vp->v_flag & VRAGE)? 1 : 0);
1687}
1688
91447636
A
1689/* is vnode_t marked to not keep data cached once it's been consumed */
1690int
1691vnode_isnocache(vnode_t vp)
1692{
1693 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1694}
1695
1696/*
1697 * has sequential readahead been disabled on this vnode
1698 */
1699int
1700vnode_isnoreadahead(vnode_t vp)
1701{
1702 return ((vp->v_flag & VRAOFF)? 1 : 0);
1703}
1704
2d21ac55
A
1705int
1706vnode_is_openevt(vnode_t vp)
1707{
1708 return ((vp->v_flag & VOPENEVT)? 1 : 0);
1709}
1710
91447636
A
1711/* is vnode_t a standard one? */
1712int
1713vnode_isstandard(vnode_t vp)
1714{
1715 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1716}
1717
1718/* don't vflush() if SKIPSYSTEM */
1719int
1720vnode_isnoflush(vnode_t vp)
1721{
1722 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1723}
1724
1725/* is vnode_t a regular file */
1726int
1727vnode_isreg(vnode_t vp)
1728{
1729 return ((vp->v_type == VREG)? 1 : 0);
1730}
1731
1732/* is vnode_t a directory? */
1733int
1734vnode_isdir(vnode_t vp)
1735{
1736 return ((vp->v_type == VDIR)? 1 : 0);
1737}
1738
1739/* is vnode_t a symbolic link ? */
1740int
1741vnode_islnk(vnode_t vp)
1742{
1743 return ((vp->v_type == VLNK)? 1 : 0);
1744}
1745
1746/* is vnode_t a fifo ? */
1747int
1748vnode_isfifo(vnode_t vp)
1749{
1750 return ((vp->v_type == VFIFO)? 1 : 0);
1751}
1752
1753/* is vnode_t a block device? */
1754int
1755vnode_isblk(vnode_t vp)
1756{
1757 return ((vp->v_type == VBLK)? 1 : 0);
1758}
1759
b0d623f7
A
1760int
1761vnode_isspec(vnode_t vp)
1762{
1763 return (((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0);
1764}
1765
91447636
A
1766/* is vnode_t a char device? */
1767int
1768vnode_ischr(vnode_t vp)
1769{
1770 return ((vp->v_type == VCHR)? 1 : 0);
1771}
1772
1773/* is vnode_t a socket? */
1774int
1775vnode_issock(vnode_t vp)
1776{
1777 return ((vp->v_type == VSOCK)? 1 : 0);
1778}
1779
b0d623f7
A
1780/* is vnode_t a device with multiple active vnodes referring to it? */
1781int
1782vnode_isaliased(vnode_t vp)
1783{
1784 enum vtype vt = vp->v_type;
1785 if (!((vt == VCHR) || (vt == VBLK))) {
1786 return 0;
1787 } else {
1788 return (vp->v_specflags & SI_ALIASED);
1789 }
1790}
1791
2d21ac55
A
1792/* is vnode_t a named stream? */
int
vnode_isnamedstream(
#if NAMEDSTREAMS
		vnode_t vp
#else
		__unused vnode_t vp
#endif
		)
{
#if NAMEDSTREAMS
	/* flag set on vnodes that back a named stream (e.g. resource fork) */
	return ((vp->v_flag & VISNAMEDSTREAM) ? 1 : 0);
#else
	/* kernel built without named-stream support */
	return (0);
#endif
}
91447636 1808
int
vnode_isshadow(
#if NAMEDSTREAMS
	vnode_t vp
#else
	__unused vnode_t vp
#endif
	)
{
#if NAMEDSTREAMS
	/* shadow files back named streams for filesystems without native support */
	return ((vp->v_flag & VISSHADOW) ? 1 : 0);
#else
	/* kernel built without named-stream support */
	return (0);
#endif
}
1824
b0d623f7
A
1825/* does vnode have associated named stream vnodes ? */
int
vnode_hasnamedstreams(
#if NAMEDSTREAMS
	vnode_t vp
#else
	__unused vnode_t vp
#endif
	)
{
#if NAMEDSTREAMS
	/* VL_HASSTREAMS lives in v_lflag, unlike the per-stream flags above */
	return ((vp->v_lflag & VL_HASSTREAMS) ? 1 : 0);
#else
	/* kernel built without named-stream support */
	return (0);
#endif
}
91447636
A
1841/* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
void
vnode_setnocache(vnode_t vp)
{
	/* v_flag updates are serialized by the vnode lock */
	vnode_lock_spin(vp);
	vp->v_flag |= VNOCACHE_DATA;
	vnode_unlock(vp);
}
1849
void
vnode_clearnocache(vnode_t vp)
{
	/* v_flag updates are serialized by the vnode lock */
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOCACHE_DATA;
	vnode_unlock(vp);
}
1857
2d21ac55
A
void
vnode_set_openevt(vnode_t vp)
{
	/* v_flag updates are serialized by the vnode lock */
	vnode_lock_spin(vp);
	vp->v_flag |= VOPENEVT;
	vnode_unlock(vp);
}
1865
void
vnode_clear_openevt(vnode_t vp)
{
	/* v_flag updates are serialized by the vnode lock */
	vnode_lock_spin(vp);
	vp->v_flag &= ~VOPENEVT;
	vnode_unlock(vp);
}
1873
1874
91447636
A
/* disable sequential read-ahead on this vnode */
void
vnode_setnoreadahead(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VRAOFF;
	vnode_unlock(vp);
}
1882
/* re-enable sequential read-ahead on this vnode */
void
vnode_clearnoreadahead(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VRAOFF;
	vnode_unlock(vp);
}
1890
1891
1892/* mark vnode_t to skip vflush() is SKIPSYSTEM */
void
vnode_setnoflush(vnode_t vp)
{
	/* v_flag updates are serialized by the vnode lock */
	vnode_lock_spin(vp);
	vp->v_flag |= VNOFLUSH;
	vnode_unlock(vp);
}
1900
void
vnode_clearnoflush(vnode_t vp)
{
	/* v_flag updates are serialized by the vnode lock */
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOFLUSH;
	vnode_unlock(vp);
}
1908
1909
1910/* is vnode_t a blkdevice and has a FS mounted on it */
1911int
1912vnode_ismountedon(vnode_t vp)
1913{
1914 return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
1915}
1916
void
vnode_setmountedon(vnode_t vp)
{
	/* v_specflags updates are serialized by the vnode lock */
	vnode_lock_spin(vp);
	vp->v_specflags |= SI_MOUNTEDON;
	vnode_unlock(vp);
}
1924
void
vnode_clearmountedon(vnode_t vp)
{
	/* v_specflags updates are serialized by the vnode lock */
	vnode_lock_spin(vp);
	vp->v_specflags &= ~SI_MOUNTEDON;
	vnode_unlock(vp);
}
1932
1933
/* set the advisory filesystem type tag (VT_*) */
void
vnode_settag(vnode_t vp, int tag)
{
	vp->v_tag = tag;

}
1940
/* return the advisory filesystem type tag (VT_*) */
int
vnode_tag(vnode_t vp)
{
	return(vp->v_tag);
}
1946
/* return the cached parent vnode (may be NULL; no reference taken) */
vnode_t
vnode_parent(vnode_t vp)
{

	return(vp->v_parent);
}
1953
/* set the cached parent vnode pointer (no reference taken) */
void
vnode_setparent(vnode_t vp, vnode_t dvp)
{
	vp->v_parent = dvp;
}
1959
2d21ac55 1960const char *
91447636
A
1961vnode_name(vnode_t vp)
1962{
1963 /* we try to keep v_name a reasonable name for the node */
1964 return(vp->v_name);
1965}
1966
void
vnode_setname(vnode_t vp, char * name)
{
	/* raw pointer assignment; no copy, no locking visible here */
	vp->v_name = name;
}
1972
/* return the registered FS name when adding the FS to kernel */
void
vnode_vfsname(vnode_t vp, char * buf)
{
	/* buf must be at least MFSNAMELEN bytes; assumes vfc_name is
	 * NUL-terminated within MFSNAMELEN (strncpy does not guarantee
	 * termination) -- NOTE(review): confirm registration always terminates */
	strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
}
1979
1980/* return the FS type number */
int
vnode_vfstypenum(vnode_t vp)
{
	/* filesystem type number assigned at registration time */
	return(vp->v_mount->mnt_vtable->vfc_typenum);
}
1986
1987int
1988vnode_vfs64bitready(vnode_t vp)
1989{
1990
b0d623f7
A
1991 /*
1992 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
1993 */
1994 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
91447636
A
1995 return(1);
1996 else
1997 return(0);
1998}
1999
2000
2001
2002/* return the visible flags on associated mount point of vnode_t */
uint32_t
vnode_vfsvisflags(vnode_t vp)
{
	/* externally-visible mount flags only */
	return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
}
2008
2009/* return the command modifier flags on associated mount point of vnode_t */
uint32_t
vnode_vfscmdflags(vnode_t vp)
{
	/* command-modifier mount flags only */
	return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
}
2015
2016/* return the max symlink of short links of vnode_t */
uint32_t
vnode_vfsmaxsymlen(vnode_t vp)
{
	/* max short-symlink length for this vnode's filesystem */
	return(vp->v_mount->mnt_maxsymlinklen);
}
2022
2023/* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
struct vfsstatfs *
vnode_vfsstatfs(vnode_t vp)
{
	/* treat as read-only; lives inside the mount structure */
	return(&vp->v_mount->mnt_vfsstat);
}
2029
2030/* return a handle to the FSs specific private handle associated with vnode_t's mount point */
void *
vnode_vfsfsprivate(vnode_t vp)
{
	/* opaque per-mount private data for the owning filesystem */
	return(vp->v_mount->mnt_data);
}
2036
2037/* is vnode_t in a rdonly mounted FS */
2038int
2039vnode_vfsisrdonly(vnode_t vp)
2040{
2041 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
2042}
2043
2044
2d21ac55
A
2045/*
2046 * Returns vnode ref to current working directory; if a per-thread current
2047 * working directory is in effect, return that instead of the per process one.
2048 *
2049 * XXX Published, but not used.
2050 */
91447636
A
vnode_t
current_workingdir(void)
{
	/* thin wrapper: cwd of the current execution context */
	return vfs_context_cwd(vfs_context_current());
}
2056
2057/* returns vnode ref to current root(chroot) directory */
vnode_t
current_rootdir(void)
{
	proc_t proc = current_proc();
	struct vnode * vp ;

	if ( (vp = proc->p_fd->fd_rdir) ) {
		/* take an iocount; failure means the vnode is being reclaimed */
		if ( (vnode_getwithref(vp)) )
			return (NULL);
	}
	/* NULL when the process is not chroot()ed (no fd_rdir set) */
	return vp;
}
2070
0c530ab8
A
2071/*
2072 * Get a filesec and optional acl contents from an extended attribute.
2073 * Function will attempt to retrive ACL, UUID, and GUID information using a
2074 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2075 *
2076 * Parameters: vp The vnode on which to operate.
2077 * fsecp The filesec (and ACL, if any) being
2078 * retrieved.
2079 * ctx The vnode context in which the
2080 * operation is to be attempted.
2081 *
2082 * Returns: 0 Success
2083 * !0 errno value
2084 *
2085 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2086 * host byte order, as will be the ACL contents, if any.
2087 * Internally, we will cannonize these values from network (PPC)
2088 * byte order after we retrieve them so that the on-disk contents
2089 * of the extended attribute are identical for both PPC and Intel
2090 * (if we were not being required to provide this service via
2091 * fallback, this would be the job of the filesystem
2092 * 'VNOP_GETATTR' call).
2093 *
2094 * We use ntohl() because it has a transitive property on Intel
2095 * machines and no effect on PPC mancines. This guarantees us
2096 *
2097 * XXX: Deleting rather than ignoreing a corrupt security structure is
2098 * probably the only way to reset it without assistance from an
2099 * file system integrity checking tool. Right now we ignore it.
2100 *
2101 * XXX: We should enummerate the possible errno values here, and where
2102 * in the code they originated.
2103 */
91447636
A
2104static int
2105vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
2106{
2107 kauth_filesec_t fsec;
2108 uio_t fsec_uio;
2109 size_t fsec_size;
2110 size_t xsize, rsize;
2111 int error;
0c530ab8
A
2112 uint32_t host_fsec_magic;
2113 uint32_t host_acl_entrycount;
91447636
A
2114
2115 fsec = NULL;
2116 fsec_uio = NULL;
2117 error = 0;
2118
2119 /* find out how big the EA is */
2120 if (vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx) != 0) {
2121 /* no EA, no filesec */
2122 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2123 error = 0;
2124 /* either way, we are done */
2125 goto out;
2126 }
0c530ab8
A
2127
2128 /*
2129 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2130 * ACE entrly ACL, and if it's larger than that, it must have the right
2131 * number of bytes such that it contains an atomic number of ACEs,
2132 * rather than partial entries. Otherwise, we ignore it.
2133 */
2134 if (!KAUTH_FILESEC_VALID(xsize)) {
2135 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize);
2136 error = 0;
2137 goto out;
2138 }
91447636
A
2139
2140 /* how many entries would fit? */
2141 fsec_size = KAUTH_FILESEC_COUNT(xsize);
2142
2143 /* get buffer and uio */
2144 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
2145 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2146 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2147 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2148 error = ENOMEM;
2149 goto out;
2150 }
2151
2152 /* read security attribute */
2153 rsize = xsize;
2154 if ((error = vn_getxattr(vp,
2155 KAUTH_FILESEC_XATTR,
2156 fsec_uio,
2157 &rsize,
2158 XATTR_NOSECURITY,
2159 ctx)) != 0) {
2160
2161 /* no attribute - no security data */
2162 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2163 error = 0;
2164 /* either way, we are done */
2165 goto out;
2166 }
2167
2168 /*
0c530ab8
A
2169 * Validate security structure; the validation must take place in host
2170 * byte order. If it's corrupt, we will just ignore it.
91447636 2171 */
0c530ab8
A
2172
2173 /* Validate the size before trying to convert it */
91447636
A
2174 if (rsize < KAUTH_FILESEC_SIZE(0)) {
2175 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2176 goto out;
2177 }
0c530ab8
A
2178
2179 /* Validate the magic number before trying to convert it */
2180 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2181 if (fsec->fsec_magic != host_fsec_magic) {
2182 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
6601e61a
A
2183 goto out;
2184 }
0c530ab8
A
2185
2186 /* Validate the entry count before trying to convert it. */
2187 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2188 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2189 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2190 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2191 goto out;
2192 }
2193 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2194 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2195 goto out;
2196 }
91447636 2197 }
4452a7af 2198
0c530ab8
A
2199 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2200
91447636
A
2201 *fsecp = fsec;
2202 fsec = NULL;
2203 error = 0;
2204out:
2205 if (fsec != NULL)
2206 kauth_filesec_free(fsec);
2207 if (fsec_uio != NULL)
2208 uio_free(fsec_uio);
2209 if (error)
2210 *fsecp = NULL;
2211 return(error);
2212}
2213
0c530ab8
A
2214/*
2215 * Set a filesec and optional acl contents into an extended attribute.
2216 * function will attempt to store ACL, UUID, and GUID information using a
2217 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2218 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2219 * original caller supplied an acl.
2220 *
2221 * Parameters: vp The vnode on which to operate.
2222 * fsec The filesec being set.
2223 * acl The acl to be associated with 'fsec'.
2224 * ctx The vnode context in which the
2225 * operation is to be attempted.
2226 *
2227 * Returns: 0 Success
2228 * !0 errno value
2229 *
2230 * Notes: Both the fsec and the acl are always valid.
2231 *
2232 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2233 * as are the acl contents, if they are used. Internally, we will
2234 * cannonize these values into network (PPC) byte order before we
2235 * attempt to write them so that the on-disk contents of the
2236 * extended attribute are identical for both PPC and Intel (if we
2237 * were not being required to provide this service via fallback,
2238 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2239 * We reverse this process on the way out, so we leave with the
2240 * same byte order we started with.
2241 *
2242 * XXX: We should enummerate the possible errno values here, and where
2243 * in the code they originated.
2244 */
91447636
A
static int
vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
{
	uio_t fsec_uio;
	int error;
	uint32_t saved_acl_copysize;

	fsec_uio = NULL;

	/* two iovecs: the filesec header and the acl body */
	if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
		KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
		error = ENOMEM;
		goto out;
	}
	/*
	 * Save the pre-converted ACL copysize, because it gets swapped too
	 * if we are running with the wrong endianness.
	 */
	saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);

	/* swap the in-memory copies to on-disk (network) byte order */
	kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);

	/* header sized without any ACL payload; acl follows separately */
	uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
	uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
	error = vn_setxattr(vp,
	    KAUTH_FILESEC_XATTR,
	    fsec_uio,
	    XATTR_NOSECURITY, 		/* we have auth'ed already */
	    ctx);
	VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);

	/* restore host byte order so the caller's structures are unchanged */
	kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);

out:
	if (fsec_uio != NULL)
		uio_free(fsec_uio);
	return(error);
}
2283
2284
2d21ac55
A
2285/*
2286 * Returns: 0 Success
2287 * ENOMEM Not enough space [only if has filesec]
2288 * VNOP_GETATTR: ???
2289 * vnode_get_filesec: ???
2290 * kauth_cred_guid2uid: ???
2291 * kauth_cred_guid2gid: ???
2292 * vfs_update_vfsstat: ???
2293 */
91447636
A
2294int
2295vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2296{
2297 kauth_filesec_t fsec;
2298 kauth_acl_t facl;
2299 int error;
2300 uid_t nuid;
2301 gid_t ngid;
2302
2303 /* don't ask for extended security data if the filesystem doesn't support it */
2304 if (!vfs_extendedsecurity(vnode_mount(vp))) {
2305 VATTR_CLEAR_ACTIVE(vap, va_acl);
2306 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2307 VATTR_CLEAR_ACTIVE(vap, va_guuid);
2308 }
2309
2310 /*
2311 * If the caller wants size values we might have to synthesise, give the
2312 * filesystem the opportunity to supply better intermediate results.
2313 */
2314 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2315 VATTR_IS_ACTIVE(vap, va_total_size) ||
2316 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2317 VATTR_SET_ACTIVE(vap, va_data_size);
2318 VATTR_SET_ACTIVE(vap, va_data_alloc);
2319 VATTR_SET_ACTIVE(vap, va_total_size);
2320 VATTR_SET_ACTIVE(vap, va_total_alloc);
2321 }
2322
2323 error = VNOP_GETATTR(vp, vap, ctx);
2324 if (error) {
2325 KAUTH_DEBUG("ERROR - returning %d", error);
2326 goto out;
2327 }
2328
2329 /*
2330 * If extended security data was requested but not returned, try the fallback
2331 * path.
2332 */
2333 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2334 fsec = NULL;
2335
2336 if ((vp->v_type == VDIR) || (vp->v_type == VLNK) || (vp->v_type == VREG)) {
2337 /* try to get the filesec */
2338 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
2339 goto out;
2340 }
2341 /* if no filesec, no attributes */
2342 if (fsec == NULL) {
2343 VATTR_RETURN(vap, va_acl, NULL);
2344 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2345 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2346 } else {
2347
2348 /* looks good, try to return what we were asked for */
2349 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2350 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2351
2352 /* only return the ACL if we were actually asked for it */
2353 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2354 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2355 VATTR_RETURN(vap, va_acl, NULL);
2356 } else {
2357 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2358 if (facl == NULL) {
2359 kauth_filesec_free(fsec);
2360 error = ENOMEM;
2361 goto out;
2362 }
2363 bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2364 VATTR_RETURN(vap, va_acl, facl);
2365 }
2366 }
2367 kauth_filesec_free(fsec);
2368 }
2369 }
2370 /*
2371 * If someone gave us an unsolicited filesec, toss it. We promise that
2372 * we're OK with a filesystem giving us anything back, but our callers
2373 * only expect what they asked for.
2374 */
2375 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2376 if (vap->va_acl != NULL)
2377 kauth_acl_free(vap->va_acl);
2378 VATTR_CLEAR_SUPPORTED(vap, va_acl);
2379 }
2380
2381#if 0 /* enable when we have a filesystem only supporting UUIDs */
2382 /*
2383 * Handle the case where we need a UID/GID, but only have extended
2384 * security information.
2385 */
2386 if (VATTR_NOT_RETURNED(vap, va_uid) &&
2387 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2388 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2389 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
2390 VATTR_RETURN(vap, va_uid, nuid);
2391 }
2392 if (VATTR_NOT_RETURNED(vap, va_gid) &&
2393 VATTR_IS_SUPPORTED(vap, va_guuid) &&
2394 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2395 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
2396 VATTR_RETURN(vap, va_gid, ngid);
2397 }
2398#endif
2399
2400 /*
2401 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2402 */
2403 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2d21ac55
A
2404 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
2405 nuid = vap->va_uid;
2406 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
91447636
A
2407 nuid = vp->v_mount->mnt_fsowner;
2408 if (nuid == KAUTH_UID_NONE)
2409 nuid = 99;
2410 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2411 nuid = vap->va_uid;
2412 } else {
2413 /* this will always be something sensible */
2414 nuid = vp->v_mount->mnt_fsowner;
2415 }
2416 if ((nuid == 99) && !vfs_context_issuser(ctx))
2417 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2418 VATTR_RETURN(vap, va_uid, nuid);
2419 }
2420 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2d21ac55
A
2421 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
2422 ngid = vap->va_gid;
2423 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
91447636
A
2424 ngid = vp->v_mount->mnt_fsgroup;
2425 if (ngid == KAUTH_GID_NONE)
2426 ngid = 99;
2427 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2428 ngid = vap->va_gid;
2429 } else {
2430 /* this will always be something sensible */
2431 ngid = vp->v_mount->mnt_fsgroup;
2432 }
2433 if ((ngid == 99) && !vfs_context_issuser(ctx))
2434 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2435 VATTR_RETURN(vap, va_gid, ngid);
2436 }
2437
2438 /*
2439 * Synthesise some values that can be reasonably guessed.
2440 */
2441 if (!VATTR_IS_SUPPORTED(vap, va_iosize))
2442 VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);
2443
2444 if (!VATTR_IS_SUPPORTED(vap, va_flags))
2445 VATTR_RETURN(vap, va_flags, 0);
2446
2447 if (!VATTR_IS_SUPPORTED(vap, va_filerev))
2448 VATTR_RETURN(vap, va_filerev, 0);
2449
2450 if (!VATTR_IS_SUPPORTED(vap, va_gen))
2451 VATTR_RETURN(vap, va_gen, 0);
2452
2453 /*
2454 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2455 */
2456 if (!VATTR_IS_SUPPORTED(vap, va_data_size))
2457 VATTR_RETURN(vap, va_data_size, 0);
2458
2459 /* do we want any of the possibly-computed values? */
2460 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2461 VATTR_IS_ACTIVE(vap, va_total_size) ||
2462 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2463 /* make sure f_bsize is valid */
2464 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2d21ac55 2465 if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0)
91447636
A
2466 goto out;
2467 }
2468
2469 /* default va_data_alloc from va_data_size */
2470 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
2471 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2472
2473 /* default va_total_size from va_data_size */
2474 if (!VATTR_IS_SUPPORTED(vap, va_total_size))
2475 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2476
2477 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2478 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
2479 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2480 }
2481
2482 /*
2483 * If we don't have a change time, pull it from the modtime.
2484 */
2485 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
2486 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2487
2488 /*
2489 * This is really only supported for the creation VNOPs, but since the field is there
2490 * we should populate it correctly.
2491 */
2492 VATTR_RETURN(vap, va_type, vp->v_type);
2493
2494 /*
2495 * The fsid can be obtained from the mountpoint directly.
2496 */
2497 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
2498
2499out:
2500
2501 return(error);
2502}
2503
0c530ab8
A
2504/*
2505 * Set the attributes on a vnode in a vnode context.
2506 *
2507 * Parameters: vp The vnode whose attributes to set.
2508 * vap A pointer to the attributes to set.
2509 * ctx The vnode context in which the
2510 * operation is to be attempted.
2511 *
2512 * Returns: 0 Success
2513 * !0 errno value
2514 *
2515 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2516 *
2517 * The contents of the data area pointed to by 'vap' may be
2518 * modified if the vnode is on a filesystem which has been
2519 * mounted with ignore-ownership flags, or by the underlying
2520 * VFS itself, or by the fallback code, if the underlying VFS
2521 * does not support ACL, UUID, or GUUID attributes directly.
2522 *
2523 * XXX: We should enumerate the possible errno values here, and where
2524 * in the code they originated.
2525 */
91447636
A
2526int
2527vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2528{
2d21ac55 2529 int error, is_perm_change=0;
91447636
A
2530
2531 /*
2532 * Make sure the filesystem is mounted R/W.
2533 * If not, return an error.
2534 */
0c530ab8
A
2535 if (vfs_isrdonly(vp->v_mount)) {
2536 error = EROFS;
2537 goto out;
2538 }
2d21ac55
A
2539#if NAMEDSTREAMS
2540 /* For streams, va_data_size is the only setable attribute. */
2541 if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
2542 error = EPERM;
2543 goto out;
2544 }
2545#endif
91447636
A
2546
2547 /*
2548 * If ownership is being ignored on this volume, we silently discard
2549 * ownership changes.
2550 */
2551 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2552 VATTR_CLEAR_ACTIVE(vap, va_uid);
2553 VATTR_CLEAR_ACTIVE(vap, va_gid);
2554 }
2555
2d21ac55
A
2556 if ( VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)
2557 || VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_acl)) {
2558 is_perm_change = 1;
91447636
A
2559 }
2560
2561 /*
2562 * Make sure that extended security is enabled if we're going to try
2563 * to set any.
2564 */
2565 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
2566 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
2567 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
0c530ab8
A
2568 error = ENOTSUP;
2569 goto out;
91447636
A
2570 }
2571
2572 error = VNOP_SETATTR(vp, vap, ctx);
2573
2574 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
2575 error = vnode_setattr_fallback(vp, vap, ctx);
2576
2d21ac55 2577#if CONFIG_FSE
91447636 2578 // only send a stat_changed event if this is more than
b0d623f7
A
2579 // just an access or backup time update
2580 if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time)) && (vap->va_active != VNODE_ATTR_BIT(va_backup_time))) {
2d21ac55
A
2581 if (is_perm_change) {
2582 if (need_fsevent(FSE_CHOWN, vp)) {
2583 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2584 }
2585 } else if(need_fsevent(FSE_STAT_CHANGED, vp)) {
2586 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
91447636
A
2587 }
2588 }
2d21ac55 2589#endif
0c530ab8
A
2590
2591out:
91447636
A
2592 return(error);
2593}
2594
2595/*
0c530ab8
A
2596 * Fallback for setting the attributes on a vnode in a vnode context. This
2597 * function will attempt to store ACL, UUID, and GUID information utilizing
2598 * a read/modify/write operation against an EA used as a backing store for
2599 * the object.
2600 *
2601 * Parameters: vp The vnode whose attributes to set.
2602 * vap A pointer to the attributes to set.
2603 * ctx The vnode context in which the
2604 * operation is to be attempted.
2605 *
2606 * Returns: 0 Success
2607 * !0 errno value
2608 *
2609 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2610 * as are the fsec and lfsec, if they are used.
2611 *
2612 * The contents of the data area pointed to by 'vap' may be
2613 * modified to indicate that the attribute is supported for
2614 * any given requested attribute.
2615 *
2616 * XXX: We should enumerate the possible errno values here, and where
2617 * in the code they originated.
2618 */
91447636
A
2619int
2620vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2621{
2622 kauth_filesec_t fsec;
2623 kauth_acl_t facl;
2624 struct kauth_filesec lfsec;
2625 int error;
2626
2627 error = 0;
2628
2629 /*
2630 * Extended security fallback via extended attributes.
2631 *
0c530ab8
A
2632 * Note that we do not free the filesec; the caller is expected to
2633 * do this.
91447636
A
2634 */
2635 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2636 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2637 VATTR_NOT_RETURNED(vap, va_guuid)) {
2638 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2639
2640 /*
0c530ab8
A
2641 * Fail for file types that we don't permit extended security
2642 * to be set on.
91447636
A
2643 */
2644 if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) {
2645 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2646 error = EINVAL;
2647 goto out;
2648 }
2649
2650 /*
0c530ab8
A
2651 * If we don't have all the extended security items, we need
2652 * to fetch the existing data to perform a read-modify-write
2653 * operation.
91447636
A
2654 */
2655 fsec = NULL;
2656 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2657 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2658 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2659 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2660 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2661 goto out;
2662 }
2663 }
2664 /* if we didn't get a filesec, use our local one */
2665 if (fsec == NULL) {
2666 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2667 fsec = &lfsec;
2668 } else {
2669 KAUTH_DEBUG("SETATTR - updating existing filesec");
2670 }
2671 /* find the ACL */
2672 facl = &fsec->fsec_acl;
2673
2674 /* if we're using the local filesec, we need to initialise it */
2675 if (fsec == &lfsec) {
2676 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2677 fsec->fsec_owner = kauth_null_guid;
2678 fsec->fsec_group = kauth_null_guid;
2679 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2680 facl->acl_flags = 0;
2681 }
2682
2683 /*
2684 * Update with the supplied attributes.
2685 */
2686 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2687 KAUTH_DEBUG("SETATTR - updating owner UUID");
2688 fsec->fsec_owner = vap->va_uuuid;
2689 VATTR_SET_SUPPORTED(vap, va_uuuid);
2690 }
2691 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2692 KAUTH_DEBUG("SETATTR - updating group UUID");
2693 fsec->fsec_group = vap->va_guuid;
2694 VATTR_SET_SUPPORTED(vap, va_guuid);
2695 }
2696 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2697 if (vap->va_acl == NULL) {
2698 KAUTH_DEBUG("SETATTR - removing ACL");
2699 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2700 } else {
2701 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
2702 facl = vap->va_acl;
2703 }
2704 VATTR_SET_SUPPORTED(vap, va_acl);
2705 }
2706
2707 /*
0c530ab8
A
2708 * If the filesec data is all invalid, we can just remove
2709 * the EA completely.
91447636
A
2710 */
2711 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
2712 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
2713 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
2714 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
2715 /* no attribute is ok, nothing to delete */
2716 if (error == ENOATTR)
2717 error = 0;
2718 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
2719 } else {
2720 /* write the EA */
2721 error = vnode_set_filesec(vp, fsec, facl, ctx);
2722 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
2723 }
2724
2725 /* if we fetched a filesec, dispose of the buffer */
2726 if (fsec != &lfsec)
2727 kauth_filesec_free(fsec);
2728 }
2729out:
2730
2731 return(error);
2732}
2733
b0d623f7
A
2734/*
2735 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
2736 * event on a vnode.
2737 */
2738int
2739vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
2740{
2741 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
2742 uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
2743 | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
2744 uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
2745 | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
2746 uint32_t knote_events = (events & knote_mask);
2747
2748 /* Permissions are not explicitly part of the kqueue model */
2749 if (events & VNODE_EVENT_PERMS) {
2750 knote_events |= NOTE_ATTRIB;
2751 }
2752
2753 /* Directory contents information just becomes NOTE_WRITE */
2754 if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
2755 knote_events |= NOTE_WRITE;
2756 }
2757
2758 if (knote_events) {
2759 lock_vnode_and_post(vp, knote_events);
2760#if CONFIG_FSE
2761 if (vap != NULL) {
2762 create_fsevent_from_kevent(vp, events, vap);
2763 }
2764#else
2765 (void)vap;
2766#endif
2767 }
2768
2769 return 0;
2770}
2771
2772/*
2773 * For a filesystem that isn't tracking its own vnode watchers:
2774 * check whether a vnode is being monitored.
2775 */
2776int
2777vnode_ismonitored(vnode_t vp) {
2778 return (vp->v_knotes.slh_first != NULL);
2779}
2780
2781/*
2782 * Conceived as a function available only in BSD kernel so that if kevent_register
2783 * changes what a knote of type EVFILT_VNODE is watching, it can push
2784 * that updated information down to a networked filesystem that may
2785 * need to update server-side monitoring.
2786 *
2787 * Blunted to do nothing--because we want to get both kqueue and fsevents support
2788 * from the VNOP_MONITOR design, we always want all the events a filesystem can provide us.
2789 */
2790void
2791vnode_knoteupdate(__unused struct knote *kn)
2792{
2793#if 0
2794 vnode_t vp = (vnode_t)kn->kn_hook;
2795 if (vnode_getwithvid(vp, kn->kn_hookid) == 0) {
2796 VNOP_MONITOR(vp, kn->kn_sfflags, VNODE_MONITOR_UPDATE, (void*)kn, NULL);
2797 vnode_put(vp);
2798 }
2799#endif
2800}
2801
2802/*
2803 * Initialize a struct vnode_attr and activate the attributes required
2804 * by the vnode_notify() call.
2805 */
2806int
2807vfs_get_notify_attributes(struct vnode_attr *vap)
2808{
2809 VATTR_INIT(vap);
2810 vap->va_active = VNODE_NOTIFY_ATTRS;
2811 return 0;
2812}
2813
91447636
A
2814/*
2815 * Definition of vnode operations.
2816 */
2817
2818#if 0
2819/*
2820 *#
2821 *#% lookup dvp L ? ?
2822 *#% lookup vpp - L -
2823 */
2824struct vnop_lookup_args {
2825 struct vnodeop_desc *a_desc;
2826 vnode_t a_dvp;
2827 vnode_t *a_vpp;
2828 struct componentname *a_cnp;
2829 vfs_context_t a_context;
2830};
2831#endif /* 0*/
2832
2d21ac55
A
2833/*
2834 * Returns: 0 Success
2835 * lock_fsnode:ENOENT No such file or directory [only for VFS
2836 * that is not thread safe & vnode is
2837 * currently being/has been terminated]
2838 * <vfs_lookup>:ENAMETOOLONG
2839 * <vfs_lookup>:ENOENT
2840 * <vfs_lookup>:EJUSTRETURN
2841 * <vfs_lookup>:EPERM
2842 * <vfs_lookup>:EISDIR
2843 * <vfs_lookup>:ENOTDIR
2844 * <vfs_lookup>:???
2845 *
2846 * Note: The return codes from the underlying VFS's lookup routine can't
2847 * be fully enumerated here, since third party VFS authors may not
2848 * limit their error returns to the ones documented here, even
2849 * though this may result in some programs functioning incorrectly.
2850 *
2851 * The return codes documented above are those which may currently
2852 * be returned by HFS from hfs_lookup, not including additional
2853 * error code which may be propagated from underlying routines.
2854 */
91447636 2855errno_t
2d21ac55 2856VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
91447636
A
2857{
2858 int _err;
2859 struct vnop_lookup_args a;
2860 vnode_t vp;
b0d623f7 2861#ifndef __LP64__
91447636
A
2862 int thread_safe;
2863 int funnel_state = 0;
b0d623f7 2864#endif /* __LP64__ */
91447636
A
2865
2866 a.a_desc = &vnop_lookup_desc;
2867 a.a_dvp = dvp;
2868 a.a_vpp = vpp;
2869 a.a_cnp = cnp;
2d21ac55 2870 a.a_context = ctx;
91447636 2871
b0d623f7
A
2872#ifndef __LP64__
2873 thread_safe = THREAD_SAFE_FS(dvp);
91447636
A
2874 if (!thread_safe) {
2875 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2876 return (_err);
2877 }
2878 }
b0d623f7
A
2879#endif /* __LP64__ */
2880
91447636
A
2881 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
2882
2883 vp = *vpp;
2884
b0d623f7 2885#ifndef __LP64__
91447636
A
2886 if (!thread_safe) {
2887 if ( (cnp->cn_flags & ISLASTCN) ) {
2888 if ( (cnp->cn_flags & LOCKPARENT) ) {
2889 if ( !(cnp->cn_flags & FSNODELOCKHELD) ) {
2890 /*
2891 * leave the fsnode lock held on
2892 * the directory, but restore the funnel...
2893 * also indicate that we need to drop the
2894 * fsnode_lock when we're done with the
2895 * system call processing for this path
2896 */
2897 cnp->cn_flags |= FSNODELOCKHELD;
2898
2899 (void) thread_funnel_set(kernel_flock, funnel_state);
2900 return (_err);
2901 }
2902 }
2903 }
2904 unlock_fsnode(dvp, &funnel_state);
2905 }
b0d623f7
A
2906#endif /* __LP64__ */
2907
91447636
A
2908 return (_err);
2909}
2910
2911#if 0
2912/*
2913 *#
2914 *#% create dvp L L L
2915 *#% create vpp - L -
2916 *#
2917 */
2918
2919struct vnop_create_args {
2920 struct vnodeop_desc *a_desc;
2921 vnode_t a_dvp;
2922 vnode_t *a_vpp;
2923 struct componentname *a_cnp;
2924 struct vnode_attr *a_vap;
2925 vfs_context_t a_context;
2926};
2927#endif /* 0*/
2928errno_t
2d21ac55 2929VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
91447636
A
2930{
2931 int _err;
2932 struct vnop_create_args a;
b0d623f7 2933#ifndef __LP64__
91447636
A
2934 int thread_safe;
2935 int funnel_state = 0;
b0d623f7 2936#endif /* __LP64__ */
91447636
A
2937
2938 a.a_desc = &vnop_create_desc;
2939 a.a_dvp = dvp;
2940 a.a_vpp = vpp;
2941 a.a_cnp = cnp;
2942 a.a_vap = vap;
2d21ac55 2943 a.a_context = ctx;
91447636 2944
b0d623f7
A
2945#ifndef __LP64__
2946 thread_safe = THREAD_SAFE_FS(dvp);
91447636
A
2947 if (!thread_safe) {
2948 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2949 return (_err);
2950 }
2951 }
b0d623f7
A
2952#endif /* __LP64__ */
2953
91447636
A
2954 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
2955 if (_err == 0 && !NATIVE_XATTR(dvp)) {
2956 /*
2957 * Remove stale Apple Double file (if any).
2958 */
b0d623f7 2959 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
91447636 2960 }
b0d623f7
A
2961
2962#ifndef __LP64__
91447636
A
2963 if (!thread_safe) {
2964 unlock_fsnode(dvp, &funnel_state);
2965 }
b0d623f7
A
2966#endif /* __LP64__ */
2967
2968 post_event_if_success(dvp, _err, NOTE_WRITE);
2969
91447636
A
2970 return (_err);
2971}
2972
2973#if 0
2974/*
2975 *#
2976 *#% whiteout dvp L L L
2977 *#% whiteout cnp - - -
2978 *#% whiteout flag - - -
2979 *#
2980 */
2981struct vnop_whiteout_args {
2982 struct vnodeop_desc *a_desc;
2983 vnode_t a_dvp;
2984 struct componentname *a_cnp;
2985 int a_flags;
2986 vfs_context_t a_context;
2987};
2988#endif /* 0*/
2989errno_t
2d21ac55 2990VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t ctx)
91447636
A
2991{
2992 int _err;
2993 struct vnop_whiteout_args a;
b0d623f7 2994#ifndef __LP64__
91447636
A
2995 int thread_safe;
2996 int funnel_state = 0;
b0d623f7 2997#endif /* __LP64__ */
91447636
A
2998
2999 a.a_desc = &vnop_whiteout_desc;
3000 a.a_dvp = dvp;
3001 a.a_cnp = cnp;
3002 a.a_flags = flags;
2d21ac55 3003 a.a_context = ctx;
91447636 3004
b0d623f7
A
3005#ifndef __LP64__
3006 thread_safe = THREAD_SAFE_FS(dvp);
91447636
A
3007 if (!thread_safe) {
3008 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3009 return (_err);
3010 }
3011 }
b0d623f7
A
3012#endif /* __LP64__ */
3013
91447636 3014 _err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a);
b0d623f7
A
3015
3016#ifndef __LP64__
91447636
A
3017 if (!thread_safe) {
3018 unlock_fsnode(dvp, &funnel_state);
3019 }
b0d623f7
A
3020#endif /* __LP64__ */
3021
3022 post_event_if_success(dvp, _err, NOTE_WRITE);
3023
91447636
A
3024 return (_err);
3025}
3026
3027 #if 0
3028/*
3029 *#
3030 *#% mknod dvp L U U
3031 *#% mknod vpp - X -
3032 *#
3033 */
3034struct vnop_mknod_args {
3035 struct vnodeop_desc *a_desc;
3036 vnode_t a_dvp;
3037 vnode_t *a_vpp;
3038 struct componentname *a_cnp;
3039 struct vnode_attr *a_vap;
3040 vfs_context_t a_context;
3041};
3042#endif /* 0*/
3043errno_t
2d21ac55 3044VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
91447636
A
3045{
3046
3047 int _err;
3048 struct vnop_mknod_args a;
b0d623f7
A
3049#ifndef __LP64__
3050 int thread_safe;
3051 int funnel_state = 0;
3052#endif /* __LP64__ */
91447636
A
3053
3054 a.a_desc = &vnop_mknod_desc;
3055 a.a_dvp = dvp;
3056 a.a_vpp = vpp;
3057 a.a_cnp = cnp;
3058 a.a_vap = vap;
2d21ac55 3059 a.a_context = ctx;
91447636 3060
b0d623f7
A
3061#ifndef __LP64__
3062 thread_safe = THREAD_SAFE_FS(dvp);
91447636
A
3063 if (!thread_safe) {
3064 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3065 return (_err);
3066 }
3067 }
b0d623f7
A
3068#endif /* __LP64__ */
3069
91447636 3070 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
b0d623f7
A
3071
3072#ifndef __LP64__
91447636
A
3073 if (!thread_safe) {
3074 unlock_fsnode(dvp, &funnel_state);
3075 }
b0d623f7
A
3076#endif /* __LP64__ */
3077
3078 post_event_if_success(dvp, _err, NOTE_WRITE);
3079
91447636
A
3080 return (_err);
3081}
3082
3083#if 0
3084/*
3085 *#
3086 *#% open vp L L L
3087 *#
3088 */
3089struct vnop_open_args {
3090 struct vnodeop_desc *a_desc;
3091 vnode_t a_vp;
3092 int a_mode;
3093 vfs_context_t a_context;
3094};
3095#endif /* 0*/
3096errno_t
2d21ac55 3097VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
91447636
A
3098{
3099 int _err;
3100 struct vnop_open_args a;
b0d623f7 3101#ifndef __LP64__
91447636
A
3102 int thread_safe;
3103 int funnel_state = 0;
b0d623f7 3104#endif /* __LP64__ */
91447636 3105
2d21ac55
A
3106 if (ctx == NULL) {
3107 ctx = vfs_context_current();
91447636
A
3108 }
3109 a.a_desc = &vnop_open_desc;
3110 a.a_vp = vp;
3111 a.a_mode = mode;
2d21ac55 3112 a.a_context = ctx;
91447636 3113
b0d623f7
A
3114#ifndef __LP64__
3115 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3116 if (!thread_safe) {
3117 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3118 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3119 if ( (_err = lock_fsnode(vp, NULL)) ) {
3120 (void) thread_funnel_set(kernel_flock, funnel_state);
3121 return (_err);
3122 }
3123 }
3124 }
b0d623f7
A
3125#endif /* __LP64__ */
3126
91447636 3127 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
b0d623f7
A
3128
3129#ifndef __LP64__
91447636
A
3130 if (!thread_safe) {
3131 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3132 unlock_fsnode(vp, NULL);
3133 }
3134 (void) thread_funnel_set(kernel_flock, funnel_state);
3135 }
b0d623f7
A
3136#endif /* __LP64__ */
3137
91447636
A
3138 return (_err);
3139}
3140
3141#if 0
3142/*
3143 *#
3144 *#% close vp U U U
3145 *#
3146 */
3147struct vnop_close_args {
3148 struct vnodeop_desc *a_desc;
3149 vnode_t a_vp;
3150 int a_fflag;
3151 vfs_context_t a_context;
3152};
3153#endif /* 0*/
3154errno_t
2d21ac55 3155VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
91447636
A
3156{
3157 int _err;
3158 struct vnop_close_args a;
b0d623f7 3159#ifndef __LP64__
91447636
A
3160 int thread_safe;
3161 int funnel_state = 0;
b0d623f7 3162#endif /* __LP64__ */
91447636 3163
2d21ac55
A
3164 if (ctx == NULL) {
3165 ctx = vfs_context_current();
91447636
A
3166 }
3167 a.a_desc = &vnop_close_desc;
3168 a.a_vp = vp;
3169 a.a_fflag = fflag;
2d21ac55 3170 a.a_context = ctx;
91447636 3171
b0d623f7
A
3172#ifndef __LP64__
3173 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3174 if (!thread_safe) {
3175 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3176 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3177 if ( (_err = lock_fsnode(vp, NULL)) ) {
3178 (void) thread_funnel_set(kernel_flock, funnel_state);
3179 return (_err);
3180 }
3181 }
3182 }
b0d623f7
A
3183#endif /* __LP64__ */
3184
91447636 3185 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
b0d623f7
A
3186
3187#ifndef __LP64__
91447636
A
3188 if (!thread_safe) {
3189 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3190 unlock_fsnode(vp, NULL);
3191 }
3192 (void) thread_funnel_set(kernel_flock, funnel_state);
3193 }
b0d623f7
A
3194#endif /* __LP64__ */
3195
91447636
A
3196 return (_err);
3197}
3198
3199#if 0
3200/*
3201 *#
3202 *#% access vp L L L
3203 *#
3204 */
3205struct vnop_access_args {
3206 struct vnodeop_desc *a_desc;
3207 vnode_t a_vp;
3208 int a_action;
3209 vfs_context_t a_context;
3210};
3211#endif /* 0*/
3212errno_t
2d21ac55 3213VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
91447636
A
3214{
3215 int _err;
3216 struct vnop_access_args a;
b0d623f7 3217#ifndef __LP64__
91447636
A
3218 int thread_safe;
3219 int funnel_state = 0;
b0d623f7 3220#endif /* __LP64__ */
91447636 3221
2d21ac55
A
3222 if (ctx == NULL) {
3223 ctx = vfs_context_current();
91447636
A
3224 }
3225 a.a_desc = &vnop_access_desc;
3226 a.a_vp = vp;
3227 a.a_action = action;
2d21ac55 3228 a.a_context = ctx;
91447636 3229
b0d623f7
A
3230#ifndef __LP64__
3231 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3232 if (!thread_safe) {
3233 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3234 return (_err);
3235 }
3236 }
b0d623f7
A
3237#endif /* __LP64__ */
3238
91447636 3239 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
b0d623f7
A
3240
3241#ifndef __LP64__
91447636
A
3242 if (!thread_safe) {
3243 unlock_fsnode(vp, &funnel_state);
3244 }
b0d623f7
A
3245#endif /* __LP64__ */
3246
91447636
A
3247 return (_err);
3248}
3249
3250#if 0
3251/*
3252 *#
3253 *#% getattr vp = = =
3254 *#
3255 */
3256struct vnop_getattr_args {
3257 struct vnodeop_desc *a_desc;
3258 vnode_t a_vp;
3259 struct vnode_attr *a_vap;
3260 vfs_context_t a_context;
3261};
3262#endif /* 0*/
3263errno_t
2d21ac55 3264VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
91447636
A
3265{
3266 int _err;
3267 struct vnop_getattr_args a;
b0d623f7 3268#ifndef __LP64__
91447636 3269 int thread_safe;
b0d623f7
A
3270 int funnel_state = 0;
3271#endif /* __LP64__ */
91447636
A
3272
3273 a.a_desc = &vnop_getattr_desc;
3274 a.a_vp = vp;
3275 a.a_vap = vap;
2d21ac55 3276 a.a_context = ctx;
91447636 3277
b0d623f7
A
3278#ifndef __LP64__
3279 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3280 if (!thread_safe) {
3281 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3282 return (_err);
3283 }
3284 }
b0d623f7
A
3285#endif /* __LP64__ */
3286
91447636 3287 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
b0d623f7
A
3288
3289#ifndef __LP64__
91447636
A
3290 if (!thread_safe) {
3291 unlock_fsnode(vp, &funnel_state);
3292 }
b0d623f7
A
3293#endif /* __LP64__ */
3294
91447636
A
3295 return (_err);
3296}
3297
3298#if 0
3299/*
3300 *#
3301 *#% setattr vp L L L
3302 *#
3303 */
3304struct vnop_setattr_args {
3305 struct vnodeop_desc *a_desc;
3306 vnode_t a_vp;
3307 struct vnode_attr *a_vap;
3308 vfs_context_t a_context;
3309};
3310#endif /* 0*/
3311errno_t
2d21ac55 3312VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
91447636
A
3313{
3314 int _err;
3315 struct vnop_setattr_args a;
b0d623f7 3316#ifndef __LP64__
91447636 3317 int thread_safe;
b0d623f7
A
3318 int funnel_state = 0;
3319#endif /* __LP64__ */
91447636
A
3320
3321 a.a_desc = &vnop_setattr_desc;
3322 a.a_vp = vp;
3323 a.a_vap = vap;
2d21ac55 3324 a.a_context = ctx;
91447636 3325
b0d623f7
A
3326#ifndef __LP64__
3327 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3328 if (!thread_safe) {
3329 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3330 return (_err);
3331 }
3332 }
b0d623f7
A
3333#endif /* __LP64__ */
3334
91447636
A
3335 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3336
3337 /*
2d21ac55 3338 * Shadow uid/gid/mod change to extended attribute file.
91447636
A
3339 */
3340 if (_err == 0 && !NATIVE_XATTR(vp)) {
3341 struct vnode_attr va;
3342 int change = 0;
3343
3344 VATTR_INIT(&va);
3345 if (VATTR_IS_ACTIVE(vap, va_uid)) {
3346 VATTR_SET(&va, va_uid, vap->va_uid);
3347 change = 1;
3348 }
3349 if (VATTR_IS_ACTIVE(vap, va_gid)) {
3350 VATTR_SET(&va, va_gid, vap->va_gid);
3351 change = 1;
3352 }
3353 if (VATTR_IS_ACTIVE(vap, va_mode)) {
3354 VATTR_SET(&va, va_mode, vap->va_mode);
3355 change = 1;
3356 }
3357 if (change) {
3358 vnode_t dvp;
2d21ac55 3359 const char *vname;
91447636
A
3360
3361 dvp = vnode_getparent(vp);
3362 vname = vnode_getname(vp);
3363
b0d623f7 3364 xattrfile_setattr(dvp, vname, &va, ctx);
91447636
A
3365 if (dvp != NULLVP)
3366 vnode_put(dvp);
3367 if (vname != NULL)
3368 vnode_putname(vname);
3369 }
3370 }
b0d623f7
A
3371
3372#ifndef __LP64__
91447636
A
3373 if (!thread_safe) {
3374 unlock_fsnode(vp, &funnel_state);
3375 }
b0d623f7
A
3376#endif /* __LP64__ */
3377
2d21ac55
A
3378 /*
3379 * If we have changed any of the things about the file that are likely
3380 * to result in changes to authorization results, blow the vnode auth
3381 * cache
3382 */
3383 if (_err == 0 && (
3384 VATTR_IS_SUPPORTED(vap, va_mode) ||
3385 VATTR_IS_SUPPORTED(vap, va_uid) ||
3386 VATTR_IS_SUPPORTED(vap, va_gid) ||
3387 VATTR_IS_SUPPORTED(vap, va_flags) ||
3388 VATTR_IS_SUPPORTED(vap, va_acl) ||
3389 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
b0d623f7 3390 VATTR_IS_SUPPORTED(vap, va_guuid))) {
2d21ac55 3391 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
91447636 3392
b0d623f7
A
3393#if NAMEDSTREAMS
3394 if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
3395 vnode_t svp;
3396 if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
3397 vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3398 vnode_put(svp);
3399 }
3400 }
3401#endif /* NAMEDSTREAMS */
3402 }
3403
3404
3405 post_event_if_success(vp, _err, NOTE_ATTRIB);
3406
91447636
A
3407 return (_err);
3408}
3409
3410
3411#if 0
3412/*
3413 *#
3414 *#% read vp L L L
3415 *#
3416 */
3417struct vnop_read_args {
3418 struct vnodeop_desc *a_desc;
3419 vnode_t a_vp;
3420 struct uio *a_uio;
3421 int a_ioflag;
3422 vfs_context_t a_context;
3423};
3424#endif /* 0*/
3425errno_t
2d21ac55 3426VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
91447636
A
3427{
3428 int _err;
3429 struct vnop_read_args a;
b0d623f7 3430#ifndef __LP64__
91447636
A
3431 int thread_safe;
3432 int funnel_state = 0;
b0d623f7 3433#endif /* __LP64__ */
91447636 3434
2d21ac55
A
3435 if (ctx == NULL) {
3436 ctx = vfs_context_current();
91447636
A
3437 }
3438
3439 a.a_desc = &vnop_read_desc;
3440 a.a_vp = vp;
3441 a.a_uio = uio;
3442 a.a_ioflag = ioflag;
2d21ac55 3443 a.a_context = ctx;
91447636 3444
b0d623f7
A
3445#ifndef __LP64__
3446 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3447 if (!thread_safe) {
3448 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3449 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3450 if ( (_err = lock_fsnode(vp, NULL)) ) {
3451 (void) thread_funnel_set(kernel_flock, funnel_state);
3452 return (_err);
3453 }
3454 }
3455 }
b0d623f7
A
3456#endif /* __LP64__ */
3457
91447636
A
3458 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3459
b0d623f7 3460#ifndef __LP64__
91447636
A
3461 if (!thread_safe) {
3462 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3463 unlock_fsnode(vp, NULL);
3464 }
3465 (void) thread_funnel_set(kernel_flock, funnel_state);
3466 }
b0d623f7
A
3467#endif /* __LP64__ */
3468
91447636
A
3469 return (_err);
3470}
3471
3472
3473#if 0
3474/*
3475 *#
3476 *#% write vp L L L
3477 *#
3478 */
3479struct vnop_write_args {
3480 struct vnodeop_desc *a_desc;
3481 vnode_t a_vp;
3482 struct uio *a_uio;
3483 int a_ioflag;
3484 vfs_context_t a_context;
3485};
3486#endif /* 0*/
3487errno_t
2d21ac55 3488VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
91447636
A
3489{
3490 struct vnop_write_args a;
3491 int _err;
b0d623f7 3492#ifndef __LP64__
91447636
A
3493 int thread_safe;
3494 int funnel_state = 0;
b0d623f7 3495#endif /* __LP64__ */
91447636 3496
2d21ac55
A
3497 if (ctx == NULL) {
3498 ctx = vfs_context_current();
91447636
A
3499 }
3500
3501 a.a_desc = &vnop_write_desc;
3502 a.a_vp = vp;
3503 a.a_uio = uio;
3504 a.a_ioflag = ioflag;
2d21ac55 3505 a.a_context = ctx;
91447636 3506
b0d623f7
A
3507#ifndef __LP64__
3508 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3509 if (!thread_safe) {
3510 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3511 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3512 if ( (_err = lock_fsnode(vp, NULL)) ) {
3513 (void) thread_funnel_set(kernel_flock, funnel_state);
3514 return (_err);
3515 }
3516 }
3517 }
b0d623f7
A
3518#endif /* __LP64__ */
3519
91447636
A
3520 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3521
b0d623f7 3522#ifndef __LP64__
91447636
A
3523 if (!thread_safe) {
3524 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3525 unlock_fsnode(vp, NULL);
3526 }
3527 (void) thread_funnel_set(kernel_flock, funnel_state);
3528 }
b0d623f7
A
3529#endif /* __LP64__ */
3530
3531 post_event_if_success(vp, _err, NOTE_WRITE);
3532
91447636
A
3533 return (_err);
3534}
3535
3536
3537#if 0
3538/*
3539 *#
3540 *#% ioctl vp U U U
3541 *#
3542 */
3543struct vnop_ioctl_args {
3544 struct vnodeop_desc *a_desc;
3545 vnode_t a_vp;
3546 u_long a_command;
3547 caddr_t a_data;
3548 int a_fflag;
3549 vfs_context_t a_context;
3550};
3551#endif /* 0*/
3552errno_t
2d21ac55 3553VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
91447636
A
3554{
3555 int _err;
3556 struct vnop_ioctl_args a;
b0d623f7 3557#ifndef __LP64__
91447636
A
3558 int thread_safe;
3559 int funnel_state = 0;
b0d623f7 3560#endif /* __LP64__ */
91447636 3561
2d21ac55
A
3562 if (ctx == NULL) {
3563 ctx = vfs_context_current();
91447636
A
3564 }
3565
b0d623f7
A
3566 /*
3567 * This check should probably have been put in the TTY code instead...
3568 *
3569 * We have to be careful about what we assume during startup and shutdown.
3570 * We have to be able to use the root filesystem's device vnode even when
3571 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3572 * structure. If there is no data pointer, it doesn't matter whether
3573 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZECACHE)
3574 * which passes NULL for its data pointer can therefore be used during
3575 * mount or unmount of the root filesystem.
3576 *
3577 * Depending on what root filesystems need to do during mount/unmount, we
3578 * may need to loosen this check again in the future.
3579 */
3580 if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
3581 if (data != NULL && !vnode_vfs64bitready(vp)) {
91447636
A
3582 return(ENOTTY);
3583 }
3584 }
3585
3586 a.a_desc = &vnop_ioctl_desc;
3587 a.a_vp = vp;
3588 a.a_command = command;
3589 a.a_data = data;
3590 a.a_fflag = fflag;
2d21ac55 3591 a.a_context= ctx;
91447636 3592
b0d623f7
A
3593#ifndef __LP64__
3594 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3595 if (!thread_safe) {
3596 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3597 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3598 if ( (_err = lock_fsnode(vp, NULL)) ) {
3599 (void) thread_funnel_set(kernel_flock, funnel_state);
3600 return (_err);
3601 }
3602 }
3603 }
b0d623f7
A
3604#endif /* __LP64__ */
3605
91447636 3606 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
b0d623f7
A
3607
3608#ifndef __LP64__
91447636
A
3609 if (!thread_safe) {
3610 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3611 unlock_fsnode(vp, NULL);
3612 }
3613 (void) thread_funnel_set(kernel_flock, funnel_state);
3614 }
b0d623f7
A
3615#endif /* __LP64__ */
3616
91447636
A
3617 return (_err);
3618}
3619
3620
3621#if 0
3622/*
3623 *#
3624 *#% select vp U U U
3625 *#
3626 */
3627struct vnop_select_args {
3628 struct vnodeop_desc *a_desc;
3629 vnode_t a_vp;
3630 int a_which;
3631 int a_fflags;
3632 void *a_wql;
3633 vfs_context_t a_context;
3634};
3635#endif /* 0*/
3636errno_t
2d21ac55 3637VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t ctx)
91447636
A
3638{
3639 int _err;
3640 struct vnop_select_args a;
b0d623f7 3641#ifndef __LP64__
91447636
A
3642 int thread_safe;
3643 int funnel_state = 0;
b0d623f7 3644#endif /* __LP64__ */
91447636 3645
2d21ac55
A
3646 if (ctx == NULL) {
3647 ctx = vfs_context_current();
91447636
A
3648 }
3649 a.a_desc = &vnop_select_desc;
3650 a.a_vp = vp;
3651 a.a_which = which;
3652 a.a_fflags = fflags;
2d21ac55 3653 a.a_context = ctx;
91447636 3654 a.a_wql = wql;
91447636 3655
b0d623f7
A
3656#ifndef __LP64__
3657 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3658 if (!thread_safe) {
3659 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3660 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3661 if ( (_err = lock_fsnode(vp, NULL)) ) {
3662 (void) thread_funnel_set(kernel_flock, funnel_state);
3663 return (_err);
3664 }
3665 }
3666 }
b0d623f7
A
3667#endif /* __LP64__ */
3668
91447636 3669 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
b0d623f7
A
3670
3671#ifndef __LP64__
91447636
A
3672 if (!thread_safe) {
3673 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3674 unlock_fsnode(vp, NULL);
3675 }
3676 (void) thread_funnel_set(kernel_flock, funnel_state);
3677 }
b0d623f7
A
3678#endif /* __LP64__ */
3679
91447636
A
3680 return (_err);
3681}
3682
3683
3684#if 0
3685/*
3686 *#
3687 *#% exchange fvp L L L
3688 *#% exchange tvp L L L
3689 *#
3690 */
3691struct vnop_exchange_args {
3692 struct vnodeop_desc *a_desc;
3693 vnode_t a_fvp;
3694 vnode_t a_tvp;
3695 int a_options;
3696 vfs_context_t a_context;
3697};
3698#endif /* 0*/
3699errno_t
2d21ac55 3700VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
91447636
A
3701{
3702 int _err;
3703 struct vnop_exchange_args a;
b0d623f7 3704#ifndef __LP64__
91447636
A
3705 int thread_safe;
3706 int funnel_state = 0;
3707 vnode_t lock_first = NULL, lock_second = NULL;
b0d623f7 3708#endif /* __LP64__ */
91447636
A
3709
3710 a.a_desc = &vnop_exchange_desc;
3711 a.a_fvp = fvp;
3712 a.a_tvp = tvp;
3713 a.a_options = options;
2d21ac55 3714 a.a_context = ctx;
91447636 3715
b0d623f7
A
3716#ifndef __LP64__
3717 thread_safe = THREAD_SAFE_FS(fvp);
91447636
A
3718 if (!thread_safe) {
3719 /*
3720 * Lock in vnode address order to avoid deadlocks
3721 */
3722 if (fvp < tvp) {
3723 lock_first = fvp;
3724 lock_second = tvp;
3725 } else {
3726 lock_first = tvp;
3727 lock_second = fvp;
3728 }
3729 if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) {
3730 return (_err);
3731 }
3732 if ( (_err = lock_fsnode(lock_second, NULL)) ) {
3733 unlock_fsnode(lock_first, &funnel_state);
3734 return (_err);
3735 }
3736 }
b0d623f7
A
3737#endif /* __LP64__ */
3738
91447636 3739 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
b0d623f7
A
3740
3741#ifndef __LP64__
91447636
A
3742 if (!thread_safe) {
3743 unlock_fsnode(lock_second, NULL);
3744 unlock_fsnode(lock_first, &funnel_state);
3745 }
b0d623f7
A
3746#endif /* __LP64__ */
3747
3748 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3749 post_event_if_success(fvp, _err, NOTE_ATTRIB);
3750 post_event_if_success(tvp, _err, NOTE_ATTRIB);
3751
91447636
A
3752 return (_err);
3753}
3754
3755
3756#if 0
3757/*
3758 *#
3759 *#% revoke vp U U U
3760 *#
3761 */
3762struct vnop_revoke_args {
3763 struct vnodeop_desc *a_desc;
3764 vnode_t a_vp;
3765 int a_flags;
3766 vfs_context_t a_context;
3767};
3768#endif /* 0*/
3769errno_t
2d21ac55 3770VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
91447636
A
3771{
3772 struct vnop_revoke_args a;
3773 int _err;
b0d623f7 3774#ifndef __LP64__
91447636
A
3775 int thread_safe;
3776 int funnel_state = 0;
b0d623f7 3777#endif /* __LP64__ */
91447636
A
3778
3779 a.a_desc = &vnop_revoke_desc;
3780 a.a_vp = vp;
3781 a.a_flags = flags;
2d21ac55 3782 a.a_context = ctx;
91447636 3783
b0d623f7
A
3784#ifndef __LP64__
3785 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3786 if (!thread_safe) {
3787 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3788 }
b0d623f7
A
3789#endif /* __LP64__ */
3790
91447636 3791 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
b0d623f7
A
3792
3793#ifndef __LP64__
91447636
A
3794 if (!thread_safe) {
3795 (void) thread_funnel_set(kernel_flock, funnel_state);
3796 }
b0d623f7
A
3797#endif /* __LP64__ */
3798
91447636
A
3799 return (_err);
3800}
3801
3802
3803#if 0
3804/*
3805 *#
3806 *# mmap - vp U U U
3807 *#
3808 */
3809struct vnop_mmap_args {
3810 struct vnodeop_desc *a_desc;
3811 vnode_t a_vp;
3812 int a_fflags;
3813 vfs_context_t a_context;
3814};
3815#endif /* 0*/
3816errno_t
2d21ac55 3817VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
91447636
A
3818{
3819 int _err;
3820 struct vnop_mmap_args a;
b0d623f7 3821#ifndef __LP64__
91447636
A
3822 int thread_safe;
3823 int funnel_state = 0;
b0d623f7 3824#endif /* __LP64__ */
91447636
A
3825
3826 a.a_desc = &vnop_mmap_desc;
3827 a.a_vp = vp;
3828 a.a_fflags = fflags;
2d21ac55 3829 a.a_context = ctx;
91447636 3830
b0d623f7
A
3831#ifndef __LP64__
3832 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3833 if (!thread_safe) {
3834 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3835 return (_err);
3836 }
3837 }
b0d623f7
A
3838#endif /* __LP64__ */
3839
91447636 3840 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
b0d623f7
A
3841
3842#ifndef __LP64__
91447636
A
3843 if (!thread_safe) {
3844 unlock_fsnode(vp, &funnel_state);
3845 }
b0d623f7
A
3846#endif /* __LP64__ */
3847
91447636
A
3848 return (_err);
3849}
3850
3851
3852#if 0
3853/*
3854 *#
3855 *# mnomap - vp U U U
3856 *#
3857 */
3858struct vnop_mnomap_args {
3859 struct vnodeop_desc *a_desc;
3860 vnode_t a_vp;
3861 vfs_context_t a_context;
3862};
3863#endif /* 0*/
3864errno_t
2d21ac55 3865VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
91447636
A
3866{
3867 int _err;
3868 struct vnop_mnomap_args a;
b0d623f7 3869#ifndef __LP64__
91447636
A
3870 int thread_safe;
3871 int funnel_state = 0;
b0d623f7 3872#endif /* __LP64__ */
91447636
A
3873
3874 a.a_desc = &vnop_mnomap_desc;
3875 a.a_vp = vp;
2d21ac55 3876 a.a_context = ctx;
91447636 3877
b0d623f7
A
3878#ifndef __LP64__
3879 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3880 if (!thread_safe) {
3881 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3882 return (_err);
3883 }
3884 }
b0d623f7
A
3885#endif /* __LP64__ */
3886
91447636 3887 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
b0d623f7
A
3888
3889#ifndef __LP64__
91447636
A
3890 if (!thread_safe) {
3891 unlock_fsnode(vp, &funnel_state);
3892 }
b0d623f7
A
3893#endif /* __LP64__ */
3894
91447636
A
3895 return (_err);
3896}
3897
3898
3899#if 0
3900/*
3901 *#
3902 *#% fsync vp L L L
3903 *#
3904 */
3905struct vnop_fsync_args {
3906 struct vnodeop_desc *a_desc;
3907 vnode_t a_vp;
3908 int a_waitfor;
3909 vfs_context_t a_context;
3910};
3911#endif /* 0*/
3912errno_t
2d21ac55 3913VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
91447636
A
3914{
3915 struct vnop_fsync_args a;
3916 int _err;
b0d623f7 3917#ifndef __LP64__
91447636
A
3918 int thread_safe;
3919 int funnel_state = 0;
b0d623f7 3920#endif /* __LP64__ */
91447636
A
3921
3922 a.a_desc = &vnop_fsync_desc;
3923 a.a_vp = vp;
3924 a.a_waitfor = waitfor;
2d21ac55 3925 a.a_context = ctx;
91447636 3926
b0d623f7
A
3927#ifndef __LP64__
3928 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3929 if (!thread_safe) {
3930 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3931 return (_err);
3932 }
3933 }
b0d623f7
A
3934#endif /* __LP64__ */
3935
91447636 3936 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
b0d623f7
A
3937
3938#ifndef __LP64__
91447636
A
3939 if (!thread_safe) {
3940 unlock_fsnode(vp, &funnel_state);
3941 }
b0d623f7
A
3942#endif /* __LP64__ */
3943
91447636
A
3944 return (_err);
3945}
3946
3947
3948#if 0
3949/*
3950 *#
3951 *#% remove dvp L U U
3952 *#% remove vp L U U
3953 *#
3954 */
3955struct vnop_remove_args {
3956 struct vnodeop_desc *a_desc;
3957 vnode_t a_dvp;
3958 vnode_t a_vp;
3959 struct componentname *a_cnp;
3960 int a_flags;
3961 vfs_context_t a_context;
3962};
3963#endif /* 0*/
3964errno_t
2d21ac55 3965VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
91447636
A
3966{
3967 int _err;
3968 struct vnop_remove_args a;
b0d623f7 3969#ifndef __LP64__
91447636
A
3970 int thread_safe;
3971 int funnel_state = 0;
b0d623f7 3972#endif /* __LP64__ */
91447636
A
3973
3974 a.a_desc = &vnop_remove_desc;
3975 a.a_dvp = dvp;
3976 a.a_vp = vp;
3977 a.a_cnp = cnp;
3978 a.a_flags = flags;
2d21ac55 3979 a.a_context = ctx;
91447636 3980
b0d623f7
A
3981#ifndef __LP64__
3982 thread_safe = THREAD_SAFE_FS(dvp);
91447636
A
3983 if (!thread_safe) {
3984 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3985 return (_err);
3986 }
3987 }
b0d623f7
A
3988#endif /* __LP64__ */
3989
91447636
A
3990 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
3991
3992 if (_err == 0) {
3993 vnode_setneedinactive(vp);
3994
3995 if ( !(NATIVE_XATTR(dvp)) ) {
3996 /*
2d21ac55 3997 * Remove any associated extended attribute file (._ AppleDouble file).
91447636 3998 */
b0d623f7 3999 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
91447636
A
4000 }
4001 }
b0d623f7
A
4002
4003#ifndef __LP64__
91447636
A
4004 if (!thread_safe) {
4005 unlock_fsnode(vp, &funnel_state);
4006 }
b0d623f7
A
4007#endif /* __LP64__ */
4008
4009 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4010 post_event_if_success(dvp, _err, NOTE_WRITE);
4011
91447636
A
4012 return (_err);
4013}
4014
4015
4016#if 0
4017/*
4018 *#
4019 *#% link vp U U U
4020 *#% link tdvp L U U
4021 *#
4022 */
4023struct vnop_link_args {
4024 struct vnodeop_desc *a_desc;
4025 vnode_t a_vp;
4026 vnode_t a_tdvp;
4027 struct componentname *a_cnp;
4028 vfs_context_t a_context;
4029};
4030#endif /* 0*/
4031errno_t
2d21ac55 4032VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
91447636
A
4033{
4034 int _err;
4035 struct vnop_link_args a;
b0d623f7 4036#ifndef __LP64__
91447636
A
4037 int thread_safe;
4038 int funnel_state = 0;
b0d623f7 4039#endif /* __LP64__ */
91447636
A
4040
4041 /*
4042 * For file systems with non-native extended attributes,
4043 * disallow linking to an existing "._" Apple Double file.
4044 */
4045 if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
2d21ac55 4046 const char *vname;
91447636
A
4047
4048 vname = vnode_getname(vp);
4049 if (vname != NULL) {
4050 _err = 0;
4051 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
4052 _err = EPERM;
4053 }
4054 vnode_putname(vname);
4055 if (_err)
4056 return (_err);
4057 }
4058 }
4059 a.a_desc = &vnop_link_desc;
4060 a.a_vp = vp;
4061 a.a_tdvp = tdvp;
4062 a.a_cnp = cnp;
2d21ac55 4063 a.a_context = ctx;
91447636 4064
b0d623f7
A
4065#ifndef __LP64__
4066 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
4067 if (!thread_safe) {
4068 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4069 return (_err);
4070 }
4071 }
b0d623f7
A
4072#endif /* __LP64__ */
4073
91447636 4074 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
b0d623f7
A
4075
4076#ifndef __LP64__
91447636
A
4077 if (!thread_safe) {
4078 unlock_fsnode(vp, &funnel_state);
4079 }
b0d623f7
A
4080#endif /* __LP64__ */
4081
4082 post_event_if_success(vp, _err, NOTE_LINK);
4083 post_event_if_success(tdvp, _err, NOTE_WRITE);
4084
91447636
A
4085 return (_err);
4086}
4087
4088
4089#if 0
4090/*
4091 *#
4092 *#% rename fdvp U U U
4093 *#% rename fvp U U U
4094 *#% rename tdvp L U U
4095 *#% rename tvp X U U
4096 *#
4097 */
4098struct vnop_rename_args {
4099 struct vnodeop_desc *a_desc;
4100 vnode_t a_fdvp;
4101 vnode_t a_fvp;
4102 struct componentname *a_fcnp;
4103 vnode_t a_tdvp;
4104 vnode_t a_tvp;
4105 struct componentname *a_tcnp;
4106 vfs_context_t a_context;
4107};
4108#endif /* 0*/
4109errno_t
4110VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4111 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
2d21ac55 4112 vfs_context_t ctx)
91447636 4113{
b0d623f7
A
4114 int _err = 0;
4115 int events;
91447636 4116 struct vnop_rename_args a;
91447636
A
4117 char smallname1[48];
4118 char smallname2[48];
4119 char *xfromname = NULL;
4120 char *xtoname = NULL;
b0d623f7
A
4121#ifndef __LP64__
4122 int funnel_state = 0;
91447636
A
4123 vnode_t lock_first = NULL, lock_second = NULL;
4124 vnode_t fdvp_unsafe = NULLVP;
4125 vnode_t tdvp_unsafe = NULLVP;
b0d623f7
A
4126#endif /* __LP64__ */
4127 vnode_t src_attr_vp = NULLVP;
4128 vnode_t dst_attr_vp = NULLVP;
4129 struct nameidata fromnd;
4130 struct nameidata tond;
91447636
A
4131
4132 a.a_desc = &vnop_rename_desc;
4133 a.a_fdvp = fdvp;
4134 a.a_fvp = fvp;
4135 a.a_fcnp = fcnp;
4136 a.a_tdvp = tdvp;
4137 a.a_tvp = tvp;
4138 a.a_tcnp = tcnp;
2d21ac55 4139 a.a_context = ctx;
91447636 4140
b0d623f7 4141#ifndef __LP64__
91447636
A
4142 if (!THREAD_SAFE_FS(fdvp))
4143 fdvp_unsafe = fdvp;
4144 if (!THREAD_SAFE_FS(tdvp))
4145 tdvp_unsafe = tdvp;
4146
4147 if (fdvp_unsafe != NULLVP) {
4148 /*
4149 * Lock parents in vnode address order to avoid deadlocks
4150 * note that it's possible for the fdvp to be unsafe,
4151 * but the tdvp to be safe because tvp could be a directory
4152 * in the root of a filesystem... in that case, tdvp is the
4153 * in the filesystem that this root is mounted on
4154 */
b0d623f7
A
4155 if (tdvp_unsafe == NULL || fdvp_unsafe == tdvp_unsafe) {
4156 lock_first = fdvp_unsafe;
91447636
A
4157 lock_second = NULL;
4158 } else if (fdvp_unsafe < tdvp_unsafe) {
b0d623f7 4159 lock_first = fdvp_unsafe;
91447636
A
4160 lock_second = tdvp_unsafe;
4161 } else {
b0d623f7 4162 lock_first = tdvp_unsafe;
91447636
A
4163 lock_second = fdvp_unsafe;
4164 }
4165 if ( (_err = lock_fsnode(lock_first, &funnel_state)) )
b0d623f7 4166 return (_err);
91447636
A
4167
4168 if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
b0d623f7 4169 unlock_fsnode(lock_first, &funnel_state);
91447636
A
4170 return (_err);
4171 }
4172
4173 /*
4174 * Lock both children in vnode address order to avoid deadlocks
4175 */
b0d623f7
A
4176 if (tvp == NULL || tvp == fvp) {
4177 lock_first = fvp;
91447636
A
4178 lock_second = NULL;
4179 } else if (fvp < tvp) {
b0d623f7 4180 lock_first = fvp;
91447636
A
4181 lock_second = tvp;
4182 } else {
b0d623f7 4183 lock_first = tvp;
91447636
A
4184 lock_second = fvp;
4185 }
4186 if ( (_err = lock_fsnode(lock_first, NULL)) )
b0d623f7 4187 goto out1;
91447636
A
4188
4189 if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
4190 unlock_fsnode(lock_first, NULL);
4191 goto out1;
4192 }
4193 }
b0d623f7
A
4194#endif /* __LP64__ */
4195
91447636 4196 /*
b0d623f7
A
4197 * We need to preflight any potential AppleDouble file for the source file
4198 * before doing the rename operation, since we could potentially be doing
4199 * this operation on a network filesystem, and would end up duplicating
4200 * the work. Also, save the source and destination names. Skip it if the
4201 * source has a "._" prefix.
91447636 4202 */
b0d623f7 4203
91447636
A
4204 if (!NATIVE_XATTR(fdvp) &&
4205 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
4206 size_t len;
b0d623f7 4207 int error;
91447636
A
4208
4209 /* Get source attribute file name. */
4210 len = fcnp->cn_namelen + 3;
4211 if (len > sizeof(smallname1)) {
4212 MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
4213 } else {
4214 xfromname = &smallname1[0];
4215 }
2d21ac55 4216 strlcpy(xfromname, "._", min(sizeof smallname1, len));
91447636
A
4217 strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
4218 xfromname[len-1] = '\0';
4219
4220 /* Get destination attribute file name. */
4221 len = tcnp->cn_namelen + 3;
4222 if (len > sizeof(smallname2)) {
4223 MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
4224 } else {
4225 xtoname = &smallname2[0];
4226 }
2d21ac55 4227 strlcpy(xtoname, "._", min(sizeof smallname2, len));
91447636
A
4228 strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
4229 xtoname[len-1] = '\0';
b0d623f7
A
4230
4231 /*
4232 * Look up source attribute file, keep reference on it if exists.
4233 * Note that we do the namei with the nameiop of RENAME, which is different than
4234 * in the rename syscall. It's OK if the source file does not exist, since this
4235 * is only for AppleDouble files.
4236 */
4237 if (xfromname != NULL) {
4238 NDINIT(&fromnd, RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
4239 CAST_USER_ADDR_T(xfromname), ctx);
4240 fromnd.ni_dvp = fdvp;
4241 error = namei(&fromnd);
4242
4243 /*
4244 * If there was an error looking up source attribute file,
4245 * we'll behave as if it didn't exist.
4246 */
4247
4248 if (error == 0) {
4249 if (fromnd.ni_vp) {
4250 /* src_attr_vp indicates need to call vnode_put / nameidone later */
4251 src_attr_vp = fromnd.ni_vp;
4252
4253 if (fromnd.ni_vp->v_type != VREG) {
4254 src_attr_vp = NULLVP;
4255 vnode_put(fromnd.ni_vp);
4256 }
4257 }
4258 /*
4259 * Either we got an invalid vnode type (not a regular file) or the namei lookup
4260 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
4261 * have a vnode here, so we drop our namei buffer for the source attribute file
4262 */
4263 if (src_attr_vp == NULLVP) {
4264 nameidone(&fromnd);
4265 }
4266 }
4267 }
91447636
A
4268 }
4269
b0d623f7
A
4270
4271 /* do the rename of the main file. */
91447636
A
4272 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4273
b0d623f7 4274#ifndef __LP64__
91447636
A
4275 if (fdvp_unsafe != NULLVP) {
4276 if (lock_second != NULL)
4277 unlock_fsnode(lock_second, NULL);
4278 unlock_fsnode(lock_first, NULL);
4279 }
b0d623f7
A
4280#endif /* __LP64__ */
4281
91447636
A
4282 if (_err == 0) {
4283 if (tvp && tvp != fvp)
4284 vnode_setneedinactive(tvp);
4285 }
4286
4287 /*
2d21ac55 4288 * Rename any associated extended attribute file (._ AppleDouble file).
91447636
A
4289 */
4290 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
b0d623f7
A
4291 int error = 0;
4292
91447636 4293 /*
b0d623f7
A
4294 * Get destination attribute file vnode.
4295 * Note that tdvp already has an iocount reference. Make sure to check that we
4296 * get a valid vnode from namei.
91447636 4297 */
b0d623f7
A
4298 NDINIT(&tond, RENAME,
4299 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
4300 CAST_USER_ADDR_T(xtoname), ctx);
4301 tond.ni_dvp = tdvp;
4302 error = namei(&tond);
4303
4304 if (error)
4305 goto out;
4306
4307 if (tond.ni_vp) {
4308 dst_attr_vp = tond.ni_vp;
91447636 4309 }
b0d623f7
A
4310
4311 if (src_attr_vp) {
4312 /* attempt to rename src -> dst */
4313
4314 a.a_desc = &vnop_rename_desc;
4315 a.a_fdvp = fdvp;
4316 a.a_fvp = src_attr_vp;
4317 a.a_fcnp = &fromnd.ni_cnd;
4318 a.a_tdvp = tdvp;
4319 a.a_tvp = dst_attr_vp;
4320 a.a_tcnp = &tond.ni_cnd;
4321 a.a_context = ctx;
4322
4323#ifndef __LP64__
4324 if (fdvp_unsafe != NULLVP) {
4325 /*
4326 * Lock in vnode address order to avoid deadlocks
4327 */
4328 if (dst_attr_vp == NULL || dst_attr_vp == src_attr_vp) {
4329 lock_first = src_attr_vp;
4330 lock_second = NULL;
4331 } else if (src_attr_vp < dst_attr_vp) {
4332 lock_first = src_attr_vp;
4333 lock_second = dst_attr_vp;
4334 } else {
4335 lock_first = dst_attr_vp;
4336 lock_second = src_attr_vp;
4337 }
4338 if ( (error = lock_fsnode(lock_first, NULL)) == 0) {
4339 if (lock_second != NULL && (error = lock_fsnode(lock_second, NULL)) )
4340 unlock_fsnode(lock_first, NULL);
4341 }
4342 }
4343#endif /* __LP64__ */
4344 if (error == 0) {
4345 const char *oname;
4346 vnode_t oparent;
4347
4348 /* Save these off so we can later verify them (fix up below) */
4349 oname = src_attr_vp->v_name;
4350 oparent = src_attr_vp->v_parent;
4351
4352 error = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4353
4354#ifndef __LP64__
4355 if (fdvp_unsafe != NULLVP) {
4356 if (lock_second != NULL)
4357 unlock_fsnode(lock_second, NULL);
4358 unlock_fsnode(lock_first, NULL);
4359 }
4360#endif /* __LP64__ */
4361
4362 if (error == 0) {
4363 vnode_setneedinactive(src_attr_vp);
4364
4365 if (dst_attr_vp && dst_attr_vp != src_attr_vp)
4366 vnode_setneedinactive(dst_attr_vp);
4367 /*
4368 * Fix up name & parent pointers on ._ file
4369 */
4370 if (oname == src_attr_vp->v_name &&
4371 oparent == src_attr_vp->v_parent) {
4372 int update_flags;
4373
4374 update_flags = VNODE_UPDATE_NAME;
91447636 4375
b0d623f7
A
4376 if (fdvp != tdvp)
4377 update_flags |= VNODE_UPDATE_PARENT;
4378
4379 vnode_update_identity(src_attr_vp, tdvp,
4380 tond.ni_cnd.cn_nameptr,
4381 tond.ni_cnd.cn_namelen,
4382 tond.ni_cnd.cn_hash,
4383 update_flags);
4384 }
4385 }
4386 }
4387 /* kevent notifications for moving resource files
4388 * _err is zero if we're here, so no need to notify directories, code
4389 * below will do that. only need to post the rename on the source and
4390 * possibly a delete on the dest
4391 */
4392 post_event_if_success(src_attr_vp, error, NOTE_RENAME);
4393 if (dst_attr_vp) {
4394 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4395 }
4396
4397 } else if (dst_attr_vp) {
91447636 4398 /*
b0d623f7
A
4399 * Just delete destination attribute file vnode if it exists, since
4400 * we didn't have a source attribute file.
91447636
A
4401 * Note that tdvp already has an iocount reference.
4402 */
b0d623f7
A
4403
4404 struct vnop_remove_args args;
4405
91447636
A
4406 args.a_desc = &vnop_remove_desc;
4407 args.a_dvp = tdvp;
b0d623f7 4408 args.a_vp = dst_attr_vp;
91447636 4409 args.a_cnp = &tond.ni_cnd;
2d21ac55 4410 args.a_context = ctx;
91447636 4411
b0d623f7 4412#ifndef __LP64__
91447636 4413 if (fdvp_unsafe != NULLVP)
b0d623f7
A
4414 error = lock_fsnode(dst_attr_vp, NULL);
4415#endif /* __LP64__ */
91447636 4416 if (error == 0) {
b0d623f7 4417 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
91447636 4418
b0d623f7 4419#ifndef __LP64__
91447636 4420 if (fdvp_unsafe != NULLVP)
b0d623f7
A
4421 unlock_fsnode(dst_attr_vp, NULL);
4422#endif /* __LP64__ */
91447636
A
4423
4424 if (error == 0)
b0d623f7 4425 vnode_setneedinactive(dst_attr_vp);
91447636 4426 }
2d21ac55 4427
b0d623f7
A
4428 /* kevent notification for deleting the destination's attribute file
4429 * if it existed. Only need to post the delete on the destination, since
4430 * the code below will handle the directories.
4431 */
4432 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
91447636 4433 }
91447636
A
4434 }
4435out:
b0d623f7
A
4436 if (src_attr_vp) {
4437 vnode_put(src_attr_vp);
4438 nameidone(&fromnd);
4439 }
4440 if (dst_attr_vp) {
4441 vnode_put(dst_attr_vp);
4442 nameidone(&tond);
4443 }
4444
91447636
A
4445 if (xfromname && xfromname != &smallname1[0]) {
4446 FREE(xfromname, M_TEMP);
4447 }
4448 if (xtoname && xtoname != &smallname2[0]) {
4449 FREE(xtoname, M_TEMP);
4450 }
b0d623f7
A
4451
4452#ifndef __LP64__
91447636
A
4453out1:
4454 if (fdvp_unsafe != NULLVP) {
4455 if (tdvp_unsafe != NULLVP)
4456 unlock_fsnode(tdvp_unsafe, NULL);
4457 unlock_fsnode(fdvp_unsafe, &funnel_state);
4458 }
b0d623f7
A
4459#endif /* __LP64__ */
4460
4461 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4462 if (0 == _err) {
4463 events = NOTE_WRITE;
4464 if (vnode_isdir(fvp)) {
4465 /* Link count on dir changed only if we are moving a dir and...
4466 * --Moved to new dir, not overwriting there
4467 * --Kept in same dir and DID overwrite
4468 */
4469 if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4470 events |= NOTE_LINK;
4471 }
4472 }
4473
4474 lock_vnode_and_post(fdvp, events);
4475 if (fdvp != tdvp) {
4476 lock_vnode_and_post(tdvp, events);
4477 }
4478
4479 /* If you're replacing the target, post a deletion for it */
4480 if (tvp)
4481 {
4482 lock_vnode_and_post(tvp, NOTE_DELETE);
4483 }
4484
4485 lock_vnode_and_post(fvp, NOTE_RENAME);
4486 }
4487
91447636
A
4488 return (_err);
4489}
4490
4491 #if 0
4492/*
4493 *#
4494 *#% mkdir dvp L U U
4495 *#% mkdir vpp - L -
4496 *#
4497 */
4498struct vnop_mkdir_args {
4499 struct vnodeop_desc *a_desc;
4500 vnode_t a_dvp;
4501 vnode_t *a_vpp;
4502 struct componentname *a_cnp;
4503 struct vnode_attr *a_vap;
4504 vfs_context_t a_context;
4505};
4506#endif /* 0*/
4507errno_t
4508VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
2d21ac55 4509 struct vnode_attr *vap, vfs_context_t ctx)
91447636
A
4510{
4511 int _err;
4512 struct vnop_mkdir_args a;
b0d623f7 4513#ifndef __LP64__
91447636
A
4514 int thread_safe;
4515 int funnel_state = 0;
b0d623f7 4516#endif /* __LP64__ */
91447636
A
4517
4518 a.a_desc = &vnop_mkdir_desc;
4519 a.a_dvp = dvp;
4520 a.a_vpp = vpp;
4521 a.a_cnp = cnp;
4522 a.a_vap = vap;
2d21ac55 4523 a.a_context = ctx;
91447636 4524
b0d623f7
A
4525#ifndef __LP64__
4526 thread_safe = THREAD_SAFE_FS(dvp);
91447636
A
4527 if (!thread_safe) {
4528 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
4529 return (_err);
4530 }
4531 }
b0d623f7
A
4532#endif /* __LP64__ */
4533
91447636
A
4534 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4535 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4536 /*
4537 * Remove stale Apple Double file (if any).
4538 */
b0d623f7 4539 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
91447636 4540 }
b0d623f7
A
4541
4542#ifndef __LP64__
91447636
A
4543 if (!thread_safe) {
4544 unlock_fsnode(dvp, &funnel_state);
4545 }
b0d623f7
A
4546#endif /* __LP64__ */
4547
4548 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4549
91447636
A
4550 return (_err);
4551}
4552
4553
4554#if 0
4555/*
4556 *#
4557 *#% rmdir dvp L U U
4558 *#% rmdir vp L U U
4559 *#
4560 */
4561struct vnop_rmdir_args {
4562 struct vnodeop_desc *a_desc;
4563 vnode_t a_dvp;
4564 vnode_t a_vp;
4565 struct componentname *a_cnp;
4566 vfs_context_t a_context;
4567};
4568
4569#endif /* 0*/
4570errno_t
2d21ac55 4571VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
91447636
A
4572{
4573 int _err;
4574 struct vnop_rmdir_args a;
b0d623f7 4575#ifndef __LP64__
91447636
A
4576 int thread_safe;
4577 int funnel_state = 0;
b0d623f7 4578#endif /* __LP64__ */
91447636
A
4579
4580 a.a_desc = &vnop_rmdir_desc;
4581 a.a_dvp = dvp;
4582 a.a_vp = vp;
4583 a.a_cnp = cnp;
2d21ac55 4584 a.a_context = ctx;
91447636 4585
b0d623f7
A
4586#ifndef __LP64__
4587 thread_safe = THREAD_SAFE_FS(dvp);
91447636
A
4588 if (!thread_safe) {
4589 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4590 return (_err);
4591 }
4592 }
b0d623f7
A
4593#endif /* __LP64__ */
4594
91447636
A
4595 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
4596
4597 if (_err == 0) {
4598 vnode_setneedinactive(vp);
4599
4600 if ( !(NATIVE_XATTR(dvp)) ) {
4601 /*
2d21ac55 4602 * Remove any associated extended attribute file (._ AppleDouble file).
91447636 4603 */
b0d623f7 4604 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
91447636
A
4605 }
4606 }
b0d623f7
A
4607
4608#ifndef __LP64__
91447636
A
4609 if (!thread_safe) {
4610 unlock_fsnode(vp, &funnel_state);
4611 }
b0d623f7
A
4612#endif /* __LP64__ */
4613
4614 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
4615 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4616 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4617
91447636
A
4618 return (_err);
4619}
4620
4621/*
4622 * Remove a ._ AppleDouble file
4623 */
4624#define AD_STALE_SECS (180)
4625static void
b0d623f7
A
4626xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
4627{
91447636
A
4628 vnode_t xvp;
4629 struct nameidata nd;
4630 char smallname[64];
4631 char *filename = NULL;
4632 size_t len;
4633
4634 if ((basename == NULL) || (basename[0] == '\0') ||
4635 (basename[0] == '.' && basename[1] == '_')) {
4636 return;
4637 }
4638 filename = &smallname[0];
4639 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4640 if (len >= sizeof(smallname)) {
4641 len++; /* snprintf result doesn't include '\0' */
4642 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4643 len = snprintf(filename, len, "._%s", basename);
4644 }
2d21ac55
A
4645 NDINIT(&nd, DELETE, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
4646 CAST_USER_ADDR_T(filename), ctx);
91447636
A
4647 nd.ni_dvp = dvp;
4648 if (namei(&nd) != 0)
4649 goto out2;
4650
4651 xvp = nd.ni_vp;
4652 nameidone(&nd);
4653 if (xvp->v_type != VREG)
4654 goto out1;
4655
4656 /*
4657 * When creating a new object and a "._" file already
4658 * exists, check to see if its a stale "._" file.
4659 *
4660 */
4661 if (!force) {
4662 struct vnode_attr va;
4663
4664 VATTR_INIT(&va);
4665 VATTR_WANTED(&va, va_data_size);
4666 VATTR_WANTED(&va, va_modify_time);
2d21ac55 4667 if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
91447636
A
4668 VATTR_IS_SUPPORTED(&va, va_data_size) &&
4669 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
4670 va.va_data_size != 0) {
4671 struct timeval tv;
4672
4673 microtime(&tv);
4674 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
4675 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
4676 force = 1; /* must be stale */
4677 }
4678 }
4679 }
4680 if (force) {
4681 struct vnop_remove_args a;
4682 int error;
b0d623f7
A
4683#ifndef __LP64__
4684 int thread_safe = THREAD_SAFE_FS(dvp);
4685#endif /* __LP64__ */
91447636
A
4686
4687 a.a_desc = &vnop_remove_desc;
4688 a.a_dvp = nd.ni_dvp;
4689 a.a_vp = xvp;
4690 a.a_cnp = &nd.ni_cnd;
2d21ac55 4691 a.a_context = ctx;
91447636 4692
b0d623f7 4693#ifndef __LP64__
91447636
A
4694 if (!thread_safe) {
4695 if ( (lock_fsnode(xvp, NULL)) )
4696 goto out1;
4697 }
b0d623f7
A
4698#endif /* __LP64__ */
4699
91447636
A
4700 error = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
4701
b0d623f7 4702#ifndef __LP64__
91447636
A
4703 if (!thread_safe)
4704 unlock_fsnode(xvp, NULL);
b0d623f7 4705#endif /* __LP64__ */
91447636
A
4706
4707 if (error == 0)
4708 vnode_setneedinactive(xvp);
b0d623f7
A
4709
4710 post_event_if_success(xvp, error, NOTE_DELETE);
4711 post_event_if_success(dvp, error, NOTE_WRITE);
91447636 4712 }
b0d623f7 4713
91447636 4714out1:
2d21ac55 4715 vnode_put(dvp);
91447636
A
4716 vnode_put(xvp);
4717out2:
4718 if (filename && filename != &smallname[0]) {
4719 FREE(filename, M_TEMP);
4720 }
4721}
4722
4723/*
4724 * Shadow uid/gid/mod to a ._ AppleDouble file
4725 */
4726static void
4727xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
b0d623f7
A
4728 vfs_context_t ctx)
4729{
91447636
A
4730 vnode_t xvp;
4731 struct nameidata nd;
4732 char smallname[64];
4733 char *filename = NULL;
4734 size_t len;
4735
4736 if ((dvp == NULLVP) ||
4737 (basename == NULL) || (basename[0] == '\0') ||
4738 (basename[0] == '.' && basename[1] == '_')) {
4739 return;
4740 }
4741 filename = &smallname[0];
4742 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4743 if (len >= sizeof(smallname)) {
4744 len++; /* snprintf result doesn't include '\0' */
4745 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4746 len = snprintf(filename, len, "._%s", basename);
4747 }
4748 NDINIT(&nd, LOOKUP, NOFOLLOW | USEDVP, UIO_SYSSPACE,
2d21ac55 4749 CAST_USER_ADDR_T(filename), ctx);
91447636
A
4750 nd.ni_dvp = dvp;
4751 if (namei(&nd) != 0)
4752 goto out2;
4753
4754 xvp = nd.ni_vp;
4755 nameidone(&nd);
4756
4757 if (xvp->v_type == VREG) {
b0d623f7
A
4758#ifndef __LP64__
4759 int thread_safe = THREAD_SAFE_FS(dvp);
4760#endif /* __LP64__ */
91447636
A
4761 struct vnop_setattr_args a;
4762
4763 a.a_desc = &vnop_setattr_desc;
4764 a.a_vp = xvp;
4765 a.a_vap = vap;
2d21ac55 4766 a.a_context = ctx;
91447636 4767
b0d623f7 4768#ifndef __LP64__
91447636
A
4769 if (!thread_safe) {
4770 if ( (lock_fsnode(xvp, NULL)) )
4771 goto out1;
4772 }
b0d623f7
A
4773#endif /* __LP64__ */
4774
91447636 4775 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
b0d623f7
A
4776
4777#ifndef __LP64__
91447636
A
4778 if (!thread_safe) {
4779 unlock_fsnode(xvp, NULL);
4780 }
b0d623f7 4781#endif /* __LP64__ */
91447636 4782 }
b0d623f7
A
4783
4784
4785#ifndef __LP64__
91447636 4786out1:
b0d623f7 4787#endif /* __LP64__ */
91447636 4788 vnode_put(xvp);
b0d623f7 4789
91447636
A
4790out2:
4791 if (filename && filename != &smallname[0]) {
4792 FREE(filename, M_TEMP);
4793 }
4794}
4795
4796 #if 0
4797/*
4798 *#
4799 *#% symlink dvp L U U
4800 *#% symlink vpp - U -
4801 *#
4802 */
4803struct vnop_symlink_args {
4804 struct vnodeop_desc *a_desc;
4805 vnode_t a_dvp;
4806 vnode_t *a_vpp;
4807 struct componentname *a_cnp;
4808 struct vnode_attr *a_vap;
4809 char *a_target;
4810 vfs_context_t a_context;
4811};
4812
4813#endif /* 0*/
4814errno_t
4815VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
2d21ac55 4816 struct vnode_attr *vap, char *target, vfs_context_t ctx)
91447636
A
4817{
4818 int _err;
4819 struct vnop_symlink_args a;
b0d623f7 4820#ifndef __LP64__
91447636
A
4821 int thread_safe;
4822 int funnel_state = 0;
b0d623f7 4823#endif /* __LP64__ */
91447636
A
4824
4825 a.a_desc = &vnop_symlink_desc;
4826 a.a_dvp = dvp;
4827 a.a_vpp = vpp;
4828 a.a_cnp = cnp;
4829 a.a_vap = vap;
4830 a.a_target = target;
2d21ac55 4831 a.a_context = ctx;
91447636 4832
b0d623f7
A
4833#ifndef __LP64__
4834 thread_safe = THREAD_SAFE_FS(dvp);
91447636
A
4835 if (!thread_safe) {
4836 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
4837 return (_err);
4838 }
4839 }
b0d623f7
A
4840#endif /* __LP64__ */
4841
91447636
A
4842 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
4843 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4844 /*
b0d623f7 4845 * Remove stale Apple Double file (if any). Posts its own knotes
91447636 4846 */
b0d623f7 4847 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
91447636 4848 }
b0d623f7
A
4849
4850
4851#ifndef __LP64__
4852 if (!thread_safe) {
4853 unlock_fsnode(dvp, &funnel_state);
4854 }
4855#endif /* __LP64__ */
4856
4857 post_event_if_success(dvp, _err, NOTE_WRITE);
4858
4859 return (_err);
91447636
A
4860}
4861
4862#if 0
4863/*
4864 *#
4865 *#% readdir vp L L L
4866 *#
4867 */
4868struct vnop_readdir_args {
4869 struct vnodeop_desc *a_desc;
4870 vnode_t a_vp;
4871 struct uio *a_uio;
4872 int a_flags;
4873 int *a_eofflag;
4874 int *a_numdirent;
4875 vfs_context_t a_context;
4876};
4877
4878#endif /* 0*/
4879errno_t
4880VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
2d21ac55 4881 int *numdirent, vfs_context_t ctx)
91447636
A
4882{
4883 int _err;
4884 struct vnop_readdir_args a;
b0d623f7 4885#ifndef __LP64__
91447636
A
4886 int thread_safe;
4887 int funnel_state = 0;
b0d623f7 4888#endif /* __LP64__ */
91447636
A
4889
4890 a.a_desc = &vnop_readdir_desc;
4891 a.a_vp = vp;
4892 a.a_uio = uio;
4893 a.a_flags = flags;
4894 a.a_eofflag = eofflag;
4895 a.a_numdirent = numdirent;
2d21ac55 4896 a.a_context = ctx;
b0d623f7 4897#ifndef __LP64__
91447636
A
4898 thread_safe = THREAD_SAFE_FS(vp);
4899
4900 if (!thread_safe) {
4901 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4902 return (_err);
4903 }
4904 }
b0d623f7
A
4905#endif /* __LP64__ */
4906
91447636 4907 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
b0d623f7
A
4908
4909#ifndef __LP64__
91447636
A
4910 if (!thread_safe) {
4911 unlock_fsnode(vp, &funnel_state);
4912 }
b0d623f7 4913#endif /* __LP64__ */
91447636
A
4914 return (_err);
4915}
4916
4917#if 0
4918/*
4919 *#
4920 *#% readdirattr vp L L L
4921 *#
4922 */
4923struct vnop_readdirattr_args {
4924 struct vnodeop_desc *a_desc;
4925 vnode_t a_vp;
4926 struct attrlist *a_alist;
4927 struct uio *a_uio;
b0d623f7
A
4928 uint32_t a_maxcount;
4929 uint32_t a_options;
4930 uint32_t *a_newstate;
91447636 4931 int *a_eofflag;
b0d623f7 4932 uint32_t *a_actualcount;
91447636
A
4933 vfs_context_t a_context;
4934};
4935
4936#endif /* 0*/
4937errno_t
b0d623f7
A
4938VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
4939 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
91447636
A
4940{
4941 int _err;
4942 struct vnop_readdirattr_args a;
b0d623f7 4943#ifndef __LP64__
91447636
A
4944 int thread_safe;
4945 int funnel_state = 0;
b0d623f7 4946#endif /* __LP64__ */
91447636
A
4947
4948 a.a_desc = &vnop_readdirattr_desc;
4949 a.a_vp = vp;
4950 a.a_alist = alist;
4951 a.a_uio = uio;
4952 a.a_maxcount = maxcount;
4953 a.a_options = options;
4954 a.a_newstate = newstate;
4955 a.a_eofflag = eofflag;
4956 a.a_actualcount = actualcount;
2d21ac55 4957 a.a_context = ctx;
91447636 4958
b0d623f7
A
4959#ifndef __LP64__
4960 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
4961 if (!thread_safe) {
4962 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4963 return (_err);
4964 }
4965 }
b0d623f7
A
4966#endif /* __LP64__ */
4967
91447636 4968 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
b0d623f7
A
4969
4970#ifndef __LP64__
91447636
A
4971 if (!thread_safe) {
4972 unlock_fsnode(vp, &funnel_state);
4973 }
b0d623f7
A
4974#endif /* __LP64__ */
4975
91447636
A
4976 return (_err);
4977}
4978
4979#if 0
4980/*
4981 *#
4982 *#% readlink vp L L L
4983 *#
4984 */
4985struct vnop_readlink_args {
4986 struct vnodeop_desc *a_desc;
4987 vnode_t a_vp;
4988 struct uio *a_uio;
4989 vfs_context_t a_context;
4990};
4991#endif /* 0 */
4992
2d21ac55
A
4993/*
4994 * Returns: 0 Success
4995 * lock_fsnode:ENOENT No such file or directory [only for VFS
4996 * that is not thread safe & vnode is
4997 * currently being/has been terminated]
4998 * <vfs_readlink>:EINVAL
4999 * <vfs_readlink>:???
5000 *
5001 * Note: The return codes from the underlying VFS's readlink routine
5002 * can't be fully enumerated here, since third party VFS authors
5003 * may not limit their error returns to the ones documented here,
5004 * even though this may result in some programs functioning
5005 * incorrectly.
5006 *
5007 * The return codes documented above are those which may currently
5008 * be returned by HFS from hfs_vnop_readlink, not including
5009 * additional error code which may be propagated from underlying
5010 * routines.
5011 */
91447636 5012errno_t
2d21ac55 5013VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
91447636
A
5014{
5015 int _err;
5016 struct vnop_readlink_args a;
b0d623f7 5017#ifndef __LP64__
91447636
A
5018 int thread_safe;
5019 int funnel_state = 0;
b0d623f7 5020#endif /* __LP64__ */
91447636
A
5021
5022 a.a_desc = &vnop_readlink_desc;
5023 a.a_vp = vp;
5024 a.a_uio = uio;
2d21ac55 5025 a.a_context = ctx;
91447636 5026
b0d623f7
A
5027#ifndef __LP64__
5028 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5029 if (!thread_safe) {
5030 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5031 return (_err);
5032 }
5033 }
b0d623f7
A
5034#endif /* __LP64__ */
5035
91447636 5036 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
b0d623f7
A
5037
5038#ifndef __LP64__
91447636
A
5039 if (!thread_safe) {
5040 unlock_fsnode(vp, &funnel_state);
5041 }
b0d623f7
A
5042#endif /* __LP64__ */
5043
91447636
A
5044 return (_err);
5045}
5046
5047#if 0
5048/*
5049 *#
5050 *#% inactive vp L U U
5051 *#
5052 */
5053struct vnop_inactive_args {
5054 struct vnodeop_desc *a_desc;
5055 vnode_t a_vp;
5056 vfs_context_t a_context;
5057};
5058#endif /* 0*/
5059errno_t
2d21ac55 5060VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
91447636
A
5061{
5062 int _err;
5063 struct vnop_inactive_args a;
b0d623f7 5064#ifndef __LP64__
91447636
A
5065 int thread_safe;
5066 int funnel_state = 0;
b0d623f7 5067#endif /* __LP64__ */
91447636
A
5068
5069 a.a_desc = &vnop_inactive_desc;
5070 a.a_vp = vp;
2d21ac55 5071 a.a_context = ctx;
b0d623f7
A
5072
5073#ifndef __LP64__
91447636 5074 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5075 if (!thread_safe) {
5076 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5077 return (_err);
5078 }
5079 }
b0d623f7
A
5080#endif /* __LP64__ */
5081
91447636 5082 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
b0d623f7
A
5083
5084#ifndef __LP64__
91447636
A
5085 if (!thread_safe) {
5086 unlock_fsnode(vp, &funnel_state);
5087 }
b0d623f7 5088#endif /* __LP64__ */
cf7d32b8
A
5089
5090#if NAMEDSTREAMS
b0d623f7
A
5091 /* For file systems that do not support namedstream natively, mark
5092 * the shadow stream file vnode to be recycled as soon as the last
5093 * reference goes away. To avoid re-entering reclaim code, do not
5094 * call recycle on terminating namedstream vnodes.
cf7d32b8
A
5095 */
5096 if (vnode_isnamedstream(vp) &&
b0d623f7
A
5097 (vp->v_parent != NULLVP) &&
5098 vnode_isshadow(vp) &&
5099 ((vp->v_lflag & VL_TERMINATE) == 0)) {
cf7d32b8
A
5100 vnode_recycle(vp);
5101 }
5102#endif
5103
91447636
A
5104 return (_err);
5105}
5106
5107
5108#if 0
5109/*
5110 *#
5111 *#% reclaim vp U U U
5112 *#
5113 */
5114struct vnop_reclaim_args {
5115 struct vnodeop_desc *a_desc;
5116 vnode_t a_vp;
5117 vfs_context_t a_context;
5118};
5119#endif /* 0*/
5120errno_t
2d21ac55 5121VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
91447636
A
5122{
5123 int _err;
5124 struct vnop_reclaim_args a;
b0d623f7 5125#ifndef __LP64__
91447636
A
5126 int thread_safe;
5127 int funnel_state = 0;
b0d623f7 5128#endif /* __LP64__ */
91447636
A
5129
5130 a.a_desc = &vnop_reclaim_desc;
5131 a.a_vp = vp;
2d21ac55 5132 a.a_context = ctx;
91447636 5133
b0d623f7
A
5134#ifndef __LP64__
5135 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5136 if (!thread_safe) {
5137 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5138 }
b0d623f7
A
5139#endif /* __LP64__ */
5140
91447636 5141 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
b0d623f7
A
5142
5143#ifndef __LP64__
91447636
A
5144 if (!thread_safe) {
5145 (void) thread_funnel_set(kernel_flock, funnel_state);
5146 }
b0d623f7
A
5147#endif /* __LP64__ */
5148
91447636
A
5149 return (_err);
5150}
5151
5152
2d21ac55
A
5153/*
5154 * Returns: 0 Success
5155 * lock_fsnode:ENOENT No such file or directory [only for VFS
5156 * that is not thread safe & vnode is
5157 * currently being/has been terminated]
5158 * <vnop_pathconf_desc>:??? [per FS implementation specific]
5159 */
91447636
A
5160#if 0
5161/*
5162 *#
5163 *#% pathconf vp L L L
5164 *#
5165 */
5166struct vnop_pathconf_args {
5167 struct vnodeop_desc *a_desc;
5168 vnode_t a_vp;
5169 int a_name;
b0d623f7 5170 int32_t *a_retval;
91447636
A
5171 vfs_context_t a_context;
5172};
5173#endif /* 0*/
5174errno_t
b0d623f7 5175VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
91447636
A
5176{
5177 int _err;
5178 struct vnop_pathconf_args a;
b0d623f7 5179#ifndef __LP64__
91447636
A
5180 int thread_safe;
5181 int funnel_state = 0;
b0d623f7 5182#endif /* __LP64__ */
91447636
A
5183
5184 a.a_desc = &vnop_pathconf_desc;
5185 a.a_vp = vp;
5186 a.a_name = name;
5187 a.a_retval = retval;
2d21ac55 5188 a.a_context = ctx;
91447636 5189
b0d623f7
A
5190#ifndef __LP64__
5191 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5192 if (!thread_safe) {
5193 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5194 return (_err);
5195 }
5196 }
b0d623f7
A
5197#endif /* __LP64__ */
5198
91447636 5199 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
b0d623f7
A
5200
5201#ifndef __LP64__
91447636
A
5202 if (!thread_safe) {
5203 unlock_fsnode(vp, &funnel_state);
5204 }
b0d623f7
A
5205#endif /* __LP64__ */
5206
91447636
A
5207 return (_err);
5208}
5209
2d21ac55
A
5210/*
5211 * Returns: 0 Success
5212 * err_advlock:ENOTSUP
5213 * lf_advlock:???
5214 * <vnop_advlock_desc>:???
5215 *
5216 * Notes: VFS implementations of advisory locking using calls through
5217 * <vnop_advlock_desc> because lock enforcement does not occur
5218 * locally should try to limit themselves to the return codes
5219 * documented above for lf_advlock and err_advlock.
5220 */
91447636
A
5221#if 0
5222/*
5223 *#
5224 *#% advlock vp U U U
5225 *#
5226 */
5227struct vnop_advlock_args {
5228 struct vnodeop_desc *a_desc;
5229 vnode_t a_vp;
5230 caddr_t a_id;
5231 int a_op;
5232 struct flock *a_fl;
5233 int a_flags;
5234 vfs_context_t a_context;
5235};
5236#endif /* 0*/
5237errno_t
2d21ac55 5238VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx)
91447636
A
5239{
5240 int _err;
5241 struct vnop_advlock_args a;
b0d623f7 5242#ifndef __LP64__
91447636
A
5243 int thread_safe;
5244 int funnel_state = 0;
b0d623f7 5245#endif /* __LP64__ */
91447636
A
5246
5247 a.a_desc = &vnop_advlock_desc;
5248 a.a_vp = vp;
5249 a.a_id = id;
5250 a.a_op = op;
5251 a.a_fl = fl;
5252 a.a_flags = flags;
2d21ac55 5253 a.a_context = ctx;
91447636 5254
b0d623f7
A
5255#ifndef __LP64__
5256 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5257 if (!thread_safe) {
5258 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5259 }
b0d623f7
A
5260#endif /* __LP64__ */
5261
91447636
A
5262 /* Disallow advisory locking on non-seekable vnodes */
5263 if (vnode_isfifo(vp)) {
5264 _err = err_advlock(&a);
5265 } else {
5266 if ((vp->v_flag & VLOCKLOCAL)) {
5267 /* Advisory locking done at this layer */
5268 _err = lf_advlock(&a);
5269 } else {
5270 /* Advisory locking done by underlying filesystem */
5271 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
5272 }
5273 }
b0d623f7
A
5274
5275#ifndef __LP64__
91447636
A
5276 if (!thread_safe) {
5277 (void) thread_funnel_set(kernel_flock, funnel_state);
5278 }
b0d623f7
A
5279#endif /* __LP64__ */
5280
91447636
A
5281 return (_err);
5282}
5283
5284
5285
5286#if 0
5287/*
5288 *#
5289 *#% allocate vp L L L
5290 *#
5291 */
5292struct vnop_allocate_args {
5293 struct vnodeop_desc *a_desc;
5294 vnode_t a_vp;
5295 off_t a_length;
5296 u_int32_t a_flags;
5297 off_t *a_bytesallocated;
5298 off_t a_offset;
5299 vfs_context_t a_context;
5300};
5301
5302#endif /* 0*/
5303errno_t
2d21ac55 5304VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
91447636
A
5305{
5306 int _err;
5307 struct vnop_allocate_args a;
b0d623f7 5308#ifndef __LP64__
91447636
A
5309 int thread_safe;
5310 int funnel_state = 0;
b0d623f7 5311#endif /* __LP64__ */
91447636
A
5312
5313 a.a_desc = &vnop_allocate_desc;
5314 a.a_vp = vp;
5315 a.a_length = length;
5316 a.a_flags = flags;
5317 a.a_bytesallocated = bytesallocated;
5318 a.a_offset = offset;
2d21ac55 5319 a.a_context = ctx;
91447636 5320
b0d623f7
A
5321#ifndef __LP64__
5322 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5323 if (!thread_safe) {
5324 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5325 return (_err);
5326 }
5327 }
b0d623f7
A
5328#endif /* __LP64__ */
5329
91447636 5330 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
b0d623f7
A
5331#if CONFIG_FSE
5332 if (_err == 0) {
5333 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5334 }
5335#endif
5336
5337#ifndef __LP64__
91447636
A
5338 if (!thread_safe) {
5339 unlock_fsnode(vp, &funnel_state);
5340 }
b0d623f7
A
5341#endif /* __LP64__ */
5342
91447636
A
5343 return (_err);
5344}
5345
5346#if 0
5347/*
5348 *#
5349 *#% pagein vp = = =
5350 *#
5351 */
5352struct vnop_pagein_args {
5353 struct vnodeop_desc *a_desc;
5354 vnode_t a_vp;
5355 upl_t a_pl;
b0d623f7 5356 upl_offset_t a_pl_offset;
91447636
A
5357 off_t a_f_offset;
5358 size_t a_size;
5359 int a_flags;
5360 vfs_context_t a_context;
5361};
5362#endif /* 0*/
5363errno_t
b0d623f7 5364VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
91447636
A
5365{
5366 int _err;
5367 struct vnop_pagein_args a;
b0d623f7 5368#ifndef __LP64__
91447636
A
5369 int thread_safe;
5370 int funnel_state = 0;
b0d623f7 5371#endif /* __LP64__ */
91447636
A
5372
5373 a.a_desc = &vnop_pagein_desc;
5374 a.a_vp = vp;
5375 a.a_pl = pl;
5376 a.a_pl_offset = pl_offset;
5377 a.a_f_offset = f_offset;
5378 a.a_size = size;
5379 a.a_flags = flags;
2d21ac55 5380 a.a_context = ctx;
91447636 5381
b0d623f7
A
5382#ifndef __LP64__
5383 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5384 if (!thread_safe) {
5385 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5386 }
b0d623f7
A
5387#endif /* __LP64__ */
5388
91447636 5389 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
b0d623f7
A
5390
5391#ifndef __LP64__
91447636
A
5392 if (!thread_safe) {
5393 (void) thread_funnel_set(kernel_flock, funnel_state);
5394 }
b0d623f7
A
5395#endif /* __LP64__ */
5396
91447636
A
5397 return (_err);
5398}
5399
5400#if 0
5401/*
5402 *#
5403 *#% pageout vp = = =
5404 *#
5405 */
5406struct vnop_pageout_args {
5407 struct vnodeop_desc *a_desc;
5408 vnode_t a_vp;
5409 upl_t a_pl;
b0d623f7 5410 upl_offset_t a_pl_offset;
91447636
A
5411 off_t a_f_offset;
5412 size_t a_size;
5413 int a_flags;
5414 vfs_context_t a_context;
5415};
5416
5417#endif /* 0*/
5418errno_t
b0d623f7 5419VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
91447636
A
5420{
5421 int _err;
5422 struct vnop_pageout_args a;
b0d623f7 5423#ifndef __LP64__
91447636
A
5424 int thread_safe;
5425 int funnel_state = 0;
b0d623f7 5426#endif /* __LP64__ */
91447636
A
5427
5428 a.a_desc = &vnop_pageout_desc;
5429 a.a_vp = vp;
5430 a.a_pl = pl;
5431 a.a_pl_offset = pl_offset;
5432 a.a_f_offset = f_offset;
5433 a.a_size = size;
5434 a.a_flags = flags;
2d21ac55 5435 a.a_context = ctx;
91447636 5436
b0d623f7
A
5437#ifndef __LP64__
5438 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5439 if (!thread_safe) {
5440 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5441 }
b0d623f7
A
5442#endif /* __LP64__ */
5443
91447636 5444 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
b0d623f7
A
5445
5446#ifndef __LP64__
91447636
A
5447 if (!thread_safe) {
5448 (void) thread_funnel_set(kernel_flock, funnel_state);
5449 }
b0d623f7
A
5450#endif /* __LP64__ */
5451
5452 post_event_if_success(vp, _err, NOTE_WRITE);
5453
91447636
A
5454 return (_err);
5455}
5456
5457
5458#if 0
5459/*
5460 *#
5461 *#% searchfs vp L L L
5462 *#
5463 */
5464struct vnop_searchfs_args {
5465 struct vnodeop_desc *a_desc;
5466 vnode_t a_vp;
5467 void *a_searchparams1;
5468 void *a_searchparams2;
5469 struct attrlist *a_searchattrs;
b0d623f7 5470 uint32_t a_maxmatches;
91447636
A
5471 struct timeval *a_timelimit;
5472 struct attrlist *a_returnattrs;
b0d623f7
A
5473 uint32_t *a_nummatches;
5474 uint32_t a_scriptcode;
5475 uint32_t a_options;
91447636
A
5476 struct uio *a_uio;
5477 struct searchstate *a_searchstate;
5478 vfs_context_t a_context;
5479};
5480
5481#endif /* 0*/
5482errno_t
b0d623f7 5483VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
91447636
A
5484{
5485 int _err;
5486 struct vnop_searchfs_args a;
b0d623f7 5487#ifndef __LP64__
91447636
A
5488 int thread_safe;
5489 int funnel_state = 0;
b0d623f7 5490#endif /* __LP64__ */
91447636
A
5491
5492 a.a_desc = &vnop_searchfs_desc;
5493 a.a_vp = vp;
5494 a.a_searchparams1 = searchparams1;
5495 a.a_searchparams2 = searchparams2;
5496 a.a_searchattrs = searchattrs;
5497 a.a_maxmatches = maxmatches;
5498 a.a_timelimit = timelimit;
5499 a.a_returnattrs = returnattrs;
5500 a.a_nummatches = nummatches;
5501 a.a_scriptcode = scriptcode;
5502 a.a_options = options;
5503 a.a_uio = uio;
5504 a.a_searchstate = searchstate;
2d21ac55 5505 a.a_context = ctx;
91447636 5506
b0d623f7
A
5507#ifndef __LP64__
5508 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5509 if (!thread_safe) {
5510 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5511 return (_err);
5512 }
5513 }
b0d623f7
A
5514#endif /* __LP64__ */
5515
91447636 5516 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
b0d623f7
A
5517
5518#ifndef __LP64__
91447636
A
5519 if (!thread_safe) {
5520 unlock_fsnode(vp, &funnel_state);
5521 }
b0d623f7
A
5522#endif /* __LP64__ */
5523
91447636
A
5524 return (_err);
5525}
5526
5527#if 0
5528/*
5529 *#
5530 *#% copyfile fvp U U U
5531 *#% copyfile tdvp L U U
5532 *#% copyfile tvp X U U
5533 *#
5534 */
5535struct vnop_copyfile_args {
5536 struct vnodeop_desc *a_desc;
5537 vnode_t a_fvp;
5538 vnode_t a_tdvp;
5539 vnode_t a_tvp;
5540 struct componentname *a_tcnp;
5541 int a_mode;
5542 int a_flags;
5543 vfs_context_t a_context;
5544};
5545#endif /* 0*/
5546errno_t
5547VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
2d21ac55 5548 int mode, int flags, vfs_context_t ctx)
91447636
A
5549{
5550 int _err;
5551 struct vnop_copyfile_args a;
5552 a.a_desc = &vnop_copyfile_desc;
5553 a.a_fvp = fvp;
5554 a.a_tdvp = tdvp;
5555 a.a_tvp = tvp;
5556 a.a_tcnp = tcnp;
5557 a.a_mode = mode;
5558 a.a_flags = flags;
2d21ac55 5559 a.a_context = ctx;
91447636
A
5560 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5561 return (_err);
5562}
5563
91447636 5564errno_t
2d21ac55 5565VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
91447636
A
5566{
5567 struct vnop_getxattr_args a;
5568 int error;
b0d623f7 5569#ifndef __LP64__
91447636
A
5570 int thread_safe;
5571 int funnel_state = 0;
b0d623f7 5572#endif /* __LP64__ */
91447636
A
5573
5574 a.a_desc = &vnop_getxattr_desc;
5575 a.a_vp = vp;
5576 a.a_name = name;
5577 a.a_uio = uio;
5578 a.a_size = size;
5579 a.a_options = options;
2d21ac55 5580 a.a_context = ctx;
91447636 5581
b0d623f7 5582#ifndef __LP64__
91447636
A
5583 thread_safe = THREAD_SAFE_FS(vp);
5584 if (!thread_safe) {
5585 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
5586 return (error);
5587 }
5588 }
b0d623f7
A
5589#endif /* __LP64__ */
5590
91447636 5591 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
b0d623f7
A
5592
5593#ifndef __LP64__
91447636
A
5594 if (!thread_safe) {
5595 unlock_fsnode(vp, &funnel_state);
5596 }
b0d623f7
A
5597#endif /* __LP64__ */
5598
91447636
A
5599 return (error);
5600}
5601
5602errno_t
2d21ac55 5603VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
91447636
A
5604{
5605 struct vnop_setxattr_args a;
5606 int error;
b0d623f7 5607#ifndef __LP64__
91447636
A
5608 int thread_safe;
5609 int funnel_state = 0;
b0d623f7 5610#endif /* __LP64__ */
91447636
A
5611
5612 a.a_desc = &vnop_setxattr_desc;
5613 a.a_vp = vp;
5614 a.a_name = name;
5615 a.a_uio = uio;
5616 a.a_options = options;
2d21ac55 5617 a.a_context = ctx;
91447636 5618
b0d623f7 5619#ifndef __LP64__
91447636
A
5620 thread_safe = THREAD_SAFE_FS(vp);
5621 if (!thread_safe) {
5622 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
5623 return (error);
5624 }
5625 }
b0d623f7
A
5626#endif /* __LP64__ */
5627
91447636 5628 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
b0d623f7
A
5629
5630#ifndef __LP64__
91447636
A
5631 if (!thread_safe) {
5632 unlock_fsnode(vp, &funnel_state);
5633 }
b0d623f7
A
5634#endif /* __LP64__ */
5635
2d21ac55
A
5636 if (error == 0)
5637 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
b0d623f7
A
5638
5639 post_event_if_success(vp, error, NOTE_ATTRIB);
5640
91447636
A
5641 return (error);
5642}
5643
5644errno_t
2d21ac55 5645VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
91447636
A
5646{
5647 struct vnop_removexattr_args a;
5648 int error;
b0d623f7 5649#ifndef __LP64__
91447636
A
5650 int thread_safe;
5651 int funnel_state = 0;
b0d623f7 5652#endif /* __LP64__ */
91447636
A
5653
5654 a.a_desc = &vnop_removexattr_desc;
5655 a.a_vp = vp;
5656 a.a_name = name;
5657 a.a_options = options;
2d21ac55 5658 a.a_context = ctx;
91447636 5659
b0d623f7 5660#ifndef __LP64__
91447636
A
5661 thread_safe = THREAD_SAFE_FS(vp);
5662 if (!thread_safe) {
5663 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
5664 return (error);
5665 }
5666 }
b0d623f7
A
5667#endif /* __LP64__ */
5668
91447636 5669 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
b0d623f7
A
5670
5671#ifndef __LP64__
91447636
A
5672 if (!thread_safe) {
5673 unlock_fsnode(vp, &funnel_state);
5674 }
b0d623f7
A
5675#endif /* __LP64__ */
5676
5677 post_event_if_success(vp, error, NOTE_ATTRIB);
5678
91447636
A
5679 return (error);
5680}
5681
5682errno_t
2d21ac55 5683VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
91447636
A
5684{
5685 struct vnop_listxattr_args a;
5686 int error;
b0d623f7 5687#ifndef __LP64__
91447636
A
5688 int thread_safe;
5689 int funnel_state = 0;
b0d623f7 5690#endif /* __LP64__ */
91447636
A
5691
5692 a.a_desc = &vnop_listxattr_desc;
5693 a.a_vp = vp;
5694 a.a_uio = uio;
5695 a.a_size = size;
5696 a.a_options = options;
2d21ac55 5697 a.a_context = ctx;
91447636 5698
b0d623f7 5699#ifndef __LP64__
91447636
A
5700 thread_safe = THREAD_SAFE_FS(vp);
5701 if (!thread_safe) {
5702 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
5703 return (error);
5704 }
5705 }
b0d623f7
A
5706#endif /* __LP64__ */
5707
91447636 5708 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
b0d623f7
A
5709
5710#ifndef __LP64__
91447636
A
5711 if (!thread_safe) {
5712 unlock_fsnode(vp, &funnel_state);
5713 }
b0d623f7
A
5714#endif /* __LP64__ */
5715
91447636
A
5716 return (error);
5717}
5718
5719
5720#if 0
5721/*
5722 *#
5723 *#% blktooff vp = = =
5724 *#
5725 */
5726struct vnop_blktooff_args {
5727 struct vnodeop_desc *a_desc;
5728 vnode_t a_vp;
5729 daddr64_t a_lblkno;
5730 off_t *a_offset;
5731};
5732#endif /* 0*/
5733errno_t
5734VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
5735{
5736 int _err;
5737 struct vnop_blktooff_args a;
b0d623f7 5738#ifndef __LP64__
91447636
A
5739 int thread_safe;
5740 int funnel_state = 0;
b0d623f7 5741#endif /* __LP64__ */
91447636
A
5742
5743 a.a_desc = &vnop_blktooff_desc;
5744 a.a_vp = vp;
5745 a.a_lblkno = lblkno;
5746 a.a_offset = offset;
91447636 5747
b0d623f7
A
5748#ifndef __LP64__
5749 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5750 if (!thread_safe) {
5751 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5752 }
b0d623f7
A
5753#endif /* __LP64__ */
5754
91447636 5755 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
b0d623f7
A
5756
5757#ifndef __LP64__
91447636
A
5758 if (!thread_safe) {
5759 (void) thread_funnel_set(kernel_flock, funnel_state);
5760 }
b0d623f7
A
5761#endif /* __LP64__ */
5762
91447636
A
5763 return (_err);
5764}
5765
5766#if 0
5767/*
5768 *#
5769 *#% offtoblk vp = = =
5770 *#
5771 */
5772struct vnop_offtoblk_args {
5773 struct vnodeop_desc *a_desc;
5774 vnode_t a_vp;
5775 off_t a_offset;
5776 daddr64_t *a_lblkno;
5777};
5778#endif /* 0*/
5779errno_t
5780VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
5781{
5782 int _err;
5783 struct vnop_offtoblk_args a;
b0d623f7 5784#ifndef __LP64__
91447636
A
5785 int thread_safe;
5786 int funnel_state = 0;
b0d623f7 5787#endif /* __LP64__ */
91447636
A
5788
5789 a.a_desc = &vnop_offtoblk_desc;
5790 a.a_vp = vp;
5791 a.a_offset = offset;
5792 a.a_lblkno = lblkno;
91447636 5793
b0d623f7
A
5794#ifndef __LP64__
5795 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5796 if (!thread_safe) {
5797 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5798 }
b0d623f7
A
5799#endif /* __LP64__ */
5800
91447636 5801 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
b0d623f7
A
5802
5803#ifndef __LP64__
91447636
A
5804 if (!thread_safe) {
5805 (void) thread_funnel_set(kernel_flock, funnel_state);
5806 }
b0d623f7
A
5807#endif /* __LP64__ */
5808
91447636
A
5809 return (_err);
5810}
5811
5812#if 0
5813/*
5814 *#
5815 *#% blockmap vp L L L
5816 *#
5817 */
5818struct vnop_blockmap_args {
5819 struct vnodeop_desc *a_desc;
5820 vnode_t a_vp;
5821 off_t a_foffset;
5822 size_t a_size;
5823 daddr64_t *a_bpn;
5824 size_t *a_run;
5825 void *a_poff;
5826 int a_flags;
5827 vfs_context_t a_context;
5828};
5829#endif /* 0*/
5830errno_t
2d21ac55 5831VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
91447636
A
5832{
5833 int _err;
5834 struct vnop_blockmap_args a;
b0d623f7 5835#ifndef __LP64__
91447636
A
5836 int thread_safe;
5837 int funnel_state = 0;
b0d623f7 5838#endif /* __LP64__ */
91447636 5839
2d21ac55
A
5840 if (ctx == NULL) {
5841 ctx = vfs_context_current();
91447636
A
5842 }
5843 a.a_desc = &vnop_blockmap_desc;
5844 a.a_vp = vp;
5845 a.a_foffset = foffset;
5846 a.a_size = size;
5847 a.a_bpn = bpn;
5848 a.a_run = run;
5849 a.a_poff = poff;
5850 a.a_flags = flags;
2d21ac55 5851 a.a_context = ctx;
91447636 5852
b0d623f7
A
5853#ifndef __LP64__
5854 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5855 if (!thread_safe) {
5856 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5857 }
b0d623f7
A
5858#endif /* __LP64__ */
5859
91447636 5860 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
b0d623f7
A
5861
5862#ifndef __LP64__
91447636
A
5863 if (!thread_safe) {
5864 (void) thread_funnel_set(kernel_flock, funnel_state);
5865 }
b0d623f7
A
5866#endif /* __LP64__ */
5867
91447636
A
5868 return (_err);
5869}
5870
5871#if 0
5872struct vnop_strategy_args {
5873 struct vnodeop_desc *a_desc;
5874 struct buf *a_bp;
5875};
5876
5877#endif /* 0*/
5878errno_t
5879VNOP_STRATEGY(struct buf *bp)
5880{
5881 int _err;
5882 struct vnop_strategy_args a;
5883 a.a_desc = &vnop_strategy_desc;
5884 a.a_bp = bp;
5885 _err = (*buf_vnode(bp)->v_op[vnop_strategy_desc.vdesc_offset])(&a);
5886 return (_err);
5887}
5888
5889#if 0
5890struct vnop_bwrite_args {
5891 struct vnodeop_desc *a_desc;
5892 buf_t a_bp;
5893};
5894#endif /* 0*/
5895errno_t
5896VNOP_BWRITE(struct buf *bp)
5897{
5898 int _err;
5899 struct vnop_bwrite_args a;
5900 a.a_desc = &vnop_bwrite_desc;
5901 a.a_bp = bp;
5902 _err = (*buf_vnode(bp)->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
5903 return (_err);
5904}
5905
5906#if 0
5907struct vnop_kqfilt_add_args {
5908 struct vnodeop_desc *a_desc;
5909 struct vnode *a_vp;
5910 struct knote *a_kn;
5911 vfs_context_t a_context;
5912};
5913#endif
5914errno_t
2d21ac55 5915VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
91447636
A
5916{
5917 int _err;
5918 struct vnop_kqfilt_add_args a;
b0d623f7 5919#ifndef __LP64__
91447636
A
5920 int thread_safe;
5921 int funnel_state = 0;
b0d623f7 5922#endif /* __LP64__ */
91447636
A
5923
5924 a.a_desc = VDESC(vnop_kqfilt_add);
5925 a.a_vp = vp;
5926 a.a_kn = kn;
2d21ac55 5927 a.a_context = ctx;
91447636 5928
b0d623f7
A
5929#ifndef __LP64__
5930 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5931 if (!thread_safe) {
5932 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5933 return (_err);
5934 }
5935 }
b0d623f7
A
5936#endif /* __LP64__ */
5937
91447636 5938 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
b0d623f7
A
5939
5940#ifndef __LP64__
91447636
A
5941 if (!thread_safe) {
5942 unlock_fsnode(vp, &funnel_state);
5943 }
b0d623f7
A
5944#endif /* __LP64__ */
5945
91447636
A
5946 return(_err);
5947}
5948
5949#if 0
5950struct vnop_kqfilt_remove_args {
5951 struct vnodeop_desc *a_desc;
5952 struct vnode *a_vp;
5953 uintptr_t a_ident;
5954 vfs_context_t a_context;
5955};
5956#endif
5957errno_t
2d21ac55 5958VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
91447636
A
5959{
5960 int _err;
5961 struct vnop_kqfilt_remove_args a;
b0d623f7 5962#ifndef __LP64__
91447636
A
5963 int thread_safe;
5964 int funnel_state = 0;
b0d623f7 5965#endif /* __LP64__ */
91447636
A
5966
5967 a.a_desc = VDESC(vnop_kqfilt_remove);
5968 a.a_vp = vp;
5969 a.a_ident = ident;
2d21ac55 5970 a.a_context = ctx;
91447636 5971
b0d623f7
A
5972#ifndef __LP64__
5973 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5974 if (!thread_safe) {
5975 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5976 return (_err);
5977 }
5978 }
b0d623f7
A
5979#endif /* __LP64__ */
5980
91447636 5981 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
b0d623f7
A
5982
5983#ifndef __LP64__
91447636
A
5984 if (!thread_safe) {
5985 unlock_fsnode(vp, &funnel_state);
5986 }
b0d623f7
A
5987#endif /* __LP64__ */
5988
5989 return(_err);
5990}
5991
5992errno_t
5993VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
5994{
5995 int _err;
5996 struct vnop_monitor_args a;
5997#ifndef __LP64__
5998 int thread_safe;
5999 int funnel_state = 0;
6000#endif /* __LP64__ */
6001
6002 a.a_desc = VDESC(vnop_monitor);
6003 a.a_vp = vp;
6004 a.a_events = events;
6005 a.a_flags = flags;
6006 a.a_handle = handle;
6007 a.a_context = ctx;
6008
6009#ifndef __LP64__
6010 thread_safe = THREAD_SAFE_FS(vp);
6011 if (!thread_safe) {
6012 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
6013 return (_err);
6014 }
6015 }
6016#endif /* __LP64__ */
6017
6018 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
6019
6020#ifndef __LP64__
6021 if (!thread_safe) {
6022 unlock_fsnode(vp, &funnel_state);
6023 }
6024#endif /* __LP64__ */
6025
91447636
A
6026 return(_err);
6027}
6028
2d21ac55
A
6029#if 0
6030struct vnop_setlabel_args {
6031 struct vnodeop_desc *a_desc;
6032 struct vnode *a_vp;
6033 struct label *a_vl;
6034 vfs_context_t a_context;
6035};
6036#endif
6037errno_t
6038VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
6039{
6040 int _err;
6041 struct vnop_setlabel_args a;
b0d623f7 6042#ifndef __LP64__
2d21ac55
A
6043 int thread_safe;
6044 int funnel_state = 0;
b0d623f7 6045#endif /* __LP64__ */
2d21ac55
A
6046
6047 a.a_desc = VDESC(vnop_setlabel);
6048 a.a_vp = vp;
6049 a.a_vl = label;
6050 a.a_context = ctx;
2d21ac55 6051
b0d623f7
A
6052#ifndef __LP64__
6053 thread_safe = THREAD_SAFE_FS(vp);
2d21ac55
A
6054 if (!thread_safe) {
6055 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
6056 return (_err);
6057 }
6058 }
b0d623f7
A
6059#endif /* __LP64__ */
6060
2d21ac55 6061 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
b0d623f7
A
6062
6063#ifndef __LP64__
2d21ac55
A
6064 if (!thread_safe) {
6065 unlock_fsnode(vp, &funnel_state);
6066 }
b0d623f7
A
6067#endif /* __LP64__ */
6068
2d21ac55
A
6069 return(_err);
6070}
6071
6072
6073#if NAMEDSTREAMS
6074/*
6075 * Get a named streamed
6076 */
6077errno_t
6078VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
6079{
6080 struct vnop_getnamedstream_args a;
6081
b0d623f7 6082#ifndef __LP64__
2d21ac55
A
6083 if (!THREAD_SAFE_FS(vp))
6084 return (ENOTSUP);
b0d623f7
A
6085#endif /* __LP64__ */
6086
2d21ac55
A
6087 a.a_desc = &vnop_getnamedstream_desc;
6088 a.a_vp = vp;
6089 a.a_svpp = svpp;
6090 a.a_name = name;
6091 a.a_operation = operation;
6092 a.a_flags = flags;
6093 a.a_context = ctx;
6094
6095 return (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
6096}
6097
6098/*
6099 * Create a named streamed
6100 */
6101errno_t
6102VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
6103{
6104 struct vnop_makenamedstream_args a;
6105
b0d623f7 6106#ifndef __LP64__
2d21ac55
A
6107 if (!THREAD_SAFE_FS(vp))
6108 return (ENOTSUP);
b0d623f7
A
6109#endif /* __LP64__ */
6110
2d21ac55
A
6111 a.a_desc = &vnop_makenamedstream_desc;
6112 a.a_vp = vp;
6113 a.a_svpp = svpp;
6114 a.a_name = name;
6115 a.a_flags = flags;
6116 a.a_context = ctx;
6117
6118 return (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
6119}
6120
6121
6122/*
6123 * Remove a named streamed
6124 */
6125errno_t
6126VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
6127{
6128 struct vnop_removenamedstream_args a;
6129
b0d623f7 6130#ifndef __LP64__
2d21ac55
A
6131 if (!THREAD_SAFE_FS(vp))
6132 return (ENOTSUP);
b0d623f7
A
6133#endif /* __LP64__ */
6134
2d21ac55
A
6135 a.a_desc = &vnop_removenamedstream_desc;
6136 a.a_vp = vp;
6137 a.a_svp = svp;
6138 a.a_name = name;
6139 a.a_flags = flags;
6140 a.a_context = ctx;
6141
6142 return (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
6143}
6144#endif