]> git.saurik.com Git - apple/xnu.git/blame - bsd/vfs/kpi_vfs.c
xnu-2050.24.15.tar.gz
[apple/xnu.git] / bsd / vfs / kpi_vfs.c
CommitLineData
91447636 1/*
316670eb 2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
91447636 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
91447636
A
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
2d21ac55
A
68/*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
91447636
A
74
75/*
76 * External virtual filesystem routines
77 */
78
91447636
A
79
80#include <sys/param.h>
81#include <sys/systm.h>
82#include <sys/proc_internal.h>
83#include <sys/kauth.h>
84#include <sys/mount.h>
85#include <sys/mount_internal.h>
86#include <sys/time.h>
87#include <sys/vnode_internal.h>
88#include <sys/stat.h>
89#include <sys/namei.h>
90#include <sys/ucred.h>
91#include <sys/buf.h>
92#include <sys/errno.h>
93#include <sys/malloc.h>
94#include <sys/domain.h>
95#include <sys/mbuf.h>
96#include <sys/syslog.h>
97#include <sys/ubc.h>
98#include <sys/vm.h>
99#include <sys/sysctl.h>
100#include <sys/filedesc.h>
b0d623f7 101#include <sys/event.h>
91447636
A
102#include <sys/fsevents.h>
103#include <sys/user.h>
104#include <sys/lockf.h>
105#include <sys/xattr.h>
106
107#include <kern/assert.h>
108#include <kern/kalloc.h>
2d21ac55 109#include <kern/task.h>
91447636 110
0c530ab8
A
111#include <libkern/OSByteOrder.h>
112
91447636
A
113#include <miscfs/specfs/specdev.h>
114
115#include <mach/mach_types.h>
116#include <mach/memory_object_types.h>
2d21ac55
A
117#include <mach/task.h>
118
119#if CONFIG_MACF
120#include <security/mac_framework.h>
121#endif
91447636
A
122
123#define ESUCCESS 0
124#undef mount_t
125#undef vnode_t
126
127#define COMPAT_ONLY
128
129
316670eb 130#if CONFIG_VFS_FUNNEL
91447636
A
131#define THREAD_SAFE_FS(VP) \
132 ((VP)->v_unsafefs ? 0 : 1)
316670eb 133#endif /* CONFIG_VFS_FUNNEL */
91447636
A
134
135#define NATIVE_XATTR(VP) \
2d21ac55 136 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
91447636 137
2d21ac55 138static void xattrfile_remove(vnode_t dvp, const char *basename,
b0d623f7 139 vfs_context_t ctx, int force);
2d21ac55 140static void xattrfile_setattr(vnode_t dvp, const char * basename,
b0d623f7 141 struct vnode_attr * vap, vfs_context_t ctx);
91447636 142
b0d623f7
A
/*
 * vnode_setneedinactive
 *
 * Description:	Indicate that when the last iocount on this vnode goes away,
 *		and the usecount is also zero, we should inform the filesystem
 *		via VNOP_INACTIVE.
 *
 * Parameters:	vnode_t		vnode to mark
 *
 * Returns:	Nothing
 *
 * Notes:	Notably used when we're deleting a file--we need not have a
 *		usecount, so VNOP_INACTIVE may not get called by anyone.  We
 *		want it called when we drop our iocount.
 */
void
vnode_setneedinactive(vnode_t vp)
{
	/* Drop name-cache entries first; the vnode is on its way to inactivation. */
	cache_purge(vp);

	vnode_lock_spin(vp);
	vp->v_lflag |= VL_NEEDINACTIVE;
	vnode_unlock(vp);
}
167
168
#if CONFIG_VFS_FUNNEL
/*
 * lock_fsnode
 *
 * Take the funnel (recording prior funnel state in *funnel_state when
 * non-NULL) and then the per-vnode fsnode lock for a non-threadsafe
 * filesystem.  The fsnode lock is recursive for the owning thread via
 * fsnode_count.  Returns ENOENT (after restoring funnel state) if the
 * vnode is being terminated or is already dead; 0 on success.
 */
int
lock_fsnode(vnode_t vp, int *funnel_state)
{
	if (funnel_state)
		*funnel_state = thread_funnel_set(kernel_flock, TRUE);

	if (vp->v_unsafefs) {
		if (vp->v_unsafefs->fsnodeowner == current_thread()) {
			/* Recursive acquisition by the current owner. */
			vp->v_unsafefs->fsnode_count++;
		} else {
			lck_mtx_lock(&vp->v_unsafefs->fsnodelock);

			if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) {
				/* Vnode is going away; undo everything and fail. */
				lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);

				if (funnel_state)
					(void) thread_funnel_set(kernel_flock, *funnel_state);
				return (ENOENT);
			}
			vp->v_unsafefs->fsnodeowner = current_thread();
			vp->v_unsafefs->fsnode_count = 1;
		}
	}
	return (0);
}
195
196
/*
 * unlock_fsnode
 *
 * Release one recursion level of the per-vnode fsnode lock; drop the
 * mutex and clear ownership when the count reaches zero.  Restores the
 * caller's saved funnel state when funnel_state is non-NULL.  Must be
 * paired with a successful lock_fsnode().
 */
void
unlock_fsnode(vnode_t vp, int *funnel_state)
{
	if (vp->v_unsafefs) {
		if (--vp->v_unsafefs->fsnode_count == 0) {
			vp->v_unsafefs->fsnodeowner = NULL;
			lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
		}
	}
	if (funnel_state)
		(void) thread_funnel_set(kernel_flock, *funnel_state);
}
#endif /* CONFIG_VFS_FUNNEL */
91447636
A
210
211
212
213/* ====================================================================== */
214/* ************ EXTERNAL KERNEL APIS ********************************** */
215/* ====================================================================== */
216
217/*
b0d623f7 218 * implementations of exported VFS operations
91447636
A
219 */
220int
2d21ac55 221VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
91447636
A
222{
223 int error;
316670eb 224#if CONFIG_VFS_FUNNEL
91447636
A
225 int thread_safe;
226 int funnel_state = 0;
316670eb 227#endif /* CONFIG_VFS_FUNNEL */
91447636
A
228
229 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
230 return(ENOTSUP);
231
316670eb 232#if CONFIG_VFS_FUNNEL
b0d623f7 233 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
91447636
A
234 if (!thread_safe) {
235 funnel_state = thread_funnel_set(kernel_flock, TRUE);
236 }
316670eb
A
237#endif /* CONFIG_VFS_FUNNEL */
238
2d21ac55 239 if (vfs_context_is64bit(ctx)) {
91447636 240 if (vfs_64bitready(mp)) {
2d21ac55 241 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
91447636
A
242 }
243 else {
244 error = ENOTSUP;
245 }
246 }
247 else {
2d21ac55 248 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
91447636
A
249 }
250
316670eb 251#if CONFIG_VFS_FUNNEL
91447636
A
252 if (!thread_safe) {
253 (void) thread_funnel_set(kernel_flock, funnel_state);
254 }
316670eb 255#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 256
91447636
A
257 return (error);
258}
259
/*
 * VFS_START
 *
 * Dispatch to the filesystem's vfs_start entry point, taking the kernel
 * funnel around the call for non-threadsafe filesystems when
 * CONFIG_VFS_FUNNEL is enabled.  Returns ENOTSUP for the dead mount or
 * when the filesystem provides no vfs_start.
 */
int
VFS_START(mount_t mp, int flags, vfs_context_t ctx)
{
	int error;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
		return(ENOTSUP);

#if CONFIG_VFS_FUNNEL
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* CONFIG_VFS_FUNNEL */

	error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	return (error);
}
289
/*
 * VFS_UNMOUNT
 *
 * Dispatch to the filesystem's vfs_unmount entry point, with funnel
 * handling for non-threadsafe filesystems under CONFIG_VFS_FUNNEL.
 * Returns ENOTSUP for the dead mount or a missing entry point.
 */
int
VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
{
	int error;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
		return(ENOTSUP);

#if CONFIG_VFS_FUNNEL
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* CONFIG_VFS_FUNNEL */

	error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	return (error);
}
319
2d21ac55
A
/*
 * Returns:	0			Success
 *		ENOTSUP			Not supported
 *		<vfs_root>:ENOENT
 *		<vfs_root>:???
 *
 * Note:	The return codes from the underlying VFS's root routine can't
 *		be fully enumerated here, since third party VFS authors may not
 *		limit their error returns to the ones documented here, even
 *		though this may result in some programs functioning incorrectly.
 *
 *		The return codes documented above are those which may currently
 *		be returned by HFS from hfs_vfs_root, which is a simple wrapper
 *		for a call to hfs_vget on the volume mount point, not including
 *		additional error codes which may be propagated from underlying
 *		routines called by hfs_vget.
 */
int
VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
{
	int error;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
		return(ENOTSUP);

	/* Default to the current thread's context when the caller passes none. */
	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#if CONFIG_VFS_FUNNEL
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* CONFIG_VFS_FUNNEL */

	error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	return (error);
}
370
/*
 * VFS_QUOTACTL
 *
 * Dispatch a quota-control command to the filesystem's vfs_quotactl
 * entry point, with funnel handling for non-threadsafe filesystems.
 * Returns ENOTSUP for the dead mount or a missing entry point.
 */
int
VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
{
	int error;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
		return(ENOTSUP);

#if CONFIG_VFS_FUNNEL
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* CONFIG_VFS_FUNNEL */

	error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	return (error);
}
400
/*
 * VFS_GETATTR
 *
 * Dispatch to the filesystem's vfs_getattr entry point to fill in the
 * requested fields of *vfa.  Defaults a NULL ctx to the current
 * thread's context.  Returns ENOTSUP for the dead mount or a missing
 * entry point.
 */
int
VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
{
	int error;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
		return(ENOTSUP);

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#if CONFIG_VFS_FUNNEL
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* CONFIG_VFS_FUNNEL */

	error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	return(error);
}
434
/*
 * VFS_SETATTR
 *
 * Dispatch to the filesystem's vfs_setattr entry point to apply the
 * active fields of *vfa.  Defaults a NULL ctx to the current thread's
 * context.  Returns ENOTSUP for the dead mount or a missing entry point.
 */
int
VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
{
	int error;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
		return(ENOTSUP);

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#if CONFIG_VFS_FUNNEL
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* CONFIG_VFS_FUNNEL */

	error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	return(error);
}
468
/*
 * VFS_SYNC
 *
 * Dispatch to the filesystem's vfs_sync entry point.  Defaults a NULL
 * ctx to the current thread's context.  Returns ENOTSUP for the dead
 * mount or a missing entry point.
 */
int
VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
{
	int error;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
		return(ENOTSUP);

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#if CONFIG_VFS_FUNNEL
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* CONFIG_VFS_FUNNEL */

	error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	return(error);
}
502
/*
 * VFS_VGET
 *
 * Dispatch to the filesystem's vfs_vget entry point to obtain the vnode
 * for inode number 'ino'.  Defaults a NULL ctx to the current thread's
 * context.  Returns ENOTSUP for the dead mount or a missing entry point.
 */
int
VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
{
	int error;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
		return(ENOTSUP);

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#if CONFIG_VFS_FUNNEL
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* CONFIG_VFS_FUNNEL */

	error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	return(error);
}
536
/*
 * VFS_FHTOVP
 *
 * Dispatch to the filesystem's vfs_fhtovp entry point to translate an
 * NFS-style file handle into a vnode.  Defaults a NULL ctx to the
 * current thread's context.  Returns ENOTSUP for the dead mount or a
 * missing entry point.
 */
int
VFS_FHTOVP(mount_t mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t ctx)
{
	int error;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
		return(ENOTSUP);

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#if CONFIG_VFS_FUNNEL
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* CONFIG_VFS_FUNNEL */

	error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	return(error);
}
570
/*
 * VFS_VPTOFH
 *
 * Dispatch to the filesystem's vfs_vptofh entry point to produce a file
 * handle for 'vp'.  Unlike the other VFS_* wrappers this takes a vnode,
 * so thread-safety is judged per-vnode via THREAD_SAFE_FS.  Defaults a
 * NULL ctx to the current thread's context.  Returns ENOTSUP for the
 * dead mount or a missing entry point.
 */
int
VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t ctx)
{
	int error;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
		return(ENOTSUP);

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#if CONFIG_VFS_FUNNEL
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* CONFIG_VFS_FUNNEL */

	error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	return(error);
}
604
605
6d2010ae
A
606/* returns the cached throttle mask for the mount_t */
607uint64_t
608vfs_throttle_mask(mount_t mp)
609{
610 return(mp->mnt_throttle_mask);
611}
612
91447636
A
/*
 * returns a copy of vfs type name for the mount_t
 *
 * NOTE(review): caller's buffer must be at least MFSNAMELEN bytes;
 * strncpy does not NUL-terminate if vfc_name fills the full width —
 * presumably vfc_name is always shorter than MFSNAMELEN; verify.
 */
void
vfs_name(mount_t mp, char * buffer)
{
	strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
}
619
620/* returns vfs type number for the mount_t */
621int
622vfs_typenum(mount_t mp)
623{
624 return(mp->mnt_vtable->vfc_typenum);
625}
626
b0d623f7
A
627/* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
628void*
629vfs_mntlabel(mount_t mp)
630{
631 return (void*)mp->mnt_mntlabel;
632}
91447636
A
633
634/* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
635uint64_t
636vfs_flags(mount_t mp)
637{
638 return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
639}
640
/*
 * set any of the command modifier flags(MNT_CMDFLAGS) in mount_t
 *
 * Only bits within MNT_CMDFLAGS | MNT_VISFLAGMASK are honored; the
 * update is performed under the mount lock.
 */
void
vfs_setflags(mount_t mp, uint64_t flags)
{
	uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));

	mount_lock(mp);
	mp->mnt_flag |= lflags;
	mount_unlock(mp);
}
651
/*
 * clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t
 *
 * Only bits within MNT_CMDFLAGS | MNT_VISFLAGMASK are honored; the
 * update is performed under the mount lock.
 */
void
vfs_clearflags(mount_t mp , uint64_t flags)
{
	uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));

	mount_lock(mp);
	mp->mnt_flag &= ~lflags;
	mount_unlock(mp);
}
662
663/* Is the mount_t ronly and upgrade read/write requested? */
664int
665vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
666{
667 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
668}
669
670
671/* Is the mount_t mounted ronly */
672int
673vfs_isrdonly(mount_t mp)
674{
675 return (mp->mnt_flag & MNT_RDONLY);
676}
677
678/* Is the mount_t mounted for filesystem synchronous writes? */
679int
680vfs_issynchronous(mount_t mp)
681{
682 return (mp->mnt_flag & MNT_SYNCHRONOUS);
683}
684
685/* Is the mount_t mounted read/write? */
686int
687vfs_isrdwr(mount_t mp)
688{
689 return ((mp->mnt_flag & MNT_RDONLY) == 0);
690}
691
692
693/* Is mount_t marked for update (ie MNT_UPDATE) */
694int
695vfs_isupdate(mount_t mp)
696{
697 return (mp->mnt_flag & MNT_UPDATE);
698}
699
700
701/* Is mount_t marked for reload (ie MNT_RELOAD) */
702int
703vfs_isreload(mount_t mp)
704{
705 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
706}
707
b0d623f7 708/* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
91447636
A
709int
710vfs_isforce(mount_t mp)
711{
2d21ac55 712 if ((mp->mnt_lflag & MNT_LFORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT))
91447636
A
713 return(1);
714 else
715 return(0);
716}
717
b0d623f7
A
718int
719vfs_isunmount(mount_t mp)
720{
721 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
722 return 1;
723 } else {
724 return 0;
725 }
726}
727
91447636
A
728int
729vfs_64bitready(mount_t mp)
730{
b0d623f7 731 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
91447636
A
732 return(1);
733 else
734 return(0);
735}
736
2d21ac55
A
737
738int
739vfs_authcache_ttl(mount_t mp)
740{
741 if ( (mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) )
742 return (mp->mnt_authcache_ttl);
743 else
744 return (CACHED_RIGHT_INFINITE_TTL);
745}
746
/* Enable TTL-limited authorization caching on this mount with the given TTL. */
void
vfs_setauthcache_ttl(mount_t mp, int ttl)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
	mp->mnt_authcache_ttl = ttl;
	mount_unlock(mp);
}
755
/* Disable TTL-limited authorization caching and restore the default TTL. */
void
vfs_clearauthcache_ttl(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
	/*
	 * back to the default TTL value in case
	 * MNTK_AUTH_OPAQUE is set on this mount
	 */
	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
	mount_unlock(mp);
}
768
/* Record the current process as the one this mount depends on. */
void
vfs_markdependency(mount_t mp)
{
	proc_t p = current_proc();
	mount_lock(mp);
	mp->mnt_dependent_process = p;
	mp->mnt_dependent_pid = proc_pid(p);
	mount_unlock(mp);
}
778
779
91447636
A
780int
781vfs_authopaque(mount_t mp)
782{
783 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
784 return(1);
785 else
786 return(0);
787}
788
789int
790vfs_authopaqueaccess(mount_t mp)
791{
792 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
793 return(1);
794 else
795 return(0);
796}
797
/* Mark this mount as performing its own (opaque) authorization. */
void
vfs_setauthopaque(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
	mount_unlock(mp);
}
805
/* Mark this mount as performing its own (opaque) access checks. */
void
vfs_setauthopaqueaccess(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
	mount_unlock(mp);
}
813
/* Clear the opaque-authorization flag on this mount. */
void
vfs_clearauthopaque(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
	mount_unlock(mp);
}
821
/* Clear the opaque-access-check flag on this mount. */
void
vfs_clearauthopaqueaccess(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
	mount_unlock(mp);
}
829
/* Mark this mount as supporting extended security (ACLs). */
void
vfs_setextendedsecurity(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
	mount_unlock(mp);
}
837
/* Clear the extended-security (ACL) support flag on this mount. */
void
vfs_clearextendedsecurity(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
	mount_unlock(mp);
}
845
846int
847vfs_extendedsecurity(mount_t mp)
848{
849 return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
850}
851
852/* returns the max size of short symlink in this mount_t */
853uint32_t
854vfs_maxsymlen(mount_t mp)
855{
856 return(mp->mnt_maxsymlinklen);
857}
858
/* set max size of short symlink on mount_t */
void
vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
{
	mp->mnt_maxsymlinklen = symlen;
}
865
866/* return a pointer to the RO vfs_statfs associated with mount_t */
867struct vfsstatfs *
868vfs_statfs(mount_t mp)
869{
870 return(&mp->mnt_vfsstat);
871}
872
873int
874vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
875{
876 int error;
91447636
A
877
878 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
879 return(error);
880
881 /*
882 * If we have a filesystem create time, use it to default some others.
883 */
884 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
885 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
886 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
887 }
888
889 return(0);
890}
891
892int
893vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
894{
895 int error;
896
897 if (vfs_isrdonly(mp))
898 return EROFS;
899
900 error = VFS_SETATTR(mp, vfa, ctx);
901
902 /*
903 * If we had alternate ways of setting vfs attributes, we'd
904 * fall back here.
905 */
906
907 return error;
908}
909
910/* return the private data handle stored in mount_t */
911void *
912vfs_fsprivate(mount_t mp)
913{
914 return(mp->mnt_data);
915}
916
/* set the private data handle in mount_t (under the mount lock) */
void
vfs_setfsprivate(mount_t mp, void *mntdata)
{
	mount_lock(mp);
	mp->mnt_data = mntdata;
	mount_unlock(mp);
}
925
926
927/*
928 * return the block size of the underlying
929 * device associated with mount_t
930 */
931int
932vfs_devblocksize(mount_t mp) {
933
934 return(mp->mnt_devblocksize);
935}
936
b0d623f7
A
937/*
938 * Returns vnode with an iocount that must be released with vnode_put()
939 */
940vnode_t
941vfs_vnodecovered(mount_t mp)
942{
943 vnode_t vp = mp->mnt_vnodecovered;
944 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
945 return NULL;
946 } else {
947 return vp;
948 }
949}
91447636 950
6d2010ae
A
951/*
952 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
953 * The iocount must be released with vnode_put(). Note that this KPI is subtle
954 * with respect to the validity of using this device vnode for anything substantial
955 * (which is discouraged). If commands are sent to the device driver without
956 * taking proper steps to ensure that the device is still open, chaos may ensue.
957 * Similarly, this routine should only be called if there is some guarantee that
958 * the mount itself is still valid.
959 */
960vnode_t
961vfs_devvp(mount_t mp)
962{
963 vnode_t vp = mp->mnt_devvp;
964
965 if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
966 return vp;
967 }
968
969 return NULLVP;
970}
971
91447636
A
/*
 * return the io attributes associated with mount_t
 *
 * A NULL mount yields conservative defaults (MAXPHYS transfer sizes,
 * 32 segments, DEV_BSIZE block size).  The reserved slots are always
 * cleared for the caller.
 */
void
vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
{
	if (mp == NULL) {
		ioattrp->io_maxreadcnt = MAXPHYS;
		ioattrp->io_maxwritecnt = MAXPHYS;
		ioattrp->io_segreadcnt = 32;
		ioattrp->io_segwritecnt = 32;
		ioattrp->io_maxsegreadsize = MAXPHYS;
		ioattrp->io_maxsegwritesize = MAXPHYS;
		ioattrp->io_devblocksize = DEV_BSIZE;
		ioattrp->io_flags = 0;
	} else {
		ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
		ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
		ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
		ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
		ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
		ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
		ioattrp->io_devblocksize = mp->mnt_devblocksize;
		ioattrp->io_flags = mp->mnt_ioflags;
	}
	ioattrp->io_reserved[0] = NULL;
	ioattrp->io_reserved[1] = NULL;
}
1000
1001
/*
 * set the IO attributes associated with mount_t
 *
 * Silently ignores a NULL mount.  Copies every field of *ioattrp
 * except the reserved slots into the mount.
 */
void
vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
{
	if (mp == NULL)
		return;
	mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
	mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
	mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
	mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
	mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
	mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
	mp->mnt_devblocksize = ioattrp->io_devblocksize;
	mp->mnt_ioflags = ioattrp->io_flags;
}
1019
1020/*
1021 * Add a new filesystem into the kernel specified in passed in
1022 * vfstable structure. It fills in the vnode
1023 * dispatch vector that is to be passed to when vnodes are created.
1024 * It returns a handle which is to be used to when the FS is to be removed
1025 */
1026typedef int (*PFI)(void *);
1027extern int vfs_opv_numops;
1028errno_t
1029vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
1030{
91447636
A
1031 struct vfstable *newvfstbl = NULL;
1032 int i,j;
1033 int (***opv_desc_vector_p)(void *);
1034 int (**opv_desc_vector)(void *);
1035 struct vnodeopv_entry_desc *opve_descp;
1036 int desccount;
1037 int descsize;
1038 PFI *descptr;
1039
1040 /*
1041 * This routine is responsible for all the initialization that would
1042 * ordinarily be done as part of the system startup;
1043 */
1044
1045 if (vfe == (struct vfs_fsentry *)0)
1046 return(EINVAL);
1047
1048 desccount = vfe->vfe_vopcnt;
b0d623f7 1049 if ((desccount <=0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
91447636
A
1050 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
1051 return(EINVAL);
1052
316670eb
A
1053#if !CONFIG_VFS_FUNNEL
1054 /* Non-threadsafe filesystems are not supported e.g. on K64 & iOS */
b0d623f7
A
1055 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
1056 return (EINVAL);
1057 }
316670eb 1058#endif /* !CONFIG_VFS_FUNNEL */
91447636
A
1059
1060 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
1061 M_WAITOK);
1062 bzero(newvfstbl, sizeof(struct vfstable));
1063 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
1064 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
1065 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
1066 newvfstbl->vfc_typenum = maxvfsconf++;
1067 else
1068 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
1069
1070 newvfstbl->vfc_refcount = 0;
1071 newvfstbl->vfc_flags = 0;
1072 newvfstbl->vfc_mountroot = NULL;
1073 newvfstbl->vfc_next = NULL;
91447636
A
1074 newvfstbl->vfc_vfsflags = 0;
1075 if (vfe->vfe_flags & VFS_TBL64BITREADY)
b0d623f7
A
1076 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
1077 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2)
1078 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
1079 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2)
1080 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
316670eb 1081#if CONFIG_VFS_FUNNEL
91447636 1082 if (vfe->vfe_flags & VFS_TBLTHREADSAFE)
b0d623f7 1083 newvfstbl->vfc_vfsflags |= VFC_VFSTHREADSAFE;
91447636 1084 if (vfe->vfe_flags & VFS_TBLFSNODELOCK)
b0d623f7 1085 newvfstbl->vfc_vfsflags |= VFC_VFSTHREADSAFE;
316670eb 1086#endif /* CONFIG_VFS_FUNNEL */
91447636
A
1087 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
1088 newvfstbl->vfc_flags |= MNT_LOCAL;
2d21ac55 1089 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0)
91447636
A
1090 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
1091 else
1092 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
2d21ac55
A
1093
1094 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR)
1095 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
1096 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT)
1097 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
1098 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED)
1099 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
1100 if (vfe->vfe_flags & VFS_TBLNOMACLABEL)
1101 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
91447636
A
1102
1103 /*
1104 * Allocate and init the vectors.
1105 * Also handle backwards compatibility.
1106 *
1107 * We allocate one large block to hold all <desccount>
1108 * vnode operation vectors stored contiguously.
1109 */
1110 /* XXX - shouldn't be M_TEMP */
1111
1112 descsize = desccount * vfs_opv_numops * sizeof(PFI);
1113 MALLOC(descptr, PFI *, descsize,
1114 M_TEMP, M_WAITOK);
1115 bzero(descptr, descsize);
1116
1117 newvfstbl->vfc_descptr = descptr;
1118 newvfstbl->vfc_descsize = descsize;
1119
1120
1121 for (i= 0; i< desccount; i++ ) {
1122 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1123 /*
1124 * Fill in the caller's pointer to the start of the i'th vector.
1125 * They'll need to supply it when calling vnode_create.
1126 */
1127 opv_desc_vector = descptr + i * vfs_opv_numops;
1128 *opv_desc_vector_p = opv_desc_vector;
1129
1130 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
1131 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
1132
1133 /*
1134 * Sanity check: is this operation listed
1135 * in the list of operations? We check this
b0d623f7 1136 * by seeing if its offset is zero. Since
91447636
A
1137 * the default routine should always be listed
1138 * first, it should be the only one with a zero
1139 * offset. Any other operation with a zero
1140 * offset is probably not listed in
1141 * vfs_op_descs, and so is probably an error.
1142 *
1143 * A panic here means the layer programmer
1144 * has committed the all-too common bug
1145 * of adding a new operation to the layer's
1146 * list of vnode operations but
1147 * not adding the operation to the system-wide
1148 * list of supported operations.
1149 */
1150 if (opve_descp->opve_op->vdesc_offset == 0 &&
1151 opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
1152 printf("vfs_fsadd: operation %s not listed in %s.\n",
1153 opve_descp->opve_op->vdesc_name,
1154 "vfs_op_descs");
1155 panic("vfs_fsadd: bad operation");
1156 }
1157 /*
1158 * Fill in this entry.
1159 */
1160 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
1161 opve_descp->opve_impl;
1162 }
1163
1164
1165 /*
1166 * Finally, go back and replace unfilled routines
1167 * with their default. (Sigh, an O(n^3) algorithm. I
1168 * could make it better, but that'd be work, and n is small.)
1169 */
1170 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1171
1172 /*
1173 * Force every operations vector to have a default routine.
1174 */
1175 opv_desc_vector = *opv_desc_vector_p;
1176 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
1177 panic("vfs_fsadd: operation vector without default routine.");
1178 for (j = 0; j < vfs_opv_numops; j++)
1179 if (opv_desc_vector[j] == NULL)
1180 opv_desc_vector[j] =
1181 opv_desc_vector[VOFFSET(vnop_default)];
1182
1183 } /* end of each vnodeopv_desc parsing */
1184
1185
1186
1187 *handle = vfstable_add(newvfstbl);
1188
1189 if (newvfstbl->vfc_typenum <= maxvfsconf )
1190 maxvfsconf = newvfstbl->vfc_typenum + 1;
91447636 1191
b0d623f7
A
1192 if (newvfstbl->vfc_vfsops->vfs_init) {
1193 struct vfsconf vfsc;
1194 bzero(&vfsc, sizeof(struct vfsconf));
1195 vfsc.vfc_reserved1 = 0;
1196 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1197 vfsc.vfc_typenum = (*handle)->vfc_typenum;
1198 vfsc.vfc_refcount = (*handle)->vfc_refcount;
1199 vfsc.vfc_flags = (*handle)->vfc_flags;
1200 vfsc.vfc_reserved2 = 0;
1201 vfsc.vfc_reserved3 = 0;
1202
1203 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1204 }
91447636
A
1205
1206 FREE(newvfstbl, M_TEMP);
1207
1208 return(0);
1209}
1210
1211/*
1212 * Removes the filesystem from kernel.
1213 * The argument passed in is the handle that was given when
1214 * file system was added
1215 */
1216errno_t
1217vfs_fsremove(vfstable_t handle)
1218{
1219 struct vfstable * vfstbl = (struct vfstable *)handle;
1220 void *old_desc = NULL;
1221 errno_t err;
1222
1223 /* Preflight check for any mounts */
1224 mount_list_lock();
1225 if ( vfstbl->vfc_refcount != 0 ) {
1226 mount_list_unlock();
1227 return EBUSY;
1228 }
91447636
A
1229
1230 /*
1231 * save the old descriptor; the free cannot occur unconditionally,
1232 * since vfstable_del() may fail.
1233 */
1234 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
1235 old_desc = vfstbl->vfc_descptr;
1236 }
1237 err = vfstable_del(vfstbl);
1238
b0d623f7
A
1239 mount_list_unlock();
1240
91447636
A
1241 /* free the descriptor if the delete was successful */
1242 if (err == 0 && old_desc) {
1243 FREE(old_desc, M_TEMP);
1244 }
1245
1246 return(err);
1247}
1248
91447636 1249int
2d21ac55 1250vfs_context_pid(vfs_context_t ctx)
91447636 1251{
2d21ac55 1252 return (proc_pid(vfs_context_proc(ctx)));
91447636
A
1253}
1254
1255int
2d21ac55 1256vfs_context_suser(vfs_context_t ctx)
91447636 1257{
2d21ac55 1258 return (suser(ctx->vc_ucred, NULL));
91447636 1259}
2d21ac55
A
1260
1261/*
b0d623f7
A
1262 * Return bit field of signals posted to all threads in the context's process.
1263 *
2d21ac55
A
1264 * XXX Signals should be tied to threads, not processes, for most uses of this
1265 * XXX call.
1266 */
91447636 1267int
2d21ac55 1268vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
91447636 1269{
2d21ac55
A
1270 proc_t p = vfs_context_proc(ctx);
1271 if (p)
1272 return(proc_pendingsignals(p, mask));
91447636
A
1273 return(0);
1274}
1275
1276int
2d21ac55 1277vfs_context_is64bit(vfs_context_t ctx)
91447636 1278{
2d21ac55
A
1279 proc_t proc = vfs_context_proc(ctx);
1280
1281 if (proc)
1282 return(proc_is64bit(proc));
91447636
A
1283 return(0);
1284}
1285
2d21ac55
A
1286
1287/*
1288 * vfs_context_proc
1289 *
1290 * Description: Given a vfs_context_t, return the proc_t associated with it.
1291 *
1292 * Parameters: vfs_context_t The context to use
1293 *
1294 * Returns: proc_t The process for this context
1295 *
1296 * Notes: This function will return the current_proc() if any of the
1297 * following conditions are true:
1298 *
1299 * o The supplied context pointer is NULL
1300 * o There is no Mach thread associated with the context
1301 * o There is no Mach task associated with the Mach thread
1302 * o There is no proc_t associated with the Mach task
1303 * o The proc_t has no per process open file table
1304 * o The proc_t is post-vfork()
1305 *
1306 * This causes this function to return a value matching as
1307 * closely as possible the previous behaviour, while at the
1308 * same time avoiding the task lending that results from vfork()
1309 */
91447636 1310proc_t
2d21ac55
A
1311vfs_context_proc(vfs_context_t ctx)
1312{
1313 proc_t proc = NULL;
1314
1315 if (ctx != NULL && ctx->vc_thread != NULL)
1316 proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1317 if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
1318 proc = NULL;
1319
1320 return(proc == NULL ? current_proc() : proc);
1321}
1322
1323/*
1324 * vfs_context_get_special_port
1325 *
1326 * Description: Return the requested special port from the task associated
1327 * with the given context.
1328 *
1329 * Parameters: vfs_context_t The context to use
1330 * int Index of special port
1331 * ipc_port_t * Pointer to returned port
1332 *
1333 * Returns: kern_return_t see task_get_special_port()
1334 */
1335kern_return_t
1336vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1337{
1338 task_t task = NULL;
1339
1340 if (ctx != NULL && ctx->vc_thread != NULL)
1341 task = get_threadtask(ctx->vc_thread);
1342
1343 return task_get_special_port(task, which, portp);
1344}
1345
1346/*
1347 * vfs_context_set_special_port
1348 *
1349 * Description: Set the requested special port in the task associated
1350 * with the given context.
1351 *
1352 * Parameters: vfs_context_t The context to use
1353 * int Index of special port
1354 * ipc_port_t New special port
1355 *
1356 * Returns: kern_return_t see task_set_special_port()
1357 */
1358kern_return_t
1359vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1360{
1361 task_t task = NULL;
1362
1363 if (ctx != NULL && ctx->vc_thread != NULL)
1364 task = get_threadtask(ctx->vc_thread);
1365
1366 return task_set_special_port(task, which, port);
1367}
1368
1369/*
1370 * vfs_context_thread
1371 *
1372 * Description: Return the Mach thread associated with a vfs_context_t
1373 *
1374 * Parameters: vfs_context_t The context to use
1375 *
1376 * Returns: thread_t The thread for this context, or
1377 * NULL, if there is not one.
1378 *
1379 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1380 * as a result of a static vfs_context_t declaration in a function
1381 * and will result in this function returning NULL.
1382 *
1383 * This is intentional; this function should NOT return the
1384 * current_thread() in this case.
1385 */
1386thread_t
1387vfs_context_thread(vfs_context_t ctx)
91447636 1388{
2d21ac55
A
1389 return(ctx->vc_thread);
1390}
1391
1392
1393/*
1394 * vfs_context_cwd
1395 *
1396 * Description: Returns a reference on the vnode for the current working
1397 * directory for the supplied context
1398 *
1399 * Parameters: vfs_context_t The context to use
1400 *
1401 * Returns: vnode_t The current working directory
1402 * for this context
1403 *
1404 * Notes: The function first attempts to obtain the current directory
1405 * from the thread, and if it is not present there, falls back
1406 * to obtaining it from the process instead. If it can't be
1407 * obtained from either place, we return NULLVP.
1408 */
1409vnode_t
1410vfs_context_cwd(vfs_context_t ctx)
1411{
1412 vnode_t cwd = NULLVP;
1413
1414 if(ctx != NULL && ctx->vc_thread != NULL) {
1415 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1416 proc_t proc;
1417
1418 /*
1419 * Get the cwd from the thread; if there isn't one, get it
1420 * from the process, instead.
1421 */
1422 if ((cwd = uth->uu_cdir) == NULLVP &&
1423 (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1424 proc->p_fd != NULL)
1425 cwd = proc->p_fd->fd_cdir;
1426 }
1427
1428 return(cwd);
91447636
A
1429}
1430
b0d623f7
A
1431/*
1432 * vfs_context_create
1433 *
1434 * Description: Allocate and initialize a new context.
1435 *
1436 * Parameters: vfs_context_t: Context to copy, or NULL for new
1437 *
1438 * Returns: Pointer to new context
1439 *
1440 * Notes: Copy cred and thread from argument, if available; else
1441 * initialize with current thread and new cred. Returns
1442 * with a reference held on the credential.
1443 */
91447636 1444vfs_context_t
2d21ac55 1445vfs_context_create(vfs_context_t ctx)
91447636 1446{
2d21ac55 1447 vfs_context_t newcontext;
91447636 1448
2d21ac55 1449 newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context));
91447636
A
1450
1451 if (newcontext) {
0c530ab8 1452 kauth_cred_t safecred;
2d21ac55
A
1453 if (ctx) {
1454 newcontext->vc_thread = ctx->vc_thread;
1455 safecred = ctx->vc_ucred;
91447636 1456 } else {
2d21ac55 1457 newcontext->vc_thread = current_thread();
0c530ab8 1458 safecred = kauth_cred_get();
91447636 1459 }
0c530ab8
A
1460 if (IS_VALID_CRED(safecred))
1461 kauth_cred_ref(safecred);
1462 newcontext->vc_ucred = safecred;
1463 return(newcontext);
91447636 1464 }
2d21ac55
A
1465 return(NULL);
1466}
1467
1468
1469vfs_context_t
1470vfs_context_current(void)
1471{
1472 vfs_context_t ctx = NULL;
1473 volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
1474
1475 if (ut != NULL ) {
1476 if (ut->uu_context.vc_ucred != NULL) {
1477 ctx = &ut->uu_context;
1478 }
1479 }
1480
1481 return(ctx == NULL ? vfs_context_kernel() : ctx);
1482}
1483
1484
1485/*
1486 * XXX Do not ask
1487 *
1488 * Dangerous hack - adopt the first kernel thread as the current thread, to
1489 * get to the vfs_context_t in the uthread associated with a kernel thread.
1490 * This is used by UDF to make the call into IOCDMediaBSDClient,
1491 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1492 * ioctl() is being called from kernel or user space (and all this because
1493 * we do not pass threads into our ioctl()'s, instead of processes).
1494 *
1495 * This is also used by imageboot_setup(), called early from bsd_init() after
1496 * kernproc has been given a credential.
1497 *
1498 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1499 * of many Mach headers to do the reference directly rather than indirectly;
1500 * we will need to forego this convenience when we reture proc_thread().
1501 */
1502static struct vfs_context kerncontext;
1503vfs_context_t
1504vfs_context_kernel(void)
1505{
1506 if (kerncontext.vc_ucred == NOCRED)
1507 kerncontext.vc_ucred = kernproc->p_ucred;
1508 if (kerncontext.vc_thread == NULL)
1509 kerncontext.vc_thread = proc_thread(kernproc);
1510
1511 return(&kerncontext);
91447636
A
1512}
1513
2d21ac55 1514
91447636 1515int
2d21ac55 1516vfs_context_rele(vfs_context_t ctx)
91447636 1517{
2d21ac55
A
1518 if (ctx) {
1519 if (IS_VALID_CRED(ctx->vc_ucred))
1520 kauth_cred_unref(&ctx->vc_ucred);
1521 kfree(ctx, sizeof(struct vfs_context));
0c530ab8 1522 }
91447636
A
1523 return(0);
1524}
1525
1526
b0d623f7 1527kauth_cred_t
2d21ac55 1528vfs_context_ucred(vfs_context_t ctx)
91447636 1529{
2d21ac55 1530 return (ctx->vc_ucred);
91447636
A
1531}
1532
1533/*
1534 * Return true if the context is owned by the superuser.
1535 */
1536int
2d21ac55 1537vfs_context_issuser(vfs_context_t ctx)
91447636 1538{
2d21ac55 1539 return(kauth_cred_issuser(vfs_context_ucred(ctx)));
91447636
A
1540}
1541
b0d623f7
A
1542/*
1543 * Given a context, for all fields of vfs_context_t which
1544 * are not held with a reference, set those fields to the
1545 * values for the current execution context. Currently, this
1546 * just means the vc_thread.
1547 *
1548 * Returns: 0 for success, nonzero for failure
1549 *
1550 * The intended use is:
1551 * 1. vfs_context_create() gets the caller a context
1552 * 2. vfs_context_bind() sets the unrefcounted data
1553 * 3. vfs_context_rele() releases the context
1554 *
1555 */
1556int
1557vfs_context_bind(vfs_context_t ctx)
1558{
1559 ctx->vc_thread = current_thread();
1560 return 0;
1561}
91447636
A
1562
1563/* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1564
1565
1566/*
1567 * Convert between vnode types and inode formats (since POSIX.1
1568 * defines mode word of stat structure in terms of inode formats).
1569 */
1570enum vtype
1571vnode_iftovt(int mode)
1572{
1573 return(iftovt_tab[((mode) & S_IFMT) >> 12]);
1574}
1575
1576int
1577vnode_vttoif(enum vtype indx)
1578{
1579 return(vttoif_tab[(int)(indx)]);
1580}
1581
1582int
1583vnode_makeimode(int indx, int mode)
1584{
1585 return (int)(VTTOIF(indx) | (mode));
1586}
1587
1588
1589/*
1590 * vnode manipulation functions.
1591 */
1592
b0d623f7 1593/* returns system root vnode iocount; It should be released using vnode_put() */
91447636
A
1594vnode_t
1595vfs_rootvnode(void)
1596{
1597 int error;
1598
1599 error = vnode_get(rootvnode);
1600 if (error)
1601 return ((vnode_t)0);
1602 else
1603 return rootvnode;
1604}
1605
1606
1607uint32_t
1608vnode_vid(vnode_t vp)
1609{
1610 return ((uint32_t)(vp->v_id));
1611}
1612
91447636
A
1613mount_t
1614vnode_mount(vnode_t vp)
1615{
1616 return (vp->v_mount);
1617}
1618
91447636
A
1619mount_t
1620vnode_mountedhere(vnode_t vp)
1621{
1622 mount_t mp;
1623
1624 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1625 (mp->mnt_vnodecovered == vp))
1626 return (mp);
1627 else
1628 return (mount_t)NULL;
1629}
1630
1631/* returns vnode type of vnode_t */
1632enum vtype
1633vnode_vtype(vnode_t vp)
1634{
1635 return (vp->v_type);
1636}
1637
1638/* returns FS specific node saved in vnode */
1639void *
1640vnode_fsnode(vnode_t vp)
1641{
1642 return (vp->v_data);
1643}
1644
1645void
1646vnode_clearfsnode(vnode_t vp)
1647{
2d21ac55 1648 vp->v_data = NULL;
91447636
A
1649}
1650
1651dev_t
1652vnode_specrdev(vnode_t vp)
1653{
1654 return(vp->v_rdev);
1655}
1656
1657
1658/* Accessor functions */
1659/* is vnode_t a root vnode */
1660int
1661vnode_isvroot(vnode_t vp)
1662{
1663 return ((vp->v_flag & VROOT)? 1 : 0);
1664}
1665
1666/* is vnode_t a system vnode */
1667int
1668vnode_issystem(vnode_t vp)
1669{
1670 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1671}
1672
2d21ac55
A
1673/* is vnode_t a swap file vnode */
1674int
1675vnode_isswap(vnode_t vp)
1676{
1677 return ((vp->v_flag & VSWAP)? 1 : 0);
1678}
1679
b0d623f7
A
1680/* is vnode_t a tty */
1681int
1682vnode_istty(vnode_t vp)
1683{
1684 return ((vp->v_flag & VISTTY) ? 1 : 0);
1685}
1686
91447636
A
1687/* if vnode_t mount operation in progress */
1688int
1689vnode_ismount(vnode_t vp)
1690{
1691 return ((vp->v_flag & VMOUNT)? 1 : 0);
1692}
1693
1694/* is this vnode under recyle now */
1695int
1696vnode_isrecycled(vnode_t vp)
1697{
1698 int ret;
1699
2d21ac55 1700 vnode_lock_spin(vp);
91447636
A
1701 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1702 vnode_unlock(vp);
1703 return(ret);
1704}
1705
b0d623f7
A
1706/* vnode was created by background task requesting rapid aging
1707 and has not since been referenced by a normal task */
1708int
1709vnode_israge(vnode_t vp)
1710{
1711 return ((vp->v_flag & VRAGE)? 1 : 0);
1712}
1713
6d2010ae
A
1714int
1715vnode_needssnapshots(vnode_t vp)
1716{
1717 return ((vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0);
1718}
1719
1720
1721/* Check the process/thread to see if we should skip atime updates */
1722int
1723vfs_ctx_skipatime (vfs_context_t ctx) {
1724 struct uthread *ut;
1725 proc_t proc;
1726 thread_t thr;
1727
1728 proc = vfs_context_proc(ctx);
1729 thr = vfs_context_thread (ctx);
1730
1731 /* Validate pointers in case we were invoked via a kernel context */
1732 if (thr && proc) {
1733 ut = get_bsdthread_info (thr);
1734
1735 if (proc->p_lflag & P_LRAGE_VNODES) {
1736 return 1;
1737 }
1738
1739 if (ut) {
1740 if (ut->uu_flag & UT_RAGE_VNODES) {
1741 return 1;
1742 }
1743 }
1744 }
1745 return 0;
1746}
1747
91447636
A
1748/* is vnode_t marked to not keep data cached once it's been consumed */
1749int
1750vnode_isnocache(vnode_t vp)
1751{
1752 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1753}
1754
1755/*
1756 * has sequential readahead been disabled on this vnode
1757 */
1758int
1759vnode_isnoreadahead(vnode_t vp)
1760{
1761 return ((vp->v_flag & VRAOFF)? 1 : 0);
1762}
1763
2d21ac55
A
1764int
1765vnode_is_openevt(vnode_t vp)
1766{
1767 return ((vp->v_flag & VOPENEVT)? 1 : 0);
1768}
1769
91447636
A
1770/* is vnode_t a standard one? */
1771int
1772vnode_isstandard(vnode_t vp)
1773{
1774 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1775}
1776
1777/* don't vflush() if SKIPSYSTEM */
1778int
1779vnode_isnoflush(vnode_t vp)
1780{
1781 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1782}
1783
1784/* is vnode_t a regular file */
1785int
1786vnode_isreg(vnode_t vp)
1787{
1788 return ((vp->v_type == VREG)? 1 : 0);
1789}
1790
1791/* is vnode_t a directory? */
1792int
1793vnode_isdir(vnode_t vp)
1794{
1795 return ((vp->v_type == VDIR)? 1 : 0);
1796}
1797
1798/* is vnode_t a symbolic link ? */
1799int
1800vnode_islnk(vnode_t vp)
1801{
1802 return ((vp->v_type == VLNK)? 1 : 0);
1803}
1804
6d2010ae
A
1805int
1806vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
1807{
1808 struct nameidata *ndp = cnp->cn_ndp;
1809
1810 if (ndp == NULL) {
1811 panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n");
1812 }
1813
1814 if (vnode_isdir(vp)) {
1815 if (vp->v_mountedhere != NULL) {
1816 goto yes;
1817 }
1818
1819#if CONFIG_TRIGGERS
1820 if (vp->v_resolve) {
1821 goto yes;
1822 }
1823#endif /* CONFIG_TRIGGERS */
1824
1825 }
1826
1827
1828 if (vnode_islnk(vp)) {
1829 /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
1830 if (cnp->cn_flags & FOLLOW) {
1831 goto yes;
1832 }
1833 if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
1834 goto yes;
1835 }
1836 }
1837
1838 return 0;
1839
1840yes:
1841 ndp->ni_flag |= NAMEI_CONTLOOKUP;
1842 return EKEEPLOOKING;
1843}
1844
91447636
A
1845/* is vnode_t a fifo ? */
1846int
1847vnode_isfifo(vnode_t vp)
1848{
1849 return ((vp->v_type == VFIFO)? 1 : 0);
1850}
1851
1852/* is vnode_t a block device? */
1853int
1854vnode_isblk(vnode_t vp)
1855{
1856 return ((vp->v_type == VBLK)? 1 : 0);
1857}
1858
b0d623f7
A
1859int
1860vnode_isspec(vnode_t vp)
1861{
1862 return (((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0);
1863}
1864
91447636
A
1865/* is vnode_t a char device? */
1866int
1867vnode_ischr(vnode_t vp)
1868{
1869 return ((vp->v_type == VCHR)? 1 : 0);
1870}
1871
1872/* is vnode_t a socket? */
1873int
1874vnode_issock(vnode_t vp)
1875{
1876 return ((vp->v_type == VSOCK)? 1 : 0);
1877}
1878
b0d623f7
A
1879/* is vnode_t a device with multiple active vnodes referring to it? */
1880int
1881vnode_isaliased(vnode_t vp)
1882{
1883 enum vtype vt = vp->v_type;
1884 if (!((vt == VCHR) || (vt == VBLK))) {
1885 return 0;
1886 } else {
1887 return (vp->v_specflags & SI_ALIASED);
1888 }
1889}
1890
2d21ac55
A
1891/* is vnode_t a named stream? */
1892int
1893vnode_isnamedstream(
1894#if NAMEDSTREAMS
1895 vnode_t vp
1896#else
1897 __unused vnode_t vp
1898#endif
1899 )
1900{
1901#if NAMEDSTREAMS
1902 return ((vp->v_flag & VISNAMEDSTREAM) ? 1 : 0);
1903#else
1904 return (0);
1905#endif
1906}
91447636 1907
b0d623f7 1908int
c910b4d9
A
1909vnode_isshadow(
1910#if NAMEDSTREAMS
b0d623f7 1911 vnode_t vp
c910b4d9 1912#else
b0d623f7 1913 __unused vnode_t vp
c910b4d9 1914#endif
b0d623f7 1915 )
c910b4d9
A
1916{
1917#if NAMEDSTREAMS
b0d623f7 1918 return ((vp->v_flag & VISSHADOW) ? 1 : 0);
c910b4d9 1919#else
b0d623f7 1920 return (0);
c910b4d9
A
1921#endif
1922}
1923
b0d623f7
A
1924/* does vnode have associated named stream vnodes ? */
1925int
1926vnode_hasnamedstreams(
1927#if NAMEDSTREAMS
1928 vnode_t vp
1929#else
1930 __unused vnode_t vp
1931#endif
1932 )
1933{
1934#if NAMEDSTREAMS
1935 return ((vp->v_lflag & VL_HASSTREAMS) ? 1 : 0);
1936#else
1937 return (0);
1938#endif
1939}
91447636
A
1940/* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1941void
1942vnode_setnocache(vnode_t vp)
1943{
2d21ac55 1944 vnode_lock_spin(vp);
91447636
A
1945 vp->v_flag |= VNOCACHE_DATA;
1946 vnode_unlock(vp);
1947}
1948
1949void
1950vnode_clearnocache(vnode_t vp)
1951{
2d21ac55 1952 vnode_lock_spin(vp);
91447636
A
1953 vp->v_flag &= ~VNOCACHE_DATA;
1954 vnode_unlock(vp);
1955}
1956
2d21ac55
A
1957void
1958vnode_set_openevt(vnode_t vp)
1959{
1960 vnode_lock_spin(vp);
1961 vp->v_flag |= VOPENEVT;
1962 vnode_unlock(vp);
1963}
1964
1965void
1966vnode_clear_openevt(vnode_t vp)
1967{
1968 vnode_lock_spin(vp);
1969 vp->v_flag &= ~VOPENEVT;
1970 vnode_unlock(vp);
1971}
1972
1973
91447636
A
1974void
1975vnode_setnoreadahead(vnode_t vp)
1976{
2d21ac55 1977 vnode_lock_spin(vp);
91447636
A
1978 vp->v_flag |= VRAOFF;
1979 vnode_unlock(vp);
1980}
1981
1982void
1983vnode_clearnoreadahead(vnode_t vp)
1984{
2d21ac55 1985 vnode_lock_spin(vp);
91447636
A
1986 vp->v_flag &= ~VRAOFF;
1987 vnode_unlock(vp);
1988}
1989
1990
1991/* mark vnode_t to skip vflush() is SKIPSYSTEM */
1992void
1993vnode_setnoflush(vnode_t vp)
1994{
2d21ac55 1995 vnode_lock_spin(vp);
91447636
A
1996 vp->v_flag |= VNOFLUSH;
1997 vnode_unlock(vp);
1998}
1999
2000void
2001vnode_clearnoflush(vnode_t vp)
2002{
2d21ac55 2003 vnode_lock_spin(vp);
91447636
A
2004 vp->v_flag &= ~VNOFLUSH;
2005 vnode_unlock(vp);
2006}
2007
2008
2009/* is vnode_t a blkdevice and has a FS mounted on it */
2010int
2011vnode_ismountedon(vnode_t vp)
2012{
2013 return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
2014}
2015
2016void
2017vnode_setmountedon(vnode_t vp)
2018{
2d21ac55 2019 vnode_lock_spin(vp);
91447636
A
2020 vp->v_specflags |= SI_MOUNTEDON;
2021 vnode_unlock(vp);
2022}
2023
2024void
2025vnode_clearmountedon(vnode_t vp)
2026{
2d21ac55 2027 vnode_lock_spin(vp);
91447636
A
2028 vp->v_specflags &= ~SI_MOUNTEDON;
2029 vnode_unlock(vp);
2030}
2031
2032
2033void
2034vnode_settag(vnode_t vp, int tag)
2035{
2036 vp->v_tag = tag;
2037
2038}
2039
2040int
2041vnode_tag(vnode_t vp)
2042{
2043 return(vp->v_tag);
2044}
2045
2046vnode_t
2047vnode_parent(vnode_t vp)
2048{
2049
2050 return(vp->v_parent);
2051}
2052
2053void
2054vnode_setparent(vnode_t vp, vnode_t dvp)
2055{
2056 vp->v_parent = dvp;
2057}
2058
2d21ac55 2059const char *
91447636
A
2060vnode_name(vnode_t vp)
2061{
2062 /* we try to keep v_name a reasonable name for the node */
2063 return(vp->v_name);
2064}
2065
2066void
2067vnode_setname(vnode_t vp, char * name)
2068{
2069 vp->v_name = name;
2070}
2071
2072/* return the registered FS name when adding the FS to kernel */
2073void
2074vnode_vfsname(vnode_t vp, char * buf)
2075{
2076 strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
2077}
2078
2079/* return the FS type number */
2080int
2081vnode_vfstypenum(vnode_t vp)
2082{
2083 return(vp->v_mount->mnt_vtable->vfc_typenum);
2084}
2085
2086int
2087vnode_vfs64bitready(vnode_t vp)
2088{
2089
b0d623f7
A
2090 /*
2091 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
2092 */
2093 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
91447636
A
2094 return(1);
2095 else
2096 return(0);
2097}
2098
2099
2100
2101/* return the visible flags on associated mount point of vnode_t */
2102uint32_t
2103vnode_vfsvisflags(vnode_t vp)
2104{
2105 return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
2106}
2107
2108/* return the command modifier flags on associated mount point of vnode_t */
2109uint32_t
2110vnode_vfscmdflags(vnode_t vp)
2111{
2112 return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
2113}
2114
2115/* return the max symlink of short links of vnode_t */
2116uint32_t
2117vnode_vfsmaxsymlen(vnode_t vp)
2118{
2119 return(vp->v_mount->mnt_maxsymlinklen);
2120}
2121
2122/* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
2123struct vfsstatfs *
2124vnode_vfsstatfs(vnode_t vp)
2125{
2126 return(&vp->v_mount->mnt_vfsstat);
2127}
2128
2129/* return a handle to the FSs specific private handle associated with vnode_t's mount point */
2130void *
2131vnode_vfsfsprivate(vnode_t vp)
2132{
2133 return(vp->v_mount->mnt_data);
2134}
2135
2136/* is vnode_t in a rdonly mounted FS */
2137int
2138vnode_vfsisrdonly(vnode_t vp)
2139{
2140 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
2141}
2142
6d2010ae
A
2143int
2144vnode_compound_rename_available(vnode_t vp)
2145{
2146 return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
2147}
2148int
2149vnode_compound_rmdir_available(vnode_t vp)
2150{
2151 return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
2152}
2153int
2154vnode_compound_mkdir_available(vnode_t vp)
2155{
2156 return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
2157}
2158int
2159vnode_compound_remove_available(vnode_t vp)
2160{
2161 return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
2162}
2163int
2164vnode_compound_open_available(vnode_t vp)
2165{
2166 return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
2167}
2168
2169int
2170vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
2171{
2172 return ((vp->v_mount->mnt_compound_ops & opid) != 0);
2173}
91447636 2174
2d21ac55
A
2175/*
2176 * Returns vnode ref to current working directory; if a per-thread current
2177 * working directory is in effect, return that instead of the per process one.
2178 *
2179 * XXX Published, but not used.
2180 */
91447636
A
2181vnode_t
2182current_workingdir(void)
2183{
2d21ac55 2184 return vfs_context_cwd(vfs_context_current());
91447636
A
2185}
2186
2187/* returns vnode ref to current root(chroot) directory */
2188vnode_t
2189current_rootdir(void)
2190{
2d21ac55 2191 proc_t proc = current_proc();
91447636
A
2192 struct vnode * vp ;
2193
2d21ac55 2194 if ( (vp = proc->p_fd->fd_rdir) ) {
91447636
A
2195 if ( (vnode_getwithref(vp)) )
2196 return (NULL);
2197 }
2198 return vp;
2199}
2200
0c530ab8
A
2201/*
2202 * Get a filesec and optional acl contents from an extended attribute.
2203 * Function will attempt to retrive ACL, UUID, and GUID information using a
2204 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2205 *
2206 * Parameters: vp The vnode on which to operate.
2207 * fsecp The filesec (and ACL, if any) being
2208 * retrieved.
2209 * ctx The vnode context in which the
2210 * operation is to be attempted.
2211 *
2212 * Returns: 0 Success
2213 * !0 errno value
2214 *
2215 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2216 * host byte order, as will be the ACL contents, if any.
2217 * Internally, we will cannonize these values from network (PPC)
2218 * byte order after we retrieve them so that the on-disk contents
2219 * of the extended attribute are identical for both PPC and Intel
2220 * (if we were not being required to provide this service via
2221 * fallback, this would be the job of the filesystem
2222 * 'VNOP_GETATTR' call).
2223 *
2224 * We use ntohl() because it has a transitive property on Intel
2225 * machines and no effect on PPC mancines. This guarantees us
2226 *
2227 * XXX: Deleting rather than ignoreing a corrupt security structure is
2228 * probably the only way to reset it without assistance from an
2229 * file system integrity checking tool. Right now we ignore it.
2230 *
2231 * XXX: We should enummerate the possible errno values here, and where
2232 * in the code they originated.
2233 */
91447636
A
2234static int
2235vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
2236{
2237 kauth_filesec_t fsec;
2238 uio_t fsec_uio;
2239 size_t fsec_size;
2240 size_t xsize, rsize;
2241 int error;
0c530ab8
A
2242 uint32_t host_fsec_magic;
2243 uint32_t host_acl_entrycount;
91447636
A
2244
2245 fsec = NULL;
2246 fsec_uio = NULL;
2247 error = 0;
2248
2249 /* find out how big the EA is */
2250 if (vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx) != 0) {
2251 /* no EA, no filesec */
2252 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2253 error = 0;
2254 /* either way, we are done */
2255 goto out;
2256 }
0c530ab8
A
2257
2258 /*
2259 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2260 * ACE entrly ACL, and if it's larger than that, it must have the right
2261 * number of bytes such that it contains an atomic number of ACEs,
2262 * rather than partial entries. Otherwise, we ignore it.
2263 */
2264 if (!KAUTH_FILESEC_VALID(xsize)) {
2265 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize);
2266 error = 0;
2267 goto out;
2268 }
91447636
A
2269
2270 /* how many entries would fit? */
2271 fsec_size = KAUTH_FILESEC_COUNT(xsize);
2272
2273 /* get buffer and uio */
2274 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
2275 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2276 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2277 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2278 error = ENOMEM;
2279 goto out;
2280 }
2281
2282 /* read security attribute */
2283 rsize = xsize;
2284 if ((error = vn_getxattr(vp,
2285 KAUTH_FILESEC_XATTR,
2286 fsec_uio,
2287 &rsize,
2288 XATTR_NOSECURITY,
2289 ctx)) != 0) {
2290
2291 /* no attribute - no security data */
2292 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2293 error = 0;
2294 /* either way, we are done */
2295 goto out;
2296 }
2297
2298 /*
0c530ab8
A
2299 * Validate security structure; the validation must take place in host
2300 * byte order. If it's corrupt, we will just ignore it.
91447636 2301 */
0c530ab8
A
2302
2303 /* Validate the size before trying to convert it */
91447636
A
2304 if (rsize < KAUTH_FILESEC_SIZE(0)) {
2305 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2306 goto out;
2307 }
0c530ab8
A
2308
2309 /* Validate the magic number before trying to convert it */
2310 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2311 if (fsec->fsec_magic != host_fsec_magic) {
2312 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
6601e61a
A
2313 goto out;
2314 }
0c530ab8
A
2315
2316 /* Validate the entry count before trying to convert it. */
2317 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2318 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2319 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2320 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2321 goto out;
2322 }
2323 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2324 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2325 goto out;
2326 }
91447636 2327 }
4452a7af 2328
0c530ab8
A
2329 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2330
91447636
A
2331 *fsecp = fsec;
2332 fsec = NULL;
2333 error = 0;
2334out:
2335 if (fsec != NULL)
2336 kauth_filesec_free(fsec);
2337 if (fsec_uio != NULL)
2338 uio_free(fsec_uio);
2339 if (error)
2340 *fsecp = NULL;
2341 return(error);
2342}
2343
0c530ab8
A
2344/*
2345 * Set a filesec and optional acl contents into an extended attribute.
2346 * function will attempt to store ACL, UUID, and GUID information using a
2347 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2348 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2349 * original caller supplied an acl.
2350 *
2351 * Parameters: vp The vnode on which to operate.
2352 * fsec The filesec being set.
2353 * acl The acl to be associated with 'fsec'.
2354 * ctx The vnode context in which the
2355 * operation is to be attempted.
2356 *
2357 * Returns: 0 Success
2358 * !0 errno value
2359 *
2360 * Notes: Both the fsec and the acl are always valid.
2361 *
2362 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2363 * as are the acl contents, if they are used. Internally, we will
 *		canonicalize these values into network (PPC) byte order before we
2365 * attempt to write them so that the on-disk contents of the
2366 * extended attribute are identical for both PPC and Intel (if we
2367 * were not being required to provide this service via fallback,
2368 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2369 * We reverse this process on the way out, so we leave with the
2370 * same byte order we started with.
2371 *
 * XXX:	We should enumerate the possible errno values here, and where
 *	in the code they originated.
2374 */
91447636
A
static int
vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
{
	uio_t	fsec_uio;
	int	error;
	uint32_t	saved_acl_copysize;

	fsec_uio = NULL;

	/* two iovecs: the fixed filesec header, then the ACL payload */
	if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
		KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
		error = ENOMEM;
		goto out;
	}
	/*
	 * Save the pre-converted ACL copysize, because it gets swapped too
	 * if we are running with the wrong endianness.
	 */
	saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);

	/* convert the caller's (host byte order) data to on-disk byte order */
	kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);

	/* header first (without the inline NOACL acl space), then the ACL itself */
	uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
	uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
	error = vn_setxattr(vp,
	    KAUTH_FILESEC_XATTR,
	    fsec_uio,
	    XATTR_NOSECURITY, 		/* we have auth'ed already */
	    ctx);
	VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);

	/* swap the caller's buffers back to host byte order before returning */
	kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);

out:
	if (fsec_uio != NULL)
		uio_free(fsec_uio);
	return(error);
}
2413
2414
2d21ac55
A
2415/*
2416 * Returns: 0 Success
2417 * ENOMEM Not enough space [only if has filesec]
2418 * VNOP_GETATTR: ???
2419 * vnode_get_filesec: ???
2420 * kauth_cred_guid2uid: ???
2421 * kauth_cred_guid2gid: ???
2422 * vfs_update_vfsstat: ???
2423 */
91447636
A
int
vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	kauth_acl_t facl;
	int	error;
	uid_t	nuid;
	gid_t	ngid;

	/* don't ask for extended security data if the filesystem doesn't support it */
	if (!vfs_extendedsecurity(vnode_mount(vp))) {
		VATTR_CLEAR_ACTIVE(vap, va_acl);
		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
		VATTR_CLEAR_ACTIVE(vap, va_guuid);
	}

	/*
	 * If the caller wants size values we might have to synthesise, give the
	 * filesystem the opportunity to supply better intermediate results.
	 */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		VATTR_SET_ACTIVE(vap, va_data_size);
		VATTR_SET_ACTIVE(vap, va_data_alloc);
		VATTR_SET_ACTIVE(vap, va_total_size);
		VATTR_SET_ACTIVE(vap, va_total_alloc);
	}

	/* ask the filesystem for everything that is marked active */
	error = VNOP_GETATTR(vp, vap, ctx);
	if (error) {
		KAUTH_DEBUG("ERROR - returning %d", error);
		goto out;
	}

	/*
	 * If extended security data was requested but not returned, try the fallback
	 * path (the KAUTH_FILESEC_XATTR extended attribute).
	 */
	if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
		fsec = NULL;

		/* filesecs are only stored on directories, symlinks and regular files */
		if ((vp->v_type == VDIR) || (vp->v_type == VLNK) || (vp->v_type == VREG)) {
			/* try to get the filesec */
			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
				goto out;
		}
		/* if no filesec, no attributes */
		if (fsec == NULL) {
			VATTR_RETURN(vap, va_acl, NULL);
			VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
			VATTR_RETURN(vap, va_guuid, kauth_null_guid);
		} else {

			/* looks good, try to return what we were asked for */
			VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
			VATTR_RETURN(vap, va_guuid, fsec->fsec_group);

			/* only return the ACL if we were actually asked for it */
			if (VATTR_IS_ACTIVE(vap, va_acl)) {
				if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
					VATTR_RETURN(vap, va_acl, NULL);
				} else {
					/* copy the ACL out; the caller owns it and frees via kauth_acl_free() */
					facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
					if (facl == NULL) {
						kauth_filesec_free(fsec);
						error = ENOMEM;
						goto out;
					}
					bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
					VATTR_RETURN(vap, va_acl, facl);
				}
			}
			kauth_filesec_free(fsec);
		}
	}
	/*
	 * If someone gave us an unsolicited filesec, toss it.  We promise that
	 * we're OK with a filesystem giving us anything back, but our callers
	 * only expect what they asked for.
	 */
	if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
		if (vap->va_acl != NULL)
			kauth_acl_free(vap->va_acl);
		VATTR_CLEAR_SUPPORTED(vap, va_acl);
	}

#if 0	/* enable when we have a filesystem only supporting UUIDs */
	/*
	 * Handle the case where we need a UID/GID, but only have extended
	 * security information.
	 */
	if (VATTR_NOT_RETURNED(vap, va_uid) &&
	    VATTR_IS_SUPPORTED(vap, va_uuuid) &&
	    !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
			VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_NOT_RETURNED(vap, va_gid) &&
	    VATTR_IS_SUPPORTED(vap, va_guuid) &&
	    !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
			VATTR_RETURN(vap, va_gid, ngid);
	}
#endif

	/*
	 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
	 */
	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
			/* the superuser always sees what the FS reported */
			nuid = vap->va_uid;
		} else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			nuid = vp->v_mount->mnt_fsowner;
			if (nuid == KAUTH_UID_NONE)
				nuid = 99;
		} else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
			nuid = vap->va_uid;
		} else {
			/* this will always be something sensible */
			nuid = vp->v_mount->mnt_fsowner;
		}
		/* non-superuser sees uid 99 mapped to their own uid */
		if ((nuid == 99) && !vfs_context_issuser(ctx))
			nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
		VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid)) {
		if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
			ngid = vap->va_gid;
		} else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			ngid = vp->v_mount->mnt_fsgroup;
			if (ngid == KAUTH_GID_NONE)
				ngid = 99;
		} else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
			ngid = vap->va_gid;
		} else {
			/* this will always be something sensible */
			ngid = vp->v_mount->mnt_fsgroup;
		}
		/* non-superuser sees gid 99 mapped to their own gid */
		if ((ngid == 99) && !vfs_context_issuser(ctx))
			ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
		VATTR_RETURN(vap, va_gid, ngid);
	}

	/*
	 * Synthesise some values that can be reasonably guessed.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_iosize))
		VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);

	if (!VATTR_IS_SUPPORTED(vap, va_flags))
		VATTR_RETURN(vap, va_flags, 0);

	if (!VATTR_IS_SUPPORTED(vap, va_filerev))
		VATTR_RETURN(vap, va_filerev, 0);

	if (!VATTR_IS_SUPPORTED(vap, va_gen))
		VATTR_RETURN(vap, va_gen, 0);

	/*
	 * Default sizes.  Ordering here is important, as later defaults build on earlier ones.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_data_size))
		VATTR_RETURN(vap, va_data_size, 0);

	/* do we want any of the possibly-computed values? */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		/* make sure f_bsize is valid */
		if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
			if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0)
				goto out;
		}

		/* default va_data_alloc from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
			VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));

		/* default va_total_size from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_total_size))
			VATTR_RETURN(vap, va_total_size, vap->va_data_size);

		/* default va_total_alloc from va_total_size which is guaranteed at this point */
		if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
			VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
	}

	/*
	 * If we don't have a change time, pull it from the modtime.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
		VATTR_RETURN(vap, va_change_time, vap->va_modify_time);

	/*
	 * This is really only supported for the creation VNOPs, but since the field is there
	 * we should populate it correctly.
	 */
	VATTR_RETURN(vap, va_type, vp->v_type);

	/*
	 * The fsid can be obtained from the mountpoint directly.
	 */
	VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

out:

	return(error);
}
2633
0c530ab8
A
2634/*
2635 * Set the attributes on a vnode in a vnode context.
2636 *
2637 * Parameters: vp The vnode whose attributes to set.
2638 * vap A pointer to the attributes to set.
2639 * ctx The vnode context in which the
2640 * operation is to be attempted.
2641 *
2642 * Returns: 0 Success
2643 * !0 errno value
2644 *
2645 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2646 *
2647 * The contents of the data area pointed to by 'vap' may be
2648 * modified if the vnode is on a filesystem which has been
 *		mounted with ignore ownership flags, or by the underlying
2650 * VFS itself, or by the fallback code, if the underlying VFS
2651 * does not support ACL, UUID, or GUUID attributes directly.
2652 *
 * XXX:	We should enumerate the possible errno values here, and where
 *	in the code they originated.
2655 */
91447636
A
int
vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	int	error, is_perm_change=0;

	/*
	 * Make sure the filesystem is mounted R/W.
	 * If not, return an error.
	 */
	if (vfs_isrdonly(vp->v_mount)) {
		error = EROFS;
		goto out;
	}
#if NAMEDSTREAMS
	/* For streams, va_data_size is the only settable attribute. */
	if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
		error = EPERM;
		goto out;
	}
#endif

	/*
	 * If ownership is being ignored on this volume, we silently discard
	 * ownership changes.
	 */
	if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}

	/*
	 * Remember whether this request changes ownership, mode or ACL;
	 * used below to pick FSE_CHOWN vs FSE_STAT_CHANGED.
	 */
	if (   VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)
	    || VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_acl)) {
		is_perm_change = 1;
	}

	/*
	 * Make sure that extended security is enabled if we're going to try
	 * to set any.
	 */
	if (!vfs_extendedsecurity(vnode_mount(vp)) &&
	    (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
		KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
		error = ENOTSUP;
		goto out;
	}

	error = VNOP_SETATTR(vp, vap, ctx);

	/* if the FS could not store everything, emulate ACL/UUID storage via an EA */
	if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
		error = vnode_setattr_fallback(vp, vap, ctx);

#if CONFIG_FSE
	// only send a stat_changed event if this is more than
	// just an access or backup time update
	if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time)) && (vap->va_active != VNODE_ATTR_BIT(va_backup_time))) {
		if (is_perm_change) {
			if (need_fsevent(FSE_CHOWN, vp)) {
				add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
			}
		} else if(need_fsevent(FSE_STAT_CHANGED, vp)) {
			add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
		}
	}
#endif

out:
	return(error);
}
2724
2725/*
0c530ab8
A
 * Fallback for setting the attributes on a vnode in a vnode context.  This
 * function will attempt to store ACL, UUID, and GUID information utilizing
2728 * a read/modify/write operation against an EA used as a backing store for
2729 * the object.
2730 *
2731 * Parameters: vp The vnode whose attributes to set.
2732 * vap A pointer to the attributes to set.
2733 * ctx The vnode context in which the
2734 * operation is to be attempted.
2735 *
2736 * Returns: 0 Success
2737 * !0 errno value
2738 *
2739 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2740 * as are the fsec and lfsec, if they are used.
2741 *
2742 * The contents of the data area pointed to by 'vap' may be
2743 * modified to indicate that the attribute is supported for
2744 * any given requested attribute.
2745 *
 * XXX:	We should enumerate the possible errno values here, and where
 *	in the code they originated.
2748 */
91447636
A
int
vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	kauth_acl_t facl;
	struct kauth_filesec lfsec;	/* stack-local filesec for new/full updates */
	int	error;

	error = 0;

	/*
	 * Extended security fallback via extended attributes.
	 *
	 * Note that we do not free the filesec; the caller is expected to
	 * do this.
	 */
	if (VATTR_NOT_RETURNED(vap, va_acl) ||
	    VATTR_NOT_RETURNED(vap, va_uuuid) ||
	    VATTR_NOT_RETURNED(vap, va_guuid)) {
		VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");

		/*
		 * Fail for file types that we don't permit extended security
		 * to be set on.
		 */
		if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) {
			VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
			error = EINVAL;
			goto out;
		}

		/*
		 * If we don't have all the extended security items, we need
		 * to fetch the existing data to perform a read-modify-write
		 * operation.
		 */
		fsec = NULL;
		if (!VATTR_IS_ACTIVE(vap, va_acl) ||
		    !VATTR_IS_ACTIVE(vap, va_uuuid) ||
		    !VATTR_IS_ACTIVE(vap, va_guuid)) {
			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
				KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
				goto out;
			}
		}
		/* if we didn't get a filesec, use our local one */
		if (fsec == NULL) {
			KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
			fsec = &lfsec;
		} else {
			KAUTH_DEBUG("SETATTR - updating existing filesec");
		}
		/* find the ACL */
		facl = &fsec->fsec_acl;

		/* if we're using the local filesec, we need to initialise it */
		if (fsec == &lfsec) {
			fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
			fsec->fsec_owner = kauth_null_guid;
			fsec->fsec_group = kauth_null_guid;
			facl->acl_entrycount = KAUTH_FILESEC_NOACL;
			facl->acl_flags = 0;
		}

		/*
		 * Update with the supplied attributes.
		 */
		if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
			KAUTH_DEBUG("SETATTR - updating owner UUID");
			fsec->fsec_owner = vap->va_uuuid;
			VATTR_SET_SUPPORTED(vap, va_uuuid);
		}
		if (VATTR_IS_ACTIVE(vap, va_guuid)) {
			KAUTH_DEBUG("SETATTR - updating group UUID");
			fsec->fsec_group = vap->va_guuid;
			VATTR_SET_SUPPORTED(vap, va_guuid);
		}
		if (VATTR_IS_ACTIVE(vap, va_acl)) {
			if (vap->va_acl == NULL) {
				KAUTH_DEBUG("SETATTR - removing ACL");
				facl->acl_entrycount = KAUTH_FILESEC_NOACL;
			} else {
				KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
				/* note: facl now points at the caller's ACL, not into fsec */
				facl = vap->va_acl;
			}
			VATTR_SET_SUPPORTED(vap, va_acl);
		}

		/*
		 * If the filesec data is all invalid, we can just remove
		 * the EA completely.
		 */
		if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
		    kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
		    kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
			error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
			/* no attribute is ok, nothing to delete */
			if (error == ENOATTR)
				error = 0;
			VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
		} else {
			/* write the EA */
			error = vnode_set_filesec(vp, fsec, facl, ctx);
			VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
		}

		/* if we fetched a filesec, dispose of the buffer */
		if (fsec != &lfsec)
			kauth_filesec_free(fsec);
	}
out:

	return(error);
}
2863
b0d623f7
A
2864/*
2865 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
2866 * event on a vnode.
2867 */
2868int
2869vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
2870{
2871 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
2872 uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
2873 | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
2874 uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
2875 | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
2876 uint32_t knote_events = (events & knote_mask);
2877
2878 /* Permissions are not explicitly part of the kqueue model */
2879 if (events & VNODE_EVENT_PERMS) {
2880 knote_events |= NOTE_ATTRIB;
2881 }
2882
2883 /* Directory contents information just becomes NOTE_WRITE */
2884 if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
2885 knote_events |= NOTE_WRITE;
2886 }
2887
2888 if (knote_events) {
2889 lock_vnode_and_post(vp, knote_events);
2890#if CONFIG_FSE
2891 if (vap != NULL) {
2892 create_fsevent_from_kevent(vp, events, vap);
2893 }
2894#else
2895 (void)vap;
2896#endif
2897 }
2898
2899 return 0;
2900}
2901
6d2010ae
A
2902
2903
2904int
2905vnode_isdyldsharedcache(vnode_t vp)
2906{
2907 return ((vp->v_flag & VSHARED_DYLD) ? 1 : 0);
2908}
2909
2910
b0d623f7
A
2911/*
2912 * For a filesystem that isn't tracking its own vnode watchers:
2913 * check whether a vnode is being monitored.
2914 */
2915int
2916vnode_ismonitored(vnode_t vp) {
2917 return (vp->v_knotes.slh_first != NULL);
2918}
2919
b0d623f7
A
2920/*
2921 * Initialize a struct vnode_attr and activate the attributes required
2922 * by the vnode_notify() call.
2923 */
/*
 * Initialize 'vap' and mark active exactly the attribute set
 * (VNODE_NOTIFY_ATTRS) that vnode_notify() consumers expect.
 * Always succeeds.
 */
int
vfs_get_notify_attributes(struct vnode_attr *vap)
{
	VATTR_INIT(vap);
	vap->va_active = VNODE_NOTIFY_ATTRS;
	return 0;
}
2931
6d2010ae
A
2932#if CONFIG_TRIGGERS
2933int
2934vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
2935{
2936 int error;
2937 mount_t mp;
2938
2939 mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
2940 if (mp == NULL) {
2941 return ENOENT;
2942 }
2943
2944 error = vfs_busy(mp, LK_NOWAIT);
2945 mount_iterdrop(mp);
2946
2947 if (error != 0) {
2948 return ENOENT;
2949 }
2950
2951 mount_lock(mp);
2952 if (mp->mnt_triggercallback != NULL) {
2953 error = EBUSY;
2954 mount_unlock(mp);
2955 goto out;
2956 }
2957
2958 mp->mnt_triggercallback = vtc;
2959 mp->mnt_triggerdata = data;
2960 mount_unlock(mp);
2961
2962 mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
2963
2964out:
2965 vfs_unbusy(mp);
2966 return 0;
2967}
2968#endif /* CONFIG_TRIGGERS */
2969
91447636
A
2970/*
2971 * Definition of vnode operations.
2972 */
2973
2974#if 0
2975/*
2976 *#
2977 *#% lookup dvp L ? ?
2978 *#% lookup vpp - L -
2979 */
2980struct vnop_lookup_args {
2981 struct vnodeop_desc *a_desc;
2982 vnode_t a_dvp;
2983 vnode_t *a_vpp;
2984 struct componentname *a_cnp;
2985 vfs_context_t a_context;
2986};
2987#endif /* 0*/
2988
2d21ac55
A
2989/*
2990 * Returns: 0 Success
2991 * lock_fsnode:ENOENT No such file or directory [only for VFS
2992 * that is not thread safe & vnode is
2993 * currently being/has been terminated]
2994 * <vfs_lookup>:ENAMETOOLONG
2995 * <vfs_lookup>:ENOENT
2996 * <vfs_lookup>:EJUSTRETURN
2997 * <vfs_lookup>:EPERM
2998 * <vfs_lookup>:EISDIR
2999 * <vfs_lookup>:ENOTDIR
3000 * <vfs_lookup>:???
3001 *
3002 * Note: The return codes from the underlying VFS's lookup routine can't
3003 * be fully enumerated here, since third party VFS authors may not
3004 * limit their error returns to the ones documented here, even
3005 * though this may result in some programs functioning incorrectly.
3006 *
3007 * The return codes documented above are those which may currently
3008 * be returned by HFS from hfs_lookup, not including additional
3009 * error code which may be propagated from underlying routines.
3010 */
errno_t
VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
{
	int _err;
	struct vnop_lookup_args a;
	vnode_t vp;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	/* marshal the arguments and dispatch through the vnode's op vector */
	a.a_desc = &vnop_lookup_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_context = ctx;

#if CONFIG_VFS_FUNNEL
	/* non-thread-safe filesystems run under the funnel with the fsnode locked */
	thread_safe = THREAD_SAFE_FS(dvp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* CONFIG_VFS_FUNNEL */

	_err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);

	vp = *vpp;

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		if ( (cnp->cn_flags & ISLASTCN) ) {
			if ( (cnp->cn_flags & LOCKPARENT) ) {
				if ( !(cnp->cn_flags & FSNODELOCKHELD) ) {
					/*
					 * leave the fsnode lock held on
					 * the directory, but restore the funnel...
					 * also indicate that we need to drop the
					 * fsnode_lock when we're done with the
					 * system call processing for this path
					 */
					cnp->cn_flags |= FSNODELOCKHELD;

					(void) thread_funnel_set(kernel_flock, funnel_state);
					return (_err);
				}
			}
		}
		unlock_fsnode(dvp, &funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	return (_err);
}
3066
3067#if 0
6d2010ae
A
3068struct vnop_compound_open_args {
3069 struct vnodeop_desc *a_desc;
3070 vnode_t a_dvp;
3071 vnode_t *a_vpp;
3072 struct componentname *a_cnp;
3073 int32_t a_flags;
3074 int32_t a_fmode;
3075 struct vnode_attr *a_vap;
3076 vfs_context_t a_context;
3077 void *a_reserved;
3078};
3079#endif /* 0 */
3080
/*
 * Dispatch a compound open (lookup + optional create + open) to the
 * filesystem, validating the create-related invariants and performing
 * the post-create AppleDouble cleanup and kqueue notification.
 */
int
VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_compound_open_args a;
	int did_create = 0;
	int want_create;
	uint32_t tmp_status = 0;
	struct componentname *cnp = &ndp->ni_cnd;

	want_create = (flags & VNOP_COMPOUND_OPEN_DO_CREATE);

	a.a_desc = &vnop_compound_open_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp; /* Could be NULL */
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_fmode = fmode;
	/* give the FS somewhere to report status even if the caller passed NULL */
	a.a_status = (statusp != NULL) ? statusp : &tmp_status;
	a.a_vap = vap;
	a.a_context = ctx;
	a.a_open_create_authorizer = vn_authorize_create;
	a.a_open_existing_authorizer = vn_authorize_open_existing;
	a.a_reserved = NULL;

	/* create requires a vap, and vice versa */
	if (dvp == NULLVP) {
		panic("No dvp?");
	}
	if (want_create && !vap) {
		panic("Want create, but no vap?");
	}
	if (!want_create && vap) {
		panic("Don't want create, but have a vap?");
	}

	_err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);

	did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);

	if (did_create && !want_create) {
		panic("Filesystem did a create, even though none was requested?");
	}

	if (did_create) {
		if (!NATIVE_XATTR(dvp)) {
			/*
			 * Remove stale Apple Double file (if any).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
		}

		/* On create, provide kqueue notification */
		post_event_if_success(dvp, _err, NOTE_WRITE);
	}

	lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
#if 0 /* FSEvents... */
	if (*vpp && _err && _err != EKEEPLOOKING) {
		vnode_put(*vpp);
		*vpp = NULLVP;
	}
#endif /* 0 */

	return (_err);

}
3147
3148#if 0
91447636
A
3149struct vnop_create_args {
3150 struct vnodeop_desc *a_desc;
3151 vnode_t a_dvp;
3152 vnode_t *a_vpp;
3153 struct componentname *a_cnp;
3154 struct vnode_attr *a_vap;
3155 vfs_context_t a_context;
3156};
3157#endif /* 0*/
/*
 * Dispatch a file create for 'cnp' in directory 'dvp' to the
 * filesystem, then clean up any stale AppleDouble file and post a
 * NOTE_WRITE kqueue event on the directory.
 */
errno_t
VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_create_args a;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	a.a_desc = &vnop_create_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_context = ctx;

#if CONFIG_VFS_FUNNEL
	/* non-thread-safe filesystems run under the funnel with the fsnode locked */
	thread_safe = THREAD_SAFE_FS(dvp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* CONFIG_VFS_FUNNEL */

	_err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
	}

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	post_event_if_success(dvp, _err, NOTE_WRITE);

	return (_err);
}
3202
3203#if 0
3204/*
3205 *#
3206 *#% whiteout dvp L L L
3207 *#% whiteout cnp - - -
3208 *#% whiteout flag - - -
3209 *#
3210 */
3211struct vnop_whiteout_args {
3212 struct vnodeop_desc *a_desc;
3213 vnode_t a_dvp;
3214 struct componentname *a_cnp;
3215 int a_flags;
3216 vfs_context_t a_context;
3217};
3218#endif /* 0*/
/*
 * Dispatch a whiteout operation for 'cnp' in directory 'dvp' to the
 * filesystem, then post a NOTE_WRITE kqueue event on the directory.
 */
errno_t
VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t ctx)
{
	int _err;
	struct vnop_whiteout_args a;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	a.a_desc = &vnop_whiteout_desc;
	a.a_dvp = dvp;
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_context = ctx;

#if CONFIG_VFS_FUNNEL
	/* non-thread-safe filesystems run under the funnel with the fsnode locked */
	thread_safe = THREAD_SAFE_FS(dvp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* CONFIG_VFS_FUNNEL */

	_err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	post_event_if_success(dvp, _err, NOTE_WRITE);

	return (_err);
}
3256
3257 #if 0
3258/*
3259 *#
3260 *#% mknod dvp L U U
3261 *#% mknod vpp - X -
3262 *#
3263 */
3264struct vnop_mknod_args {
3265 struct vnodeop_desc *a_desc;
3266 vnode_t a_dvp;
3267 vnode_t *a_vpp;
3268 struct componentname *a_cnp;
3269 struct vnode_attr *a_vap;
3270 vfs_context_t a_context;
3271};
3272#endif /* 0*/
/*
 * Dispatch creation of a special file (device node, etc.) for 'cnp' in
 * directory 'dvp' to the filesystem, then post a NOTE_WRITE kqueue
 * event on the directory.
 */
errno_t
VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
{

	int _err;
	struct vnop_mknod_args a;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	a.a_desc = &vnop_mknod_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_context = ctx;

#if CONFIG_VFS_FUNNEL
	/* non-thread-safe filesystems run under the funnel with the fsnode locked */
	thread_safe = THREAD_SAFE_FS(dvp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* CONFIG_VFS_FUNNEL */

	_err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	post_event_if_success(dvp, _err, NOTE_WRITE);

	return (_err);
}
3312
3313#if 0
3314/*
3315 *#
3316 *#% open vp L L L
3317 *#
3318 */
3319struct vnop_open_args {
3320 struct vnodeop_desc *a_desc;
3321 vnode_t a_vp;
3322 int a_mode;
3323 vfs_context_t a_context;
3324};
3325#endif /* 0*/
3326errno_t
6d2010ae 3327VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
91447636
A
3328{
3329 int _err;
3330 struct vnop_open_args a;
316670eb 3331#if CONFIG_VFS_FUNNEL
91447636 3332 int thread_safe;
6d2010ae 3333 int funnel_state = 0;
316670eb 3334#endif /* CONFIG_VFS_FUNNEL */
91447636 3335
2d21ac55
A
3336 if (ctx == NULL) {
3337 ctx = vfs_context_current();
6d2010ae 3338 }
91447636
A
3339 a.a_desc = &vnop_open_desc;
3340 a.a_vp = vp;
3341 a.a_mode = mode;
6d2010ae 3342 a.a_context = ctx;
91447636 3343
316670eb 3344#if CONFIG_VFS_FUNNEL
b0d623f7 3345 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3346 if (!thread_safe) {
3347 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3348 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
6d2010ae
A
3349 if ( (_err = lock_fsnode(vp, NULL)) ) {
3350 (void) thread_funnel_set(kernel_flock, funnel_state);
3351 return (_err);
3352 }
3353 }
3354 }
316670eb 3355#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3356
91447636 3357 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
b0d623f7 3358
316670eb 3359#if CONFIG_VFS_FUNNEL
91447636
A
3360 if (!thread_safe) {
3361 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3362 unlock_fsnode(vp, NULL);
6d2010ae 3363 }
91447636 3364 (void) thread_funnel_set(kernel_flock, funnel_state);
6d2010ae 3365 }
316670eb 3366#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3367
91447636
A
3368 return (_err);
3369}
3370
3371#if 0
3372/*
3373 *#
3374 *#% close vp U U U
3375 *#
3376 */
3377struct vnop_close_args {
3378 struct vnodeop_desc *a_desc;
3379 vnode_t a_vp;
3380 int a_fflag;
3381 vfs_context_t a_context;
3382};
3383#endif /* 0*/
3384errno_t
2d21ac55 3385VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
91447636
A
3386{
3387 int _err;
3388 struct vnop_close_args a;
316670eb 3389#if CONFIG_VFS_FUNNEL
91447636
A
3390 int thread_safe;
3391 int funnel_state = 0;
316670eb 3392#endif /* CONFIG_VFS_FUNNEL */
91447636 3393
2d21ac55
A
3394 if (ctx == NULL) {
3395 ctx = vfs_context_current();
91447636
A
3396 }
3397 a.a_desc = &vnop_close_desc;
3398 a.a_vp = vp;
3399 a.a_fflag = fflag;
2d21ac55 3400 a.a_context = ctx;
91447636 3401
316670eb 3402#if CONFIG_VFS_FUNNEL
b0d623f7 3403 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3404 if (!thread_safe) {
3405 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3406 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3407 if ( (_err = lock_fsnode(vp, NULL)) ) {
3408 (void) thread_funnel_set(kernel_flock, funnel_state);
3409 return (_err);
3410 }
3411 }
3412 }
316670eb 3413#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3414
91447636 3415 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
b0d623f7 3416
316670eb 3417#if CONFIG_VFS_FUNNEL
91447636
A
3418 if (!thread_safe) {
3419 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3420 unlock_fsnode(vp, NULL);
3421 }
3422 (void) thread_funnel_set(kernel_flock, funnel_state);
3423 }
316670eb 3424#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3425
91447636
A
3426 return (_err);
3427}
3428
3429#if 0
3430/*
3431 *#
3432 *#% access vp L L L
3433 *#
3434 */
3435struct vnop_access_args {
3436 struct vnodeop_desc *a_desc;
3437 vnode_t a_vp;
3438 int a_action;
3439 vfs_context_t a_context;
3440};
3441#endif /* 0*/
3442errno_t
2d21ac55 3443VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
91447636
A
3444{
3445 int _err;
3446 struct vnop_access_args a;
316670eb 3447#if CONFIG_VFS_FUNNEL
91447636
A
3448 int thread_safe;
3449 int funnel_state = 0;
316670eb 3450#endif /* CONFIG_VFS_FUNNEL */
91447636 3451
2d21ac55
A
3452 if (ctx == NULL) {
3453 ctx = vfs_context_current();
91447636
A
3454 }
3455 a.a_desc = &vnop_access_desc;
3456 a.a_vp = vp;
3457 a.a_action = action;
2d21ac55 3458 a.a_context = ctx;
91447636 3459
316670eb 3460#if CONFIG_VFS_FUNNEL
b0d623f7 3461 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3462 if (!thread_safe) {
3463 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3464 return (_err);
3465 }
3466 }
316670eb 3467#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3468
91447636 3469 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
b0d623f7 3470
316670eb 3471#if CONFIG_VFS_FUNNEL
91447636
A
3472 if (!thread_safe) {
3473 unlock_fsnode(vp, &funnel_state);
3474 }
316670eb 3475#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3476
91447636
A
3477 return (_err);
3478}
3479
3480#if 0
3481/*
3482 *#
3483 *#% getattr vp = = =
3484 *#
3485 */
3486struct vnop_getattr_args {
3487 struct vnodeop_desc *a_desc;
3488 vnode_t a_vp;
3489 struct vnode_attr *a_vap;
3490 vfs_context_t a_context;
3491};
3492#endif /* 0*/
3493errno_t
2d21ac55 3494VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
91447636
A
3495{
3496 int _err;
3497 struct vnop_getattr_args a;
316670eb 3498#if CONFIG_VFS_FUNNEL
91447636 3499 int thread_safe;
b0d623f7 3500 int funnel_state = 0;
316670eb 3501#endif /* CONFIG_VFS_FUNNEL */
91447636
A
3502
3503 a.a_desc = &vnop_getattr_desc;
3504 a.a_vp = vp;
3505 a.a_vap = vap;
2d21ac55 3506 a.a_context = ctx;
91447636 3507
316670eb 3508#if CONFIG_VFS_FUNNEL
b0d623f7 3509 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3510 if (!thread_safe) {
3511 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3512 return (_err);
3513 }
3514 }
316670eb 3515#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3516
91447636 3517 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
b0d623f7 3518
316670eb 3519#if CONFIG_VFS_FUNNEL
91447636
A
3520 if (!thread_safe) {
3521 unlock_fsnode(vp, &funnel_state);
3522 }
316670eb 3523#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3524
91447636
A
3525 return (_err);
3526}
3527
3528#if 0
3529/*
3530 *#
3531 *#% setattr vp L L L
3532 *#
3533 */
3534struct vnop_setattr_args {
3535 struct vnodeop_desc *a_desc;
3536 vnode_t a_vp;
3537 struct vnode_attr *a_vap;
3538 vfs_context_t a_context;
3539};
3540#endif /* 0*/
3541errno_t
2d21ac55 3542VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
91447636
A
3543{
3544 int _err;
3545 struct vnop_setattr_args a;
316670eb 3546#if CONFIG_VFS_FUNNEL
91447636 3547 int thread_safe;
b0d623f7 3548 int funnel_state = 0;
316670eb 3549#endif /* CONFIG_VFS_FUNNEL */
91447636
A
3550
3551 a.a_desc = &vnop_setattr_desc;
3552 a.a_vp = vp;
3553 a.a_vap = vap;
2d21ac55 3554 a.a_context = ctx;
91447636 3555
316670eb 3556#if CONFIG_VFS_FUNNEL
b0d623f7 3557 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3558 if (!thread_safe) {
3559 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3560 return (_err);
3561 }
3562 }
316670eb 3563#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3564
91447636
A
3565 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3566
3567 /*
2d21ac55 3568 * Shadow uid/gid/mod change to extended attribute file.
91447636
A
3569 */
3570 if (_err == 0 && !NATIVE_XATTR(vp)) {
3571 struct vnode_attr va;
3572 int change = 0;
3573
3574 VATTR_INIT(&va);
3575 if (VATTR_IS_ACTIVE(vap, va_uid)) {
3576 VATTR_SET(&va, va_uid, vap->va_uid);
3577 change = 1;
3578 }
3579 if (VATTR_IS_ACTIVE(vap, va_gid)) {
3580 VATTR_SET(&va, va_gid, vap->va_gid);
3581 change = 1;
3582 }
3583 if (VATTR_IS_ACTIVE(vap, va_mode)) {
3584 VATTR_SET(&va, va_mode, vap->va_mode);
3585 change = 1;
3586 }
3587 if (change) {
3588 vnode_t dvp;
2d21ac55 3589 const char *vname;
91447636
A
3590
3591 dvp = vnode_getparent(vp);
3592 vname = vnode_getname(vp);
3593
b0d623f7 3594 xattrfile_setattr(dvp, vname, &va, ctx);
91447636
A
3595 if (dvp != NULLVP)
3596 vnode_put(dvp);
3597 if (vname != NULL)
3598 vnode_putname(vname);
3599 }
3600 }
b0d623f7 3601
316670eb 3602#if CONFIG_VFS_FUNNEL
91447636
A
3603 if (!thread_safe) {
3604 unlock_fsnode(vp, &funnel_state);
3605 }
316670eb 3606#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3607
2d21ac55
A
3608 /*
3609 * If we have changed any of the things about the file that are likely
3610 * to result in changes to authorization results, blow the vnode auth
3611 * cache
3612 */
3613 if (_err == 0 && (
3614 VATTR_IS_SUPPORTED(vap, va_mode) ||
3615 VATTR_IS_SUPPORTED(vap, va_uid) ||
3616 VATTR_IS_SUPPORTED(vap, va_gid) ||
3617 VATTR_IS_SUPPORTED(vap, va_flags) ||
3618 VATTR_IS_SUPPORTED(vap, va_acl) ||
3619 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
b0d623f7 3620 VATTR_IS_SUPPORTED(vap, va_guuid))) {
2d21ac55 3621 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
91447636 3622
b0d623f7
A
3623#if NAMEDSTREAMS
3624 if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
3625 vnode_t svp;
3626 if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
3627 vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3628 vnode_put(svp);
3629 }
3630 }
3631#endif /* NAMEDSTREAMS */
3632 }
3633
3634
3635 post_event_if_success(vp, _err, NOTE_ATTRIB);
3636
91447636
A
3637 return (_err);
3638}
3639
3640
3641#if 0
3642/*
3643 *#
3644 *#% read vp L L L
3645 *#
3646 */
3647struct vnop_read_args {
3648 struct vnodeop_desc *a_desc;
3649 vnode_t a_vp;
3650 struct uio *a_uio;
3651 int a_ioflag;
3652 vfs_context_t a_context;
3653};
3654#endif /* 0*/
3655errno_t
2d21ac55 3656VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
91447636
A
3657{
3658 int _err;
3659 struct vnop_read_args a;
316670eb 3660#if CONFIG_VFS_FUNNEL
91447636
A
3661 int thread_safe;
3662 int funnel_state = 0;
316670eb 3663#endif /* CONFIG_VFS_FUNNEL */
91447636 3664
2d21ac55
A
3665 if (ctx == NULL) {
3666 ctx = vfs_context_current();
91447636
A
3667 }
3668
3669 a.a_desc = &vnop_read_desc;
3670 a.a_vp = vp;
3671 a.a_uio = uio;
3672 a.a_ioflag = ioflag;
2d21ac55 3673 a.a_context = ctx;
91447636 3674
316670eb 3675#if CONFIG_VFS_FUNNEL
b0d623f7 3676 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3677 if (!thread_safe) {
3678 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3679 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3680 if ( (_err = lock_fsnode(vp, NULL)) ) {
3681 (void) thread_funnel_set(kernel_flock, funnel_state);
3682 return (_err);
3683 }
3684 }
3685 }
316670eb 3686#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3687
91447636
A
3688 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3689
316670eb 3690#if CONFIG_VFS_FUNNEL
91447636
A
3691 if (!thread_safe) {
3692 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3693 unlock_fsnode(vp, NULL);
3694 }
3695 (void) thread_funnel_set(kernel_flock, funnel_state);
3696 }
316670eb 3697#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3698
91447636
A
3699 return (_err);
3700}
3701
3702
3703#if 0
3704/*
3705 *#
3706 *#% write vp L L L
3707 *#
3708 */
3709struct vnop_write_args {
3710 struct vnodeop_desc *a_desc;
3711 vnode_t a_vp;
3712 struct uio *a_uio;
3713 int a_ioflag;
3714 vfs_context_t a_context;
3715};
3716#endif /* 0*/
3717errno_t
2d21ac55 3718VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
91447636
A
3719{
3720 struct vnop_write_args a;
3721 int _err;
316670eb 3722#if CONFIG_VFS_FUNNEL
91447636
A
3723 int thread_safe;
3724 int funnel_state = 0;
316670eb 3725#endif /* CONFIG_VFS_FUNNEL */
91447636 3726
2d21ac55
A
3727 if (ctx == NULL) {
3728 ctx = vfs_context_current();
91447636
A
3729 }
3730
3731 a.a_desc = &vnop_write_desc;
3732 a.a_vp = vp;
3733 a.a_uio = uio;
3734 a.a_ioflag = ioflag;
2d21ac55 3735 a.a_context = ctx;
91447636 3736
316670eb 3737#if CONFIG_VFS_FUNNEL
b0d623f7 3738 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3739 if (!thread_safe) {
3740 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3741 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3742 if ( (_err = lock_fsnode(vp, NULL)) ) {
3743 (void) thread_funnel_set(kernel_flock, funnel_state);
3744 return (_err);
3745 }
3746 }
3747 }
316670eb 3748#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3749
91447636
A
3750 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3751
316670eb 3752#if CONFIG_VFS_FUNNEL
91447636
A
3753 if (!thread_safe) {
3754 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3755 unlock_fsnode(vp, NULL);
3756 }
3757 (void) thread_funnel_set(kernel_flock, funnel_state);
3758 }
316670eb 3759#endif /* CONFIG_VFS_FUNNEL */
b0d623f7
A
3760
3761 post_event_if_success(vp, _err, NOTE_WRITE);
3762
91447636
A
3763 return (_err);
3764}
3765
3766
3767#if 0
3768/*
3769 *#
3770 *#% ioctl vp U U U
3771 *#
3772 */
3773struct vnop_ioctl_args {
3774 struct vnodeop_desc *a_desc;
3775 vnode_t a_vp;
3776 u_long a_command;
3777 caddr_t a_data;
3778 int a_fflag;
3779 vfs_context_t a_context;
3780};
3781#endif /* 0*/
3782errno_t
2d21ac55 3783VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
91447636
A
3784{
3785 int _err;
3786 struct vnop_ioctl_args a;
316670eb 3787#if CONFIG_VFS_FUNNEL
91447636
A
3788 int thread_safe;
3789 int funnel_state = 0;
316670eb 3790#endif /* CONFIG_VFS_FUNNEL */
91447636 3791
2d21ac55
A
3792 if (ctx == NULL) {
3793 ctx = vfs_context_current();
91447636
A
3794 }
3795
b0d623f7
A
3796 /*
3797 * This check should probably have been put in the TTY code instead...
3798 *
3799 * We have to be careful about what we assume during startup and shutdown.
3800 * We have to be able to use the root filesystem's device vnode even when
3801 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3802 * structure. If there is no data pointer, it doesn't matter whether
3803 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZECACHE)
3804 * which passes NULL for its data pointer can therefore be used during
3805 * mount or unmount of the root filesystem.
3806 *
3807 * Depending on what root filesystems need to do during mount/unmount, we
3808 * may need to loosen this check again in the future.
3809 */
3810 if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
3811 if (data != NULL && !vnode_vfs64bitready(vp)) {
91447636
A
3812 return(ENOTTY);
3813 }
3814 }
3815
3816 a.a_desc = &vnop_ioctl_desc;
3817 a.a_vp = vp;
3818 a.a_command = command;
3819 a.a_data = data;
3820 a.a_fflag = fflag;
2d21ac55 3821 a.a_context= ctx;
91447636 3822
316670eb 3823#if CONFIG_VFS_FUNNEL
b0d623f7 3824 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3825 if (!thread_safe) {
3826 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3827 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3828 if ( (_err = lock_fsnode(vp, NULL)) ) {
3829 (void) thread_funnel_set(kernel_flock, funnel_state);
3830 return (_err);
3831 }
3832 }
3833 }
316670eb 3834#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3835
91447636 3836 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
b0d623f7 3837
316670eb 3838#if CONFIG_VFS_FUNNEL
91447636
A
3839 if (!thread_safe) {
3840 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3841 unlock_fsnode(vp, NULL);
3842 }
3843 (void) thread_funnel_set(kernel_flock, funnel_state);
3844 }
316670eb 3845#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3846
91447636
A
3847 return (_err);
3848}
3849
3850
3851#if 0
3852/*
3853 *#
3854 *#% select vp U U U
3855 *#
3856 */
3857struct vnop_select_args {
3858 struct vnodeop_desc *a_desc;
3859 vnode_t a_vp;
3860 int a_which;
3861 int a_fflags;
3862 void *a_wql;
3863 vfs_context_t a_context;
3864};
3865#endif /* 0*/
3866errno_t
2d21ac55 3867VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t ctx)
91447636
A
3868{
3869 int _err;
3870 struct vnop_select_args a;
316670eb 3871#if CONFIG_VFS_FUNNEL
91447636
A
3872 int thread_safe;
3873 int funnel_state = 0;
316670eb 3874#endif /* CONFIG_VFS_FUNNEL */
91447636 3875
2d21ac55
A
3876 if (ctx == NULL) {
3877 ctx = vfs_context_current();
91447636
A
3878 }
3879 a.a_desc = &vnop_select_desc;
3880 a.a_vp = vp;
3881 a.a_which = which;
3882 a.a_fflags = fflags;
2d21ac55 3883 a.a_context = ctx;
91447636 3884 a.a_wql = wql;
91447636 3885
316670eb 3886#if CONFIG_VFS_FUNNEL
b0d623f7 3887 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
3888 if (!thread_safe) {
3889 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3890 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3891 if ( (_err = lock_fsnode(vp, NULL)) ) {
3892 (void) thread_funnel_set(kernel_flock, funnel_state);
3893 return (_err);
3894 }
3895 }
3896 }
316670eb 3897#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3898
91447636 3899 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
b0d623f7 3900
316670eb 3901#if CONFIG_VFS_FUNNEL
91447636
A
3902 if (!thread_safe) {
3903 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3904 unlock_fsnode(vp, NULL);
3905 }
3906 (void) thread_funnel_set(kernel_flock, funnel_state);
3907 }
316670eb 3908#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3909
91447636
A
3910 return (_err);
3911}
3912
3913
3914#if 0
3915/*
3916 *#
3917 *#% exchange fvp L L L
3918 *#% exchange tvp L L L
3919 *#
3920 */
3921struct vnop_exchange_args {
3922 struct vnodeop_desc *a_desc;
3923 vnode_t a_fvp;
3924 vnode_t a_tvp;
3925 int a_options;
3926 vfs_context_t a_context;
3927};
3928#endif /* 0*/
3929errno_t
2d21ac55 3930VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
91447636
A
3931{
3932 int _err;
3933 struct vnop_exchange_args a;
316670eb 3934#if CONFIG_VFS_FUNNEL
91447636
A
3935 int thread_safe;
3936 int funnel_state = 0;
3937 vnode_t lock_first = NULL, lock_second = NULL;
316670eb 3938#endif /* CONFIG_VFS_FUNNEL */
91447636
A
3939
3940 a.a_desc = &vnop_exchange_desc;
3941 a.a_fvp = fvp;
3942 a.a_tvp = tvp;
3943 a.a_options = options;
2d21ac55 3944 a.a_context = ctx;
91447636 3945
316670eb 3946#if CONFIG_VFS_FUNNEL
b0d623f7 3947 thread_safe = THREAD_SAFE_FS(fvp);
91447636
A
3948 if (!thread_safe) {
3949 /*
3950 * Lock in vnode address order to avoid deadlocks
3951 */
3952 if (fvp < tvp) {
3953 lock_first = fvp;
3954 lock_second = tvp;
3955 } else {
3956 lock_first = tvp;
3957 lock_second = fvp;
3958 }
3959 if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) {
3960 return (_err);
3961 }
3962 if ( (_err = lock_fsnode(lock_second, NULL)) ) {
3963 unlock_fsnode(lock_first, &funnel_state);
3964 return (_err);
3965 }
3966 }
316670eb 3967#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 3968
91447636 3969 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
b0d623f7 3970
316670eb 3971#if CONFIG_VFS_FUNNEL
91447636
A
3972 if (!thread_safe) {
3973 unlock_fsnode(lock_second, NULL);
3974 unlock_fsnode(lock_first, &funnel_state);
3975 }
316670eb 3976#endif /* CONFIG_VFS_FUNNEL */
b0d623f7
A
3977
3978 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3979 post_event_if_success(fvp, _err, NOTE_ATTRIB);
3980 post_event_if_success(tvp, _err, NOTE_ATTRIB);
3981
91447636
A
3982 return (_err);
3983}
3984
3985
3986#if 0
3987/*
3988 *#
3989 *#% revoke vp U U U
3990 *#
3991 */
3992struct vnop_revoke_args {
3993 struct vnodeop_desc *a_desc;
3994 vnode_t a_vp;
3995 int a_flags;
3996 vfs_context_t a_context;
3997};
3998#endif /* 0*/
3999errno_t
2d21ac55 4000VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
91447636
A
4001{
4002 struct vnop_revoke_args a;
4003 int _err;
316670eb 4004#if CONFIG_VFS_FUNNEL
91447636
A
4005 int thread_safe;
4006 int funnel_state = 0;
316670eb 4007#endif /* CONFIG_VFS_FUNNEL */
91447636
A
4008
4009 a.a_desc = &vnop_revoke_desc;
4010 a.a_vp = vp;
4011 a.a_flags = flags;
2d21ac55 4012 a.a_context = ctx;
91447636 4013
316670eb 4014#if CONFIG_VFS_FUNNEL
b0d623f7 4015 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
4016 if (!thread_safe) {
4017 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4018 }
316670eb 4019#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 4020
91447636 4021 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
b0d623f7 4022
316670eb 4023#if CONFIG_VFS_FUNNEL
91447636
A
4024 if (!thread_safe) {
4025 (void) thread_funnel_set(kernel_flock, funnel_state);
4026 }
316670eb 4027#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 4028
91447636
A
4029 return (_err);
4030}
4031
4032
4033#if 0
4034/*
4035 *#
4036 *# mmap - vp U U U
4037 *#
4038 */
4039struct vnop_mmap_args {
4040 struct vnodeop_desc *a_desc;
4041 vnode_t a_vp;
4042 int a_fflags;
4043 vfs_context_t a_context;
4044};
4045#endif /* 0*/
4046errno_t
2d21ac55 4047VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
91447636
A
4048{
4049 int _err;
4050 struct vnop_mmap_args a;
316670eb 4051#if CONFIG_VFS_FUNNEL
91447636
A
4052 int thread_safe;
4053 int funnel_state = 0;
316670eb 4054#endif /* CONFIG_VFS_FUNNEL */
91447636
A
4055
4056 a.a_desc = &vnop_mmap_desc;
4057 a.a_vp = vp;
4058 a.a_fflags = fflags;
2d21ac55 4059 a.a_context = ctx;
91447636 4060
316670eb 4061#if CONFIG_VFS_FUNNEL
b0d623f7 4062 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
4063 if (!thread_safe) {
4064 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4065 return (_err);
4066 }
4067 }
316670eb 4068#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 4069
91447636 4070 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
b0d623f7 4071
316670eb 4072#if CONFIG_VFS_FUNNEL
91447636
A
4073 if (!thread_safe) {
4074 unlock_fsnode(vp, &funnel_state);
4075 }
316670eb 4076#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 4077
91447636
A
4078 return (_err);
4079}
4080
4081
4082#if 0
4083/*
4084 *#
4085 *# mnomap - vp U U U
4086 *#
4087 */
4088struct vnop_mnomap_args {
4089 struct vnodeop_desc *a_desc;
4090 vnode_t a_vp;
4091 vfs_context_t a_context;
4092};
4093#endif /* 0*/
4094errno_t
2d21ac55 4095VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
91447636
A
4096{
4097 int _err;
4098 struct vnop_mnomap_args a;
316670eb 4099#if CONFIG_VFS_FUNNEL
91447636
A
4100 int thread_safe;
4101 int funnel_state = 0;
316670eb 4102#endif /* CONFIG_VFS_FUNNEL */
91447636
A
4103
4104 a.a_desc = &vnop_mnomap_desc;
4105 a.a_vp = vp;
2d21ac55 4106 a.a_context = ctx;
91447636 4107
316670eb 4108#if CONFIG_VFS_FUNNEL
b0d623f7 4109 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
4110 if (!thread_safe) {
4111 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4112 return (_err);
4113 }
4114 }
316670eb 4115#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 4116
91447636 4117 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
b0d623f7 4118
316670eb 4119#if CONFIG_VFS_FUNNEL
91447636
A
4120 if (!thread_safe) {
4121 unlock_fsnode(vp, &funnel_state);
4122 }
316670eb 4123#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 4124
91447636
A
4125 return (_err);
4126}
4127
4128
4129#if 0
4130/*
4131 *#
4132 *#% fsync vp L L L
4133 *#
4134 */
4135struct vnop_fsync_args {
4136 struct vnodeop_desc *a_desc;
4137 vnode_t a_vp;
4138 int a_waitfor;
4139 vfs_context_t a_context;
4140};
4141#endif /* 0*/
4142errno_t
2d21ac55 4143VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
91447636
A
4144{
4145 struct vnop_fsync_args a;
4146 int _err;
316670eb 4147#if CONFIG_VFS_FUNNEL
91447636
A
4148 int thread_safe;
4149 int funnel_state = 0;
316670eb 4150#endif /* CONFIG_VFS_FUNNEL */
91447636
A
4151
4152 a.a_desc = &vnop_fsync_desc;
4153 a.a_vp = vp;
4154 a.a_waitfor = waitfor;
2d21ac55 4155 a.a_context = ctx;
91447636 4156
316670eb 4157#if CONFIG_VFS_FUNNEL
b0d623f7 4158 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
4159 if (!thread_safe) {
4160 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4161 return (_err);
4162 }
4163 }
316670eb 4164#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 4165
91447636 4166 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
b0d623f7 4167
316670eb 4168#if CONFIG_VFS_FUNNEL
91447636
A
4169 if (!thread_safe) {
4170 unlock_fsnode(vp, &funnel_state);
4171 }
316670eb 4172#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 4173
91447636
A
4174 return (_err);
4175}
4176
4177
4178#if 0
4179/*
4180 *#
4181 *#% remove dvp L U U
4182 *#% remove vp L U U
4183 *#
4184 */
4185struct vnop_remove_args {
4186 struct vnodeop_desc *a_desc;
4187 vnode_t a_dvp;
4188 vnode_t a_vp;
4189 struct componentname *a_cnp;
4190 int a_flags;
4191 vfs_context_t a_context;
4192};
4193#endif /* 0*/
4194errno_t
2d21ac55 4195VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
91447636
A
4196{
4197 int _err;
4198 struct vnop_remove_args a;
316670eb 4199#if CONFIG_VFS_FUNNEL
91447636
A
4200 int thread_safe;
4201 int funnel_state = 0;
316670eb 4202#endif /* CONFIG_VFS_FUNNEL */
91447636
A
4203
4204 a.a_desc = &vnop_remove_desc;
4205 a.a_dvp = dvp;
4206 a.a_vp = vp;
4207 a.a_cnp = cnp;
4208 a.a_flags = flags;
2d21ac55 4209 a.a_context = ctx;
91447636 4210
316670eb 4211#if CONFIG_VFS_FUNNEL
b0d623f7 4212 thread_safe = THREAD_SAFE_FS(dvp);
91447636
A
4213 if (!thread_safe) {
4214 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4215 return (_err);
4216 }
4217 }
316670eb 4218#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 4219
91447636
A
4220 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
4221
4222 if (_err == 0) {
4223 vnode_setneedinactive(vp);
4224
4225 if ( !(NATIVE_XATTR(dvp)) ) {
4226 /*
2d21ac55 4227 * Remove any associated extended attribute file (._ AppleDouble file).
91447636 4228 */
b0d623f7 4229 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
91447636
A
4230 }
4231 }
b0d623f7 4232
316670eb 4233#if CONFIG_VFS_FUNNEL
91447636
A
4234 if (!thread_safe) {
4235 unlock_fsnode(vp, &funnel_state);
4236 }
316670eb 4237#endif /* CONFIG_VFS_FUNNEL */
b0d623f7
A
4238
4239 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4240 post_event_if_success(dvp, _err, NOTE_WRITE);
4241
91447636
A
4242 return (_err);
4243}
4244
6d2010ae
A
4245int
4246VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
4247{
4248 int _err;
4249 struct vnop_compound_remove_args a;
4250 int no_vp = (*vpp == NULLVP);
4251
4252 a.a_desc = &vnop_compound_remove_desc;
4253 a.a_dvp = dvp;
4254 a.a_vpp = vpp;
4255 a.a_cnp = &ndp->ni_cnd;
4256 a.a_flags = flags;
4257 a.a_vap = vap;
4258 a.a_context = ctx;
4259 a.a_remove_authorizer = vn_authorize_unlink;
4260
4261 _err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
4262 if (_err == 0) {
4263 vnode_setneedinactive(*vpp);
4264
4265 if ( !(NATIVE_XATTR(dvp)) ) {
4266 /*
4267 * Remove any associated extended attribute file (._ AppleDouble file).
4268 */
4269 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
4270 }
4271 }
4272
4273 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4274 post_event_if_success(dvp, _err, NOTE_WRITE);
4275
4276 if (no_vp) {
4277 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4278 if (*vpp && _err && _err != EKEEPLOOKING) {
4279 vnode_put(*vpp);
4280 *vpp = NULLVP;
4281 }
4282 }
4283
4284 //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);
4285
4286 return (_err);
4287}
91447636
A
4288
4289#if 0
4290/*
4291 *#
4292 *#% link vp U U U
4293 *#% link tdvp L U U
4294 *#
4295 */
4296struct vnop_link_args {
4297 struct vnodeop_desc *a_desc;
4298 vnode_t a_vp;
4299 vnode_t a_tdvp;
4300 struct componentname *a_cnp;
4301 vfs_context_t a_context;
4302};
4303#endif /* 0*/
4304errno_t
2d21ac55 4305VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
91447636
A
4306{
4307 int _err;
4308 struct vnop_link_args a;
316670eb 4309#if CONFIG_VFS_FUNNEL
91447636
A
4310 int thread_safe;
4311 int funnel_state = 0;
316670eb 4312#endif /* CONFIG_VFS_FUNNEL */
91447636
A
4313
4314 /*
4315 * For file systems with non-native extended attributes,
4316 * disallow linking to an existing "._" Apple Double file.
4317 */
4318 if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
2d21ac55 4319 const char *vname;
91447636
A
4320
4321 vname = vnode_getname(vp);
4322 if (vname != NULL) {
4323 _err = 0;
4324 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
4325 _err = EPERM;
4326 }
4327 vnode_putname(vname);
4328 if (_err)
4329 return (_err);
4330 }
4331 }
4332 a.a_desc = &vnop_link_desc;
4333 a.a_vp = vp;
4334 a.a_tdvp = tdvp;
4335 a.a_cnp = cnp;
2d21ac55 4336 a.a_context = ctx;
91447636 4337
316670eb 4338#if CONFIG_VFS_FUNNEL
b0d623f7 4339 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
4340 if (!thread_safe) {
4341 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4342 return (_err);
4343 }
4344 }
316670eb 4345#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 4346
91447636 4347 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
b0d623f7 4348
316670eb 4349#if CONFIG_VFS_FUNNEL
91447636
A
4350 if (!thread_safe) {
4351 unlock_fsnode(vp, &funnel_state);
4352 }
316670eb 4353#endif /* CONFIG_VFS_FUNNEL */
b0d623f7
A
4354
4355 post_event_if_success(vp, _err, NOTE_LINK);
4356 post_event_if_success(tdvp, _err, NOTE_WRITE);
4357
91447636
A
4358 return (_err);
4359}
4360
91447636 4361errno_t
6d2010ae
A
4362vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4363 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4364 uint32_t flags, vfs_context_t ctx)
91447636 4365{
6d2010ae 4366 int _err;
b0d623f7
A
4367 vnode_t src_attr_vp = NULLVP;
4368 vnode_t dst_attr_vp = NULLVP;
316670eb
A
4369 struct nameidata *fromnd = NULL;
4370 struct nameidata *tond = NULL;
6d2010ae
A
4371 char smallname1[48];
4372 char smallname2[48];
4373 char *xfromname = NULL;
4374 char *xtoname = NULL;
4375 int batched;
91447636 4376
6d2010ae 4377 batched = vnode_compound_rename_available(fdvp);
91447636 4378
316670eb 4379#if CONFIG_VFS_FUNNEL
6d2010ae 4380 vnode_t fdvp_unsafe = (THREAD_SAFE_FS(fdvp) ? NULLVP : fdvp);
316670eb 4381#endif /* CONFIG_VFS_FUNNEL */
91447636 4382
6d2010ae
A
4383 if (!batched) {
4384 if (*fvpp == NULLVP)
4385 panic("Not batched, and no fvp?");
91447636 4386 }
6d2010ae 4387
91447636 4388 /*
b0d623f7
A
4389 * We need to preflight any potential AppleDouble file for the source file
4390 * before doing the rename operation, since we could potentially be doing
4391 * this operation on a network filesystem, and would end up duplicating
4392 * the work. Also, save the source and destination names. Skip it if the
4393 * source has a "._" prefix.
91447636 4394 */
b0d623f7 4395
91447636
A
4396 if (!NATIVE_XATTR(fdvp) &&
4397 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
4398 size_t len;
b0d623f7 4399 int error;
91447636
A
4400
4401 /* Get source attribute file name. */
4402 len = fcnp->cn_namelen + 3;
4403 if (len > sizeof(smallname1)) {
4404 MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
4405 } else {
4406 xfromname = &smallname1[0];
4407 }
2d21ac55 4408 strlcpy(xfromname, "._", min(sizeof smallname1, len));
91447636
A
4409 strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
4410 xfromname[len-1] = '\0';
4411
4412 /* Get destination attribute file name. */
4413 len = tcnp->cn_namelen + 3;
4414 if (len > sizeof(smallname2)) {
4415 MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
4416 } else {
4417 xtoname = &smallname2[0];
4418 }
2d21ac55 4419 strlcpy(xtoname, "._", min(sizeof smallname2, len));
91447636
A
4420 strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
4421 xtoname[len-1] = '\0';
b0d623f7
A
4422
4423 /*
4424 * Look up source attribute file, keep reference on it if exists.
4425 * Note that we do the namei with the nameiop of RENAME, which is different than
4426 * in the rename syscall. It's OK if the source file does not exist, since this
4427 * is only for AppleDouble files.
4428 */
4429 if (xfromname != NULL) {
316670eb
A
4430 MALLOC(fromnd, struct nameidata *, sizeof (struct nameidata), M_TEMP, M_WAITOK);
4431 NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
6d2010ae 4432 UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
316670eb
A
4433 fromnd->ni_dvp = fdvp;
4434 error = namei(fromnd);
b0d623f7
A
4435
4436 /*
4437 * If there was an error looking up source attribute file,
4438 * we'll behave as if it didn't exist.
4439 */
4440
4441 if (error == 0) {
316670eb 4442 if (fromnd->ni_vp) {
b0d623f7 4443 /* src_attr_vp indicates need to call vnode_put / nameidone later */
316670eb
A
4444 src_attr_vp = fromnd->ni_vp;
4445
4446 if (fromnd->ni_vp->v_type != VREG) {
b0d623f7 4447 src_attr_vp = NULLVP;
316670eb 4448 vnode_put(fromnd->ni_vp);
b0d623f7
A
4449 }
4450 }
4451 /*
4452 * Either we got an invalid vnode type (not a regular file) or the namei lookup
4453 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
4454 * have a vnode here, so we drop our namei buffer for the source attribute file
4455 */
4456 if (src_attr_vp == NULLVP) {
316670eb 4457 nameidone(fromnd);
b0d623f7
A
4458 }
4459 }
4460 }
91447636
A
4461 }
4462
6d2010ae
A
4463 if (batched) {
4464 _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
4465 if (_err != 0) {
4466 printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
4467 }
b0d623f7 4468
6d2010ae
A
4469 } else {
4470 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
91447636 4471 }
b0d623f7 4472
91447636 4473 if (_err == 0) {
6d2010ae 4474 mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp);
91447636
A
4475 }
4476
4477 /*
2d21ac55 4478 * Rename any associated extended attribute file (._ AppleDouble file).
91447636
A
4479 */
4480 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
b0d623f7
A
4481 int error = 0;
4482
91447636 4483 /*
b0d623f7
A
4484 * Get destination attribute file vnode.
4485 * Note that tdvp already has an iocount reference. Make sure to check that we
4486 * get a valid vnode from namei.
91447636 4487 */
316670eb
A
4488 MALLOC(tond, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK);
4489 NDINIT(tond, RENAME, OP_RENAME,
b0d623f7
A
4490 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
4491 CAST_USER_ADDR_T(xtoname), ctx);
316670eb
A
4492 tond->ni_dvp = tdvp;
4493 error = namei(tond);
b0d623f7
A
4494
4495 if (error)
4496 goto out;
4497
316670eb
A
4498 if (tond->ni_vp) {
4499 dst_attr_vp = tond->ni_vp;
91447636 4500 }
b0d623f7
A
4501
4502 if (src_attr_vp) {
316670eb
A
4503 const char *old_name = src_attr_vp->v_name;
4504 vnode_t old_parent = src_attr_vp->v_parent;
4505
6d2010ae 4506 if (batched) {
316670eb
A
4507 error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
4508 tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
6d2010ae
A
4509 0, ctx);
4510 } else {
316670eb
A
4511 error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
4512 tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
b0d623f7 4513 }
b0d623f7 4514
316670eb
A
4515 if (error == 0 && old_name == src_attr_vp->v_name &&
4516 old_parent == src_attr_vp->v_parent) {
4517 int update_flags = VNODE_UPDATE_NAME;
4518
4519 if (fdvp != tdvp)
4520 update_flags |= VNODE_UPDATE_PARENT;
4521
4522 vnode_update_identity(src_attr_vp, tdvp,
4523 tond->ni_cnd.cn_nameptr,
4524 tond->ni_cnd.cn_namelen,
4525 tond->ni_cnd.cn_hash,
4526 update_flags);
4527 }
4528
b0d623f7
A
4529 /* kevent notifications for moving resource files
4530 * _err is zero if we're here, so no need to notify directories, code
4531 * below will do that. only need to post the rename on the source and
4532 * possibly a delete on the dest
4533 */
4534 post_event_if_success(src_attr_vp, error, NOTE_RENAME);
4535 if (dst_attr_vp) {
4536 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4537 }
4538
4539 } else if (dst_attr_vp) {
91447636 4540 /*
b0d623f7
A
4541 * Just delete destination attribute file vnode if it exists, since
4542 * we didn't have a source attribute file.
91447636
A
4543 * Note that tdvp already has an iocount reference.
4544 */
b0d623f7
A
4545
4546 struct vnop_remove_args args;
4547
91447636
A
4548 args.a_desc = &vnop_remove_desc;
4549 args.a_dvp = tdvp;
b0d623f7 4550 args.a_vp = dst_attr_vp;
316670eb 4551 args.a_cnp = &tond->ni_cnd;
2d21ac55 4552 args.a_context = ctx;
91447636 4553
316670eb 4554#if CONFIG_VFS_FUNNEL
91447636 4555 if (fdvp_unsafe != NULLVP)
b0d623f7 4556 error = lock_fsnode(dst_attr_vp, NULL);
316670eb 4557#endif /* CONFIG_VFS_FUNNEL */
91447636 4558 if (error == 0) {
b0d623f7 4559 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
91447636 4560
316670eb 4561#if CONFIG_VFS_FUNNEL
91447636 4562 if (fdvp_unsafe != NULLVP)
b0d623f7 4563 unlock_fsnode(dst_attr_vp, NULL);
316670eb 4564#endif /* CONFIG_VFS_FUNNEL */
91447636
A
4565
4566 if (error == 0)
b0d623f7 4567 vnode_setneedinactive(dst_attr_vp);
91447636 4568 }
2d21ac55 4569
b0d623f7
A
4570 /* kevent notification for deleting the destination's attribute file
4571 * if it existed. Only need to post the delete on the destination, since
4572 * the code below will handle the directories.
4573 */
4574 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
91447636 4575 }
91447636
A
4576 }
4577out:
b0d623f7
A
4578 if (src_attr_vp) {
4579 vnode_put(src_attr_vp);
316670eb 4580 nameidone(fromnd);
b0d623f7
A
4581 }
4582 if (dst_attr_vp) {
4583 vnode_put(dst_attr_vp);
316670eb
A
4584 nameidone(tond);
4585 }
4586 if (fromnd) {
4587 FREE(fromnd, M_TEMP);
4588 }
4589 if (tond) {
4590 FREE(tond, M_TEMP);
b0d623f7 4591 }
91447636
A
4592 if (xfromname && xfromname != &smallname1[0]) {
4593 FREE(xfromname, M_TEMP);
4594 }
4595 if (xtoname && xtoname != &smallname2[0]) {
4596 FREE(xtoname, M_TEMP);
4597 }
b0d623f7 4598
6d2010ae
A
4599 return _err;
4600}
4601
4602
4603#if 0
4604/*
4605 *#
4606 *#% rename fdvp U U U
4607 *#% rename fvp U U U
4608 *#% rename tdvp L U U
4609 *#% rename tvp X U U
4610 *#
4611 */
4612struct vnop_rename_args {
4613 struct vnodeop_desc *a_desc;
4614 vnode_t a_fdvp;
4615 vnode_t a_fvp;
4616 struct componentname *a_fcnp;
4617 vnode_t a_tdvp;
4618 vnode_t a_tvp;
4619 struct componentname *a_tcnp;
4620 vfs_context_t a_context;
4621};
4622#endif /* 0*/
/*
 * VNOP_RENAME - dispatch a classic (non-compound) rename to the
 * filesystem's vnop_rename entry point.
 *
 * fdvp/fvp/fcnp identify the source directory, vnode, and name;
 * tdvp/tvp/tcnp identify the target.  tvp may be NULLVP when no existing
 * target is being overwritten.  Returns 0 on success or the
 * filesystem's errno.
 *
 * On success, kqueue events are posted: NOTE_WRITE (plus NOTE_LINK when
 * a directory changed link count) on the affected parent directories,
 * NOTE_DELETE on a replaced target, and NOTE_RENAME on the moved vnode.
 */
errno_t
VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
            struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
            vfs_context_t ctx)
{
	int _err = 0;
	int events;
	struct vnop_rename_args a;
#if CONFIG_VFS_FUNNEL
	int funnel_state = 0;
	vnode_t lock_first = NULL, lock_second = NULL;
	vnode_t fdvp_unsafe = NULLVP;
	vnode_t tdvp_unsafe = NULLVP;
#endif /* CONFIG_VFS_FUNNEL */

	a.a_desc = &vnop_rename_desc;
	a.a_fdvp = fdvp;
	a.a_fvp = fvp;
	a.a_fcnp = fcnp;
	a.a_tdvp = tdvp;
	a.a_tvp = tvp;
	a.a_tcnp = tcnp;
	a.a_context = ctx;

#if CONFIG_VFS_FUNNEL
	if (!THREAD_SAFE_FS(fdvp))
		fdvp_unsafe = fdvp;
	if (!THREAD_SAFE_FS(tdvp))
		tdvp_unsafe = tdvp;

	if (fdvp_unsafe != NULLVP) {
		/*
		 * Lock parents in vnode address order to avoid deadlocks.
		 * Note that it's possible for the fdvp to be unsafe,
		 * but the tdvp to be safe, because tvp could be a directory
		 * in the root of a filesystem... in that case, tdvp is in
		 * the filesystem that this root is mounted on.
		 */
		if (tdvp_unsafe == NULL || fdvp_unsafe == tdvp_unsafe) {
			lock_first  = fdvp_unsafe;
			lock_second = NULL;
		} else if (fdvp_unsafe < tdvp_unsafe) {
			lock_first  = fdvp_unsafe;
			lock_second = tdvp_unsafe;
		} else {
			lock_first  = tdvp_unsafe;
			lock_second = fdvp_unsafe;
		}
		if ( (_err = lock_fsnode(lock_first, &funnel_state)) )
			return (_err);

		if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
			unlock_fsnode(lock_first, &funnel_state);
			return (_err);
		}

		/*
		 * Lock both children in vnode address order to avoid deadlocks.
		 */
		if (tvp == NULL || tvp == fvp) {
			lock_first  = fvp;
			lock_second = NULL;
		} else if (fvp < tvp) {
			lock_first  = fvp;
			lock_second = tvp;
		} else {
			lock_first  = tvp;
			lock_second = fvp;
		}
		if ( (_err = lock_fsnode(lock_first, NULL)) )
			goto out1;

		if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
			unlock_fsnode(lock_first, NULL);
			goto out1;
		}
	}
#endif /* CONFIG_VFS_FUNNEL */

	/* do the rename of the main file. */
	_err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);

#if CONFIG_VFS_FUNNEL
	if (fdvp_unsafe != NULLVP) {
		/* Drop the child locks; the parent locks are dropped at out1. */
		if (lock_second != NULL)
			unlock_fsnode(lock_second, NULL);
		unlock_fsnode(lock_first, NULL);
	}
#endif /* CONFIG_VFS_FUNNEL */

	if (_err == 0) {
		/* Overwritten target will be released on last iocount drop. */
		if (tvp && tvp != fvp)
			vnode_setneedinactive(tvp);
	}

#if CONFIG_VFS_FUNNEL
out1:
	if (fdvp_unsafe != NULLVP) {
		if (tdvp_unsafe != NULLVP)
			unlock_fsnode(tdvp_unsafe, NULL);
		unlock_fsnode(fdvp_unsafe, &funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	/* Wrote at least one directory. If transplanted a dir, also changed link counts */
	if (0 == _err) {
		events = NOTE_WRITE;
		if (vnode_isdir(fvp)) {
			/* Link count on dir changed only if we are moving a dir and...
			 * --Moved to new dir, not overwriting there
			 * --Kept in same dir and DID overwrite
			 */
			if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
				events |= NOTE_LINK;
			}
		}

		lock_vnode_and_post(fdvp, events);
		if (fdvp != tdvp) {
			lock_vnode_and_post(tdvp, events);
		}

		/* If you're replacing the target, post a deletion for it */
		if (tvp)
		{
			lock_vnode_and_post(tvp, NOTE_DELETE);
		}

		lock_vnode_and_post(fvp, NOTE_RENAME);
	}

	return (_err);
}
4756
6d2010ae
A
/*
 * VNOP_COMPOUND_RENAME - dispatch a compound rename, in which the
 * filesystem performs the name lookups itself.
 *
 * *fvpp and/or *tvpp may be NULLVP on entry; the filesystem fills them
 * in as part of the operation.  fvap/tvap optionally carry attributes
 * for authorization.  On any error other than EKEEPLOOKING, vnodes the
 * filesystem returned are released and the pointers reset to NULLVP;
 * EKEEPLOOKING means the caller must continue the lookup.
 */
int
VNOP_COMPOUND_RENAME(
	struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
	struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
	uint32_t flags, vfs_context_t ctx)
{
	int _err = 0;
	int events;
	struct vnop_compound_rename_args a;
	int no_fvp, no_tvp;

	/* Remember whether the FS (not the caller) produced the vnodes. */
	no_fvp = (*fvpp) == NULLVP;
	no_tvp = (*tvpp) == NULLVP;

	a.a_desc = &vnop_compound_rename_desc;

	a.a_fdvp = fdvp;
	a.a_fvpp = fvpp;
	a.a_fcnp = fcnp;
	a.a_fvap = fvap;

	a.a_tdvp = tdvp;
	a.a_tvpp = tvpp;
	a.a_tcnp = tcnp;
	a.a_tvap = tvap;

	a.a_flags = flags;
	a.a_context = ctx;
	a.a_rename_authorizer = vn_authorize_rename;
	a.a_reserved = NULL;

	/* do the rename of the main file. */
	_err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);

	if (_err == 0) {
		/* Overwritten target will be released on last iocount drop. */
		if (*tvpp && *tvpp != *fvpp)
			vnode_setneedinactive(*tvpp);
	}

	/* Wrote at least one directory. If transplanted a dir, also changed link counts */
	if (0 == _err && *fvpp != *tvpp) {
		if (!*fvpp) {
			panic("No fvpp after compound rename?");
		}

		events = NOTE_WRITE;
		if (vnode_isdir(*fvpp)) {
			/* Link count on dir changed only if we are moving a dir and...
			 * --Moved to new dir, not overwriting there
			 * --Kept in same dir and DID overwrite
			 */
			if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
				events |= NOTE_LINK;
			}
		}

		lock_vnode_and_post(fdvp, events);
		if (fdvp != tdvp) {
			lock_vnode_and_post(tdvp, events);
		}

		/* If you're replacing the target, post a deletion for it */
		if (*tvpp)
		{
			lock_vnode_and_post(*tvpp, NOTE_DELETE);
		}

		lock_vnode_and_post(*fvpp, NOTE_RENAME);
	}

	/* Let the lookup machinery account for FS-created vnodes. */
	if (no_fvp) {
		lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
	}
	if (no_tvp && *tvpp != NULLVP) {
		lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
	}

	if (_err && _err != EKEEPLOOKING) {
		if (*fvpp) {
			vnode_put(*fvpp);
			*fvpp = NULLVP;
		}
		if (*tvpp) {
			vnode_put(*tvpp);
			*tvpp = NULLVP;
		}
	}

	return (_err);
}
4847
4848int
4849vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4850 struct vnode_attr *vap, vfs_context_t ctx)
4851{
4852 if (ndp->ni_cnd.cn_nameiop != CREATE) {
4853 panic("Non-CREATE nameiop in vn_mkdir()?");
4854 }
4855
4856 if (vnode_compound_mkdir_available(dvp)) {
4857 return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4858 } else {
4859 return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4860 }
4861}
4862
91447636
A
4863 #if 0
4864/*
4865 *#
4866 *#% mkdir dvp L U U
4867 *#% mkdir vpp - L -
4868 *#
4869 */
4870struct vnop_mkdir_args {
4871 struct vnodeop_desc *a_desc;
4872 vnode_t a_dvp;
4873 vnode_t *a_vpp;
4874 struct componentname *a_cnp;
4875 struct vnode_attr *a_vap;
4876 vfs_context_t a_context;
4877};
4878#endif /* 0*/
/*
 * VNOP_MKDIR - dispatch directory creation to the filesystem's
 * vnop_mkdir entry point.  Returns 0 on success with *vpp referencing
 * the new directory, or the filesystem's errno.
 *
 * On success, for filesystems without native xattr support, any stale
 * "._" AppleDouble file with the new directory's name is removed.
 * Posts NOTE_LINK | NOTE_WRITE on dvp when the operation succeeds.
 */
errno_t
VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
           struct vnode_attr *vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_mkdir_args a;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	a.a_desc = &vnop_mkdir_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_context = ctx;

#if CONFIG_VFS_FUNNEL
	thread_safe = THREAD_SAFE_FS(dvp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* CONFIG_VFS_FUNNEL */

	_err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
	}

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);

	return (_err);
}
4924
6d2010ae
A
/*
 * VNOP_COMPOUND_MKDIR - dispatch a compound mkdir, in which the
 * filesystem performs the lookup-and-create itself.
 *
 * On success *vpp holds the new directory and a stale "._" AppleDouble
 * file (if any) is cleaned up on non-native-xattr filesystems.  On any
 * error other than EKEEPLOOKING, a vnode the filesystem returned is
 * released and *vpp reset to NULLVP.  Posts NOTE_LINK | NOTE_WRITE on
 * dvp when the operation succeeds.
 */
int
VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
                    struct vnode_attr *vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_compound_mkdir_args a;

	a.a_desc = &vnop_compound_mkdir_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = &ndp->ni_cnd;
	a.a_vap = vap;
	a.a_flags = 0;
	a.a_context = ctx;
#if 0
	a.a_mkdir_authorizer = vn_authorize_mkdir;
#endif /* 0 */
	a.a_reserved = NULL;

	_err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
	}

	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);

	/* Tell the lookup machinery about the vnode the FS produced. */
	lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
	if (*vpp && _err && _err != EKEEPLOOKING) {
		vnode_put(*vpp);
		*vpp = NULLVP;
	}

	return (_err);
}
4962
4963int
4964vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
4965{
4966 if (vnode_compound_rmdir_available(dvp)) {
4967 return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
4968 } else {
4969 if (*vpp == NULLVP) {
4970 panic("NULL vp, but not a compound VNOP?");
4971 }
4972 if (vap != NULL) {
4973 panic("Non-NULL vap, but not a compound VNOP?");
4974 }
4975 return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
4976 }
4977}
91447636
A
4978
4979#if 0
4980/*
4981 *#
4982 *#% rmdir dvp L U U
4983 *#% rmdir vp L U U
4984 *#
4985 */
4986struct vnop_rmdir_args {
4987 struct vnodeop_desc *a_desc;
4988 vnode_t a_dvp;
4989 vnode_t a_vp;
4990 struct componentname *a_cnp;
4991 vfs_context_t a_context;
4992};
4993
4994#endif /* 0*/
/*
 * VNOP_RMDIR - dispatch directory removal to the filesystem's
 * vnop_rmdir entry point.  Returns 0 on success or the filesystem's
 * errno.
 *
 * On success the removed vnode is marked need-inactive, and on
 * non-native-xattr filesystems the matching "._" AppleDouble file is
 * force-removed.  Posts NOTE_DELETE | NOTE_LINK on vp and
 * NOTE_LINK | NOTE_WRITE on dvp when the operation succeeds.
 */
errno_t
VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
{
	int _err;
	struct vnop_rmdir_args a;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	a.a_desc = &vnop_rmdir_desc;
	a.a_dvp = dvp;
	a.a_vp = vp;
	a.a_cnp = cnp;
	a.a_context = ctx;

#if CONFIG_VFS_FUNNEL
	/*
	 * NOTE(review): thread-safety is tested on dvp but the funnel
	 * lock is taken on vp (both live on the same mount, so the
	 * answer should be identical) -- confirm intent.
	 */
	thread_safe = THREAD_SAFE_FS(dvp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* CONFIG_VFS_FUNNEL */

	_err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);

	if (_err == 0) {
		vnode_setneedinactive(vp);

		if ( !(NATIVE_XATTR(dvp)) ) {
			/*
			 * Remove any associated extended attribute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
		}
	}

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	/* If you delete a dir, it loses its "." reference --> NOTE_LINK */
	post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);

	return (_err);
}
5045
6d2010ae
A
5046int
5047VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
5048 struct vnode_attr *vap, vfs_context_t ctx)
5049{
5050 int _err;
5051 struct vnop_compound_rmdir_args a;
5052 int no_vp;
5053
5054 a.a_desc = &vnop_mkdir_desc;
5055 a.a_dvp = dvp;
5056 a.a_vpp = vpp;
5057 a.a_cnp = &ndp->ni_cnd;
5058 a.a_vap = vap;
5059 a.a_flags = 0;
5060 a.a_context = ctx;
5061 a.a_rmdir_authorizer = vn_authorize_rmdir;
5062 a.a_reserved = NULL;
5063
5064 no_vp = (*vpp == NULLVP);
5065
5066 _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
5067 if (_err == 0 && !NATIVE_XATTR(dvp)) {
5068 /*
5069 * Remove stale Apple Double file (if any).
5070 */
5071 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
5072 }
5073
5074 if (*vpp) {
5075 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
5076 }
5077 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
5078
5079 if (no_vp) {
5080 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
5081
5082#if 0 /* Removing orphaned ._ files requires a vp.... */
5083 if (*vpp && _err && _err != EKEEPLOOKING) {
5084 vnode_put(*vpp);
5085 *vpp = NULLVP;
5086 }
5087#endif /* 0 */
5088 }
5089
5090 return (_err);
5091}
5092
91447636
A
/*
 * Remove a ._ AppleDouble file.
 *
 * Looks up "._<basename>" relative to dvp and removes it.  When 'force'
 * is clear (object-creation path), the file is only removed if it looks
 * stale: non-empty and not modified for AD_STALE_SECS.  When 'force' is
 * set (deletion path), it is removed unconditionally.  Silently does
 * nothing if basename is empty, already "._"-prefixed, or the lookup
 * fails.  Posts NOTE_DELETE / NOTE_WRITE knotes on success.
 */
#define AD_STALE_SECS  (180)
static void
xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
{
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t len;

	if ((basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	/* Build "._<basename>", falling back to a heap buffer if too long. */
	filename = &smallname[0];
	len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (len >= sizeof(smallname)) {
		len++;  /* snprintf result doesn't include '\0' */
		MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
		len = snprintf(filename, len, "._%s", basename);
	}
	NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
	       CAST_USER_ADDR_T(filename), ctx);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0)
		goto out2;

	xvp = nd.ni_vp;
	nameidone(&nd);
	if (xvp->v_type != VREG)
		goto out1;

	/*
	 * When creating a new object and a "._" file already
	 * exists, check to see if its a stale "._" file.
	 *
	 */
	if (!force) {
		struct vnode_attr va;

		VATTR_INIT(&va);
		VATTR_WANTED(&va, va_data_size);
		VATTR_WANTED(&va, va_modify_time);
		if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
		    VATTR_IS_SUPPORTED(&va, va_data_size) &&
		    VATTR_IS_SUPPORTED(&va, va_modify_time) &&
		    va.va_data_size != 0) {
			struct timeval tv;

			microtime(&tv);
			if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
			    (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
				force = 1;  /* must be stale */
			}
		}
	}
	if (force) {
		int  error;

		error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
		if (error == 0)
			vnode_setneedinactive(xvp);

		post_event_if_success(xvp, error, NOTE_DELETE);
		post_event_if_success(dvp, error, NOTE_WRITE);
	}

out1:
	/*
	 * NOTE(review): namei() with WANTPARENT took an extra iocount on
	 * the parent (dvp); both references are released here -- confirm.
	 */
	vnode_put(dvp);
	vnode_put(xvp);
out2:
	if (filename && filename != &smallname[0]) {
		FREE(filename, M_TEMP);
	}
}
5171
/*
 * Shadow uid/gid/mode changes to a ._ AppleDouble file.
 *
 * Looks up "._<basename>" relative to dvp and, if it exists and is a
 * regular file, applies the same vnode_attr the caller just set on the
 * main file.  Errors from the setattr are deliberately ignored
 * (best-effort shadowing); lookup failure is silently ignored too.
 */
static void
xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
                  vfs_context_t ctx)
{
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t len;

	if ((dvp == NULLVP) ||
	    (basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	/* Build "._<basename>", falling back to a heap buffer if too long. */
	filename = &smallname[0];
	len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (len >= sizeof(smallname)) {
		len++;  /* snprintf result doesn't include '\0' */
		MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
		len = snprintf(filename, len, "._%s", basename);
	}
	NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
	       CAST_USER_ADDR_T(filename), ctx);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0)
		goto out2;

	xvp = nd.ni_vp;
	nameidone(&nd);

	if (xvp->v_type == VREG) {
#if CONFIG_VFS_FUNNEL
		int thread_safe = THREAD_SAFE_FS(dvp);
#endif /* CONFIG_VFS_FUNNEL */
		struct vnop_setattr_args a;

		a.a_desc = &vnop_setattr_desc;
		a.a_vp = xvp;
		a.a_vap = vap;
		a.a_context = ctx;

#if CONFIG_VFS_FUNNEL
		if (!thread_safe) {
			if ( (lock_fsnode(xvp, NULL)) )
				goto out1;
		}
#endif /* CONFIG_VFS_FUNNEL */

		/* Best effort: the setattr result is intentionally discarded. */
		(void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);

#if CONFIG_VFS_FUNNEL
		if (!thread_safe) {
			unlock_fsnode(xvp, NULL);
		}
#endif /* CONFIG_VFS_FUNNEL */
	}


#if CONFIG_VFS_FUNNEL
out1:
#endif /* CONFIG_VFS_FUNNEL */
	vnode_put(xvp);

out2:
	if (filename && filename != &smallname[0]) {
		FREE(filename, M_TEMP);
	}
}
5244
5245 #if 0
5246/*
5247 *#
5248 *#% symlink dvp L U U
5249 *#% symlink vpp - U -
5250 *#
5251 */
5252struct vnop_symlink_args {
5253 struct vnodeop_desc *a_desc;
5254 vnode_t a_dvp;
5255 vnode_t *a_vpp;
5256 struct componentname *a_cnp;
5257 struct vnode_attr *a_vap;
5258 char *a_target;
5259 vfs_context_t a_context;
5260};
5261
5262#endif /* 0*/
/*
 * VNOP_SYMLINK - dispatch symlink creation to the filesystem's
 * vnop_symlink entry point.  'target' is the link's destination path.
 * Returns 0 on success or the filesystem's errno.
 *
 * On success, for filesystems without native xattr support, any stale
 * "._" AppleDouble file with the new link's name is removed.  Posts
 * NOTE_WRITE on dvp when the operation succeeds.
 */
errno_t
VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
             struct vnode_attr *vap, char *target, vfs_context_t ctx)
{
	int _err;
	struct vnop_symlink_args a;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	a.a_desc = &vnop_symlink_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_target = target;
	a.a_context = ctx;

#if CONFIG_VFS_FUNNEL
	thread_safe = THREAD_SAFE_FS(dvp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* CONFIG_VFS_FUNNEL */

	_err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any). Posts its own knotes
		 */
		xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
	}

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	post_event_if_success(dvp, _err, NOTE_WRITE);

	return (_err);
}
5309
5310#if 0
5311/*
5312 *#
5313 *#% readdir vp L L L
5314 *#
5315 */
5316struct vnop_readdir_args {
5317 struct vnodeop_desc *a_desc;
5318 vnode_t a_vp;
5319 struct uio *a_uio;
5320 int a_flags;
5321 int *a_eofflag;
5322 int *a_numdirent;
5323 vfs_context_t a_context;
5324};
5325
5326#endif /* 0*/
5327errno_t
5328VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
2d21ac55 5329 int *numdirent, vfs_context_t ctx)
91447636
A
5330{
5331 int _err;
5332 struct vnop_readdir_args a;
316670eb 5333#if CONFIG_VFS_FUNNEL
91447636
A
5334 int thread_safe;
5335 int funnel_state = 0;
316670eb 5336#endif /* CONFIG_VFS_FUNNEL */
91447636
A
5337
5338 a.a_desc = &vnop_readdir_desc;
5339 a.a_vp = vp;
5340 a.a_uio = uio;
5341 a.a_flags = flags;
5342 a.a_eofflag = eofflag;
5343 a.a_numdirent = numdirent;
2d21ac55 5344 a.a_context = ctx;
316670eb 5345#if CONFIG_VFS_FUNNEL
91447636
A
5346 thread_safe = THREAD_SAFE_FS(vp);
5347
5348 if (!thread_safe) {
5349 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5350 return (_err);
5351 }
5352 }
316670eb 5353#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5354
91447636 5355 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
b0d623f7 5356
316670eb 5357#if CONFIG_VFS_FUNNEL
91447636
A
5358 if (!thread_safe) {
5359 unlock_fsnode(vp, &funnel_state);
5360 }
316670eb 5361#endif /* CONFIG_VFS_FUNNEL */
91447636
A
5362 return (_err);
5363}
5364
5365#if 0
5366/*
5367 *#
5368 *#% readdirattr vp L L L
5369 *#
5370 */
5371struct vnop_readdirattr_args {
5372 struct vnodeop_desc *a_desc;
5373 vnode_t a_vp;
5374 struct attrlist *a_alist;
5375 struct uio *a_uio;
b0d623f7
A
5376 uint32_t a_maxcount;
5377 uint32_t a_options;
5378 uint32_t *a_newstate;
91447636 5379 int *a_eofflag;
b0d623f7 5380 uint32_t *a_actualcount;
91447636
A
5381 vfs_context_t a_context;
5382};
5383
5384#endif /* 0*/
/*
 * VNOP_READDIRATTR - dispatch a combined readdir+getattr to the
 * filesystem's vnop_readdirattr entry point, returning up to 'maxcount'
 * entries described by 'alist' into 'uio'.  newstate, eofflag and
 * actualcount are output parameters owned by the caller.  Returns 0 on
 * success or the filesystem's errno.
 */
errno_t
VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
                 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
{
	int _err;
	struct vnop_readdirattr_args a;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	a.a_desc = &vnop_readdirattr_desc;
	a.a_vp = vp;
	a.a_alist = alist;
	a.a_uio = uio;
	a.a_maxcount = maxcount;
	a.a_options = options;
	a.a_newstate = newstate;
	a.a_eofflag = eofflag;
	a.a_actualcount = actualcount;
	a.a_context = ctx;

#if CONFIG_VFS_FUNNEL
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* CONFIG_VFS_FUNNEL */

	_err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

	return (_err);
}
5426
5427#if 0
5428/*
5429 *#
5430 *#% readlink vp L L L
5431 *#
5432 */
5433struct vnop_readlink_args {
5434 struct vnodeop_desc *a_desc;
5435 vnode_t a_vp;
5436 struct uio *a_uio;
5437 vfs_context_t a_context;
5438};
5439#endif /* 0 */
5440
2d21ac55
A
5441/*
5442 * Returns: 0 Success
5443 * lock_fsnode:ENOENT No such file or directory [only for VFS
5444 * that is not thread safe & vnode is
5445 * currently being/has been terminated]
5446 * <vfs_readlink>:EINVAL
5447 * <vfs_readlink>:???
5448 *
5449 * Note: The return codes from the underlying VFS's readlink routine
5450 * can't be fully enumerated here, since third party VFS authors
5451 * may not limit their error returns to the ones documented here,
5452 * even though this may result in some programs functioning
5453 * incorrectly.
5454 *
5455 * The return codes documented above are those which may currently
5456 * be returned by HFS from hfs_vnop_readlink, not including
5457 * additional error code which may be propagated from underlying
5458 * routines.
5459 */
91447636 5460errno_t
2d21ac55 5461VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
91447636
A
5462{
5463 int _err;
5464 struct vnop_readlink_args a;
316670eb 5465#if CONFIG_VFS_FUNNEL
91447636
A
5466 int thread_safe;
5467 int funnel_state = 0;
316670eb 5468#endif /* CONFIG_VFS_FUNNEL */
91447636
A
5469
5470 a.a_desc = &vnop_readlink_desc;
5471 a.a_vp = vp;
5472 a.a_uio = uio;
2d21ac55 5473 a.a_context = ctx;
91447636 5474
316670eb 5475#if CONFIG_VFS_FUNNEL
b0d623f7 5476 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5477 if (!thread_safe) {
5478 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5479 return (_err);
5480 }
5481 }
316670eb 5482#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5483
91447636 5484 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
b0d623f7 5485
316670eb 5486#if CONFIG_VFS_FUNNEL
91447636
A
5487 if (!thread_safe) {
5488 unlock_fsnode(vp, &funnel_state);
5489 }
316670eb 5490#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5491
91447636
A
5492 return (_err);
5493}
5494
5495#if 0
5496/*
5497 *#
5498 *#% inactive vp L U U
5499 *#
5500 */
5501struct vnop_inactive_args {
5502 struct vnodeop_desc *a_desc;
5503 vnode_t a_vp;
5504 vfs_context_t a_context;
5505};
5506#endif /* 0*/
/*
 * VNOP_INACTIVE - notify the filesystem that the last iocount on vp has
 * been dropped, via its vnop_inactive entry point.  Returns the
 * filesystem's errno.
 *
 * Afterwards, shadow named-stream vnodes (for filesystems without
 * native named streams) are marked for recycling so they are freed as
 * soon as the last usecount goes away.
 */
errno_t
VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
{
	int _err;
	struct vnop_inactive_args a;
#if CONFIG_VFS_FUNNEL
	int thread_safe;
	int funnel_state = 0;
#endif /* CONFIG_VFS_FUNNEL */

	a.a_desc = &vnop_inactive_desc;
	a.a_vp = vp;
	a.a_context = ctx;

#if CONFIG_VFS_FUNNEL
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* CONFIG_VFS_FUNNEL */

	_err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
#endif /* CONFIG_VFS_FUNNEL */

#if NAMEDSTREAMS
	/* For file systems that do not support namedstream natively, mark
	 * the shadow stream file vnode to be recycled as soon as the last
	 * reference goes away. To avoid re-entering reclaim code, do not
	 * call recycle on terminating namedstream vnodes.
	 */
	if (vnode_isnamedstream(vp) &&
	    (vp->v_parent != NULLVP) &&
	    vnode_isshadow(vp) &&
	    ((vp->v_lflag & VL_TERMINATE) == 0)) {
		vnode_recycle(vp);
	}
#endif

	return (_err);
}
5554
5555
5556#if 0
5557/*
5558 *#
5559 *#% reclaim vp U U U
5560 *#
5561 */
5562struct vnop_reclaim_args {
5563 struct vnodeop_desc *a_desc;
5564 vnode_t a_vp;
5565 vfs_context_t a_context;
5566};
5567#endif /* 0*/
5568errno_t
2d21ac55 5569VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
91447636
A
5570{
5571 int _err;
5572 struct vnop_reclaim_args a;
316670eb 5573#if CONFIG_VFS_FUNNEL
91447636
A
5574 int thread_safe;
5575 int funnel_state = 0;
316670eb 5576#endif /* CONFIG_VFS_FUNNEL */
91447636
A
5577
5578 a.a_desc = &vnop_reclaim_desc;
5579 a.a_vp = vp;
2d21ac55 5580 a.a_context = ctx;
91447636 5581
316670eb 5582#if CONFIG_VFS_FUNNEL
b0d623f7 5583 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5584 if (!thread_safe) {
5585 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5586 }
316670eb 5587#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5588
91447636 5589 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
b0d623f7 5590
316670eb 5591#if CONFIG_VFS_FUNNEL
91447636
A
5592 if (!thread_safe) {
5593 (void) thread_funnel_set(kernel_flock, funnel_state);
5594 }
316670eb 5595#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5596
91447636
A
5597 return (_err);
5598}
5599
5600
2d21ac55
A
5601/*
5602 * Returns: 0 Success
5603 * lock_fsnode:ENOENT No such file or directory [only for VFS
5604 * that is not thread safe & vnode is
5605 * currently being/has been terminated]
5606 * <vnop_pathconf_desc>:??? [per FS implementation specific]
5607 */
91447636
A
5608#if 0
5609/*
5610 *#
5611 *#% pathconf vp L L L
5612 *#
5613 */
5614struct vnop_pathconf_args {
5615 struct vnodeop_desc *a_desc;
5616 vnode_t a_vp;
5617 int a_name;
b0d623f7 5618 int32_t *a_retval;
91447636
A
5619 vfs_context_t a_context;
5620};
5621#endif /* 0*/
5622errno_t
b0d623f7 5623VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
91447636
A
5624{
5625 int _err;
5626 struct vnop_pathconf_args a;
316670eb 5627#if CONFIG_VFS_FUNNEL
91447636
A
5628 int thread_safe;
5629 int funnel_state = 0;
316670eb 5630#endif /* CONFIG_VFS_FUNNEL */
91447636
A
5631
5632 a.a_desc = &vnop_pathconf_desc;
5633 a.a_vp = vp;
5634 a.a_name = name;
5635 a.a_retval = retval;
2d21ac55 5636 a.a_context = ctx;
91447636 5637
316670eb 5638#if CONFIG_VFS_FUNNEL
b0d623f7 5639 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5640 if (!thread_safe) {
5641 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5642 return (_err);
5643 }
5644 }
316670eb 5645#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5646
91447636 5647 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
b0d623f7 5648
316670eb 5649#if CONFIG_VFS_FUNNEL
91447636
A
5650 if (!thread_safe) {
5651 unlock_fsnode(vp, &funnel_state);
5652 }
316670eb 5653#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5654
91447636
A
5655 return (_err);
5656}
5657
2d21ac55
A
5658/*
5659 * Returns: 0 Success
5660 * err_advlock:ENOTSUP
5661 * lf_advlock:???
5662 * <vnop_advlock_desc>:???
5663 *
5664 * Notes: VFS implementations of advisory locking using calls through
5665 * <vnop_advlock_desc> because lock enforcement does not occur
5666 * locally should try to limit themselves to the return codes
5667 * documented above for lf_advlock and err_advlock.
5668 */
91447636
A
5669#if 0
5670/*
5671 *#
5672 *#% advlock vp U U U
5673 *#
5674 */
5675struct vnop_advlock_args {
5676 struct vnodeop_desc *a_desc;
5677 vnode_t a_vp;
5678 caddr_t a_id;
5679 int a_op;
5680 struct flock *a_fl;
5681 int a_flags;
5682 vfs_context_t a_context;
5683};
5684#endif /* 0*/
5685errno_t
2d21ac55 5686VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx)
91447636
A
5687{
5688 int _err;
5689 struct vnop_advlock_args a;
316670eb 5690#if CONFIG_VFS_FUNNEL
91447636
A
5691 int thread_safe;
5692 int funnel_state = 0;
316670eb 5693#endif /* CONFIG_VFS_FUNNEL */
91447636
A
5694
5695 a.a_desc = &vnop_advlock_desc;
5696 a.a_vp = vp;
5697 a.a_id = id;
5698 a.a_op = op;
5699 a.a_fl = fl;
5700 a.a_flags = flags;
2d21ac55 5701 a.a_context = ctx;
91447636 5702
316670eb 5703#if CONFIG_VFS_FUNNEL
b0d623f7 5704 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5705 if (!thread_safe) {
5706 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5707 }
316670eb 5708#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5709
91447636
A
5710 /* Disallow advisory locking on non-seekable vnodes */
5711 if (vnode_isfifo(vp)) {
5712 _err = err_advlock(&a);
5713 } else {
5714 if ((vp->v_flag & VLOCKLOCAL)) {
5715 /* Advisory locking done at this layer */
5716 _err = lf_advlock(&a);
5717 } else {
5718 /* Advisory locking done by underlying filesystem */
5719 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
5720 }
5721 }
b0d623f7 5722
316670eb 5723#if CONFIG_VFS_FUNNEL
91447636
A
5724 if (!thread_safe) {
5725 (void) thread_funnel_set(kernel_flock, funnel_state);
5726 }
316670eb 5727#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5728
91447636
A
5729 return (_err);
5730}
5731
5732
5733
5734#if 0
5735/*
5736 *#
5737 *#% allocate vp L L L
5738 *#
5739 */
5740struct vnop_allocate_args {
5741 struct vnodeop_desc *a_desc;
5742 vnode_t a_vp;
5743 off_t a_length;
5744 u_int32_t a_flags;
5745 off_t *a_bytesallocated;
5746 off_t a_offset;
5747 vfs_context_t a_context;
5748};
5749
5750#endif /* 0*/
5751errno_t
2d21ac55 5752VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
91447636
A
5753{
5754 int _err;
5755 struct vnop_allocate_args a;
316670eb 5756#if CONFIG_VFS_FUNNEL
91447636
A
5757 int thread_safe;
5758 int funnel_state = 0;
316670eb 5759#endif /* CONFIG_VFS_FUNNEL */
91447636
A
5760
5761 a.a_desc = &vnop_allocate_desc;
5762 a.a_vp = vp;
5763 a.a_length = length;
5764 a.a_flags = flags;
5765 a.a_bytesallocated = bytesallocated;
5766 a.a_offset = offset;
2d21ac55 5767 a.a_context = ctx;
91447636 5768
316670eb 5769#if CONFIG_VFS_FUNNEL
b0d623f7 5770 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5771 if (!thread_safe) {
5772 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5773 return (_err);
5774 }
5775 }
316670eb 5776#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5777
91447636 5778 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
b0d623f7
A
5779#if CONFIG_FSE
5780 if (_err == 0) {
5781 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5782 }
5783#endif
5784
316670eb 5785#if CONFIG_VFS_FUNNEL
91447636
A
5786 if (!thread_safe) {
5787 unlock_fsnode(vp, &funnel_state);
5788 }
316670eb 5789#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5790
91447636
A
5791 return (_err);
5792}
5793
5794#if 0
5795/*
5796 *#
5797 *#% pagein vp = = =
5798 *#
5799 */
5800struct vnop_pagein_args {
5801 struct vnodeop_desc *a_desc;
5802 vnode_t a_vp;
5803 upl_t a_pl;
b0d623f7 5804 upl_offset_t a_pl_offset;
91447636
A
5805 off_t a_f_offset;
5806 size_t a_size;
5807 int a_flags;
5808 vfs_context_t a_context;
5809};
5810#endif /* 0*/
5811errno_t
b0d623f7 5812VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
91447636
A
5813{
5814 int _err;
5815 struct vnop_pagein_args a;
316670eb 5816#if CONFIG_VFS_FUNNEL
91447636
A
5817 int thread_safe;
5818 int funnel_state = 0;
316670eb 5819#endif /* CONFIG_VFS_FUNNEL */
91447636
A
5820
5821 a.a_desc = &vnop_pagein_desc;
5822 a.a_vp = vp;
5823 a.a_pl = pl;
5824 a.a_pl_offset = pl_offset;
5825 a.a_f_offset = f_offset;
5826 a.a_size = size;
5827 a.a_flags = flags;
2d21ac55 5828 a.a_context = ctx;
91447636 5829
316670eb 5830#if CONFIG_VFS_FUNNEL
b0d623f7 5831 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5832 if (!thread_safe) {
5833 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5834 }
316670eb 5835#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5836
91447636 5837 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
b0d623f7 5838
316670eb 5839#if CONFIG_VFS_FUNNEL
91447636
A
5840 if (!thread_safe) {
5841 (void) thread_funnel_set(kernel_flock, funnel_state);
5842 }
316670eb 5843#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5844
91447636
A
5845 return (_err);
5846}
5847
5848#if 0
5849/*
5850 *#
5851 *#% pageout vp = = =
5852 *#
5853 */
5854struct vnop_pageout_args {
5855 struct vnodeop_desc *a_desc;
5856 vnode_t a_vp;
5857 upl_t a_pl;
b0d623f7 5858 upl_offset_t a_pl_offset;
91447636
A
5859 off_t a_f_offset;
5860 size_t a_size;
5861 int a_flags;
5862 vfs_context_t a_context;
5863};
5864
5865#endif /* 0*/
5866errno_t
b0d623f7 5867VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
91447636
A
5868{
5869 int _err;
5870 struct vnop_pageout_args a;
316670eb 5871#if CONFIG_VFS_FUNNEL
91447636
A
5872 int thread_safe;
5873 int funnel_state = 0;
316670eb 5874#endif /* CONFIG_VFS_FUNNEL */
91447636
A
5875
5876 a.a_desc = &vnop_pageout_desc;
5877 a.a_vp = vp;
5878 a.a_pl = pl;
5879 a.a_pl_offset = pl_offset;
5880 a.a_f_offset = f_offset;
5881 a.a_size = size;
5882 a.a_flags = flags;
2d21ac55 5883 a.a_context = ctx;
91447636 5884
316670eb 5885#if CONFIG_VFS_FUNNEL
b0d623f7 5886 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
5887 if (!thread_safe) {
5888 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5889 }
316670eb 5890#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 5891
91447636 5892 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
b0d623f7 5893
316670eb 5894#if CONFIG_VFS_FUNNEL
91447636
A
5895 if (!thread_safe) {
5896 (void) thread_funnel_set(kernel_flock, funnel_state);
5897 }
316670eb 5898#endif /* CONFIG_VFS_FUNNEL */
b0d623f7
A
5899
5900 post_event_if_success(vp, _err, NOTE_WRITE);
5901
91447636
A
5902 return (_err);
5903}
5904
6d2010ae
A
5905int
5906vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5907{
5908 if (vnode_compound_remove_available(dvp)) {
5909 return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5910 } else {
5911 return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5912 }
5913}
5914
#if CONFIG_SEARCHFS

#if 0
/*
 *#
 *#% searchfs     vp      L L L
 *#
 */
struct vnop_searchfs_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	void *a_searchparams1;
	void *a_searchparams2;
	struct attrlist *a_searchattrs;
	uint32_t a_maxmatches;
	struct timeval *a_timelimit;
	struct attrlist *a_returnattrs;
	uint32_t *a_nummatches;
	uint32_t a_scriptcode;
	uint32_t a_options;
	struct uio *a_uio;
	struct searchstate *a_searchstate;
	vfs_context_t a_context;
};

#endif /* 0*/
/*
 * Dispatch the "searchfs" operation to the filesystem (CONFIG_SEARCHFS
 * kernels only).
 */
errno_t
VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
{
	int error;
	struct vnop_searchfs_args a = {
		.a_desc = &vnop_searchfs_desc,
		.a_vp = vp,
		.a_searchparams1 = searchparams1,
		.a_searchparams2 = searchparams2,
		.a_searchattrs = searchattrs,
		.a_maxmatches = maxmatches,
		.a_timelimit = timelimit,
		.a_returnattrs = returnattrs,
		.a_nummatches = nummatches,
		.a_scriptcode = scriptcode,
		.a_options = options,
		.a_uio = uio,
		.a_searchstate = searchstate,
		.a_context = ctx,
	};
#if CONFIG_VFS_FUNNEL
	int funnel_state = 0;
	int thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ((error = lock_fsnode(vp, &funnel_state)) != 0)
			return (error);
	}
#endif /* CONFIG_VFS_FUNNEL */

	error = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);

#if CONFIG_VFS_FUNNEL
	if (!thread_safe)
		unlock_fsnode(vp, &funnel_state);
#endif /* CONFIG_VFS_FUNNEL */

	return (error);
}
#endif /* CONFIG_SEARCHFS */
91447636
A
5986
5987#if 0
5988/*
5989 *#
5990 *#% copyfile fvp U U U
5991 *#% copyfile tdvp L U U
5992 *#% copyfile tvp X U U
5993 *#
5994 */
5995struct vnop_copyfile_args {
5996 struct vnodeop_desc *a_desc;
5997 vnode_t a_fvp;
5998 vnode_t a_tdvp;
5999 vnode_t a_tvp;
6000 struct componentname *a_tcnp;
6001 int a_mode;
6002 int a_flags;
6003 vfs_context_t a_context;
6004};
6005#endif /* 0*/
6006errno_t
6007VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
2d21ac55 6008 int mode, int flags, vfs_context_t ctx)
91447636
A
6009{
6010 int _err;
6011 struct vnop_copyfile_args a;
6012 a.a_desc = &vnop_copyfile_desc;
6013 a.a_fvp = fvp;
6014 a.a_tdvp = tdvp;
6015 a.a_tvp = tvp;
6016 a.a_tcnp = tcnp;
6017 a.a_mode = mode;
6018 a.a_flags = flags;
2d21ac55 6019 a.a_context = ctx;
91447636
A
6020 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
6021 return (_err);
6022}
6023
91447636 6024errno_t
2d21ac55 6025VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
91447636
A
6026{
6027 struct vnop_getxattr_args a;
6028 int error;
316670eb 6029#if CONFIG_VFS_FUNNEL
91447636
A
6030 int thread_safe;
6031 int funnel_state = 0;
316670eb 6032#endif /* CONFIG_VFS_FUNNEL */
91447636
A
6033
6034 a.a_desc = &vnop_getxattr_desc;
6035 a.a_vp = vp;
6036 a.a_name = name;
6037 a.a_uio = uio;
6038 a.a_size = size;
6039 a.a_options = options;
2d21ac55 6040 a.a_context = ctx;
91447636 6041
316670eb 6042#if CONFIG_VFS_FUNNEL
91447636
A
6043 thread_safe = THREAD_SAFE_FS(vp);
6044 if (!thread_safe) {
6045 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
6046 return (error);
6047 }
6048 }
316670eb 6049#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6050
91447636 6051 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
b0d623f7 6052
316670eb 6053#if CONFIG_VFS_FUNNEL
91447636
A
6054 if (!thread_safe) {
6055 unlock_fsnode(vp, &funnel_state);
6056 }
316670eb 6057#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6058
91447636
A
6059 return (error);
6060}
6061
6062errno_t
2d21ac55 6063VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
91447636
A
6064{
6065 struct vnop_setxattr_args a;
6066 int error;
316670eb 6067#if CONFIG_VFS_FUNNEL
91447636
A
6068 int thread_safe;
6069 int funnel_state = 0;
316670eb 6070#endif /* CONFIG_VFS_FUNNEL */
91447636
A
6071
6072 a.a_desc = &vnop_setxattr_desc;
6073 a.a_vp = vp;
6074 a.a_name = name;
6075 a.a_uio = uio;
6076 a.a_options = options;
2d21ac55 6077 a.a_context = ctx;
91447636 6078
316670eb 6079#if CONFIG_VFS_FUNNEL
91447636
A
6080 thread_safe = THREAD_SAFE_FS(vp);
6081 if (!thread_safe) {
6082 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
6083 return (error);
6084 }
6085 }
316670eb 6086#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6087
91447636 6088 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
b0d623f7 6089
316670eb 6090#if CONFIG_VFS_FUNNEL
91447636
A
6091 if (!thread_safe) {
6092 unlock_fsnode(vp, &funnel_state);
6093 }
316670eb 6094#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6095
2d21ac55
A
6096 if (error == 0)
6097 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
b0d623f7
A
6098
6099 post_event_if_success(vp, error, NOTE_ATTRIB);
6100
91447636
A
6101 return (error);
6102}
6103
6104errno_t
2d21ac55 6105VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
91447636
A
6106{
6107 struct vnop_removexattr_args a;
6108 int error;
316670eb 6109#if CONFIG_VFS_FUNNEL
91447636
A
6110 int thread_safe;
6111 int funnel_state = 0;
316670eb 6112#endif /* CONFIG_VFS_FUNNEL */
91447636
A
6113
6114 a.a_desc = &vnop_removexattr_desc;
6115 a.a_vp = vp;
6116 a.a_name = name;
6117 a.a_options = options;
2d21ac55 6118 a.a_context = ctx;
91447636 6119
316670eb 6120#if CONFIG_VFS_FUNNEL
91447636
A
6121 thread_safe = THREAD_SAFE_FS(vp);
6122 if (!thread_safe) {
6123 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
6124 return (error);
6125 }
6126 }
316670eb 6127#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6128
91447636 6129 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
b0d623f7 6130
316670eb 6131#if CONFIG_VFS_FUNNEL
91447636
A
6132 if (!thread_safe) {
6133 unlock_fsnode(vp, &funnel_state);
6134 }
316670eb 6135#endif /* CONFIG_VFS_FUNNEL */
b0d623f7
A
6136
6137 post_event_if_success(vp, error, NOTE_ATTRIB);
6138
91447636
A
6139 return (error);
6140}
6141
6142errno_t
2d21ac55 6143VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
91447636
A
6144{
6145 struct vnop_listxattr_args a;
6146 int error;
316670eb 6147#if CONFIG_VFS_FUNNEL
91447636
A
6148 int thread_safe;
6149 int funnel_state = 0;
316670eb 6150#endif /* CONFIG_VFS_FUNNEL */
91447636
A
6151
6152 a.a_desc = &vnop_listxattr_desc;
6153 a.a_vp = vp;
6154 a.a_uio = uio;
6155 a.a_size = size;
6156 a.a_options = options;
2d21ac55 6157 a.a_context = ctx;
91447636 6158
316670eb 6159#if CONFIG_VFS_FUNNEL
91447636
A
6160 thread_safe = THREAD_SAFE_FS(vp);
6161 if (!thread_safe) {
6162 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
6163 return (error);
6164 }
6165 }
316670eb 6166#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6167
91447636 6168 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
b0d623f7 6169
316670eb 6170#if CONFIG_VFS_FUNNEL
91447636
A
6171 if (!thread_safe) {
6172 unlock_fsnode(vp, &funnel_state);
6173 }
316670eb 6174#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6175
91447636
A
6176 return (error);
6177}
6178
6179
6180#if 0
6181/*
6182 *#
6183 *#% blktooff vp = = =
6184 *#
6185 */
6186struct vnop_blktooff_args {
6187 struct vnodeop_desc *a_desc;
6188 vnode_t a_vp;
6189 daddr64_t a_lblkno;
6190 off_t *a_offset;
6191};
6192#endif /* 0*/
6193errno_t
6194VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
6195{
6196 int _err;
6197 struct vnop_blktooff_args a;
316670eb 6198#if CONFIG_VFS_FUNNEL
91447636
A
6199 int thread_safe;
6200 int funnel_state = 0;
316670eb 6201#endif /* CONFIG_VFS_FUNNEL */
91447636
A
6202
6203 a.a_desc = &vnop_blktooff_desc;
6204 a.a_vp = vp;
6205 a.a_lblkno = lblkno;
6206 a.a_offset = offset;
91447636 6207
316670eb 6208#if CONFIG_VFS_FUNNEL
b0d623f7 6209 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
6210 if (!thread_safe) {
6211 funnel_state = thread_funnel_set(kernel_flock, TRUE);
6212 }
316670eb 6213#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6214
91447636 6215 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
b0d623f7 6216
316670eb 6217#if CONFIG_VFS_FUNNEL
91447636
A
6218 if (!thread_safe) {
6219 (void) thread_funnel_set(kernel_flock, funnel_state);
6220 }
316670eb 6221#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6222
91447636
A
6223 return (_err);
6224}
6225
6226#if 0
6227/*
6228 *#
6229 *#% offtoblk vp = = =
6230 *#
6231 */
6232struct vnop_offtoblk_args {
6233 struct vnodeop_desc *a_desc;
6234 vnode_t a_vp;
6235 off_t a_offset;
6236 daddr64_t *a_lblkno;
6237};
6238#endif /* 0*/
6239errno_t
6240VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
6241{
6242 int _err;
6243 struct vnop_offtoblk_args a;
316670eb 6244#if CONFIG_VFS_FUNNEL
91447636
A
6245 int thread_safe;
6246 int funnel_state = 0;
316670eb 6247#endif /* CONFIG_VFS_FUNNEL */
91447636
A
6248
6249 a.a_desc = &vnop_offtoblk_desc;
6250 a.a_vp = vp;
6251 a.a_offset = offset;
6252 a.a_lblkno = lblkno;
91447636 6253
316670eb 6254#if CONFIG_VFS_FUNNEL
b0d623f7 6255 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
6256 if (!thread_safe) {
6257 funnel_state = thread_funnel_set(kernel_flock, TRUE);
6258 }
316670eb 6259#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6260
91447636 6261 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
b0d623f7 6262
316670eb 6263#if CONFIG_VFS_FUNNEL
91447636
A
6264 if (!thread_safe) {
6265 (void) thread_funnel_set(kernel_flock, funnel_state);
6266 }
316670eb 6267#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6268
91447636
A
6269 return (_err);
6270}
6271
6272#if 0
6273/*
6274 *#
6275 *#% blockmap vp L L L
6276 *#
6277 */
6278struct vnop_blockmap_args {
6279 struct vnodeop_desc *a_desc;
6280 vnode_t a_vp;
6281 off_t a_foffset;
6282 size_t a_size;
6283 daddr64_t *a_bpn;
6284 size_t *a_run;
6285 void *a_poff;
6286 int a_flags;
6287 vfs_context_t a_context;
6288};
6289#endif /* 0*/
6290errno_t
2d21ac55 6291VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
91447636
A
6292{
6293 int _err;
6294 struct vnop_blockmap_args a;
316670eb
A
6295 size_t localrun = 0;
6296#if CONFIG_VFS_FUNNEL
91447636
A
6297 int thread_safe;
6298 int funnel_state = 0;
316670eb 6299#endif /* CONFIG_VFS_FUNNEL */
91447636 6300
2d21ac55
A
6301 if (ctx == NULL) {
6302 ctx = vfs_context_current();
91447636
A
6303 }
6304 a.a_desc = &vnop_blockmap_desc;
6305 a.a_vp = vp;
6306 a.a_foffset = foffset;
6307 a.a_size = size;
6308 a.a_bpn = bpn;
316670eb 6309 a.a_run = &localrun;
91447636
A
6310 a.a_poff = poff;
6311 a.a_flags = flags;
2d21ac55 6312 a.a_context = ctx;
91447636 6313
316670eb 6314#if CONFIG_VFS_FUNNEL
b0d623f7 6315 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
6316 if (!thread_safe) {
6317 funnel_state = thread_funnel_set(kernel_flock, TRUE);
6318 }
316670eb 6319#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6320
91447636 6321 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
b0d623f7 6322
316670eb 6323#if CONFIG_VFS_FUNNEL
91447636
A
6324 if (!thread_safe) {
6325 (void) thread_funnel_set(kernel_flock, funnel_state);
6326 }
316670eb
A
6327#endif /* CONFIG_VFS_FUNNEL */
6328
6329 /*
6330 * We used a local variable to request information from the underlying
6331 * filesystem about the length of the I/O run in question. If
6332 * we get malformed output from the filesystem, we cap it to the length
6333 * requested, at most. Update 'run' on the way out.
6334 */
6335 if (_err == 0) {
6336 if (localrun > size) {
6337 localrun = size;
6338 }
6339
6340 if (run) {
6341 *run = localrun;
6342 }
6343 }
b0d623f7 6344
91447636
A
6345 return (_err);
6346}
6347
6348#if 0
6349struct vnop_strategy_args {
6350 struct vnodeop_desc *a_desc;
6351 struct buf *a_bp;
6352};
6353
6354#endif /* 0*/
6355errno_t
6356VNOP_STRATEGY(struct buf *bp)
6357{
6358 int _err;
6359 struct vnop_strategy_args a;
6360 a.a_desc = &vnop_strategy_desc;
6361 a.a_bp = bp;
6362 _err = (*buf_vnode(bp)->v_op[vnop_strategy_desc.vdesc_offset])(&a);
6363 return (_err);
6364}
6365
6366#if 0
6367struct vnop_bwrite_args {
6368 struct vnodeop_desc *a_desc;
6369 buf_t a_bp;
6370};
6371#endif /* 0*/
6372errno_t
6373VNOP_BWRITE(struct buf *bp)
6374{
6375 int _err;
6376 struct vnop_bwrite_args a;
6377 a.a_desc = &vnop_bwrite_desc;
6378 a.a_bp = bp;
6379 _err = (*buf_vnode(bp)->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
6380 return (_err);
6381}
6382
6383#if 0
6384struct vnop_kqfilt_add_args {
6385 struct vnodeop_desc *a_desc;
6386 struct vnode *a_vp;
6387 struct knote *a_kn;
6388 vfs_context_t a_context;
6389};
6390#endif
6391errno_t
2d21ac55 6392VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
91447636
A
6393{
6394 int _err;
6395 struct vnop_kqfilt_add_args a;
316670eb 6396#if CONFIG_VFS_FUNNEL
91447636
A
6397 int thread_safe;
6398 int funnel_state = 0;
316670eb 6399#endif /* CONFIG_VFS_FUNNEL */
91447636
A
6400
6401 a.a_desc = VDESC(vnop_kqfilt_add);
6402 a.a_vp = vp;
6403 a.a_kn = kn;
2d21ac55 6404 a.a_context = ctx;
91447636 6405
316670eb 6406#if CONFIG_VFS_FUNNEL
b0d623f7 6407 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
6408 if (!thread_safe) {
6409 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
6410 return (_err);
6411 }
6412 }
316670eb 6413#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6414
91447636 6415 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
b0d623f7 6416
316670eb 6417#if CONFIG_VFS_FUNNEL
91447636
A
6418 if (!thread_safe) {
6419 unlock_fsnode(vp, &funnel_state);
6420 }
316670eb 6421#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6422
91447636
A
6423 return(_err);
6424}
6425
6426#if 0
6427struct vnop_kqfilt_remove_args {
6428 struct vnodeop_desc *a_desc;
6429 struct vnode *a_vp;
6430 uintptr_t a_ident;
6431 vfs_context_t a_context;
6432};
6433#endif
6434errno_t
2d21ac55 6435VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
91447636
A
6436{
6437 int _err;
6438 struct vnop_kqfilt_remove_args a;
316670eb 6439#if CONFIG_VFS_FUNNEL
91447636
A
6440 int thread_safe;
6441 int funnel_state = 0;
316670eb 6442#endif /* CONFIG_VFS_FUNNEL */
91447636
A
6443
6444 a.a_desc = VDESC(vnop_kqfilt_remove);
6445 a.a_vp = vp;
6446 a.a_ident = ident;
2d21ac55 6447 a.a_context = ctx;
91447636 6448
316670eb 6449#if CONFIG_VFS_FUNNEL
b0d623f7 6450 thread_safe = THREAD_SAFE_FS(vp);
91447636
A
6451 if (!thread_safe) {
6452 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
6453 return (_err);
6454 }
6455 }
316670eb 6456#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6457
91447636 6458 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
b0d623f7 6459
316670eb 6460#if CONFIG_VFS_FUNNEL
91447636
A
6461 if (!thread_safe) {
6462 unlock_fsnode(vp, &funnel_state);
6463 }
316670eb 6464#endif /* CONFIG_VFS_FUNNEL */
b0d623f7
A
6465
6466 return(_err);
6467}
6468
6469errno_t
6470VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
6471{
6472 int _err;
6473 struct vnop_monitor_args a;
316670eb 6474#if CONFIG_VFS_FUNNEL
b0d623f7
A
6475 int thread_safe;
6476 int funnel_state = 0;
316670eb 6477#endif /* CONFIG_VFS_FUNNEL */
b0d623f7
A
6478
6479 a.a_desc = VDESC(vnop_monitor);
6480 a.a_vp = vp;
6481 a.a_events = events;
6482 a.a_flags = flags;
6483 a.a_handle = handle;
6484 a.a_context = ctx;
6485
316670eb 6486#if CONFIG_VFS_FUNNEL
b0d623f7
A
6487 thread_safe = THREAD_SAFE_FS(vp);
6488 if (!thread_safe) {
6489 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
6490 return (_err);
6491 }
6492 }
316670eb 6493#endif /* CONFIG_VFS_FUNNEL */
b0d623f7
A
6494
6495 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
6496
316670eb 6497#if CONFIG_VFS_FUNNEL
b0d623f7
A
6498 if (!thread_safe) {
6499 unlock_fsnode(vp, &funnel_state);
6500 }
316670eb 6501#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6502
91447636
A
6503 return(_err);
6504}
6505
2d21ac55
A
6506#if 0
6507struct vnop_setlabel_args {
6508 struct vnodeop_desc *a_desc;
6509 struct vnode *a_vp;
6510 struct label *a_vl;
6511 vfs_context_t a_context;
6512};
6513#endif
6514errno_t
6515VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
6516{
6517 int _err;
6518 struct vnop_setlabel_args a;
316670eb 6519#if CONFIG_VFS_FUNNEL
2d21ac55
A
6520 int thread_safe;
6521 int funnel_state = 0;
316670eb 6522#endif /* CONFIG_VFS_FUNNEL */
2d21ac55
A
6523
6524 a.a_desc = VDESC(vnop_setlabel);
6525 a.a_vp = vp;
6526 a.a_vl = label;
6527 a.a_context = ctx;
2d21ac55 6528
316670eb 6529#if CONFIG_VFS_FUNNEL
b0d623f7 6530 thread_safe = THREAD_SAFE_FS(vp);
2d21ac55
A
6531 if (!thread_safe) {
6532 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
6533 return (_err);
6534 }
6535 }
316670eb 6536#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6537
2d21ac55 6538 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
b0d623f7 6539
316670eb 6540#if CONFIG_VFS_FUNNEL
2d21ac55
A
6541 if (!thread_safe) {
6542 unlock_fsnode(vp, &funnel_state);
6543 }
316670eb 6544#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6545
2d21ac55
A
6546 return(_err);
6547}
6548
6549
6550#if NAMEDSTREAMS
6551/*
6552 * Get a named streamed
6553 */
6554errno_t
6555VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
6556{
6557 struct vnop_getnamedstream_args a;
6558
316670eb 6559#if CONFIG_VFS_FUNNEL
2d21ac55
A
6560 if (!THREAD_SAFE_FS(vp))
6561 return (ENOTSUP);
316670eb 6562#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6563
2d21ac55
A
6564 a.a_desc = &vnop_getnamedstream_desc;
6565 a.a_vp = vp;
6566 a.a_svpp = svpp;
6567 a.a_name = name;
6568 a.a_operation = operation;
6569 a.a_flags = flags;
6570 a.a_context = ctx;
6571
6572 return (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
6573}
6574
6575/*
6576 * Create a named streamed
6577 */
6578errno_t
6579VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
6580{
6581 struct vnop_makenamedstream_args a;
6582
316670eb 6583#if CONFIG_VFS_FUNNEL
2d21ac55
A
6584 if (!THREAD_SAFE_FS(vp))
6585 return (ENOTSUP);
316670eb 6586#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6587
2d21ac55
A
6588 a.a_desc = &vnop_makenamedstream_desc;
6589 a.a_vp = vp;
6590 a.a_svpp = svpp;
6591 a.a_name = name;
6592 a.a_flags = flags;
6593 a.a_context = ctx;
6594
6595 return (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
6596}
6597
6598
6599/*
6600 * Remove a named streamed
6601 */
6602errno_t
6603VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
6604{
6605 struct vnop_removenamedstream_args a;
6606
316670eb 6607#if CONFIG_VFS_FUNNEL
2d21ac55
A
6608 if (!THREAD_SAFE_FS(vp))
6609 return (ENOTSUP);
316670eb 6610#endif /* CONFIG_VFS_FUNNEL */
b0d623f7 6611
2d21ac55
A
6612 a.a_desc = &vnop_removenamedstream_desc;
6613 a.a_vp = vp;
6614 a.a_svp = svp;
6615 a.a_name = name;
6616 a.a_flags = flags;
6617 a.a_context = ctx;
6618
6619 return (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
6620}
6621#endif