1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount_internal.h>
85 #include <sys/time.h>
86 #include <sys/lock.h>
87 #include <sys/vnode.h>
88 #include <sys/vnode_internal.h>
89 #include <sys/stat.h>
90 #include <sys/namei.h>
91 #include <sys/ucred.h>
92 #include <sys/buf_internal.h>
93 #include <sys/errno.h>
94 #include <sys/malloc.h>
95 #include <sys/uio_internal.h>
96 #include <sys/uio.h>
97 #include <sys/domain.h>
98 #include <sys/mbuf.h>
99 #include <sys/syslog.h>
100 #include <sys/ubc_internal.h>
101 #include <sys/vm.h>
102 #include <sys/sysctl.h>
103 #include <sys/filedesc.h>
104 #include <sys/event.h>
105 #include <sys/kdebug.h>
106 #include <sys/kauth.h>
107 #include <sys/user.h>
108 #include <sys/kern_memorystatus.h>
109 #include <miscfs/fifofs/fifo.h>
110
111 #include <string.h>
112 #include <machine/spl.h>
113
114
115 #include <kern/assert.h>
116
117 #include <miscfs/specfs/specdev.h>
118
119 #include <mach/mach_types.h>
120 #include <mach/memory_object_types.h>
121 #include <mach/memory_object_control.h>
122
123 #include <kern/kalloc.h> /* kalloc()/kfree() */
124 #include <kern/clock.h> /* delay_for_interval() */
125 #include <libkern/OSAtomic.h> /* OSAddAtomic() */
126
127
128 #ifdef JOE_DEBUG
129 #include <libkern/OSDebug.h>
130 #endif
131
132 #include <vm/vm_protos.h> /* vnode_pager_vrele() */
133
134 #if CONFIG_MACF
135 #include <security/mac_framework.h>
136 #endif
137
138 extern lck_grp_t *vnode_lck_grp;
139 extern lck_attr_t *vnode_lck_attr;
140
141 #if CONFIG_TRIGGERS
142 extern lck_grp_t *trigger_vnode_lck_grp;
143 extern lck_attr_t *trigger_vnode_lck_attr;
144 #endif
145
146 extern lck_mtx_t * mnt_list_mtx_lock;
147
148 enum vtype iftovt_tab[16] = {
149 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
150 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
151 };
152 int vttoif_tab[9] = {
153 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
154 S_IFSOCK, S_IFIFO, S_IFMT,
155 };
156
157
158 /* XXX These should be in a BSD accessible Mach header, but aren't. */
159 extern void memory_object_mark_used(
160 memory_object_control_t control);
161
162 extern void memory_object_mark_unused(
163 memory_object_control_t control,
164 boolean_t rage);
165
166
167 /* XXX next prototype should be from <nfs/nfs.h> */
168 extern int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int);
169
170 /* XXX next prototype should be from <libsa/stdlib.h> but conflicts with libkern */
171 __private_extern__ void qsort(
172 void * array,
173 size_t nmembers,
174 size_t member_size,
175 int (*)(const void *, const void *));
176
177 extern kern_return_t adjust_vm_object_cache(vm_size_t oval, vm_size_t nval);
178 __private_extern__ void vntblinit(void);
179 __private_extern__ kern_return_t reset_vmobjectcache(unsigned int val1,
180 unsigned int val2);
181 __private_extern__ int unlink1(vfs_context_t, struct nameidata *, int);
182
183 extern int system_inshutdown;
184
185 static void vnode_list_add(vnode_t);
186 static void vnode_list_remove(vnode_t);
187 static void vnode_list_remove_locked(vnode_t);
188
189 static errno_t vnode_drain(vnode_t);
190 static void vgone(vnode_t, int flags);
191 static void vclean(vnode_t vp, int flag);
192 static void vnode_reclaim_internal(vnode_t, int, int, int);
193
194 static void vnode_dropiocount (vnode_t);
195
196 static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
197 static int vnode_reload(vnode_t);
198 static int vnode_isinuse_locked(vnode_t, int, int);
199
200 static void insmntque(vnode_t vp, mount_t mp);
201 static int mount_getvfscnt(void);
202 static int mount_fillfsids(fsid_t *, int );
203 static void vnode_iterate_setup(mount_t);
204 int vnode_umount_preflight(mount_t, vnode_t, int);
205 static int vnode_iterate_prepare(mount_t);
206 static int vnode_iterate_reloadq(mount_t);
207 static void vnode_iterate_clear(mount_t);
208 static mount_t vfs_getvfs_locked(fsid_t *);
209 static int vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp,
210 struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx);
211 static int vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx);
212
213 errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);
214
215 #ifdef JOE_DEBUG
216 static void record_vp(vnode_t vp, int count);
217 #endif
218
219 #if CONFIG_TRIGGERS
220 static int vnode_resolver_create(mount_t, vnode_t, struct vnode_trigger_param *, boolean_t external);
221 static void vnode_resolver_detach(vnode_t);
222 #endif
223
224 TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
225 TAILQ_HEAD(deadlst, vnode) vnode_dead_list; /* vnode dead list */
226
227 TAILQ_HEAD(ragelst, vnode) vnode_rage_list; /* vnode rapid age list */
228 struct timeval rage_tv;
229 int rage_limit = 0;
230 int ragevnodes = 0;
231
232 #define RAGE_LIMIT_MIN 100
233 #define RAGE_TIME_LIMIT 5
234
235 struct mntlist mountlist; /* mounted filesystem list */
236 static int nummounts = 0;
237
238 #if DIAGNOSTIC
239 #define VLISTCHECK(fun, vp, list) \
240 if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
241 panic("%s: %s vnode not on %slist", (fun), (list), (list));
242 #else
243 #define VLISTCHECK(fun, vp, list)
244 #endif /* DIAGNOSTIC */
245
246 #define VLISTNONE(vp) \
247 do { \
248 (vp)->v_freelist.tqe_next = (struct vnode *)0; \
249 (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb; \
250 } while(0)
251
252 #define VONLIST(vp) \
253 ((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
254
255 /* remove a vnode from free vnode list */
256 #define VREMFREE(fun, vp) \
257 do { \
258 VLISTCHECK((fun), (vp), "free"); \
259 TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist); \
260 VLISTNONE((vp)); \
261 freevnodes--; \
262 } while(0)
263
264
265
266 /* remove a vnode from dead vnode list */
267 #define VREMDEAD(fun, vp) \
268 do { \
269 VLISTCHECK((fun), (vp), "dead"); \
270 TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist); \
271 VLISTNONE((vp)); \
272 vp->v_listflag &= ~VLIST_DEAD; \
273 deadvnodes--; \
274 } while(0)
275
276
277 /* remove a vnode from rage vnode list */
278 #define VREMRAGE(fun, vp) \
279 do { \
280 if ( !(vp->v_listflag & VLIST_RAGE)) \
281 panic("VREMRAGE: vp not on rage list"); \
282 VLISTCHECK((fun), (vp), "rage"); \
283 TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist); \
284 VLISTNONE((vp)); \
285 vp->v_listflag &= ~VLIST_RAGE; \
286 ragevnodes--; \
287 } while(0)
288
289
290 /*
291 * vnodetarget hasn't been used in a long time, but
292 * it was exported for some reason... I'm leaving it in
293 * place for now... it should be deprecated out of the
294 * exports and removed eventually.
295 */
296 u_int32_t vnodetarget; /* target for vnreclaim() */
297 #define VNODE_FREE_TARGET 20 /* Default value for vnodetarget */
298
299 /*
300 * We need quite a few vnodes on the free list to sustain the
301 * rapid stat() the compilation process does, and still benefit from the name
302 * cache. Having too few vnodes on the free list causes serious disk
303 * thrashing as we cycle through them.
304 */
305 #define VNODE_FREE_MIN CONFIG_VNODE_FREE_MIN /* freelist should have at least this many */
306
307 /*
308 * Initialize the vnode management data structures.
309 */
310 __private_extern__ void
311 vntblinit(void)
312 {
313 TAILQ_INIT(&vnode_free_list);
314 TAILQ_INIT(&vnode_rage_list);
315 TAILQ_INIT(&vnode_dead_list);
316 TAILQ_INIT(&mountlist);
317
318 if (!vnodetarget)
319 vnodetarget = VNODE_FREE_TARGET;
320
321 microuptime(&rage_tv);
322 rage_limit = desiredvnodes / 100;
323
324 if (rage_limit < RAGE_LIMIT_MIN)
325 rage_limit = RAGE_LIMIT_MIN;
326
327 /*
328 * Scale the vm_object_cache to accommodate the vnodes
329 * we want to cache
330 */
331 (void) adjust_vm_object_cache(0, desiredvnodes - VNODE_FREE_MIN);
332 }
333
334 /* Reset the VM Object Cache with the values passed in */
335 __private_extern__ kern_return_t
336 reset_vmobjectcache(unsigned int val1, unsigned int val2)
337 {
338 vm_size_t oval = val1 - VNODE_FREE_MIN;
339 vm_size_t nval;
340
341 if (val1 == val2) {
342 return KERN_SUCCESS;
343 }
344
345 if(val2 < VNODE_FREE_MIN)
346 nval = 0;
347 else
348 nval = val2 - VNODE_FREE_MIN;
349
350 return(adjust_vm_object_cache(oval, nval));
351 }
352
353
354 /* the timeout is in units of 10 msecs */
355 int
356 vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg) {
357 int error = 0;
358 struct timespec ts;
359
360 KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);
361
362 if (vp->v_numoutput > output_target) {
363
364 slpflag |= PDROP;
365
366 vnode_lock_spin(vp);
367
368 while ((vp->v_numoutput > output_target) && error == 0) {
369 if (output_target)
370 vp->v_flag |= VTHROTTLED;
371 else
372 vp->v_flag |= VBWAIT;
373
374 ts.tv_sec = (slptimeout/100);
375 ts.tv_nsec = (slptimeout % 100) * 10 * NSEC_PER_USEC * 1000; /* remaining 10 msec ticks */
376 error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);
377
378 vnode_lock_spin(vp);
379 }
380 vnode_unlock(vp);
381 }
382 KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);
383
384 return error;
385 }
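
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller that throttles itself until the number of writes in flight on a
 * vnode drops to an assumed target.  The slptimeout argument is in 10 msec
 * units, so 50 below means roughly half a second per sleep.
 */
#if 0
static int
example_throttle_writes(vnode_t vp)
{
	/* wait until no more than 16 writes are pending, sleeping in ~500 msec slices */
	return vnode_waitforwrites(vp, 16, 0, 50, "example_throttle_writes");
}
#endif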
386
387
388 void
389 vnode_startwrite(vnode_t vp) {
390
391 OSAddAtomic(1, &vp->v_numoutput);
392 }
393
394
395 void
396 vnode_writedone(vnode_t vp)
397 {
398 if (vp) {
399 int need_wakeup = 0;
400
401 OSAddAtomic(-1, &vp->v_numoutput);
402
403 vnode_lock_spin(vp);
404
405 if (vp->v_numoutput < 0)
406 panic("vnode_writedone: numoutput < 0");
407
408 if ((vp->v_flag & VTHROTTLED)) {
409 vp->v_flag &= ~VTHROTTLED;
410 need_wakeup = 1;
411 }
412 if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
413 vp->v_flag &= ~VBWAIT;
414 need_wakeup = 1;
415 }
416 vnode_unlock(vp);
417
418 if (need_wakeup)
419 wakeup((caddr_t)&vp->v_numoutput);
420 }
421 }
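
/*
 * Illustrative sketch (not part of the original source): the expected
 * pairing of vnode_startwrite()/vnode_writedone() around an asynchronous
 * write, so that v_numoutput stays balanced and VBWAIT/VTHROTTLED waiters
 * get woken.  The issue/complete helpers below are hypothetical.
 */
#if 0
static void
example_issue_async_write(vnode_t vp, buf_t bp)
{
	vnode_startwrite(vp);		/* account for the in-flight write */
	example_hw_submit(bp);		/* hypothetical: hand the buffer to the driver */
}

static void
example_write_completed(vnode_t vp)
{
	vnode_writedone(vp);		/* drop v_numoutput and wake any waiters */
}
#endif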
422
423
424
425 int
426 vnode_hasdirtyblks(vnode_t vp)
427 {
428 struct cl_writebehind *wbp;
429
430 /*
431 * Not taking the buf_mtxp as there is little
432 * point doing it. Even if the lock is taken the
433 * state can change right after that. If there
434 * needs to be synchronization, it must be driven
435 * by the caller
436 */
437 if (vp->v_dirtyblkhd.lh_first)
438 return (1);
439
440 if (!UBCINFOEXISTS(vp))
441 return (0);
442
443 wbp = vp->v_ubcinfo->cl_wbehind;
444
445 if (wbp && (wbp->cl_number || wbp->cl_scmap))
446 return (1);
447
448 return (0);
449 }
450
451 int
452 vnode_hascleanblks(vnode_t vp)
453 {
454 /*
455 * Not taking the buf_mtxp as there is little
456 * point doing it. Even if the lock is taken the
457 * state can change right after that. If there
458 * needs to be synchronization, it must be driven
459 * by the caller
460 */
461 if (vp->v_cleanblkhd.lh_first)
462 return (1);
463 return (0);
464 }
465
466 void
467 vnode_iterate_setup(mount_t mp)
468 {
469 while (mp->mnt_lflag & MNT_LITER) {
470 mp->mnt_lflag |= MNT_LITERWAIT;
471 msleep((caddr_t)mp, &mp->mnt_mlock, PVFS, "vnode_iterate_setup", NULL);
472 }
473
474 mp->mnt_lflag |= MNT_LITER;
475
476 }
477
478 int
479 vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
480 {
481 vnode_t vp;
482
483 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
484 /* disable preflight only for udf, a hack to be removed after 4073176 is fixed */
485 if (vp->v_tag == VT_UDF)
486 return 0;
487 if (vp->v_type == VDIR)
488 continue;
489 if (vp == skipvp)
490 continue;
491 if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
492 (vp->v_flag & VNOFLUSH)))
493 continue;
494 if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP))
495 continue;
496 if ((flags & WRITECLOSE) &&
497 (vp->v_writecount == 0 || vp->v_type != VREG))
498 continue;
499 /* Look for busy vnode */
500 if (((vp->v_usecount != 0) &&
501 ((vp->v_usecount - vp->v_kusecount) != 0)))
502 return(1);
503 }
504
505 return(0);
506 }
507
508 /*
509 * This routine prepares for iteration by moving all the vnodes to the worker queue.
510 * Called with the mount lock held.
511 */
512 int
513 vnode_iterate_prepare(mount_t mp)
514 {
515 vnode_t vp;
516
517 if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
518 /* nothing to do */
519 return (0);
520 }
521
522 vp = TAILQ_FIRST(&mp->mnt_vnodelist);
523 vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
524 mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
525 mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;
526
527 TAILQ_INIT(&mp->mnt_vnodelist);
528 if (mp->mnt_newvnodes.tqh_first != NULL)
529 panic("vnode_iterate_prepare: newvnode when entering vnode");
530 TAILQ_INIT(&mp->mnt_newvnodes);
531
532 return (1);
533 }
534
535
536 /* called with mount lock held */
537 int
538 vnode_iterate_reloadq(mount_t mp)
539 {
540 int moved = 0;
541
542 /* add the remaining entries in workerq to the end of mount vnode list */
543 if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
544 struct vnode * mvp;
545 mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);
546
547 /* Join the workerqueue entries to the mount vnode list */
548 if (mvp)
549 mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
550 else
551 mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
552 mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
553 mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
554 TAILQ_INIT(&mp->mnt_workerqueue);
555 }
556
557 /* add the newvnodes to the head of mount vnode list */
558 if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
559 struct vnode * nlvp;
560 nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);
561
562 mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
563 nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
564 if(mp->mnt_vnodelist.tqh_first)
565 mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
566 else
567 mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
568 mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
569 TAILQ_INIT(&mp->mnt_newvnodes);
570 moved = 1;
571 }
572
573 return(moved);
574 }
575
576
577 void
578 vnode_iterate_clear(mount_t mp)
579 {
580 mp->mnt_lflag &= ~MNT_LITER;
581 if (mp->mnt_lflag & MNT_LITERWAIT) {
582 mp->mnt_lflag &= ~MNT_LITERWAIT;
583 wakeup(mp);
584 }
585 }
586
587
588 int
589 vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
590 void *arg)
591 {
592 struct vnode *vp;
593 int vid, retval;
594 int ret = 0;
595
596 mount_lock(mp);
597
598 vnode_iterate_setup(mp);
599
600 /* if it returns 0 then there is nothing to do */
601 retval = vnode_iterate_prepare(mp);
602
603 if (retval == 0) {
604 vnode_iterate_clear(mp);
605 mount_unlock(mp);
606 return(ret);
607 }
608
609 /* iterate over all the vnodes */
610 while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
611 vp = TAILQ_FIRST(&mp->mnt_workerqueue);
612 TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
613 TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
614 vid = vp->v_id;
615 if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
616 continue;
617 }
618 mount_unlock(mp);
619
620 if ( vget_internal(vp, vid, (flags | VNODE_NODEAD| VNODE_WITHID | VNODE_NOSUSPEND))) {
621 mount_lock(mp);
622 continue;
623 }
624 if (flags & VNODE_RELOAD) {
625 /*
626 * we're reloading the filesystem
627 * cast out any inactive vnodes...
628 */
629 if (vnode_reload(vp)) {
630 /* vnode will be recycled on the refcount drop */
631 vnode_put(vp);
632 mount_lock(mp);
633 continue;
634 }
635 }
636
637 retval = callout(vp, arg);
638
639 switch (retval) {
640 case VNODE_RETURNED:
641 case VNODE_RETURNED_DONE:
642 vnode_put(vp);
643 if (retval == VNODE_RETURNED_DONE) {
644 mount_lock(mp);
645 ret = 0;
646 goto out;
647 }
648 break;
649
650 case VNODE_CLAIMED_DONE:
651 mount_lock(mp);
652 ret = 0;
653 goto out;
654 case VNODE_CLAIMED:
655 default:
656 break;
657 }
658 mount_lock(mp);
659 }
660
661 out:
662 (void)vnode_iterate_reloadq(mp);
663 vnode_iterate_clear(mp);
664 mount_unlock(mp);
665 return (ret);
666 }
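
/*
 * Illustrative sketch (not part of the original source): a minimal
 * vnode_iterate() callout.  The callout runs with an iocount held on the
 * vnode; returning VNODE_RETURNED tells vnode_iterate() to drop that
 * iocount, while VNODE_CLAIMED means the callout took ownership of it.
 * The "_DONE" variants additionally stop the iteration.
 */
#if 0
static int
example_count_regular(struct vnode *vp, void *arg)
{
	int *countp = (int *)arg;

	if (vp->v_type == VREG)
		(*countp)++;
	return (VNODE_RETURNED);
}

static int
example_count_mount(mount_t mp)
{
	int count = 0;

	(void) vnode_iterate(mp, 0, example_count_regular, &count);
	return (count);
}
#endif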
667
668 void
669 mount_lock_renames(mount_t mp)
670 {
671 lck_mtx_lock(&mp->mnt_renamelock);
672 }
673
674 void
675 mount_unlock_renames(mount_t mp)
676 {
677 lck_mtx_unlock(&mp->mnt_renamelock);
678 }
679
680 void
681 mount_lock(mount_t mp)
682 {
683 lck_mtx_lock(&mp->mnt_mlock);
684 }
685
686 void
687 mount_lock_spin(mount_t mp)
688 {
689 lck_mtx_lock_spin(&mp->mnt_mlock);
690 }
691
692 void
693 mount_unlock(mount_t mp)
694 {
695 lck_mtx_unlock(&mp->mnt_mlock);
696 }
697
698
699 void
700 mount_ref(mount_t mp, int locked)
701 {
702 if ( !locked)
703 mount_lock_spin(mp);
704
705 mp->mnt_count++;
706
707 if ( !locked)
708 mount_unlock(mp);
709 }
710
711
712 void
713 mount_drop(mount_t mp, int locked)
714 {
715 if ( !locked)
716 mount_lock_spin(mp);
717
718 mp->mnt_count--;
719
720 if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN))
721 wakeup(&mp->mnt_lflag);
722
723 if ( !locked)
724 mount_unlock(mp);
725 }
726
727
728 int
729 mount_iterref(mount_t mp, int locked)
730 {
731 int retval = 0;
732
733 if (!locked)
734 mount_list_lock();
735 if (mp->mnt_iterref < 0) {
736 retval = 1;
737 } else {
738 mp->mnt_iterref++;
739 }
740 if (!locked)
741 mount_list_unlock();
742 return(retval);
743 }
744
745 int
746 mount_isdrained(mount_t mp, int locked)
747 {
748 int retval;
749
750 if (!locked)
751 mount_list_lock();
752 if (mp->mnt_iterref < 0)
753 retval = 1;
754 else
755 retval = 0;
756 if (!locked)
757 mount_list_unlock();
758 return(retval);
759 }
760
761 void
762 mount_iterdrop(mount_t mp)
763 {
764 mount_list_lock();
765 mp->mnt_iterref--;
766 wakeup(&mp->mnt_iterref);
767 mount_list_unlock();
768 }
769
770 void
771 mount_iterdrain(mount_t mp)
772 {
773 mount_list_lock();
774 while (mp->mnt_iterref)
775 msleep((caddr_t)&mp->mnt_iterref, mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL);
776 /* mount iterations drained */
777 mp->mnt_iterref = -1;
778 mount_list_unlock();
779 }
780 void
781 mount_iterreset(mount_t mp)
782 {
783 mount_list_lock();
784 if (mp->mnt_iterref == -1)
785 mp->mnt_iterref = 0;
786 mount_list_unlock();
787 }
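
/*
 * Illustrative sketch (not part of the original source): the
 * mount_iterref()/mount_iterdrop() bracket that keeps a mount from being
 * drained (mnt_iterref forced to -1 by mount_iterdrain) while it is being
 * examined outside the mount list lock.
 */
#if 0
static void
example_examine_mount(mount_t mp)
{
	if (mount_iterref(mp, 0))
		return;			/* mount is being drained/unmounted */

	/* ... safely look at the mount here ... */

	mount_iterdrop(mp);		/* wake mount_iterdrain() if it is waiting */
}
#endif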
788
789 /* always called with mount lock held */
790 int
791 mount_refdrain(mount_t mp)
792 {
793 if (mp->mnt_lflag & MNT_LDRAIN)
794 panic("already in drain");
795 mp->mnt_lflag |= MNT_LDRAIN;
796
797 while (mp->mnt_count)
798 msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL);
799
800 if (mp->mnt_vnodelist.tqh_first != NULL)
801 panic("mount_refdrain: dangling vnode");
802
803 mp->mnt_lflag &= ~MNT_LDRAIN;
804
805 return(0);
806 }
807
808 /* Tags the mount point as not supporting extended readdir for NFS exports */
809 void
810 mount_set_noreaddirext(mount_t mp) {
811 mount_lock (mp);
812 mp->mnt_kern_flag |= MNTK_DENY_READDIREXT;
813 mount_unlock (mp);
814 }
815
816 /*
817 * Mark a mount point as busy. Used to synchronize access and to delay
818 * unmounting.
819 */
820 int
821 vfs_busy(mount_t mp, int flags)
822 {
823
824 restart:
825 if (mp->mnt_lflag & MNT_LDEAD)
826 return(ENOENT);
827
828 if (mp->mnt_lflag & MNT_LUNMOUNT) {
829 if (flags & LK_NOWAIT)
830 return (ENOENT);
831
832 mount_lock(mp);
833
834 if (mp->mnt_lflag & MNT_LDEAD) {
835 mount_unlock(mp);
836 return(ENOENT);
837 }
838 if (mp->mnt_lflag & MNT_LUNMOUNT) {
839 mp->mnt_lflag |= MNT_LWAIT;
840 /*
841 * Since all busy locks are shared except the exclusive
842 * lock granted when unmounting, the only place that a
843 * wakeup needs to be done is at the release of the
844 * exclusive lock at the end of dounmount.
845 */
846 msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
847 return (ENOENT);
848 }
849 mount_unlock(mp);
850 }
851
852 lck_rw_lock_shared(&mp->mnt_rwlock);
853
854 /*
855 * until we are granted the rwlock, it's possible for the mount point to
856 * change state, so reevaluate before granting the vfs_busy
857 */
858 if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
859 lck_rw_done(&mp->mnt_rwlock);
860 goto restart;
861 }
862 return (0);
863 }
864
865 /*
866 * Free a busy filesystem.
867 */
868
869 void
870 vfs_unbusy(mount_t mp)
871 {
872 lck_rw_done(&mp->mnt_rwlock);
873 }
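
/*
 * Illustrative sketch (not part of the original source): holding off an
 * unmount for the duration of an operation by bracketing it with
 * vfs_busy()/vfs_unbusy().  LK_NOWAIT makes vfs_busy() fail instead of
 * sleeping if the mount is already being unmounted.
 */
#if 0
static int
example_with_mount_busy(mount_t mp)
{
	int error;

	if ((error = vfs_busy(mp, LK_NOWAIT)))
		return (error);		/* mount is dead or unmounting */

	/* ... operate on the mount ... */

	vfs_unbusy(mp);
	return (0);
}
#endif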
874
875
876
877 static void
878 vfs_rootmountfailed(mount_t mp) {
879
880 mount_list_lock();
881 mp->mnt_vtable->vfc_refcount--;
882 mount_list_unlock();
883
884 vfs_unbusy(mp);
885
886 mount_lock_destroy(mp);
887
888 #if CONFIG_MACF
889 mac_mount_label_destroy(mp);
890 #endif
891
892 FREE_ZONE(mp, sizeof(struct mount), M_MOUNT);
893 }
894
895 /*
896 * Lookup a filesystem type, and if found allocate and initialize
897 * a mount structure for it.
898 *
899 * Devname is usually updated by mount(8) after booting.
900 */
901 static mount_t
902 vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname)
903 {
904 mount_t mp;
905
906 mp = _MALLOC_ZONE(sizeof(struct mount), M_MOUNT, M_WAITOK);
907 bzero((char *)mp, sizeof(struct mount));
908
909 /* Initialize the default IO constraints */
910 mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
911 mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
912 mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
913 mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
914 mp->mnt_devblocksize = DEV_BSIZE;
915 mp->mnt_alignmentmask = PAGE_MASK;
916 mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
917 mp->mnt_ioscale = 1;
918 mp->mnt_ioflags = 0;
919 mp->mnt_realrootvp = NULLVP;
920 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
921 mp->mnt_throttle_mask = LOWPRI_MAX_NUM_DEV - 1;
922 mp->mnt_devbsdunit = 0;
923
924 mount_lock_init(mp);
925 (void)vfs_busy(mp, LK_NOWAIT);
926
927 TAILQ_INIT(&mp->mnt_vnodelist);
928 TAILQ_INIT(&mp->mnt_workerqueue);
929 TAILQ_INIT(&mp->mnt_newvnodes);
930
931 mp->mnt_vtable = vfsp;
932 mp->mnt_op = vfsp->vfc_vfsops;
933 mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS;
934 mp->mnt_vnodecovered = NULLVP;
935 //mp->mnt_stat.f_type = vfsp->vfc_typenum;
936 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
937
938 mount_list_lock();
939 vfsp->vfc_refcount++;
940 mount_list_unlock();
941
942 strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
943 mp->mnt_vfsstat.f_mntonname[0] = '/';
944 /* XXX const poisoning layering violation */
945 (void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);
946
947 #if CONFIG_MACF
948 mac_mount_label_init(mp);
949 mac_mount_label_associate(vfs_context_kernel(), mp);
950 #endif
951 return (mp);
952 }
953
954 errno_t
955 vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp)
956 {
957 struct vfstable *vfsp;
958
959 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
960 if (!strncmp(vfsp->vfc_name, fstypename,
961 sizeof(vfsp->vfc_name)))
962 break;
963 if (vfsp == NULL)
964 return (ENODEV);
965
966 *mpp = vfs_rootmountalloc_internal(vfsp, devname);
967
968 if (*mpp)
969 return (0);
970
971 return (ENOMEM);
972 }
973
974
975 /*
976 * Find an appropriate filesystem to use for the root. If a filesystem
977 * has not been preselected, walk through the list of known filesystems
978 * trying those that have mountroot routines, and try them until one
979 * works or we have tried them all.
980 */
981 extern int (*mountroot)(void);
982
983 int
984 vfs_mountroot(void)
985 {
986 #if CONFIG_MACF
987 struct vnode *vp;
988 #endif
989 struct vfstable *vfsp;
990 vfs_context_t ctx = vfs_context_kernel();
991 struct vfs_attr vfsattr;
992 int error;
993 mount_t mp;
994 vnode_t bdevvp_rootvp;
995
996 if (mountroot != NULL) {
997 /*
998 * used for netboot which follows a different set of rules
999 */
1000 error = (*mountroot)();
1001 return (error);
1002 }
1003 if ((error = bdevvp(rootdev, &rootvp))) {
1004 printf("vfs_mountroot: can't setup bdevvp\n");
1005 return (error);
1006 }
1007 /*
1008 * 4951998 - code we call in vfc_mountroot may replace rootvp
1009 * so keep a local copy for some housekeeping.
1010 */
1011 bdevvp_rootvp = rootvp;
1012
1013 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
1014 if (vfsp->vfc_mountroot == NULL)
1015 continue;
1016
1017 mp = vfs_rootmountalloc_internal(vfsp, "root_device");
1018 mp->mnt_devvp = rootvp;
1019
1020 if ((error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx)) == 0) {
1021 if ( bdevvp_rootvp != rootvp ) {
1022 /*
1023 * rootvp changed...
1024 * bump the iocount and fix up mnt_devvp for the
1025 * new rootvp (it will already have a usecount taken)...
1026 * drop the iocount and the usecount on the original
1027 * since we are no longer going to use it...
1028 */
1029 vnode_getwithref(rootvp);
1030 mp->mnt_devvp = rootvp;
1031
1032 vnode_rele(bdevvp_rootvp);
1033 vnode_put(bdevvp_rootvp);
1034 }
1035 mp->mnt_devvp->v_specflags |= SI_MOUNTEDON;
1036
1037 vfs_unbusy(mp);
1038
1039 mount_list_add(mp);
1040
1041 /*
1042 * cache the IO attributes for the underlying physical media...
1043 * an error return indicates the underlying driver doesn't
1044 * support all the queries necessary... however, reasonable
1045 * defaults will have been set, so no reason to bail or care
1046 */
1047 vfs_init_io_attributes(rootvp, mp);
1048
1049 /*
1050 * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
1051 */
1052 if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
1053 mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
1054 }
1055 if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) {
1056 mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
1057 }
1058
1059 /*
1060 * Probe root file system for additional features.
1061 */
1062 (void)VFS_START(mp, 0, ctx);
1063
1064 VFSATTR_INIT(&vfsattr);
1065 VFSATTR_WANTED(&vfsattr, f_capabilities);
1066 if (vfs_getattr(mp, &vfsattr, ctx) == 0 &&
1067 VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
1068 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
1069 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
1070 mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
1071 }
1072 #if NAMEDSTREAMS
1073 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) &&
1074 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) {
1075 mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
1076 }
1077 #endif
1078 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) &&
1079 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) {
1080 mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
1081 }
1082 }
1083
1084 /*
1085 * get rid of iocount reference returned
1086 * by bdevvp (or picked up by us on the substituted
1087 * rootvp)... it (or we) will have also taken
1088 * a usecount reference which we want to keep
1089 */
1090 vnode_put(rootvp);
1091
1092 #if CONFIG_MACF
1093 if ((vfs_flags(mp) & MNT_MULTILABEL) == 0)
1094 return (0);
1095
1096 error = VFS_ROOT(mp, &vp, ctx);
1097 if (error) {
1098 printf("%s() VFS_ROOT() returned %d\n",
1099 __func__, error);
1100 dounmount(mp, MNT_FORCE, 0, ctx);
1101 goto fail;
1102 }
1103 error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
1104 /*
1105 * get rid of reference provided by VFS_ROOT
1106 */
1107 vnode_put(vp);
1108
1109 if (error) {
1110 printf("%s() vnode_label() returned %d\n",
1111 __func__, error);
1112 dounmount(mp, MNT_FORCE, 0, ctx);
1113 goto fail;
1114 }
1115 #endif
1116 return (0);
1117 }
1118 #if CONFIG_MACF
1119 fail:
1120 #endif
1121 vfs_rootmountfailed(mp);
1122
1123 if (error != EINVAL)
1124 printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
1125 }
1126 return (ENODEV);
1127 }
1128
1129 /*
1130 * Lookup a mount point by filesystem identifier.
1131 */
1132
1133 struct mount *
1134 vfs_getvfs(fsid_t *fsid)
1135 {
1136 return (mount_list_lookupby_fsid(fsid, 0, 0));
1137 }
1138
1139 static struct mount *
1140 vfs_getvfs_locked(fsid_t *fsid)
1141 {
1142 return(mount_list_lookupby_fsid(fsid, 1, 0));
1143 }
1144
1145 struct mount *
1146 vfs_getvfs_by_mntonname(char *path)
1147 {
1148 mount_t retmp = (mount_t)0;
1149 mount_t mp;
1150
1151 mount_list_lock();
1152 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1153 if (!strncmp(mp->mnt_vfsstat.f_mntonname, path,
1154 sizeof(mp->mnt_vfsstat.f_mntonname))) {
1155 retmp = mp;
1156 if (mount_iterref(retmp, 1))
1157 retmp = NULL;
1158 goto out;
1159 }
1160 }
1161 out:
1162 mount_list_unlock();
1163 return (retmp);
1164 }
1165
1166 /* generation number for creation of new fsids */
1167 u_short mntid_gen = 0;
1168 /*
1169 * Get a new unique fsid
1170 */
1171 void
1172 vfs_getnewfsid(struct mount *mp)
1173 {
1174
1175 fsid_t tfsid;
1176 int mtype;
1177 mount_t nmp;
1178
1179 mount_list_lock();
1180
1181 /* generate a new fsid */
1182 mtype = mp->mnt_vtable->vfc_typenum;
1183 if (++mntid_gen == 0)
1184 mntid_gen++;
1185 tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
1186 tfsid.val[1] = mtype;
1187
1188 TAILQ_FOREACH(nmp, &mountlist, mnt_list) {
1189 while (vfs_getvfs_locked(&tfsid)) {
1190 if (++mntid_gen == 0)
1191 mntid_gen++;
1192 tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
1193 }
1194 }
1195 mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0];
1196 mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1];
1197 mount_list_unlock();
1198 }
1199
1200 /*
1201 * Routines having to do with the management of the vnode table.
1202 */
1203 extern int (**dead_vnodeop_p)(void *);
1204 long numvnodes, freevnodes, deadvnodes;
1205
1206
1207 /*
1208 * Move a vnode from one mount queue to another.
1209 */
1210 static void
1211 insmntque(vnode_t vp, mount_t mp)
1212 {
1213 mount_t lmp;
1214 /*
1215 * Delete from old mount point vnode list, if on one.
1216 */
1217 if ( (lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
1218 if ((vp->v_lflag & VNAMED_MOUNT) == 0)
1219 panic("insmntque: vp not in mount vnode list");
1220 vp->v_lflag &= ~VNAMED_MOUNT;
1221
1222 mount_lock_spin(lmp);
1223
1224 mount_drop(lmp, 1);
1225
1226 if (vp->v_mntvnodes.tqe_next == NULL) {
1227 if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp)
1228 TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
1229 else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp)
1230 TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
1231 else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp)
1232 TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
1233 } else {
1234 vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
1235 *vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
1236 }
1237 vp->v_mntvnodes.tqe_next = NULL;
1238 vp->v_mntvnodes.tqe_prev = NULL;
1239 mount_unlock(lmp);
1240 return;
1241 }
1242
1243 /*
1244 * Insert into list of vnodes for the new mount point, if available.
1245 */
1246 if ((vp->v_mount = mp) != NULL) {
1247 mount_lock_spin(mp);
1248 if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0))
1249 panic("vp already in mount list");
1250 if (mp->mnt_lflag & MNT_LITER)
1251 TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
1252 else
1253 TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
1254 if (vp->v_lflag & VNAMED_MOUNT)
1255 panic("insmntque: vp already in mount vnode list");
1256 vp->v_lflag |= VNAMED_MOUNT;
1257 mount_ref(mp, 1);
1258 mount_unlock(mp);
1259 }
1260 }
1261
1262
1263 /*
1264 * Create a vnode for a block device.
1265 * Used for root filesystem, argdev, and swap areas.
1266 * Also used for memory file system special devices.
1267 */
1268 int
1269 bdevvp(dev_t dev, vnode_t *vpp)
1270 {
1271 vnode_t nvp;
1272 int error;
1273 struct vnode_fsparam vfsp;
1274 struct vfs_context context;
1275
1276 if (dev == NODEV) {
1277 *vpp = NULLVP;
1278 return (ENODEV);
1279 }
1280
1281 context.vc_thread = current_thread();
1282 context.vc_ucred = FSCRED;
1283
1284 vfsp.vnfs_mp = (struct mount *)0;
1285 vfsp.vnfs_vtype = VBLK;
1286 vfsp.vnfs_str = "bdevvp";
1287 vfsp.vnfs_dvp = NULL;
1288 vfsp.vnfs_fsnode = NULL;
1289 vfsp.vnfs_cnp = NULL;
1290 vfsp.vnfs_vops = spec_vnodeop_p;
1291 vfsp.vnfs_rdev = dev;
1292 vfsp.vnfs_filesize = 0;
1293
1294 vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;
1295
1296 vfsp.vnfs_marksystem = 0;
1297 vfsp.vnfs_markroot = 0;
1298
1299 if ( (error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp)) ) {
1300 *vpp = NULLVP;
1301 return (error);
1302 }
1303 vnode_lock_spin(nvp);
1304 nvp->v_flag |= VBDEVVP;
1305 nvp->v_tag = VT_NON; /* set this to VT_NON so during aliasing it can be replaced */
1306 vnode_unlock(nvp);
1307 if ( (error = vnode_ref(nvp)) ) {
1308 panic("bdevvp failed: vnode_ref");
1309 return (error);
1310 }
1311 if ( (error = VNOP_FSYNC(nvp, MNT_WAIT, &context)) ) {
1312 panic("bdevvp failed: fsync");
1313 return (error);
1314 }
1315 if ( (error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0)) ) {
1316 panic("bdevvp failed: invalidateblks");
1317 return (error);
1318 }
1319
1320 #if CONFIG_MACF
1321 /*
1322 * XXXMAC: We can't put a MAC check here, the system will
1323 * panic without this vnode.
1324 */
1325 #endif /* MAC */
1326
1327 if ( (error = VNOP_OPEN(nvp, FREAD, &context)) ) {
1328 panic("bdevvp failed: open");
1329 return (error);
1330 }
1331 *vpp = nvp;
1332
1333 return (0);
1334 }
1335
1336 /*
1337 * Check to see if the new vnode represents a special device
1338 * for which we already have a vnode (either because of
1339 * bdevvp() or because of a different vnode representing
1340 * the same block device). If such an alias exists, deallocate
1341 * the existing contents and return the aliased vnode. The
1342 * caller is responsible for filling it with its new contents.
1343 */
1344 static vnode_t
1345 checkalias(struct vnode *nvp, dev_t nvp_rdev)
1346 {
1347 struct vnode *vp;
1348 struct vnode **vpp;
1349 struct specinfo *sin = NULL;
1350 int vid = 0;
1351
1352 vpp = &speclisth[SPECHASH(nvp_rdev)];
1353 loop:
1354 SPECHASH_LOCK();
1355
1356 for (vp = *vpp; vp; vp = vp->v_specnext) {
1357 if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
1358 vid = vp->v_id;
1359 break;
1360 }
1361 }
1362 SPECHASH_UNLOCK();
1363
1364 if (vp) {
1365 found_alias:
1366 if (vnode_getwithvid(vp,vid)) {
1367 goto loop;
1368 }
1369 /*
1370 * Termination state is checked in vnode_getwithvid
1371 */
1372 vnode_lock(vp);
1373
1374 /*
1375 * Alias, but not in use, so flush it out.
1376 */
1377 if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
1378 vnode_reclaim_internal(vp, 1, 1, 0);
1379 vnode_put_locked(vp);
1380 vnode_unlock(vp);
1381 goto loop;
1382 }
1383
1384 }
1385 if (vp == NULL || vp->v_tag != VT_NON) {
1386 if (sin == NULL) {
1387 MALLOC_ZONE(sin, struct specinfo *, sizeof(struct specinfo),
1388 M_SPECINFO, M_WAITOK);
1389 }
1390
1391 nvp->v_specinfo = sin;
1392 bzero(nvp->v_specinfo, sizeof(struct specinfo));
1393 nvp->v_rdev = nvp_rdev;
1394 nvp->v_specflags = 0;
1395 nvp->v_speclastr = -1;
1396 nvp->v_specinfo->si_opencount = 0;
1397
1398 SPECHASH_LOCK();
1399
1400 /* We dropped the lock; someone could have added an alias in the meantime */
1401 if (vp == NULLVP) {
1402 for (vp = *vpp; vp; vp = vp->v_specnext) {
1403 if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
1404 vid = vp->v_id;
1405 SPECHASH_UNLOCK();
1406 goto found_alias;
1407 }
1408 }
1409 }
1410
1411 nvp->v_hashchain = vpp;
1412 nvp->v_specnext = *vpp;
1413 *vpp = nvp;
1414
1415 if (vp != NULLVP) {
1416 nvp->v_specflags |= SI_ALIASED;
1417 vp->v_specflags |= SI_ALIASED;
1418 SPECHASH_UNLOCK();
1419 vnode_put_locked(vp);
1420 vnode_unlock(vp);
1421 } else {
1422 SPECHASH_UNLOCK();
1423 }
1424
1425 return (NULLVP);
1426 }
1427
1428 if (sin) {
1429 FREE_ZONE(sin, sizeof(struct specinfo), M_SPECINFO);
1430 }
1431
1432 if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0)
1433 return(vp);
1434
1435 panic("checkalias with VT_NON vp that shouldn't: %p", vp);
1436
1437 return (vp);
1438 }
1439
1440
1441 /*
1442 * Get a reference on a particular vnode and lock it if requested.
1443 * If the vnode was on the inactive list, remove it from the list.
1444 * If the vnode was on the free list, remove it from the list and
1445 * move it to inactive list as needed.
1446 * The vnode lock bit is set if the vnode is being eliminated in
1447 * vgone. The process is awakened when the transition is completed,
1448 * and an error returned to indicate that the vnode is no longer
1449 * usable (possibly having been changed to a new file system type).
1450 */
1451 int
1452 vget_internal(vnode_t vp, int vid, int vflags)
1453 {
1454 int error = 0;
1455
1456 vnode_lock_spin(vp);
1457
1458 if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0))
1459 /*
1460 * vnode to be returned only if it has writers opened
1461 */
1462 error = EINVAL;
1463 else
1464 error = vnode_getiocount(vp, vid, vflags);
1465
1466 vnode_unlock(vp);
1467
1468 return (error);
1469 }
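
/*
 * Illustrative sketch (not part of the original source): the v_id
 * revalidation pattern used throughout this file.  The vnode id is sampled
 * while a lock pins the identity, the lock is dropped, and the later
 * vget_internal() both takes an iocount and confirms the vnode was not
 * recycled in the meantime.
 */
#if 0
static int
example_get_stable_ref(vnode_t vp)
{
	int vid = vp->v_id;	/* sampled while the identity is pinned */

	/* locks may be dropped here... */

	if (vget_internal(vp, vid, VNODE_NODEAD | VNODE_WITHID | VNODE_NOSUSPEND))
		return (ENOENT);	/* vnode was recycled or is dead */

	/* ... use the vnode with an iocount held ... */

	vnode_put(vp);
	return (0);
}
#endif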
1470
1471 /*
1472 * Returns: 0 Success
1473 * ENOENT No such file or directory [terminating]
1474 */
1475 int
1476 vnode_ref(vnode_t vp)
1477 {
1478
1479 return (vnode_ref_ext(vp, 0, 0));
1480 }
1481
1482 /*
1483 * Returns: 0 Success
1484 * ENOENT No such file or directory [terminating]
1485 */
1486 int
1487 vnode_ref_ext(vnode_t vp, int fmode, int flags)
1488 {
1489 int error = 0;
1490
1491 vnode_lock_spin(vp);
1492
1493 /*
1494 * once all the current call sites have been fixed to ensure they have
1495 * taken an iocount, we can toughen this assert up and insist that the
1496 * iocount is non-zero... a non-zero usecount doesn't ensure correctness
1497 */
1498 if (vp->v_iocount <= 0 && vp->v_usecount <= 0)
1499 panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);
1500
1501 /*
1502 * if you are the owner of the drain/termination, you can acquire a usecount
1503 */
1504 if ((flags & VNODE_REF_FORCE) == 0) {
1505 if ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) {
1506 if (vp->v_owner != current_thread()) {
1507 error = ENOENT;
1508 goto out;
1509 }
1510 }
1511 }
1512 vp->v_usecount++;
1513
1514 if (fmode & FWRITE) {
1515 if (++vp->v_writecount <= 0)
1516 panic("vnode_ref_ext: v_writecount");
1517 }
1518 if (fmode & O_EVTONLY) {
1519 if (++vp->v_kusecount <= 0)
1520 panic("vnode_ref_ext: v_kusecount");
1521 }
1522 if (vp->v_flag & VRAGE) {
1523 struct uthread *ut;
1524
1525 ut = get_bsdthread_info(current_thread());
1526
1527 if ( !(current_proc()->p_lflag & P_LRAGE_VNODES) &&
1528 !(ut->uu_flag & UT_RAGE_VNODES)) {
1529 /*
1530 * a 'normal' process accessed this vnode
1531 * so make sure it's no longer marked
1532 * for rapid aging... also, make sure
1533 * it gets removed from the rage list...
1534 * when v_usecount drops back to 0, it
1535 * will be put back on the real free list
1536 */
1537 vp->v_flag &= ~VRAGE;
1538 vp->v_references = 0;
1539 vnode_list_remove(vp);
1540 }
1541 }
1542 if (vp->v_usecount == 1 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
1543
1544 if (vp->v_ubcinfo) {
1545 vnode_lock_convert(vp);
1546 memory_object_mark_used(vp->v_ubcinfo->ui_control);
1547 }
1548 }
1549 out:
1550 vnode_unlock(vp);
1551
1552 return (error);
1553 }
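
/*
 * Illustrative sketch (not part of the original source): converting a
 * short-term iocount into a long-term usecount.  vnode_ref() must be called
 * with an existing reference (normally an iocount) on the vnode, and every
 * successful vnode_ref() must be balanced by a vnode_rele().
 */
#if 0
static int
example_hold_vnode(vnode_t vp)
{
	int error;

	/* caller is assumed to hold an iocount on vp (e.g. from vnode_getwithref) */
	if ((error = vnode_ref(vp)) == 0) {
		/* ... keep vp around across operations ... */
		vnode_rele(vp);
	}
	return (error);
}
#endif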
1554
1555
1556 /*
1557 * put the vnode on appropriate free list.
1558 * called with vnode LOCKED
1559 */
1560 static void
1561 vnode_list_add(vnode_t vp)
1562 {
1563 #if DIAGNOSTIC
1564 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1565 #endif
1566 /*
1567 * if it is already on a list or has non-zero references, return
1568 */
1569 if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE))
1570 return;
1571
1572 vnode_list_lock();
1573
1574 if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
1575 /*
1576 * add the new guy to the appropriate end of the RAGE list
1577 */
1578 if ((vp->v_flag & VAGE))
1579 TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
1580 else
1581 TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);
1582
1583 vp->v_listflag |= VLIST_RAGE;
1584 ragevnodes++;
1585
1586 /*
1587 * reset the timestamp for the last inserted vp on the RAGE
1588 * queue to let new_vnode know that it's not ok to start stealing
1589 * from this list... as long as we're actively adding to this list
1590 * we'll push out the vnodes we want to donate to the real free list
1591 * once we stop pushing, we'll let some time elapse before we start
1592 * stealing them in the new_vnode routine
1593 */
1594 microuptime(&rage_tv);
1595 } else {
1596 /*
1597 * if VL_DEAD, insert it at head of the dead list
1598 * else insert at tail of LRU list or at head if VAGE is set
1599 */
1600 if ( (vp->v_lflag & VL_DEAD)) {
1601 TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
1602 vp->v_listflag |= VLIST_DEAD;
1603 deadvnodes++;
1604 } else if ((vp->v_flag & VAGE)) {
1605 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1606 vp->v_flag &= ~VAGE;
1607 freevnodes++;
1608 } else {
1609 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1610 freevnodes++;
1611 }
1612 }
1613 vnode_list_unlock();
1614 }
1615
1616
1617 /*
1618 * remove the vnode from appropriate free list.
1619 * called with vnode LOCKED and
1620 * the list lock held
1621 */
1622 static void
1623 vnode_list_remove_locked(vnode_t vp)
1624 {
1625 if (VONLIST(vp)) {
1626 /*
1627 * the v_listflag field is
1628 * protected by the vnode_list_lock
1629 */
1630 if (vp->v_listflag & VLIST_RAGE)
1631 VREMRAGE("vnode_list_remove", vp);
1632 else if (vp->v_listflag & VLIST_DEAD)
1633 VREMDEAD("vnode_list_remove", vp);
1634 else
1635 VREMFREE("vnode_list_remove", vp);
1636 }
1637 }
1638
1639
1640 /*
1641 * remove the vnode from appropriate free list.
1642 * called with vnode LOCKED
1643 */
1644 static void
1645 vnode_list_remove(vnode_t vp)
1646 {
1647 #if DIAGNOSTIC
1648 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1649 #endif
1650 /*
1651 * we want to avoid taking the list lock
1652 * in the case where we're not on the free
1653 * list... this will be true for most
1654 * directories and any currently in use files
1655 *
1656 * we're guaranteed that we can't go from
1657 * the not-on-list state to the on-list
1658 * state since we hold the vnode lock...
1659 * all calls to vnode_list_add are done
1660 * under the vnode lock... so we can
1661 * check for that condition (the prevalent one)
1662 * without taking the list lock
1663 */
1664 if (VONLIST(vp)) {
1665 vnode_list_lock();
1666 /*
1667 * however, we're not guaranteed that
1668 * we won't go from the on-list state
1669 * to the not-on-list state until we
1670 * hold the vnode_list_lock... this
1671 * is due to "new_vnode" removing vnodes
1672 * from the free list under the list_lock
1673 * w/o the vnode lock... so we need to
1674 * check again whether we're currently
1675 * on the free list
1676 */
1677 vnode_list_remove_locked(vp);
1678
1679 vnode_list_unlock();
1680 }
1681 }
1682
1683
1684 void
1685 vnode_rele(vnode_t vp)
1686 {
1687 vnode_rele_internal(vp, 0, 0, 0);
1688 }
1689
1690
1691 void
1692 vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
1693 {
1694 vnode_rele_internal(vp, fmode, dont_reenter, 0);
1695 }
1696
1697
1698 void
1699 vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
1700 {
1701
1702 if ( !locked)
1703 vnode_lock_spin(vp);
1704 #if DIAGNOSTIC
1705 else
1706 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1707 #endif
1708 if (--vp->v_usecount < 0)
1709 panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
1710
1711 if (fmode & FWRITE) {
1712 if (--vp->v_writecount < 0)
1713 panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
1714 }
1715 if (fmode & O_EVTONLY) {
1716 if (--vp->v_kusecount < 0)
1717 panic("vnode_rele_ext: vp %p kusecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
1718 }
1719 if (vp->v_kusecount > vp->v_usecount)
1720 panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). v_tag = %d, v_type = %d, v_flag = %x.",vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
1721
1722 if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
1723 /*
1724 * vnode is still busy... if we're the last
1725 * usecount, mark for a future call to VNOP_INACTIVE
1726 * when the iocount finally drops to 0
1727 */
1728 if (vp->v_usecount == 0) {
1729 vp->v_lflag |= VL_NEEDINACTIVE;
1730 vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
1731 }
1732 goto done;
1733 }
1734 vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
1735
1736 if ( (vp->v_lflag & (VL_TERMINATE | VL_DEAD)) || dont_reenter) {
1737 /*
1738 * vnode is being cleaned, or
1739 * we've requested that we don't reenter
1740 * the filesystem on this release... in
1741 * this case, we'll mark the vnode aged
1742 * if it's been marked for termination
1743 */
1744 if (dont_reenter) {
1745 if ( !(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM)) )
1746 vp->v_lflag |= VL_NEEDINACTIVE;
1747 vp->v_flag |= VAGE;
1748 }
1749 vnode_list_add(vp);
1750
1751 goto done;
1752 }
1753 /*
1754 * at this point both the iocount and usecount
1755 * are zero
1756 * pick up an iocount so that we can call
1757 * VNOP_INACTIVE with the vnode lock unheld
1758 */
1759 vp->v_iocount++;
1760 #ifdef JOE_DEBUG
1761 record_vp(vp, 1);
1762 #endif
1763 vp->v_lflag &= ~VL_NEEDINACTIVE;
1764 vnode_unlock(vp);
1765
1766 VNOP_INACTIVE(vp, vfs_context_current());
1767
1768 vnode_lock_spin(vp);
1769 /*
1770 * because we dropped the vnode lock to call VNOP_INACTIVE
1771 * the state of the vnode may have changed... we may have
1772 * picked up an iocount, usecount or the MARKTERM may have
1773 * been set... we need to reevaluate the reference counts
1774 * to determine if we can call vnode_reclaim_internal at
1775 * this point... if the reference counts are up, we'll pick
1776 * up the MARKTERM state when they get subsequently dropped
1777 */
1778 if ( (vp->v_iocount == 1) && (vp->v_usecount == 0) &&
1779 ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
1780 struct uthread *ut;
1781
1782 ut = get_bsdthread_info(current_thread());
1783
1784 if (ut->uu_defer_reclaims) {
1785 vp->v_defer_reclaimlist = ut->uu_vreclaims;
1786 ut->uu_vreclaims = vp;
1787 goto done;
1788 }
1789 vnode_lock_convert(vp);
1790 vnode_reclaim_internal(vp, 1, 1, 0);
1791 }
1792 vnode_dropiocount(vp);
1793 vnode_list_add(vp);
1794 done:
1795 if (vp->v_usecount == 0 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
1796
1797 if (vp->v_ubcinfo) {
1798 vnode_lock_convert(vp);
1799 memory_object_mark_unused(vp->v_ubcinfo->ui_control, (vp->v_flag & VRAGE) == VRAGE);
1800 }
1801 }
1802 if ( !locked)
1803 vnode_unlock(vp);
1804 return;
1805 }
1806
1807 /*
1808 * Remove any vnodes in the vnode table belonging to mount point mp.
1809 *
1810 * If MNT_NOFORCE is specified, there should not be any active ones,
1811 * return error if any are found (nb: this is a user error, not a
1812 * system error). If MNT_FORCE is specified, detach any active vnodes
1813 * that are found.
1814 */
1815 #if DIAGNOSTIC
1816 int busyprt = 0; /* print out busy vnodes */
1817 #if 0
1818 struct ctldebug debug1 = { "busyprt", &busyprt };
1819 #endif /* 0 */
1820 #endif
1821
1822 int
1823 vflush(struct mount *mp, struct vnode *skipvp, int flags)
1824 {
1825 struct vnode *vp;
1826 int busy = 0;
1827 int reclaimed = 0;
1828 int retval;
1829 unsigned int vid;
1830
1831 mount_lock(mp);
1832 vnode_iterate_setup(mp);
1833 /*
1834 * On regular unmounts (not forced) do a
1835 * quick check for vnodes in use. This
1836 * preserves the caching of vnodes. The automounter
1837 * tries unmounting every so often to see whether
1838 * the filesystem is still busy or not.
1839 */
1840 if (((flags & FORCECLOSE)==0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) {
1841 if (vnode_umount_preflight(mp, skipvp, flags)) {
1842 vnode_iterate_clear(mp);
1843 mount_unlock(mp);
1844 return(EBUSY);
1845 }
1846 }
1847 loop:
1848 /* if it returns 0 then there is nothing to do */
1849 retval = vnode_iterate_prepare(mp);
1850
1851 if (retval == 0) {
1852 vnode_iterate_clear(mp);
1853 mount_unlock(mp);
1854 return(retval);
1855 }
1856
1857 /* iterate over all the vnodes */
1858 while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
1859
1860 vp = TAILQ_FIRST(&mp->mnt_workerqueue);
1861 TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
1862 TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
1863
1864 if ( (vp->v_mount != mp) || (vp == skipvp)) {
1865 continue;
1866 }
1867 vid = vp->v_id;
1868 mount_unlock(mp);
1869
1870 vnode_lock_spin(vp);
1871
1872 if ((vp->v_id != vid) || ((vp->v_lflag & (VL_DEAD | VL_TERMINATE)))) {
1873 vnode_unlock(vp);
1874 mount_lock(mp);
1875 continue;
1876 }
1877
1878 /*
1879 * If requested, skip over vnodes marked VSYSTEM.
1880 * Skip over all vnodes marked VNOFLUSH.
1881 */
1882 if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
1883 (vp->v_flag & VNOFLUSH))) {
1884 vnode_unlock(vp);
1885 mount_lock(mp);
1886 continue;
1887 }
1888 /*
1889 * If requested, skip over vnodes marked VSWAP.
1890 */
1891 if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
1892 vnode_unlock(vp);
1893 mount_lock(mp);
1894 continue;
1895 }
1896 /*
1897 * If requested, skip over vnodes marked VROOT.
1898 */
1899 if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
1900 vnode_unlock(vp);
1901 mount_lock(mp);
1902 continue;
1903 }
1904 /*
1905 * If WRITECLOSE is set, only flush out regular file
1906 * vnodes open for writing.
1907 */
1908 if ((flags & WRITECLOSE) &&
1909 (vp->v_writecount == 0 || vp->v_type != VREG)) {
1910 vnode_unlock(vp);
1911 mount_lock(mp);
1912 continue;
1913 }
1914 /*
1915 * If the real usecount is 0, all we need to do is clear
1916 * out the vnode data structures and we are done.
1917 */
1918 if (((vp->v_usecount == 0) ||
1919 ((vp->v_usecount - vp->v_kusecount) == 0))) {
1920
1921 vnode_lock_convert(vp);
1922 vp->v_iocount++; /* so that drain waits for other iocounts */
1923 #ifdef JOE_DEBUG
1924 record_vp(vp, 1);
1925 #endif
1926 vnode_reclaim_internal(vp, 1, 1, 0);
1927 vnode_dropiocount(vp);
1928 vnode_list_add(vp);
1929 vnode_unlock(vp);
1930
1931 reclaimed++;
1932 mount_lock(mp);
1933 continue;
1934 }
1935 /*
1936 * If FORCECLOSE is set, forcibly close the vnode.
1937 * For block or character devices, revert to an
1938 * anonymous device. For all other files, just kill them.
1939 */
1940 if (flags & FORCECLOSE) {
1941 vnode_lock_convert(vp);
1942
1943 if (vp->v_type != VBLK && vp->v_type != VCHR) {
1944 vp->v_iocount++; /* so that drain waits for other iocounts */
1945 #ifdef JOE_DEBUG
1946 record_vp(vp, 1);
1947 #endif
1948 vnode_reclaim_internal(vp, 1, 1, 0);
1949 vnode_dropiocount(vp);
1950 vnode_list_add(vp);
1951 vnode_unlock(vp);
1952 } else {
1953 vclean(vp, 0);
1954 vp->v_lflag &= ~VL_DEAD;
1955 vp->v_op = spec_vnodeop_p;
1956 vp->v_flag |= VDEVFLUSH;
1957 vnode_unlock(vp);
1958 }
1959 mount_lock(mp);
1960 continue;
1961 }
1962 #if DIAGNOSTIC
1963 if (busyprt)
1964 vprint("vflush: busy vnode", vp);
1965 #endif
1966 vnode_unlock(vp);
1967 mount_lock(mp);
1968 busy++;
1969 }
1970
1971 /* At this point the worker queue is completed */
1972 if (busy && ((flags & FORCECLOSE)==0) && reclaimed) {
1973 busy = 0;
1974 reclaimed = 0;
1975 (void)vnode_iterate_reloadq(mp);
1976 /* returned with mount lock held */
1977 goto loop;
1978 }
1979
1980 /* if new vnodes were created in between retry the reclaim */
1981 if ( vnode_iterate_reloadq(mp) != 0) {
1982 if (!(busy && ((flags & FORCECLOSE)==0)))
1983 goto loop;
1984 }
1985 vnode_iterate_clear(mp);
1986 mount_unlock(mp);
1987
1988 if (busy && ((flags & FORCECLOSE)==0))
1989 return (EBUSY);
1990 return (0);
1991 }
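
/*
 * Illustrative sketch (not part of the original source): how an unmount
 * path might use vflush().  A non-forced unmount skips the system and swap
 * vnodes and gets EBUSY back if anything else is still in use; a forced
 * unmount adds FORCECLOSE so busy vnodes are reclaimed anyway.
 */
#if 0
static int
example_flush_for_unmount(mount_t mp, vnode_t rootvnode, int forced)
{
	int flags = SKIPSYSTEM | SKIPSWAP;

	if (forced)
		flags |= FORCECLOSE;

	/* skip the root vnode; the caller deals with it separately */
	return (vflush(mp, rootvnode, flags));
}
#endif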
1992
1993 long num_recycledvnodes = 0;
1994 /*
1995 * Disassociate the underlying file system from a vnode.
1996 * The vnode lock is held on entry.
1997 */
1998 static void
1999 vclean(vnode_t vp, int flags)
2000 {
2001 vfs_context_t ctx = vfs_context_current();
2002 int active;
2003 int need_inactive;
2004 int already_terminating;
2005 int clflags = 0;
2006 #if NAMEDSTREAMS
2007 int is_namedstream;
2008 #endif
2009
2010 /*
2011 * Check to see if the vnode is in use.
2012 * If so we have to reference it before we clean it out
2013 * so that its count cannot fall to zero and generate a
2014 * race against ourselves to recycle it.
2015 */
2016 active = vp->v_usecount;
2017
2018 /*
2019 * just in case we missed sending a needed
2020 * VNOP_INACTIVE, we'll do it now
2021 */
2022 need_inactive = (vp->v_lflag & VL_NEEDINACTIVE);
2023
2024 vp->v_lflag &= ~VL_NEEDINACTIVE;
2025
2026 /*
2027 * Prevent the vnode from being recycled or
2028 * brought into use while we clean it out.
2029 */
2030 already_terminating = (vp->v_lflag & VL_TERMINATE);
2031
2032 vp->v_lflag |= VL_TERMINATE;
2033
2034 /*
2035 * remove the vnode from any mount list
2036 * it might be on...
2037 */
2038 insmntque(vp, (struct mount *)0);
2039
2040 #if NAMEDSTREAMS
2041 is_namedstream = vnode_isnamedstream(vp);
2042 #endif
2043
2044 vnode_unlock(vp);
2045
2046 OSAddAtomicLong(1, &num_recycledvnodes);
2047
2048 if (flags & DOCLOSE)
2049 clflags |= IO_NDELAY;
2050 if (flags & REVOKEALL)
2051 clflags |= IO_REVOKE;
2052
2053 if (active && (flags & DOCLOSE))
2054 VNOP_CLOSE(vp, clflags, ctx);
2055
2056 /*
2057 * Clean out any buffers associated with the vnode.
2058 */
2059 if (flags & DOCLOSE) {
2060 #if NFSCLIENT
2061 if (vp->v_tag == VT_NFS)
2062 nfs_vinvalbuf(vp, V_SAVE, ctx, 0);
2063 else
2064 #endif
2065 {
2066 VNOP_FSYNC(vp, MNT_WAIT, ctx);
2067 buf_invalidateblks(vp, BUF_WRITE_DATA | BUF_INVALIDATE_LOCKED, 0, 0);
2068 }
2069 if (UBCINFOEXISTS(vp))
2070 /*
2071 * Clean the pages in VM.
2072 */
2073 (void)ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
2074 }
2075 if (active || need_inactive)
2076 VNOP_INACTIVE(vp, ctx);
2077
2078 #if NAMEDSTREAMS
2079 if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
2080 vnode_t pvp = vp->v_parent;
2081
2082 /* Delete the shadow stream file before we reclaim its vnode */
2083 if (vnode_isshadow(vp)) {
2084 vnode_relenamedstream(pvp, vp, ctx);
2085 }
2086
2087 /*
2088 * No more streams associated with the parent. We
2089 * have a ref on it, so its identity is stable.
2090 * If the parent is on an opaque volume, then we need to know
2091 * whether it has associated named streams.
2092 */
2093 if (vfs_authopaque(pvp->v_mount)) {
2094 vnode_lock_spin(pvp);
2095 pvp->v_lflag &= ~VL_HASSTREAMS;
2096 vnode_unlock(pvp);
2097 }
2098 }
2099 #endif
2100
2101 /*
2102 * Destroy ubc named reference
2103 * cluster_release is done on this path
2104 * along with dropping the reference on the ucred
2105 */
2106 ubc_destroy_named(vp);
2107
2108 #if CONFIG_TRIGGERS
2109 /*
2110 * cleanup trigger info from vnode (if any)
2111 */
2112 if (vp->v_resolve)
2113 vnode_resolver_detach(vp);
2114 #endif
2115
2116 /*
2117 * Reclaim the vnode.
2118 */
2119 if (VNOP_RECLAIM(vp, ctx))
2120 panic("vclean: cannot reclaim");
2121
2122 // make sure the name & parent ptrs get cleaned out!
2123 vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE);
2124
2125 vnode_lock(vp);
2126
2127 vp->v_mount = dead_mountp;
2128 vp->v_op = dead_vnodeop_p;
2129 vp->v_tag = VT_NON;
2130 vp->v_data = NULL;
2131
2132 vp->v_lflag |= VL_DEAD;
2133
2134 if (already_terminating == 0) {
2135 vp->v_lflag &= ~VL_TERMINATE;
2136 /*
2137 * Done with purge, notify sleepers of the grim news.
2138 */
2139 if (vp->v_lflag & VL_TERMWANT) {
2140 vp->v_lflag &= ~VL_TERMWANT;
2141 wakeup(&vp->v_lflag);
2142 }
2143 }
2144 }
2145
2146 /*
2147 * Eliminate all activity associated with the requested vnode
2148 * and with all vnodes aliased to the requested vnode.
2149 */
2150 int
2151 #if DIAGNOSTIC
2152 vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
2153 #else
2154 vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
2155 #endif
2156 {
2157 struct vnode *vq;
2158 int vid;
2159
2160 #if DIAGNOSTIC
2161 if ((flags & REVOKEALL) == 0)
2162 panic("vnop_revoke");
2163 #endif
2164
2165 if (vnode_isaliased(vp)) {
2166 /*
2167 * If a vgone (or vclean) is already in progress,
2168 * return an immediate error
2169 */
2170 if (vp->v_lflag & VL_TERMINATE)
2171 return(ENOENT);
2172
2173 /*
2174 * Ensure that vp will not be vgone'd while we
2175 * are eliminating its aliases.
2176 */
2177 SPECHASH_LOCK();
2178 while ((vp->v_specflags & SI_ALIASED)) {
2179 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2180 if (vq->v_rdev != vp->v_rdev ||
2181 vq->v_type != vp->v_type || vp == vq)
2182 continue;
2183 vid = vq->v_id;
2184 SPECHASH_UNLOCK();
2185 if (vnode_getwithvid(vq,vid)){
2186 SPECHASH_LOCK();
2187 break;
2188 }
2189 vnode_reclaim_internal(vq, 0, 1, 0);
2190 vnode_put(vq);
2191 SPECHASH_LOCK();
2192 break;
2193 }
2194 }
2195 SPECHASH_UNLOCK();
2196 }
2197 vnode_reclaim_internal(vp, 0, 0, REVOKEALL);
2198
2199 return (0);
2200 }
2201
2202 /*
2203 * Recycle an unused vnode to the front of the free list.
2204 * Release the passed interlock if the vnode will be recycled.
2205 */
2206 int
2207 vnode_recycle(struct vnode *vp)
2208 {
2209 vnode_lock_spin(vp);
2210
2211 if (vp->v_iocount || vp->v_usecount) {
2212 vp->v_lflag |= VL_MARKTERM;
2213 vnode_unlock(vp);
2214 return(0);
2215 }
2216 vnode_lock_convert(vp);
2217 vnode_reclaim_internal(vp, 1, 0, 0);
2218
2219 vnode_unlock(vp);
2220
2221 return (1);
2222 }
2223
2224 static int
2225 vnode_reload(vnode_t vp)
2226 {
2227 vnode_lock_spin(vp);
2228
2229 if ((vp->v_iocount > 1) || vp->v_usecount) {
2230 vnode_unlock(vp);
2231 return(0);
2232 }
2233 if (vp->v_iocount <= 0)
2234 panic("vnode_reload with no iocount %d", vp->v_iocount);
2235
2236 /* mark for release when iocount is dropped */
2237 vp->v_lflag |= VL_MARKTERM;
2238 vnode_unlock(vp);
2239
2240 return (1);
2241 }
2242
2243
2244 static void
2245 vgone(vnode_t vp, int flags)
2246 {
2247 struct vnode *vq;
2248 struct vnode *vx;
2249
2250 /*
2251 * Clean out the filesystem specific data.
2252 * vclean also takes care of removing the
2253 * vnode from any mount list it might be on
2254 */
2255 vclean(vp, flags | DOCLOSE);
2256
2257 /*
2258 * If special device, remove it from special device alias list
2259 * if it is on one.
2260 */
2261 if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
2262 SPECHASH_LOCK();
2263 if (*vp->v_hashchain == vp) {
2264 *vp->v_hashchain = vp->v_specnext;
2265 } else {
2266 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2267 if (vq->v_specnext != vp)
2268 continue;
2269 vq->v_specnext = vp->v_specnext;
2270 break;
2271 }
2272 if (vq == NULL)
2273 panic("missing bdev");
2274 }
2275 if (vp->v_specflags & SI_ALIASED) {
2276 vx = NULL;
2277 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2278 if (vq->v_rdev != vp->v_rdev ||
2279 vq->v_type != vp->v_type)
2280 continue;
2281 if (vx)
2282 break;
2283 vx = vq;
2284 }
2285 if (vx == NULL)
2286 panic("missing alias");
2287 if (vq == NULL)
2288 vx->v_specflags &= ~SI_ALIASED;
2289 vp->v_specflags &= ~SI_ALIASED;
2290 }
2291 SPECHASH_UNLOCK();
2292 {
2293 struct specinfo *tmp = vp->v_specinfo;
2294 vp->v_specinfo = NULL;
2295 FREE_ZONE((void *)tmp, sizeof(struct specinfo), M_SPECINFO);
2296 }
2297 }
2298 }
2299
2300 /*
2301 * Check whether a filesystem is mounted on the device identified by a device number and type.
2302 */
2303 int
2304 check_mountedon(dev_t dev, enum vtype type, int *errorp)
2305 {
2306 vnode_t vp;
2307 int rc = 0;
2308 int vid;
2309
2310 loop:
2311 SPECHASH_LOCK();
2312 for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
2313 if (dev != vp->v_rdev || type != vp->v_type)
2314 continue;
2315 vid = vp->v_id;
2316 SPECHASH_UNLOCK();
2317 if (vnode_getwithvid(vp,vid))
2318 goto loop;
2319 vnode_lock_spin(vp);
2320 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
2321 vnode_unlock(vp);
2322 if ((*errorp = vfs_mountedon(vp)) != 0)
2323 rc = 1;
2324 } else
2325 vnode_unlock(vp);
2326 vnode_put(vp);
2327 return(rc);
2328 }
2329 SPECHASH_UNLOCK();
2330 return (0);
2331 }
2332
2333 /*
2334 * Calculate the total number of references to a special device.
2335 */
2336 int
2337 vcount(vnode_t vp)
2338 {
2339 vnode_t vq, vnext;
2340 int count;
2341 int vid;
2342
2343 loop:
2344 if (!vnode_isaliased(vp))
2345 return (vp->v_specinfo->si_opencount);
2346 count = 0;
2347
2348 SPECHASH_LOCK();
2349 /*
2350 * Grab first vnode and its vid.
2351 */
2352 vq = *vp->v_hashchain;
2353 vid = vq ? vq->v_id : 0;
2354
2355 SPECHASH_UNLOCK();
2356
2357 while (vq) {
2358 /*
2359 * Attempt to get the vnode outside the SPECHASH lock.
2360 */
2361 if (vnode_getwithvid(vq, vid)) {
2362 goto loop;
2363 }
2364 vnode_lock(vq);
2365
2366 if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) {
2367 if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) {
2368 /*
2369 * Alias, but not in use, so flush it out.
2370 */
2371 vnode_reclaim_internal(vq, 1, 1, 0);
2372 vnode_put_locked(vq);
2373 vnode_unlock(vq);
2374 goto loop;
2375 }
2376 count += vq->v_specinfo->si_opencount;
2377 }
2378 vnode_unlock(vq);
2379
2380 SPECHASH_LOCK();
2381 /*
2382 * must do this with the reference still held on 'vq'
2383 * so that it can't be destroyed while we're poking
2384 * through v_specnext
2385 */
2386 vnext = vq->v_specnext;
2387 vid = vnext ? vnext->v_id : 0;
2388
2389 SPECHASH_UNLOCK();
2390
2391 vnode_put(vq);
2392
2393 vq = vnext;
2394 }
2395
2396 return (count);
2397 }
2398
2399 int prtactive = 0; /* 1 => print out reclaim of active vnodes */
2400
2401 /*
2402 * Print out a description of a vnode.
2403 */
2404 static const char *typename[] =
2405 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
2406
2407 void
2408 vprint(const char *label, struct vnode *vp)
2409 {
2410 char sbuf[64];
2411
2412 if (label != NULL)
2413 printf("%s: ", label);
2414 printf("type %s, usecount %d, writecount %d",
2415 typename[vp->v_type], vp->v_usecount, vp->v_writecount);
2416 sbuf[0] = '\0';
2417 if (vp->v_flag & VROOT)
2418 strlcat(sbuf, "|VROOT", sizeof(sbuf));
2419 if (vp->v_flag & VTEXT)
2420 strlcat(sbuf, "|VTEXT", sizeof(sbuf));
2421 if (vp->v_flag & VSYSTEM)
2422 strlcat(sbuf, "|VSYSTEM", sizeof(sbuf));
2423 if (vp->v_flag & VNOFLUSH)
2424 strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf));
2425 if (vp->v_flag & VBWAIT)
2426 strlcat(sbuf, "|VBWAIT", sizeof(sbuf));
2427 if (vnode_isaliased(vp))
2428 strlcat(sbuf, "|VALIASED", sizeof(sbuf));
2429 if (sbuf[0] != '\0')
2430 printf(" flags (%s)", &sbuf[1]);
2431 }
2432
2433
2434 int
2435 vn_getpath(struct vnode *vp, char *pathbuf, int *len)
2436 {
2437 return build_path(vp, pathbuf, *len, len, BUILDPATH_NO_FS_ENTER, vfs_context_current());
2438 }
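/*
 * Illustrative sketch (not part of the original source): a typical
 * in-kernel caller hands vn_getpath() a MAXPATHLEN buffer and a length
 * that comes back as the number of bytes actually used, e.g.:
 *
 *	char *path;
 *	int   len = MAXPATHLEN;
 *
 *	MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
 *	if (vn_getpath(vp, path, &len) == 0)
 *		printf("vnode path: %s\n", path);
 *	FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
 *
 * The buffer size and the M_NAMEI zone are assumptions of the sketch;
 * the only contract shown is that *len is an in/out byte count.
 */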
2439
2440 int
2441 vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
2442 {
2443 return build_path(vp, pathbuf, *len, len, 0, vfs_context_current());
2444 }
2445
2446 int
2447 vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
2448 {
2449 return ubc_cs_getcdhash(vp, offset, cdhash);
2450 }
2451
2452
2453 static char *extension_table=NULL;
2454 static int nexts;
2455 static int max_ext_width;
2456
2457 static int
2458 extension_cmp(const void *a, const void *b)
2459 {
2460 return (strlen((const char *)a) - strlen((const char *)b));
2461 }
2462
2463
2464 //
2465 // This is the api LaunchServices uses to inform the kernel
2466 // the list of package extensions to ignore.
2467 //
2468 // Internally we keep the list sorted by the length of
2469 // the extension (from longest to shortest). We sort the
2470 // list of extensions so that we can speed up our searches
2471 // when comparing file names -- we only compare extensions
2472 // that could possibly fit into the file name, not all of
2473 // them (i.e. a short 8 character name can't have an 8
2474 // character extension).
2475 //
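//
// Illustrative sketch (not part of the original source): the table that
// set_package_extensions_table() below copies in is a flat buffer of
// 'nentries' fixed-width rows, each 'maxwidth' bytes and NUL padded.
// A hypothetical caller could lay it out like this:
//
//	#define EXT_WIDTH 16				// maxwidth, chosen by the caller
//	static const char exts[3][EXT_WIDTH] = {	// nentries == 3
//		"app", "framework", "bundle"
//	};
//	// hand &exts[0][0], 3 and EXT_WIDTH to the kernel through the
//	// VFS_SET_PACKAGE_EXTS selector handled in vfs_sysctl() below
//
// EXT_WIDTH and the extension names above are illustrative only.
//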
2476 extern lck_mtx_t *pkg_extensions_lck;
2477
2478 __private_extern__ int
2479 set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
2480 {
2481 char *new_exts, *old_exts;
2482 int error;
2483
2484 if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
2485 return EINVAL;
2486 }
2487
2488
2489 // allocate one byte extra so we can guarantee null termination
2490 MALLOC(new_exts, char *, (nentries * maxwidth) + 1, M_TEMP, M_WAITOK);
2491 if (new_exts == NULL) {
2492 return ENOMEM;
2493 }
2494
2495 error = copyin(data, new_exts, nentries * maxwidth);
2496 if (error) {
2497 FREE(new_exts, M_TEMP);
2498 return error;
2499 }
2500
2501 new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block
2502
2503 qsort(new_exts, nentries, maxwidth, extension_cmp);
2504
2505 lck_mtx_lock(pkg_extensions_lck);
2506
2507 old_exts = extension_table;
2508 extension_table = new_exts;
2509 nexts = nentries;
2510 max_ext_width = maxwidth;
2511
2512 lck_mtx_unlock(pkg_extensions_lck);
2513
2514 if (old_exts) {
2515 FREE(old_exts, M_TEMP);
2516 }
2517
2518 return 0;
2519 }
2520
2521
2522 __private_extern__ int
2523 is_package_name(const char *name, int len)
2524 {
2525 int i, extlen;
2526 const char *ptr, *name_ext;
2527
2528 if (len <= 3) {
2529 return 0;
2530 }
2531
2532 name_ext = NULL;
2533 for(ptr=name; *ptr != '\0'; ptr++) {
2534 if (*ptr == '.') {
2535 name_ext = ptr;
2536 }
2537 }
2538
2539 // if there is no "." extension, it can't match
2540 if (name_ext == NULL) {
2541 return 0;
2542 }
2543
2544 // advance over the "."
2545 name_ext++;
2546
2547 lck_mtx_lock(pkg_extensions_lck);
2548
2549 // now iterate over all the extensions to see if any match
2550 ptr = &extension_table[0];
2551 for(i=0; i < nexts; i++, ptr+=max_ext_width) {
2552 extlen = strlen(ptr);
2553 if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
2554 // aha, a match!
2555 lck_mtx_unlock(pkg_extensions_lck);
2556 return 1;
2557 }
2558 }
2559
2560 lck_mtx_unlock(pkg_extensions_lck);
2561
2562 // if we get here, no extension matched
2563 return 0;
2564 }
2565
2566 int
2567 vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
2568 {
2569 char *ptr, *end;
2570 int comp=0;
2571
2572 *component = -1;
2573 if (*path != '/') {
2574 return EINVAL;
2575 }
2576
2577 end = path + 1;
2578 while(end < path + pathlen && *end != '\0') {
2579 while(end < path + pathlen && *end == '/' && *end != '\0') {
2580 end++;
2581 }
2582
2583 ptr = end;
2584
2585 while(end < path + pathlen && *end != '/' && *end != '\0') {
2586 end++;
2587 }
2588
2589 if (end > path + pathlen) {
2590 // hmm, string wasn't null terminated
2591 return EINVAL;
2592 }
2593
2594 *end = '\0';
2595 if (is_package_name(ptr, end - ptr)) {
2596 *component = comp;
2597 break;
2598 }
2599
2600 end++;
2601 comp++;
2602 }
2603
2604 return 0;
2605 }
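/*
 * Illustrative walk-through (not part of the original source), assuming
 * "app" has been registered via set_package_extensions_table():
 *
 *	int comp = -1;
 *	char p[] = "/Users/me/Foo.app/Contents";
 *
 *	vn_path_package_check(NULLVP, p, sizeof(p), &comp);
 *
 * Components are scanned in order ("Users" = 0, "me" = 1, "Foo.app" = 2),
 * so comp comes back as 2.  Note that the routine writes NULs over the
 * separators as it scans, so it must be given a writable scratch copy
 * of the path.
 */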
2606
2607 /*
2608 * Determine if a name is inappropriate for a searchfs query.
2609 * This list consists of /System currently.
2610 */
2611
2612 int vn_searchfs_inappropriate_name(const char *name, int len) {
2613 const char *bad_names[] = { "System" };
2614 int bad_len[] = { 6 };
2615 int i;
2616
2617 for(i=0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) {
2618 if (len == bad_len[i] && strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) {
2619 return 1;
2620 }
2621 }
2622
2623 // if we get here, no name matched
2624 return 0;
2625 }
2626
2627 /*
2628 * Top level filesystem related information gathering.
2629 */
2630 extern unsigned int vfs_nummntops;
2631
2632 int
2633 vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
2634 user_addr_t newp, size_t newlen, proc_t p)
2635 {
2636 struct vfstable *vfsp;
2637 int *username;
2638 u_int usernamelen;
2639 int error;
2640 struct vfsconf vfsc;
2641
2642 /* All requests other than VFS_GENERIC, plus the VFS_MAXTYPENUM,
2643 * VFS_CONF and VFS_SET_PACKAGE_EXTS selectors under VFS_GENERIC,
2644 * need root privilege in order to supply modifiers (new values).
2645 * The rest are covered by userland_sysctl (CTLFLAG_ANYBODY).
2646 */
2647 if ((newp != USER_ADDR_NULL) && ((name[0] != VFS_GENERIC) ||
2648 ((name[1] == VFS_MAXTYPENUM) ||
2649 (name[1] == VFS_CONF) ||
2650 (name[1] == VFS_SET_PACKAGE_EXTS)))
2651 && (error = suser(kauth_cred_get(), &p->p_acflag))) {
2652 return(error);
2653 }
2654 /*
2655 * The VFS_NUMMNTOPS shouldn't be at name[0] since
2656 * it is a VFS generic variable. So now we must check
2657 * namelen so we don't end up covering any UFS
2658 * variables (since UFS vfc_typenum is 1).
2659 *
2660 * It should have been:
2661 * name[0]: VFS_GENERIC
2662 * name[1]: VFS_NUMMNTOPS
2663 */
2664 if (namelen == 1 && name[0] == VFS_NUMMNTOPS) {
2665 return (sysctl_rdint(oldp, oldlenp, newp, vfs_nummntops));
2666 }
2667
2668 /* all sysctl names at this level are at least name and field */
2669 if (namelen < 2)
2670 return (EISDIR); /* overloaded */
2671 if (name[0] != VFS_GENERIC) {
2672
2673 mount_list_lock();
2674 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2675 if (vfsp->vfc_typenum == name[0]) {
2676 vfsp->vfc_refcount++;
2677 break;
2678 }
2679 mount_list_unlock();
2680
2681 if (vfsp == NULL)
2682 return (ENOTSUP);
2683
2684 /* XXX current context proxy for proc p? */
2685 error = ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
2686 oldp, oldlenp, newp, newlen,
2687 vfs_context_current()));
2688
2689 mount_list_lock();
2690 vfsp->vfc_refcount--;
2691 mount_list_unlock();
2692 return error;
2693 }
2694 switch (name[1]) {
2695 case VFS_MAXTYPENUM:
2696 return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
2697 case VFS_CONF:
2698 if (namelen < 3)
2699 return (ENOTDIR); /* overloaded */
2700
2701 mount_list_lock();
2702 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2703 if (vfsp->vfc_typenum == name[2])
2704 break;
2705
2706 if (vfsp == NULL) {
2707 mount_list_unlock();
2708 return (ENOTSUP);
2709 }
2710
2711 vfsc.vfc_reserved1 = 0;
2712 bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
2713 vfsc.vfc_typenum = vfsp->vfc_typenum;
2714 vfsc.vfc_refcount = vfsp->vfc_refcount;
2715 vfsc.vfc_flags = vfsp->vfc_flags;
2716 vfsc.vfc_reserved2 = 0;
2717 vfsc.vfc_reserved3 = 0;
2718
2719 mount_list_unlock();
2720 return (sysctl_rdstruct(oldp, oldlenp, newp, &vfsc,
2721 sizeof(struct vfsconf)));
2722
2723 case VFS_SET_PACKAGE_EXTS:
2724 return set_package_extensions_table((user_addr_t)((unsigned)name[1]), name[2], name[3]);
2725 }
2726 /*
2727 * We need to get back into the general MIB, so we need to re-prepend
2728 * CTL_VFS to our name and try userland_sysctl().
2729 */
2730 usernamelen = namelen + 1;
2731 MALLOC(username, int *, usernamelen * sizeof(*username),
2732 M_TEMP, M_WAITOK);
2733 bcopy(name, username + 1, namelen * sizeof(*name));
2734 username[0] = CTL_VFS;
2735 error = userland_sysctl(p, username, usernamelen, oldp,
2736 oldlenp, newp, newlen, oldlenp);
2737 FREE(username, M_TEMP);
2738 return (error);
2739 }
2740
2741 /*
2742 * Dump vnode list (via sysctl) - defunct
2743 * use "pstat" instead
2744 */
2745 /* ARGSUSED */
2746 int
2747 sysctl_vnode
2748 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
2749 {
2750 return(EINVAL);
2751 }
2752
2753 SYSCTL_PROC(_kern, KERN_VNODE, vnode,
2754 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
2755 0, 0, sysctl_vnode, "S,", "");
2756
2757
2758 /*
2759 * Check to see if a filesystem is mounted on a block device.
2760 */
2761 int
2762 vfs_mountedon(struct vnode *vp)
2763 {
2764 struct vnode *vq;
2765 int error = 0;
2766
2767 SPECHASH_LOCK();
2768 if (vp->v_specflags & SI_MOUNTEDON) {
2769 error = EBUSY;
2770 goto out;
2771 }
2772 if (vp->v_specflags & SI_ALIASED) {
2773 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2774 if (vq->v_rdev != vp->v_rdev ||
2775 vq->v_type != vp->v_type)
2776 continue;
2777 if (vq->v_specflags & SI_MOUNTEDON) {
2778 error = EBUSY;
2779 break;
2780 }
2781 }
2782 }
2783 out:
2784 SPECHASH_UNLOCK();
2785 return (error);
2786 }
2787
2788 /*
2789 * Unmount all filesystems. The list is traversed in reverse order
2790 * of mounting to avoid dependencies.
2791 */
2792 __private_extern__ void
2793 vfs_unmountall(void)
2794 {
2795 struct mount *mp;
2796 int error;
2797
2798 /*
2799 * Since this only runs when rebooting, it is not interlocked.
2800 */
2801 mount_list_lock();
2802 while(!TAILQ_EMPTY(&mountlist)) {
2803 mp = TAILQ_LAST(&mountlist, mntlist);
2804 mount_list_unlock();
2805 error = dounmount(mp, MNT_FORCE, 0, vfs_context_current());
2806 if ((error != 0) && (error != EBUSY)) {
2807 printf("unmount of %s failed (", mp->mnt_vfsstat.f_mntonname);
2808 printf("%d)\n", error);
2809 mount_list_lock();
2810 TAILQ_REMOVE(&mountlist, mp, mnt_list);
2811 continue;
2812 } else if (error == EBUSY) {
2813 /* If EBUSY is returned, the unmount was already in progress */
2814 printf("unmount of %p failed (", mp);
2815 printf("BUSY)\n");
2816 }
2817 mount_list_lock();
2818 }
2819 mount_list_unlock();
2820 }
2821
2822
2823 /*
2824 * This routine is called from vnode_pager_deallocate out of the VM
2825 * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
2826 * on a vnode that has a UBCINFO
2827 */
2828 __private_extern__ void
2829 vnode_pager_vrele(vnode_t vp)
2830 {
2831 struct ubc_info *uip;
2832
2833 vnode_lock_spin(vp);
2834
2835 vp->v_lflag &= ~VNAMED_UBC;
2836
2837 uip = vp->v_ubcinfo;
2838 vp->v_ubcinfo = UBC_INFO_NULL;
2839
2840 vnode_unlock(vp);
2841
2842 ubc_info_deallocate(uip);
2843 }
2844
2845
2846 #include <sys/disk.h>
2847
2848 u_int32_t rootunit = (u_int32_t)-1;
2849
2850 errno_t
2851 vfs_init_io_attributes(vnode_t devvp, mount_t mp)
2852 {
2853 int error;
2854 off_t readblockcnt = 0;
2855 off_t writeblockcnt = 0;
2856 off_t readmaxcnt = 0;
2857 off_t writemaxcnt = 0;
2858 off_t readsegcnt = 0;
2859 off_t writesegcnt = 0;
2860 off_t readsegsize = 0;
2861 off_t writesegsize = 0;
2862 off_t alignment = 0;
2863 off_t ioqueue_depth = 0;
2864 u_int32_t blksize;
2865 u_int64_t temp;
2866 u_int32_t features;
2867 vfs_context_t ctx = vfs_context_current();
2868 int isssd = 0;
2869 int isvirtual = 0;
2870
2871
2872 VNOP_IOCTL(devvp, DKIOCGETTHROTTLEMASK, (caddr_t)&mp->mnt_throttle_mask, 0, NULL);
2873 /*
2874 * as a reasonable approximation, only use the lowest bit of the mask
2875 * to generate a disk unit number
2876 */
2877 mp->mnt_devbsdunit = num_trailing_0(mp->mnt_throttle_mask);
2878
2879 if (devvp == rootvp)
2880 rootunit = mp->mnt_devbsdunit;
2881
2882 if (mp->mnt_devbsdunit == rootunit) {
2883 /*
2884 * this mount point exists on the same device as the root
2885 * partition, so it comes under the hard throttle control...
2886 * this is true even for the root mount point itself
2887 */
2888 mp->mnt_kern_flag |= MNTK_ROOTDEV;
2889 }
2890 /*
2891 * force the spec device to re-cache
2892 * the underlying block size in case
2893 * the filesystem overrode the initial value
2894 */
2895 set_fsblocksize(devvp);
2896
2897
2898 if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE,
2899 (caddr_t)&blksize, 0, ctx)))
2900 return (error);
2901
2902 mp->mnt_devblocksize = blksize;
2903
2904 /*
2905 * set the maximum possible I/O size
2906 * this may get clipped to a smaller value
2907 * based on which constraints are being advertised
2908 * and if those advertised constraints result in a smaller
2909 * limit for a given I/O
2910 */
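/*
 * Worked example (illustrative, not part of the original source),
 * assuming MAX_UPL_SIZE is 256 pages and PAGE_SIZE is 4KB: the initial
 * limit is 1MB.  If the device then reports DKIOCGETMAXBLOCKCOUNTREAD
 * of 128 with a 512-byte block size, readblockcnt * blksize is 64KB,
 * which is smaller, so mnt_maxreadcnt gets clipped to 64KB below.
 */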
2911 mp->mnt_maxreadcnt = MAX_UPL_SIZE * PAGE_SIZE;
2912 mp->mnt_maxwritecnt = MAX_UPL_SIZE * PAGE_SIZE;
2913
2914 if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
2915 if (isvirtual)
2916 mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
2917 }
2918 if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) {
2919 if (isssd)
2920 mp->mnt_kern_flag |= MNTK_SSD;
2921 }
2922 if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
2923 (caddr_t)&features, 0, ctx)))
2924 return (error);
2925
2926 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD,
2927 (caddr_t)&readblockcnt, 0, ctx)))
2928 return (error);
2929
2930 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE,
2931 (caddr_t)&writeblockcnt, 0, ctx)))
2932 return (error);
2933
2934 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD,
2935 (caddr_t)&readmaxcnt, 0, ctx)))
2936 return (error);
2937
2938 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE,
2939 (caddr_t)&writemaxcnt, 0, ctx)))
2940 return (error);
2941
2942 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD,
2943 (caddr_t)&readsegcnt, 0, ctx)))
2944 return (error);
2945
2946 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE,
2947 (caddr_t)&writesegcnt, 0, ctx)))
2948 return (error);
2949
2950 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD,
2951 (caddr_t)&readsegsize, 0, ctx)))
2952 return (error);
2953
2954 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE,
2955 (caddr_t)&writesegsize, 0, ctx)))
2956 return (error);
2957
2958 if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT,
2959 (caddr_t)&alignment, 0, ctx)))
2960 return (error);
2961
2962 if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
2963 (caddr_t)&ioqueue_depth, 0, ctx)))
2964 return (error);
2965
2966 if (readmaxcnt)
2967 mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt;
2968
2969 if (readblockcnt) {
2970 temp = readblockcnt * blksize;
2971 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
2972
2973 if (temp < mp->mnt_maxreadcnt)
2974 mp->mnt_maxreadcnt = (u_int32_t)temp;
2975 }
2976
2977 if (writemaxcnt)
2978 mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : writemaxcnt;
2979
2980 if (writeblockcnt) {
2981 temp = writeblockcnt * blksize;
2982 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
2983
2984 if (temp < mp->mnt_maxwritecnt)
2985 mp->mnt_maxwritecnt = (u_int32_t)temp;
2986 }
2987
2988 if (readsegcnt) {
2989 temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
2990 } else {
2991 temp = mp->mnt_maxreadcnt / PAGE_SIZE;
2992
2993 if (temp > UINT16_MAX)
2994 temp = UINT16_MAX;
2995 }
2996 mp->mnt_segreadcnt = (u_int16_t)temp;
2997
2998 if (writesegcnt) {
2999 temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
3000 } else {
3001 temp = mp->mnt_maxwritecnt / PAGE_SIZE;
3002
3003 if (temp > UINT16_MAX)
3004 temp = UINT16_MAX;
3005 }
3006 mp->mnt_segwritecnt = (u_int16_t)temp;
3007
3008 if (readsegsize)
3009 temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
3010 else
3011 temp = mp->mnt_maxreadcnt;
3012 mp->mnt_maxsegreadsize = (u_int32_t)temp;
3013
3014 if (writesegsize)
3015 temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize;
3016 else
3017 temp = mp->mnt_maxwritecnt;
3018 mp->mnt_maxsegwritesize = (u_int32_t)temp;
3019
3020 if (alignment)
3021 temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1;
3022 else
3023 temp = 0;
3024 mp->mnt_alignmentmask = temp;
3025
3026
3027 if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH)
3028 temp = ioqueue_depth;
3029 else
3030 temp = MNT_DEFAULT_IOQUEUE_DEPTH;
3031
3032 mp->mnt_ioqueue_depth = temp;
3033 mp->mnt_ioscale = (mp->mnt_ioqueue_depth + (MNT_DEFAULT_IOQUEUE_DEPTH - 1)) / MNT_DEFAULT_IOQUEUE_DEPTH;
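/*
 * Illustrative arithmetic (not part of the original source): the line
 * above is a ceiling division, so assuming MNT_DEFAULT_IOQUEUE_DEPTH
 * is 32, a device advertising a command pool of 256 yields an ioscale
 * of 8, while anything at or below the default yields 1.
 */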
3034
3035 if (mp->mnt_ioscale > 1)
3036 printf("ioqueue_depth = %d, ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
3037
3038 if (features & DK_FEATURE_FORCE_UNIT_ACCESS)
3039 mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
3040 if (features & DK_FEATURE_UNMAP)
3041 mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED;
3042 return (error);
3043 }
3044
3045 static struct klist fs_klist;
3046 lck_grp_t *fs_klist_lck_grp;
3047 lck_mtx_t *fs_klist_lock;
3048
3049 void
3050 vfs_event_init(void)
3051 {
3052
3053 klist_init(&fs_klist);
3054 fs_klist_lck_grp = lck_grp_alloc_init("fs_klist", NULL);
3055 fs_klist_lock = lck_mtx_alloc_init(fs_klist_lck_grp, NULL);
3056 }
3057
3058 void
3059 vfs_event_signal(__unused fsid_t *fsid, u_int32_t event, __unused intptr_t data)
3060 {
3061 lck_mtx_lock(fs_klist_lock);
3062 KNOTE(&fs_klist, event);
3063 lck_mtx_unlock(fs_klist_lock);
3064 }
3065
3066 /*
3067 * return the number of mounted filesystems.
3068 */
3069 static int
3070 sysctl_vfs_getvfscnt(void)
3071 {
3072 return(mount_getvfscnt());
3073 }
3074
3075
3076 static int
3077 mount_getvfscnt(void)
3078 {
3079 int ret;
3080
3081 mount_list_lock();
3082 ret = nummounts;
3083 mount_list_unlock();
3084 return (ret);
3085
3086 }
3087
3088
3089
3090 static int
3091 mount_fillfsids(fsid_t *fsidlst, int count)
3092 {
3093 struct mount *mp;
3094 int actual=0;
3095
3096 actual = 0;
3097 mount_list_lock();
3098 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3099 if (actual <= count) {
3100 fsidlst[actual] = mp->mnt_vfsstat.f_fsid;
3101 actual++;
3102 }
3103 }
3104 mount_list_unlock();
3105 return (actual);
3106
3107 }
3108
3109 /*
3110 * Fill in the array of fsid_t's up to a max of 'count'; the actual
3111 * number filled in will be set in '*actual'. If there are more fsid_t's
3112 * than room in fsidlst then ENOMEM will be returned and '*actual' will
3113 * have the full count.
3114 * Callers depend on *actual being filled out even in the error case.
3115 */
3116 static int
3117 sysctl_vfs_getvfslist(fsid_t *fsidlst, int count, int *actual)
3118 {
3119 struct mount *mp;
3120
3121 *actual = 0;
3122 mount_list_lock();
3123 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3124 (*actual)++;
3125 if (*actual <= count)
3126 fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid;
3127 }
3128 mount_list_unlock();
3129 return (*actual <= count ? 0 : ENOMEM);
3130 }
3131
3132 static int
3133 sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1,
3134 __unused int arg2, struct sysctl_req *req)
3135 {
3136 int actual, error;
3137 size_t space;
3138 fsid_t *fsidlst;
3139
3140 /* This is a readonly node. */
3141 if (req->newptr != USER_ADDR_NULL)
3142 return (EPERM);
3143
3144 /* they are querying us so just return the space required. */
3145 if (req->oldptr == USER_ADDR_NULL) {
3146 req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
3147 return 0;
3148 }
3149 again:
3150 /*
3151 * Retrieve an accurate count of the amount of space required to copy
3152 * out all the fsids in the system.
3153 */
3154 space = req->oldlen;
3155 req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
3156
3157 /* they didn't give us enough space. */
3158 if (space < req->oldlen)
3159 return (ENOMEM);
3160
3161 MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK);
3162 if (fsidlst == NULL) {
3163 return (ENOMEM);
3164 }
3165
3166 error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
3167 &actual);
3168 /*
3169 * If we get back ENOMEM, then another mount has been added while we
3170 * slept in malloc above. If this is the case then try again.
3171 */
3172 if (error == ENOMEM) {
3173 FREE(fsidlst, M_TEMP);
3174 req->oldlen = space;
3175 goto again;
3176 }
3177 if (error == 0) {
3178 error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t));
3179 }
3180 FREE(fsidlst, M_TEMP);
3181 return (error);
3182 }
3183
3184 /*
3185 * Do a sysctl by fsid.
3186 */
3187 static int
3188 sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
3189 struct sysctl_req *req)
3190 {
3191 union union_vfsidctl vc;
3192 struct mount *mp;
3193 struct vfsstatfs *sp;
3194 int *name, flags, namelen;
3195 int error=0, gotref=0;
3196 vfs_context_t ctx = vfs_context_current();
3197 proc_t p = req->p; /* XXX req->p != current_proc()? */
3198 boolean_t is_64_bit;
3199
3200 name = arg1;
3201 namelen = arg2;
3202 is_64_bit = proc_is64bit(p);
3203
3204 error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
3205 if (error)
3206 goto out;
3207 if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
3208 error = EINVAL;
3209 goto out;
3210 }
3211 mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
3212 if (mp == NULL) {
3213 error = ENOENT;
3214 goto out;
3215 }
3216 gotref = 1;
3217 /* reset so that the fs specific code can fetch it. */
3218 req->newidx = 0;
3219 /*
3220 * Note if this is a VFS_CTL then we pass the actual sysctl req
3221 * in for "oldp" so that the lower layer can DTRT and use the
3222 * SYSCTL_IN/OUT routines.
3223 */
3224 if (mp->mnt_op->vfs_sysctl != NULL) {
3225 if (is_64_bit) {
3226 if (vfs_64bitready(mp)) {
3227 error = mp->mnt_op->vfs_sysctl(name, namelen,
3228 CAST_USER_ADDR_T(req),
3229 NULL, USER_ADDR_NULL, 0,
3230 ctx);
3231 }
3232 else {
3233 error = ENOTSUP;
3234 }
3235 }
3236 else {
3237 error = mp->mnt_op->vfs_sysctl(name, namelen,
3238 CAST_USER_ADDR_T(req),
3239 NULL, USER_ADDR_NULL, 0,
3240 ctx);
3241 }
3242 if (error != ENOTSUP) {
3243 goto out;
3244 }
3245 }
3246 switch (name[0]) {
3247 case VFS_CTL_UMOUNT:
3248 req->newidx = 0;
3249 if (is_64_bit) {
3250 req->newptr = vc.vc64.vc_ptr;
3251 req->newlen = (size_t)vc.vc64.vc_len;
3252 }
3253 else {
3254 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
3255 req->newlen = vc.vc32.vc_len;
3256 }
3257 error = SYSCTL_IN(req, &flags, sizeof(flags));
3258 if (error)
3259 break;
3260
3261 mount_ref(mp, 0);
3262 mount_iterdrop(mp);
3263 gotref = 0;
3264 /* safedounmount consumes a ref */
3265 error = safedounmount(mp, flags, ctx);
3266 break;
3267 case VFS_CTL_STATFS:
3268 req->newidx = 0;
3269 if (is_64_bit) {
3270 req->newptr = vc.vc64.vc_ptr;
3271 req->newlen = (size_t)vc.vc64.vc_len;
3272 }
3273 else {
3274 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
3275 req->newlen = vc.vc32.vc_len;
3276 }
3277 error = SYSCTL_IN(req, &flags, sizeof(flags));
3278 if (error)
3279 break;
3280 sp = &mp->mnt_vfsstat;
3281 if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
3282 (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT)))
3283 goto out;
3284 if (is_64_bit) {
3285 struct user64_statfs sfs;
3286 bzero(&sfs, sizeof(sfs));
3287 sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3288 sfs.f_type = mp->mnt_vtable->vfc_typenum;
3289 sfs.f_bsize = (user64_long_t)sp->f_bsize;
3290 sfs.f_iosize = (user64_long_t)sp->f_iosize;
3291 sfs.f_blocks = (user64_long_t)sp->f_blocks;
3292 sfs.f_bfree = (user64_long_t)sp->f_bfree;
3293 sfs.f_bavail = (user64_long_t)sp->f_bavail;
3294 sfs.f_files = (user64_long_t)sp->f_files;
3295 sfs.f_ffree = (user64_long_t)sp->f_ffree;
3296 sfs.f_fsid = sp->f_fsid;
3297 sfs.f_owner = sp->f_owner;
3298
3299 if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
3300 strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSTYPENAMELEN);
3301 } else {
3302 strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
3303 }
3304 strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
3305 strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);
3306
3307 error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
3308 }
3309 else {
3310 struct user32_statfs sfs;
3311 bzero(&sfs, sizeof(sfs));
3312 sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3313 sfs.f_type = mp->mnt_vtable->vfc_typenum;
3314
3315 /*
3316 * It's possible for there to be more than 2^31 blocks in the filesystem, so we
3317 * have to fudge the numbers here in that case. We inflate the blocksize in order
3318 * to reflect the filesystem size as best we can.
3319 */
3320 if (sp->f_blocks > INT_MAX) {
3321 int shift;
3322
3323 /*
3324 * Work out how far we have to shift the block count down to make it fit.
3325 * Note that it's possible to have to shift so far that the resulting
3326 * blocksize would be unreportably large. At that point, we will clip
3327 * any values that don't fit.
3328 *
3329 * For safety's sake, we also ensure that f_iosize is never reported as
3330 * being smaller than f_bsize.
3331 */
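/*
 * Worked example (illustrative, not part of the original source):
 * with f_bsize = 4096 and f_blocks = 6,442,450,944 (a 24TiB volume),
 * shifts of 0 and 1 still leave the block count above INT_MAX, but a
 * shift of 2 gives 1,610,612,736 blocks, so the loop below stops there;
 * the reported blocksize becomes 4096 << 2 = 16384 and the reported
 * total size (blocks * blocksize) is unchanged.
 */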
3332 for (shift = 0; shift < 32; shift++) {
3333 if ((sp->f_blocks >> shift) <= INT_MAX)
3334 break;
3335 if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX)
3336 break;
3337 }
3338 #define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
3339 sfs.f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
3340 sfs.f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
3341 sfs.f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
3342 #undef __SHIFT_OR_CLIP
3343 sfs.f_bsize = (user32_long_t)(sp->f_bsize << shift);
3344 sfs.f_iosize = lmax(sp->f_iosize, sp->f_bsize);
3345 } else {
3346 sfs.f_bsize = (user32_long_t)sp->f_bsize;
3347 sfs.f_iosize = (user32_long_t)sp->f_iosize;
3348 sfs.f_blocks = (user32_long_t)sp->f_blocks;
3349 sfs.f_bfree = (user32_long_t)sp->f_bfree;
3350 sfs.f_bavail = (user32_long_t)sp->f_bavail;
3351 }
3352 sfs.f_files = (user32_long_t)sp->f_files;
3353 sfs.f_ffree = (user32_long_t)sp->f_ffree;
3354 sfs.f_fsid = sp->f_fsid;
3355 sfs.f_owner = sp->f_owner;
3356
3357 if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
3358 strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSTYPENAMELEN);
3359 } else {
3360 strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
3361 }
3362 strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
3363 strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);
3364
3365 error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
3366 }
3367 break;
3368 default:
3369 error = ENOTSUP;
3370 goto out;
3371 }
3372 out:
3373 if(gotref != 0)
3374 mount_iterdrop(mp);
3375 return (error);
3376 }
3377
3378 static int filt_fsattach(struct knote *kn);
3379 static void filt_fsdetach(struct knote *kn);
3380 static int filt_fsevent(struct knote *kn, long hint);
3381 struct filterops fs_filtops = {
3382 .f_attach = filt_fsattach,
3383 .f_detach = filt_fsdetach,
3384 .f_event = filt_fsevent,
3385 };
3386
3387 static int
3388 filt_fsattach(struct knote *kn)
3389 {
3390
3391 lck_mtx_lock(fs_klist_lock);
3392 kn->kn_flags |= EV_CLEAR;
3393 KNOTE_ATTACH(&fs_klist, kn);
3394 lck_mtx_unlock(fs_klist_lock);
3395 return (0);
3396 }
3397
3398 static void
3399 filt_fsdetach(struct knote *kn)
3400 {
3401 lck_mtx_lock(fs_klist_lock);
3402 KNOTE_DETACH(&fs_klist, kn);
3403 lck_mtx_unlock(fs_klist_lock);
3404 }
3405
3406 static int
3407 filt_fsevent(struct knote *kn, long hint)
3408 {
3409 /*
3410 * Backwards compatibility:
3411 * Other filters would do nothing if kn->kn_sfflags == 0
3412 */
3413
3414 if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
3415 kn->kn_fflags |= hint;
3416 }
3417
3418 return (kn->kn_fflags != 0);
3419 }
3420
3421 static int
3422 sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp,
3423 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3424 {
3425 int out, error;
3426 pid_t pid;
3427 proc_t p;
3428
3429 /* We need a pid. */
3430 if (req->newptr == USER_ADDR_NULL)
3431 return (EINVAL);
3432
3433 error = SYSCTL_IN(req, &pid, sizeof(pid));
3434 if (error)
3435 return (error);
3436
3437 p = proc_find(pid < 0 ? -pid : pid);
3438 if (p == NULL)
3439 return (ESRCH);
3440
3441 /*
3442 * Fetching the value is ok, but we only fetch if the old
3443 * pointer is given.
3444 */
3445 if (req->oldptr != USER_ADDR_NULL) {
3446 out = !((p->p_flag & P_NOREMOTEHANG) == 0);
3447 proc_rele(p);
3448 error = SYSCTL_OUT(req, &out, sizeof(out));
3449 return (error);
3450 }
3451
3452 /* cansignal offers us enough security. */
3453 if (p != req->p && proc_suser(req->p) != 0) {
3454 proc_rele(p);
3455 return (EPERM);
3456 }
3457
3458 if (pid < 0)
3459 OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
3460 else
3461 OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
3462 proc_rele(p);
3463
3464 return (0);
3465 }
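/*
 * Illustrative user-space sketch (not part of the original source):
 * the flag is toggled through the vfs.generic.noremotehang node
 * declared below, setting it with the target pid and clearing it with
 * the negated pid:
 *
 *	pid_t pid = getpid();
 *
 *	sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &pid, sizeof(pid));	// set
 *	pid = -pid;
 *	sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &pid, sizeof(pid));	// clear
 */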
3466
3467 /* the vfs.generic. branch. */
3468 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs generic hinge");
3469 /* retrieve a list of mounted filesystem fsid_t */
3470 SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist, CTLFLAG_RD | CTLFLAG_LOCKED,
3471 NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
3472 /* perform operations on filesystem via fsid_t */
3473 SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW | CTLFLAG_LOCKED,
3474 sysctl_vfs_ctlbyfsid, "ctlbyfsid");
3475 SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW | CTLFLAG_ANYBODY,
3476 NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
3477
3478
3479 long num_reusedvnodes = 0;
3480
3481 static int
3482 new_vnode(vnode_t *vpp)
3483 {
3484 vnode_t vp;
3485 int retries = 0; /* retry in case of tablefull */
3486 int force_alloc = 0, walk_count = 0;
3487 unsigned int vpid;
3488 struct timespec ts;
3489 struct timeval current_tv;
3490 #ifndef __LP64__
3491 struct unsafe_fsnode *l_unsafefs = 0;
3492 #endif /* __LP64__ */
3493 proc_t curproc = current_proc();
3494
3495 retry:
3496 microuptime(&current_tv);
3497
3498 vp = NULLVP;
3499
3500 vnode_list_lock();
3501
3502 if ((numvnodes - deadvnodes) < desiredvnodes || force_alloc) {
3503 if ( !TAILQ_EMPTY(&vnode_dead_list)) {
3504 /*
3505 * Can always reuse a dead one
3506 */
3507 vp = TAILQ_FIRST(&vnode_dead_list);
3508 goto steal_this_vp;
3509 }
3510 /*
3511 * no dead vnodes available... if we're under
3512 * the limit, we'll create a new vnode
3513 */
3514 numvnodes++;
3515 vnode_list_unlock();
3516
3517 MALLOC_ZONE(vp, struct vnode *, sizeof(*vp), M_VNODE, M_WAITOK);
3518 bzero((char *)vp, sizeof(*vp));
3519 VLISTNONE(vp); /* avoid double queue removal */
3520 lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr);
3521
3522 klist_init(&vp->v_knotes);
3523 nanouptime(&ts);
3524 vp->v_id = ts.tv_nsec;
3525 vp->v_flag = VSTANDARD;
3526
3527 #if CONFIG_MACF
3528 if (mac_vnode_label_init_needed(vp))
3529 mac_vnode_label_init(vp);
3530 #endif /* MAC */
3531
3532 vp->v_iocount = 1;
3533 goto done;
3534 }
3535
3536 #define MAX_WALK_COUNT 1000
3537
3538 if ( !TAILQ_EMPTY(&vnode_rage_list) &&
3539 (ragevnodes >= rage_limit ||
3540 (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
3541
3542 TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
3543 if ( !(vp->v_listflag & VLIST_RAGE))
3544 panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
3545
3546 // if we're a dependency-capable process, skip vnodes that can
3547 // cause recycling deadlocks. (i.e. this process is diskimages
3548 // helper and the vnode is in a disk image). Querying the
3549 // mnt_kern_flag for the mount's virtual device status
3550 // is safer than checking the mnt_dependent_process, which
3551 // may not be updated if there are multiple devnode layers
3552 // in between the disk image and the final consumer.
3553
3554 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
3555 (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
3556 break;
3557 }
3558
3559 // don't iterate more than MAX_WALK_COUNT vnodes to
3560 // avoid keeping the vnode list lock held for too long.
3561 if (walk_count++ > MAX_WALK_COUNT) {
3562 vp = NULL;
3563 break;
3564 }
3565 }
3566
3567 }
3568
3569 if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
3570 /*
3571 * Pick the first vp for possible reuse
3572 */
3573 walk_count = 0;
3574 TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
3575
3576 // if we're a dependency-capable process, skip vnodes that can
3577 // cause recycling deadlocks. (i.e. this process is diskimages
3578 // helper and the vnode is in a disk image). Querying the
3579 // mnt_kern_flag for the mount's virtual device status
3580 // is safer than checking the mnt_dependent_process, which
3581 // may not be updated if there are multiple devnode layers
3582 // in between the disk image and the final consumer.
3583
3584 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
3585 (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
3586 break;
3587 }
3588
3589 // don't iterate more than MAX_WALK_COUNT vnodes to
3590 // avoid keeping the vnode list lock held for too long.
3591 if (walk_count++ > MAX_WALK_COUNT) {
3592 vp = NULL;
3593 break;
3594 }
3595 }
3596
3597 }
3598
3599 //
3600 // if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
3601 // then we're trying to create a vnode on behalf of a
3602 // process like diskimages-helper that has file systems
3603 // mounted on top of itself (and thus we can't reclaim
3604 // vnodes in the file systems on top of us). if we can't
3605 // find a vnode to reclaim then we'll just have to force
3606 // the allocation.
3607 //
3608 if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
3609 force_alloc = 1;
3610 vnode_list_unlock();
3611 goto retry;
3612 }
3613
3614 if (vp == NULL) {
3615 /*
3616 * we've reached the system imposed maximum number of vnodes
3617 * but there isn't a single one available
3618 * wait a bit and then retry... if we can't get a vnode
3619 * after 100 retries, then log a complaint
3620 */
3621 if (++retries <= 100) {
3622 vnode_list_unlock();
3623 delay_for_interval(1, 1000 * 1000);
3624 goto retry;
3625 }
3626
3627 vnode_list_unlock();
3628 tablefull("vnode");
3629 log(LOG_EMERG, "%d desired, %d numvnodes, "
3630 "%d free, %d dead, %d rage\n",
3631 desiredvnodes, numvnodes, freevnodes, deadvnodes, ragevnodes);
3632 #if CONFIG_EMBEDDED
3633 /*
3634 * Running out of vnodes tends to make a system unusable. Start killing
3635 * processes that jetsam knows are killable.
3636 */
3637 if (jetsam_kill_top_proc(TRUE, kJetsamFlagsKilledVnodes) < 0) {
3638 /*
3639 * If jetsam can't find any more processes to kill and there
3640 * still aren't any free vnodes, panic. Hopefully we'll get a
3641 * panic log to tell us why we ran out.
3642 */
3643 panic("vnode table is full\n");
3644 }
3645
3646 delay_for_interval(1, 1000 * 1000);
3647 goto retry;
3648 #endif
3649
3650 *vpp = NULL;
3651 return (ENFILE);
3652 }
3653 steal_this_vp:
3654 vpid = vp->v_id;
3655
3656 vnode_list_remove_locked(vp);
3657
3658 vnode_list_unlock();
3659
3660 vnode_lock_spin(vp);
3661
3662 /*
3663 * We could wait for the vnode_lock after removing the vp from the freelist
3664 * and the vid is bumped only at the very end of reclaim. So it is possible
3665 * that we are looking at a vnode that is being terminated. If so skip it.
3666 */
3667 if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
3668 VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
3669 /*
3670 * we lost the race between dropping the list lock
3671 * and picking up the vnode_lock... someone else
3672 * used this vnode and it is now in a new state
3673 * so we need to go back and try again
3674 */
3675 vnode_unlock(vp);
3676 goto retry;
3677 }
3678 if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) {
3679 /*
3680 * we did a vnode_rele_ext that asked for
3681 * us not to reenter the filesystem during
3682 * the release even though VL_NEEDINACTIVE was
3683 * set... we'll do it here by doing a
3684 * vnode_get/vnode_put
3685 *
3686 * pick up an iocount so that we can call
3687 * vnode_put and drive the VNOP_INACTIVE...
3688 * vnode_put will either leave us off
3689 * the freelist if a new ref comes in,
3690 * or put us back on the end of the freelist
3691 * or recycle us if we were marked for termination...
3692 * so we'll just go grab a new candidate
3693 */
3694 vp->v_iocount++;
3695 #ifdef JOE_DEBUG
3696 record_vp(vp, 1);
3697 #endif
3698 vnode_put_locked(vp);
3699 vnode_unlock(vp);
3700 goto retry;
3701 }
3702 OSAddAtomicLong(1, &num_reusedvnodes);
3703
3704 /* Checks for anyone racing us for recycle */
3705 if (vp->v_type != VBAD) {
3706 if (vp->v_lflag & VL_DEAD)
3707 panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
3708 vnode_lock_convert(vp);
3709 (void)vnode_reclaim_internal(vp, 1, 1, 0);
3710
3711 if ((VONLIST(vp)))
3712 panic("new_vnode(%p): vp on list", vp);
3713 if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
3714 (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH)))
3715 panic("new_vnode(%p): free vnode still referenced", vp);
3716 if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0))
3717 panic("new_vnode(%p): vnode seems to be on mount list", vp);
3718 if ( !LIST_EMPTY(&vp->v_nclinks) || !LIST_EMPTY(&vp->v_ncchildren))
3719 panic("new_vnode(%p): vnode still hooked into the name cache", vp);
3720 }
3721
3722 #ifndef __LP64__
3723 if (vp->v_unsafefs) {
3724 l_unsafefs = vp->v_unsafefs;
3725 vp->v_unsafefs = (struct unsafe_fsnode *)NULL;
3726 }
3727 #endif /* __LP64__ */
3728
3729 #if CONFIG_MACF
3730 /*
3731 * We should never see VL_LABELWAIT or VL_LABEL here,
3732 * as those operations hold a reference.
3733 */
3734 assert ((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
3735 assert ((vp->v_lflag & VL_LABEL) != VL_LABEL);
3736 if (vp->v_lflag & VL_LABELED) {
3737 vnode_lock_convert(vp);
3738 mac_vnode_label_recycle(vp);
3739 } else if (mac_vnode_label_init_needed(vp)) {
3740 vnode_lock_convert(vp);
3741 mac_vnode_label_init(vp);
3742 }
3743
3744 #endif /* MAC */
3745
3746 vp->v_iocount = 1;
3747 vp->v_lflag = 0;
3748 vp->v_writecount = 0;
3749 vp->v_references = 0;
3750 vp->v_iterblkflags = 0;
3751 vp->v_flag = VSTANDARD;
3752 /* vbad vnodes can point to dead_mountp */
3753 vp->v_mount = NULL;
3754 vp->v_defer_reclaimlist = (vnode_t)0;
3755
3756 vnode_unlock(vp);
3757
3758 #ifndef __LP64__
3759 if (l_unsafefs) {
3760 lck_mtx_destroy(&l_unsafefs->fsnodelock, vnode_lck_grp);
3761 FREE_ZONE((void *)l_unsafefs, sizeof(struct unsafe_fsnode), M_UNSAFEFS);
3762 }
3763 #endif /* __LP64__ */
3764
3765 done:
3766 *vpp = vp;
3767
3768 return (0);
3769 }
3770
3771 void
3772 vnode_lock(vnode_t vp)
3773 {
3774 lck_mtx_lock(&vp->v_lock);
3775 }
3776
3777 void
3778 vnode_lock_spin(vnode_t vp)
3779 {
3780 lck_mtx_lock_spin(&vp->v_lock);
3781 }
3782
3783 void
3784 vnode_unlock(vnode_t vp)
3785 {
3786 lck_mtx_unlock(&vp->v_lock);
3787 }
3788
3789
3790
3791 int
3792 vnode_get(struct vnode *vp)
3793 {
3794 int retval;
3795
3796 vnode_lock_spin(vp);
3797 retval = vnode_get_locked(vp);
3798 vnode_unlock(vp);
3799
3800 return(retval);
3801 }
3802
3803 int
3804 vnode_get_locked(struct vnode *vp)
3805 {
3806 #if DIAGNOSTIC
3807 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
3808 #endif
3809 if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
3810 return(ENOENT);
3811 }
3812 vp->v_iocount++;
3813 #ifdef JOE_DEBUG
3814 record_vp(vp, 1);
3815 #endif
3816 return (0);
3817 }
3818
3819 /*
3820 * vnode_getwithvid() cuts in line in front of a vnode drain (that is,
3821 * while the vnode is draining, but at no point after that) to prevent
3822 * deadlocks when getting vnodes from filesystem hashes while holding
3823 * resources that may prevent other iocounts from being released.
3824 */
3825 int
3826 vnode_getwithvid(vnode_t vp, uint32_t vid)
3827 {
3828 return(vget_internal(vp, vid, ( VNODE_NODEAD | VNODE_WITHID | VNODE_DRAINO )));
3829 }
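/*
 * Illustrative sketch (not part of the original source) of the usual
 * lookup-with-vid pattern these routines support, as used by the hash
 * lookups elsewhere in this file (e.g. check_mountedon()):
 *
 *	uint32_t vid;
 *
 *	hash_lock();				// hypothetical hash synchronization
 *	vp  = find_in_hash(...);		// hypothetical hash lookup
 *	vid = vp->v_id;				// capture the identity
 *	hash_unlock();				// drop the lock before blocking
 *
 *	if (vnode_getwithvid(vp, vid))		// fails if vp was recycled meanwhile
 *		goto retry;
 *	... use vp with an iocount held ...
 *	vnode_put(vp);
 *
 * hash_lock/hash_unlock/find_in_hash stand in for whatever locking the
 * caller's hash actually uses.
 */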
3830
3831 /*
3832 * vnode_getwithvid_drainok() is like vnode_getwithvid(), but *does* block behind a vnode
3833 * drain; it exists for use in the VFS name cache, where we really do want to block behind
3834 * vnode drain to prevent holding off an unmount.
3835 */
3836 int
3837 vnode_getwithvid_drainok(vnode_t vp, uint32_t vid)
3838 {
3839 return(vget_internal(vp, vid, ( VNODE_NODEAD | VNODE_WITHID )));
3840 }
3841
3842 int
3843 vnode_getwithref(vnode_t vp)
3844 {
3845 return(vget_internal(vp, 0, 0));
3846 }
3847
3848
3849 __private_extern__ int
3850 vnode_getalways(vnode_t vp)
3851 {
3852 return(vget_internal(vp, 0, VNODE_ALWAYS));
3853 }
3854
3855 int
3856 vnode_put(vnode_t vp)
3857 {
3858 int retval;
3859
3860 vnode_lock_spin(vp);
3861 retval = vnode_put_locked(vp);
3862 vnode_unlock(vp);
3863
3864 return(retval);
3865 }
3866
3867 int
3868 vnode_put_locked(vnode_t vp)
3869 {
3870 vfs_context_t ctx = vfs_context_current(); /* hoist outside loop */
3871
3872 #if DIAGNOSTIC
3873 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
3874 #endif
3875 retry:
3876 if (vp->v_iocount < 1)
3877 panic("vnode_put(%p): iocount < 1", vp);
3878
3879 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
3880 vnode_dropiocount(vp);
3881 return(0);
3882 }
3883 if ((vp->v_lflag & (VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE) {
3884
3885 vp->v_lflag &= ~VL_NEEDINACTIVE;
3886 vnode_unlock(vp);
3887
3888 VNOP_INACTIVE(vp, ctx);
3889
3890 vnode_lock_spin(vp);
3891 /*
3892 * because we had to drop the vnode lock before calling
3893 * VNOP_INACTIVE, the state of this vnode may have changed...
3894 * we may pick up both VL_MARKTERM and either
3895 * an iocount or a usecount while in the VNOP_INACTIVE call
3896 * we don't want to call vnode_reclaim_internal on a vnode
3897 * that has active references on it... so loop back around
3898 * and reevaluate the state
3899 */
3900 goto retry;
3901 }
3902 vp->v_lflag &= ~VL_NEEDINACTIVE;
3903
3904 if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
3905 vnode_lock_convert(vp);
3906 vnode_reclaim_internal(vp, 1, 1, 0);
3907 }
3908 vnode_dropiocount(vp);
3909 vnode_list_add(vp);
3910
3911 return(0);
3912 }
3913
3914 /* is vnode_t in use by others? */
3915 int
3916 vnode_isinuse(vnode_t vp, int refcnt)
3917 {
3918 return(vnode_isinuse_locked(vp, refcnt, 0));
3919 }
3920
3921
3922 static int
3923 vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
3924 {
3925 int retval = 0;
3926
3927 if (!locked)
3928 vnode_lock_spin(vp);
3929 if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
3930 retval = 1;
3931 goto out;
3932 }
3933 if (vp->v_type == VREG) {
3934 retval = ubc_isinuse_locked(vp, refcnt, 1);
3935 }
3936
3937 out:
3938 if (!locked)
3939 vnode_unlock(vp);
3940 return(retval);
3941 }
3942
3943
3944 /* resume vnode_t */
3945 errno_t
3946 vnode_resume(vnode_t vp)
3947 {
3948 if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
3949
3950 vnode_lock_spin(vp);
3951 vp->v_lflag &= ~VL_SUSPENDED;
3952 vp->v_owner = NULL;
3953 vnode_unlock(vp);
3954
3955 wakeup(&vp->v_iocount);
3956 }
3957 return(0);
3958 }
3959
3960 /* suspend vnode_t
3961 * Please do not use on more than one vnode at a time as it may
3962 * cause deadlocks.
3963 * xxx should we explicitly prevent this from happening?
3964 */
3965
3966 errno_t
3967 vnode_suspend(vnode_t vp)
3968 {
3969 if (vp->v_lflag & VL_SUSPENDED) {
3970 return(EBUSY);
3971 }
3972
3973 vnode_lock_spin(vp);
3974
3975 /*
3976 * xxx is this sufficient to check if a vnode_drain is
3977 * in progress?
3978 */
3979
3980 if (vp->v_owner == NULL) {
3981 vp->v_lflag |= VL_SUSPENDED;
3982 vp->v_owner = current_thread();
3983 }
3984 vnode_unlock(vp);
3985
3986 return(0);
3987 }
3988
3989
3990
3991 static errno_t
3992 vnode_drain(vnode_t vp)
3993 {
3994
3995 if (vp->v_lflag & VL_DRAIN) {
3996 panic("vnode_drain: recursive drain");
3997 return(ENOENT);
3998 }
3999 vp->v_lflag |= VL_DRAIN;
4000 vp->v_owner = current_thread();
4001
4002 while (vp->v_iocount > 1)
4003 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);
4004
4005 vp->v_lflag &= ~VL_DRAIN;
4006
4007 return(0);
4008 }
4009
4010
4011 /*
4012 * if the number of recent references via vnode_getwithvid or vnode_getwithref
4013 * exceeds this threshold, then 'UN-AGE' the vnode by removing it from
4014 * the LRU list if it's currently on it... once the iocount and usecount both drop
4015 * to 0, it will get put back on the end of the list, effectively making it younger
4016 * this allows us to keep actively referenced vnodes in the list without having
4017 * to constantly remove and add to the list each time a vnode w/o a usecount is
4018 * referenced which costs us taking and dropping a global lock twice.
4019 */
4020 #define UNAGE_THRESHHOLD 25
4021
4022 errno_t
4023 vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
4024 {
4025 int nodead = vflags & VNODE_NODEAD;
4026 int nosusp = vflags & VNODE_NOSUSPEND;
4027 int always = vflags & VNODE_ALWAYS;
4028 int beatdrain = vflags & VNODE_DRAINO;
4029
4030 for (;;) {
4031 /*
4032 * if it is a dead vnode with deadfs
4033 */
4034 if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
4035 return(ENOENT);
4036 }
4037 /*
4038 * will return VL_DEAD ones
4039 */
4040 if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0 ) {
4041 break;
4042 }
4043 /*
4044 * if suspended vnodes are to be failed
4045 */
4046 if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
4047 return(ENOENT);
4048 }
4049 /*
4050 * if you are the owner of drain/suspend/termination, you can acquire the iocount
4051 * check for VL_TERMINATE; it does not set owner
4052 */
4053 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
4054 (vp->v_owner == current_thread())) {
4055 break;
4056 }
4057
4058 if (always != 0)
4059 break;
4060
4061 /*
4062 * In some situations, we want to get an iocount
4063 * even if the vnode is draining to prevent deadlock,
4064 * e.g. if we're in the filesystem, potentially holding
4065 * resources that could prevent other iocounts from
4066 * being released.
4067 */
4068 if (beatdrain && (vp->v_lflag & VL_DRAIN)) {
4069 break;
4070 }
4071
4072 vnode_lock_convert(vp);
4073
4074 if (vp->v_lflag & VL_TERMINATE) {
4075 vp->v_lflag |= VL_TERMWANT;
4076
4077 msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vnode getiocount", NULL);
4078 } else
4079 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
4080 }
4081 if (((vflags & VNODE_WITHID) != 0) && vid != vp->v_id) {
4082 return(ENOENT);
4083 }
4084 if (++vp->v_references >= UNAGE_THRESHHOLD) {
4085 vp->v_references = 0;
4086 vnode_list_remove(vp);
4087 }
4088 vp->v_iocount++;
4089 #ifdef JOE_DEBUG
4090 record_vp(vp, 1);
4091 #endif
4092 return(0);
4093 }
4094
4095 static void
4096 vnode_dropiocount (vnode_t vp)
4097 {
4098 if (vp->v_iocount < 1)
4099 panic("vnode_dropiocount(%p): v_iocount < 1", vp);
4100
4101 vp->v_iocount--;
4102 #ifdef JOE_DEBUG
4103 record_vp(vp, -1);
4104 #endif
4105 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1))
4106 wakeup(&vp->v_iocount);
4107 }
4108
4109
4110 void
4111 vnode_reclaim(struct vnode * vp)
4112 {
4113 vnode_reclaim_internal(vp, 0, 0, 0);
4114 }
4115
4116 __private_extern__
4117 void
4118 vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
4119 {
4120 int isfifo = 0;
4121
4122 if (!locked)
4123 vnode_lock(vp);
4124
4125 if (vp->v_lflag & VL_TERMINATE) {
4126 panic("vnode reclaim in progress");
4127 }
4128 vp->v_lflag |= VL_TERMINATE;
4129
4130 vn_clearunionwait(vp, 1);
4131
4132 vnode_drain(vp);
4133
4134 isfifo = (vp->v_type == VFIFO);
4135
4136 if (vp->v_type != VBAD)
4137 vgone(vp, flags); /* clean and reclaim the vnode */
4138
4139 /*
4140 * give the vnode a new identity so that vnode_getwithvid will fail
4141 * on any stale cache accesses...
4142 * grab the list_lock so that if we're in "new_vnode"
4143 * behind the list_lock trying to steal this vnode, the v_id is stable...
4144 * once new_vnode drops the list_lock, it will block trying to take
4145 * the vnode lock until we release it... at that point it will evaluate
4146 * whether the v_id has changed
4147 * also need to make sure that the vnode isn't on a list where "new_vnode"
4148 * can find it after the v_id has been bumped until we are completely done
4149 * with the vnode (i.e. putting it back on a list has to be the very last
4150 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
4151 * are holding an io_count on the vnode... they need to drop the io_count
4152 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
4153 * they are completely done with the vnode
4154 */
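/*
 * Illustrative sketch (not part of the build) of why bumping v_id
 * invalidates stale references: a holder of a cached (vp, vid) pair
 * revalidates it with vnode_getwithvid(); once v_id is incremented
 * below, that call returns ENOENT instead of handing out an iocount
 * on a recycled vnode.  The 'cached' structure here is hypothetical:
 *
 *	vnode_t  vp  = cached->vp;
 *	uint32_t vid = cached->vid;	// snapshot taken before reclaim
 *
 *	if (vnode_getwithvid(vp, vid) == 0) {
 *		// identity unchanged; safe to use vp
 *		vnode_put(vp);
 *	} else {
 *		// vnode was reclaimed/reused; discard the stale entry
 *	}
 */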
4155 vnode_list_lock();
4156
4157 vnode_list_remove_locked(vp);
4158 vp->v_id++;
4159
4160 vnode_list_unlock();
4161
4162 if (isfifo) {
4163 struct fifoinfo * fip;
4164
4165 fip = vp->v_fifoinfo;
4166 vp->v_fifoinfo = NULL;
4167 FREE(fip, M_TEMP);
4168 }
4169 vp->v_type = VBAD;
4170
4171 if (vp->v_data)
4172 panic("vnode_reclaim_internal: cleaned vnode isn't");
4173 if (vp->v_numoutput)
4174 panic("vnode_reclaim_internal: clean vnode has pending I/O's");
4175 if (UBCINFOEXISTS(vp))
4176 panic("vnode_reclaim_internal: ubcinfo not cleaned");
4177 if (vp->v_parent)
4178 panic("vnode_reclaim_internal: vparent not removed");
4179 if (vp->v_name)
4180 panic("vnode_reclaim_internal: vname not removed");
4181
4182 vp->v_socket = NULL;
4183
4184 vp->v_lflag &= ~VL_TERMINATE;
4185 vp->v_owner = NULL;
4186
4187 KNOTE(&vp->v_knotes, NOTE_REVOKE);
4188
4189 /* Make sure that when we reuse the vnode, no knotes left over */
4190 klist_init(&vp->v_knotes);
4191
4192 if (vp->v_lflag & VL_TERMWANT) {
4193 vp->v_lflag &= ~VL_TERMWANT;
4194 wakeup(&vp->v_lflag);
4195 }
4196 if (!reuse) {
4197 /*
4198 * make sure we get on the
4199 * dead list if appropriate
4200 */
4201 vnode_list_add(vp);
4202 }
4203 if (!locked)
4204 vnode_unlock(vp);
4205 }
4206
4207 /* USAGE:
4208 * The following api creates a vnode and associates all the parameters specified in the vnode_fsparam
4209 * structure, and returns a vnode handle with a reference. Device aliasing is handled here, so checkalias
4210 * is obsoleted by this.
4211 */
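/*
 * Illustrative sketch (not compiled) of how a file system might call
 * vnode_create(); the mount, parent, componentname and the 'mynode'/'myfs'
 * names below are hypothetical:
 *
 *	struct vnode_fsparam vfsp;
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	bzero(&vfsp, sizeof(vfsp));
 *	vfsp.vnfs_mp = mp;
 *	vfsp.vnfs_vtype = VREG;
 *	vfsp.vnfs_str = "myfs";
 *	vfsp.vnfs_dvp = dvp;			// parent, if known
 *	vfsp.vnfs_fsnode = mynode;		// per-FS private node
 *	vfsp.vnfs_vops = myfs_vnodeop_p;
 *	vfsp.vnfs_filesize = mynode->size;
 *	vfsp.vnfs_cnp = cnp;			// for the name cache entry
 *	vfsp.vnfs_flags = VNFS_ADDFSREF;
 *
 *	error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vp);
 *	// on success, vp is returned with an iocount held
 */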
4212 int
4213 vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
4214 {
4215 int error;
4216 int insert = 1;
4217 vnode_t vp;
4218 vnode_t nvp;
4219 vnode_t dvp;
4220 struct uthread *ut;
4221 struct componentname *cnp;
4222 struct vnode_fsparam *param = (struct vnode_fsparam *)data;
4223 #if CONFIG_TRIGGERS
4224 struct vnode_trigger_param *tinfo = NULL;
4225 #endif
4226 if (param == NULL)
4227 return (EINVAL);
4228
4229 #if CONFIG_TRIGGERS
4230 if ((flavor == VNCREATE_TRIGGER) && (size == VNCREATE_TRIGGER_SIZE)) {
4231 tinfo = (struct vnode_trigger_param *)data;
4232
4233 /* Validate trigger vnode input */
4234 if ((param->vnfs_vtype != VDIR) ||
4235 (tinfo->vnt_resolve_func == NULL) ||
4236 (tinfo->vnt_flags & ~VNT_VALID_MASK)) {
4237 return (EINVAL);
4238 }
4239 /* Fall through a normal create (params will be the same) */
4240 flavor = VNCREATE_FLAVOR;
4241 size = VCREATESIZE;
4242 }
4243 #endif
4244 if ((flavor != VNCREATE_FLAVOR) || (size != VCREATESIZE))
4245 return (EINVAL);
4246
4247 if ( (error = new_vnode(&vp)) )
4248 return(error);
4249
4250 dvp = param->vnfs_dvp;
4251 cnp = param->vnfs_cnp;
4252
4253 vp->v_op = param->vnfs_vops;
4254 vp->v_type = param->vnfs_vtype;
4255 vp->v_data = param->vnfs_fsnode;
4256
4257 if (param->vnfs_markroot)
4258 vp->v_flag |= VROOT;
4259 if (param->vnfs_marksystem)
4260 vp->v_flag |= VSYSTEM;
4261 if (vp->v_type == VREG) {
4262 error = ubc_info_init_withsize(vp, param->vnfs_filesize);
4263 if (error) {
4264 #ifdef JOE_DEBUG
4265 record_vp(vp, 1);
4266 #endif
4267 vp->v_mount = NULL;
4268 vp->v_op = dead_vnodeop_p;
4269 vp->v_tag = VT_NON;
4270 vp->v_data = NULL;
4271 vp->v_type = VBAD;
4272 vp->v_lflag |= VL_DEAD;
4273
4274 vnode_put(vp);
4275 return(error);
4276 }
4277 }
4278 #ifdef JOE_DEBUG
4279 record_vp(vp, 1);
4280 #endif
4281
4282 #if CONFIG_TRIGGERS
4283 /*
4284 * For trigger vnodes, attach trigger info to vnode
4285 */
4286 if ((vp->v_type == VDIR) && (tinfo != NULL)) {
4287 /*
4288 * Note: has a side effect of incrementing trigger count on the
4289 * mount if successful, which we would need to undo on a
4290 * subsequent failure.
4291 */
4292 #ifdef JOE_DEBUG
4293 record_vp(vp, -1);
4294 #endif
4295 error = vnode_resolver_create(param->vnfs_mp, vp, tinfo, FALSE);
4296 if (error) {
4297 printf("vnode_create: vnode_resolver_create() err %d\n", error);
4298 vp->v_mount = NULL;
4299 vp->v_op = dead_vnodeop_p;
4300 vp->v_tag = VT_NON;
4301 vp->v_data = NULL;
4302 vp->v_type = VBAD;
4303 vp->v_lflag |= VL_DEAD;
4304 #ifdef JOE_DEBUG
4305 record_vp(vp, 1);
4306 #endif
4307 vnode_put(vp);
4308 return (error);
4309 }
4310 }
4311 #endif
4312 if (vp->v_type == VCHR || vp->v_type == VBLK) {
4313
4314 vp->v_tag = VT_DEVFS; /* callers will reset if needed (bdevvp) */
4315
4316 if ( (nvp = checkalias(vp, param->vnfs_rdev)) ) {
4317 /*
4318 * if checkalias returns a vnode, it will be locked
4319 *
4320 * first get rid of the unneeded vnode we acquired
4321 */
4322 vp->v_data = NULL;
4323 vp->v_op = spec_vnodeop_p;
4324 vp->v_type = VBAD;
4325 vp->v_lflag = VL_DEAD;
4326 vp->v_data = NULL;
4327 vp->v_tag = VT_NON;
4328 vnode_put(vp);
4329
4330 /*
4331 * switch to aliased vnode and finish
4332 * preparing it
4333 */
4334 vp = nvp;
4335
4336 vclean(vp, 0);
4337 vp->v_op = param->vnfs_vops;
4338 vp->v_type = param->vnfs_vtype;
4339 vp->v_data = param->vnfs_fsnode;
4340 vp->v_lflag = 0;
4341 vp->v_mount = NULL;
4342 insmntque(vp, param->vnfs_mp);
4343 insert = 0;
4344 vnode_unlock(vp);
4345 }
4346 }
4347
4348 if (vp->v_type == VFIFO) {
4349 struct fifoinfo *fip;
4350
4351 MALLOC(fip, struct fifoinfo *,
4352 sizeof(*fip), M_TEMP, M_WAITOK);
4353 bzero(fip, sizeof(struct fifoinfo ));
4354 vp->v_fifoinfo = fip;
4355 }
4356 /* The file system must pass the address of the location where
4357 * it stores the vnode pointer. Once we add the vnode to the mount
4358 * list and the name cache it becomes discoverable, so the file system
4359 * node must have its connection to the vnode set up by then.
4360 */
4361 *vpp = vp;
4362
4363 /* Add fs named reference. */
4364 if (param->vnfs_flags & VNFS_ADDFSREF) {
4365 vp->v_lflag |= VNAMED_FSHASH;
4366 }
4367 if (param->vnfs_mp) {
4368 if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL)
4369 vp->v_flag |= VLOCKLOCAL;
4370 if (insert) {
4371 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4372 panic("insmntque: vp on the free list\n");
4373
4374 /*
4375 * enter in mount vnode list
4376 */
4377 insmntque(vp, param->vnfs_mp);
4378 }
4379 #ifndef __LP64__
4380 if ((param->vnfs_mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE) == 0) {
4381 MALLOC_ZONE(vp->v_unsafefs, struct unsafe_fsnode *,
4382 sizeof(struct unsafe_fsnode), M_UNSAFEFS, M_WAITOK);
4383 vp->v_unsafefs->fsnode_count = 0;
4384 vp->v_unsafefs->fsnodeowner = (void *)NULL;
4385 lck_mtx_init(&vp->v_unsafefs->fsnodelock, vnode_lck_grp, vnode_lck_attr);
4386 }
4387 #endif /* __LP64__ */
4388 }
4389 if (dvp && vnode_ref(dvp) == 0) {
4390 vp->v_parent = dvp;
4391 }
4392 if (cnp) {
4393 if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
4394 /*
4395 * enter into name cache
4396 * we've got the info to enter it into the name cache now
4397 * cache_enter_create will pick up an extra reference on
4398 * the name entered into the string cache
4399 */
4400 vp->v_name = cache_enter_create(dvp, vp, cnp);
4401 } else
4402 vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
4403
4404 if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED)
4405 vp->v_flag |= VISUNION;
4406 }
4407 if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
4408 /*
4409 * this vnode is being created as cacheable in the name cache
4410 * this allows us to re-enter it in the cache
4411 */
4412 vp->v_flag |= VNCACHEABLE;
4413 }
4414 ut = get_bsdthread_info(current_thread());
4415
4416 if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
4417 (ut->uu_flag & UT_RAGE_VNODES)) {
4418 /*
4419 * process has indicated that it wants any
4420 * vnodes created on its behalf to be rapidly
4421 * aged to reduce the impact on the cached set
4422 * of vnodes
4423 */
4424 vp->v_flag |= VRAGE;
4425 }
4426 return (0);
4427 }
4428
4429 int
4430 vnode_addfsref(vnode_t vp)
4431 {
4432 vnode_lock_spin(vp);
4433 if (vp->v_lflag & VNAMED_FSHASH)
4434 panic("add_fsref: vp already has named reference");
4435 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4436 panic("addfsref: vp on the free list\n");
4437 vp->v_lflag |= VNAMED_FSHASH;
4438 vnode_unlock(vp);
4439 return(0);
4440
4441 }
4442 int
4443 vnode_removefsref(vnode_t vp)
4444 {
4445 vnode_lock_spin(vp);
4446 if ((vp->v_lflag & VNAMED_FSHASH) == 0)
4447 panic("remove_fsref: no named reference");
4448 vp->v_lflag &= ~VNAMED_FSHASH;
4449 vnode_unlock(vp);
4450 return(0);
4451
4452 }
4453
4454
4455 int
4456 vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg)
4457 {
4458 mount_t mp;
4459 int ret = 0;
4460 fsid_t * fsid_list;
4461 int count, actualcount, i;
4462 void * allocmem;
4463 int indx_start, indx_stop, indx_incr;
4464
4465 count = mount_getvfscnt();
4466 count += 10;
4467
4468 fsid_list = (fsid_t *)kalloc(count * sizeof(fsid_t));
4469 allocmem = (void *)fsid_list;
4470
4471 actualcount = mount_fillfsids(fsid_list, count);
4472
4473 /*
4474 * Establish the iteration direction
4475 * VFS_ITERATE_TAIL_FIRST overrides default head first order (oldest first)
4476 */
4477 if (flags & VFS_ITERATE_TAIL_FIRST) {
4478 indx_start = actualcount - 1;
4479 indx_stop = -1;
4480 indx_incr = -1;
4481 } else /* Head first by default */ {
4482 indx_start = 0;
4483 indx_stop = actualcount;
4484 indx_incr = 1;
4485 }
4486
4487 for (i=indx_start; i != indx_stop; i += indx_incr) {
4488
4489 /* obtain the mount point with iteration reference */
4490 mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);
4491
4492 if(mp == (struct mount *)0)
4493 continue;
4494 mount_lock(mp);
4495 if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
4496 mount_unlock(mp);
4497 mount_iterdrop(mp);
4498 continue;
4499
4500 }
4501 mount_unlock(mp);
4502
4503 /* invoke the callout for this mount point */
4504 ret = callout(mp, arg);
4505
4506 mount_iterdrop(mp);
4507
4508 switch (ret) {
4509 case VFS_RETURNED:
4510 case VFS_RETURNED_DONE:
4511 if (ret == VFS_RETURNED_DONE) {
4512 ret = 0;
4513 goto out;
4514 }
4515 break;
4516
4517 case VFS_CLAIMED_DONE:
4518 ret = 0;
4519 goto out;
4520 case VFS_CLAIMED:
4521 default:
4522 break;
4523 }
4524 ret = 0;
4525 }
4526
4527 out:
4528 kfree(allocmem, (count * sizeof(fsid_t)));
4529 return (ret);
4530 }
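/*
 * Illustrative sketch (not compiled) of a vfs_iterate() callout; the
 * callout name and its use of 'arg' are hypothetical:
 *
 *	static int
 *	count_local_mounts(mount_t mp, void *arg)
 *	{
 *		int *countp = (int *)arg;
 *
 *		if (vfs_flags(mp) & MNT_LOCAL)
 *			(*countp)++;
 *		return (VFS_RETURNED);		// keep iterating
 *	}
 *
 *	int count = 0;
 *	vfs_iterate(0, count_local_mounts, &count);
 *
 * Returning VFS_RETURNED_DONE or VFS_CLAIMED_DONE from the callout ends
 * the iteration early, as handled by the switch above.
 */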
4531
4532 /*
4533 * Update the vfsstatfs structure in the mountpoint.
4534 * MAC: Parameter eventtype added, indicating whether the event that
4535 * triggered this update came from user space, via a system call
4536 * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
4537 */
4538 int
4539 vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
4540 {
4541 struct vfs_attr va;
4542 int error;
4543
4544 /*
4545 * Request the attributes we want to propagate into
4546 * the per-mount vfsstat structure.
4547 */
4548 VFSATTR_INIT(&va);
4549 VFSATTR_WANTED(&va, f_iosize);
4550 VFSATTR_WANTED(&va, f_blocks);
4551 VFSATTR_WANTED(&va, f_bfree);
4552 VFSATTR_WANTED(&va, f_bavail);
4553 VFSATTR_WANTED(&va, f_bused);
4554 VFSATTR_WANTED(&va, f_files);
4555 VFSATTR_WANTED(&va, f_ffree);
4556 VFSATTR_WANTED(&va, f_bsize);
4557 VFSATTR_WANTED(&va, f_fssubtype);
4558 #if CONFIG_MACF
4559 if (eventtype == VFS_USER_EVENT) {
4560 error = mac_mount_check_getattr(ctx, mp, &va);
4561 if (error != 0)
4562 return (error);
4563 }
4564 #endif
4565
4566 if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
4567 KAUTH_DEBUG("STAT - filesystem returned error %d", error);
4568 return(error);
4569 }
4570
4571 /*
4572 * Unpack into the per-mount structure.
4573 *
4574 * We only overwrite these fields, which are likely to change:
4575 * f_blocks
4576 * f_bfree
4577 * f_bavail
4578 * f_bused
4579 * f_files
4580 * f_ffree
4581 *
4582 * And these which are not, but which the FS has no other way
4583 * of providing to us:
4584 * f_bsize
4585 * f_iosize
4586 * f_fssubtype
4587 *
4588 */
4589 if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
4590 /* 4822056 - protect against malformed server mount */
4591 mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
4592 } else {
4593 mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */
4594 }
4595 if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
4596 mp->mnt_vfsstat.f_iosize = va.f_iosize;
4597 } else {
4598 mp->mnt_vfsstat.f_iosize = 1024 * 1024; /* 1MB sensible I/O size */
4599 }
4600 if (VFSATTR_IS_SUPPORTED(&va, f_blocks))
4601 mp->mnt_vfsstat.f_blocks = va.f_blocks;
4602 if (VFSATTR_IS_SUPPORTED(&va, f_bfree))
4603 mp->mnt_vfsstat.f_bfree = va.f_bfree;
4604 if (VFSATTR_IS_SUPPORTED(&va, f_bavail))
4605 mp->mnt_vfsstat.f_bavail = va.f_bavail;
4606 if (VFSATTR_IS_SUPPORTED(&va, f_bused))
4607 mp->mnt_vfsstat.f_bused = va.f_bused;
4608 if (VFSATTR_IS_SUPPORTED(&va, f_files))
4609 mp->mnt_vfsstat.f_files = va.f_files;
4610 if (VFSATTR_IS_SUPPORTED(&va, f_ffree))
4611 mp->mnt_vfsstat.f_ffree = va.f_ffree;
4612
4613 /* this is unlikely to change, but has to be queried for */
4614 if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype))
4615 mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;
4616
4617 return(0);
4618 }
4619
4620 int
4621 mount_list_add(mount_t mp)
4622 {
4623 int res;
4624
4625 mount_list_lock();
4626 if (system_inshutdown != 0) {
4627 res = -1;
4628 } else {
4629 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
4630 nummounts++;
4631 res = 0;
4632 }
4633 mount_list_unlock();
4634
4635 return res;
4636 }
4637
4638 void
4639 mount_list_remove(mount_t mp)
4640 {
4641 mount_list_lock();
4642 TAILQ_REMOVE(&mountlist, mp, mnt_list);
4643 nummounts--;
4644 mp->mnt_list.tqe_next = NULL;
4645 mp->mnt_list.tqe_prev = NULL;
4646 mount_list_unlock();
4647 }
4648
4649 mount_t
4650 mount_lookupby_volfsid(int volfs_id, int withref)
4651 {
4652 mount_t cur_mount = (mount_t)0;
4653 mount_t mp;
4654
4655 mount_list_lock();
4656 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4657 if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
4658 (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
4659 (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
4660 cur_mount = mp;
4661 if (withref) {
4662 if (mount_iterref(cur_mount, 1)) {
4663 cur_mount = (mount_t)0;
4664 mount_list_unlock();
4665 goto out;
4666 }
4667 }
4668 break;
4669 }
4670 }
4671 mount_list_unlock();
4672 if (withref && (cur_mount != (mount_t)0)) {
4673 mp = cur_mount;
4674 if (vfs_busy(mp, LK_NOWAIT) != 0) {
4675 cur_mount = (mount_t)0;
4676 }
4677 mount_iterdrop(mp);
4678 }
4679 out:
4680 return(cur_mount);
4681 }
4682
4683 mount_t
4684 mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
4685 {
4686 mount_t retmp = (mount_t)0;
4687 mount_t mp;
4688
4689 if (!locked)
4690 mount_list_lock();
4691 TAILQ_FOREACH(mp, &mountlist, mnt_list)
4692 if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
4693 mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
4694 retmp = mp;
4695 if (withref) {
4696 if (mount_iterref(retmp, 1))
4697 retmp = (mount_t)0;
4698 }
4699 goto out;
4700 }
4701 out:
4702 if (!locked)
4703 mount_list_unlock();
4704 return (retmp);
4705 }
4706
4707 errno_t
4708 vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
4709 {
4710 struct nameidata nd;
4711 int error;
4712 u_int32_t ndflags = 0;
4713
4714 if (ctx == NULL) { /* XXX technically an error */
4715 ctx = vfs_context_current();
4716 }
4717
4718 if (flags & VNODE_LOOKUP_NOFOLLOW)
4719 ndflags = NOFOLLOW;
4720 else
4721 ndflags = FOLLOW;
4722
4723 if (flags & VNODE_LOOKUP_NOCROSSMOUNT)
4724 ndflags |= NOCROSSMOUNT;
4725 if (flags & VNODE_LOOKUP_DOWHITEOUT)
4726 ndflags |= DOWHITEOUT;
4727
4728 /* XXX AUDITVNPATH1 needed ? */
4729 NDINIT(&nd, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE,
4730 CAST_USER_ADDR_T(path), ctx);
4731
4732 if ((error = namei(&nd)))
4733 return (error);
4734 *vpp = nd.ni_vp;
4735 nameidone(&nd);
4736
4737 return (0);
4738 }
4739
4740 errno_t
4741 vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
4742 {
4743 struct nameidata nd;
4744 int error;
4745 u_int32_t ndflags = 0;
4746 int lflags = flags;
4747
4748 if (ctx == NULL) { /* XXX technically an error */
4749 ctx = vfs_context_current();
4750 }
4751
4752 if (fmode & O_NOFOLLOW)
4753 lflags |= VNODE_LOOKUP_NOFOLLOW;
4754
4755 if (lflags & VNODE_LOOKUP_NOFOLLOW)
4756 ndflags = NOFOLLOW;
4757 else
4758 ndflags = FOLLOW;
4759
4760 if (lflags & VNODE_LOOKUP_NOCROSSMOUNT)
4761 ndflags |= NOCROSSMOUNT;
4762 if (lflags & VNODE_LOOKUP_DOWHITEOUT)
4763 ndflags |= DOWHITEOUT;
4764
4765 /* XXX AUDITVNPATH1 needed ? */
4766 NDINIT(&nd, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE,
4767 CAST_USER_ADDR_T(path), ctx);
4768
4769 if ((error = vn_open(&nd, fmode, cmode)))
4770 *vpp = NULL;
4771 else
4772 *vpp = nd.ni_vp;
4773
4774 return (error);
4775 }
4776
4777 errno_t
4778 vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
4779 {
4780 int error;
4781
4782 if (ctx == NULL) {
4783 ctx = vfs_context_current();
4784 }
4785
4786 error = vn_close(vp, flags, ctx);
4787 vnode_put(vp);
4788 return (error);
4789 }
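/*
 * Illustrative sketch (not compiled) of the vnode_open()/vnode_close()
 * KPI pair defined above; the path is hypothetical:
 *
 *	vnode_t vp = NULLVP;
 *	vfs_context_t ctx = vfs_context_current();
 *	int error;
 *
 *	error = vnode_open("/var/tmp/example", FREAD, 0, 0, &vp, ctx);
 *	if (error == 0) {
 *		// ... read from vp (e.g. via vn_rdwr) ...
 *		error = vnode_close(vp, FREAD, ctx);
 *	}
 *
 * vnode_open() hands back the vnode with an iocount from vn_open();
 * vnode_close() does the vn_close() and then drops that iocount via
 * vnode_put(), as seen above.
 */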
4790
4791 /*
4792 * Returns: 0 Success
4793 * vnode_getattr:???
4794 */
4795 errno_t
4796 vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
4797 {
4798 struct vnode_attr va;
4799 int error;
4800
4801 VATTR_INIT(&va);
4802 VATTR_WANTED(&va, va_data_size);
4803 error = vnode_getattr(vp, &va, ctx);
4804 if (!error)
4805 *sizep = va.va_data_size;
4806 return(error);
4807 }
4808
4809 errno_t
4810 vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
4811 {
4812 struct vnode_attr va;
4813
4814 VATTR_INIT(&va);
4815 VATTR_SET(&va, va_data_size, size);
4816 va.va_vaflags = ioflag & 0xffff;
4817 return(vnode_setattr(vp, &va, ctx));
4818 }
4819
4820 static int
4821 vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
4822 {
4823 /* Only use compound VNOP for compound operation */
4824 if (vnode_compound_open_available(dvp) && ((flags & VN_CREATE_DOOPEN) != 0)) {
4825 *vpp = NULLVP;
4826 return VNOP_COMPOUND_OPEN(dvp, vpp, ndp, VNOP_COMPOUND_OPEN_DO_CREATE, fmode, statusp, vap, ctx);
4827 } else {
4828 return VNOP_CREATE(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4829 }
4830 }
4831
4832 /*
4833 * Create a filesystem object of arbitrary type with arbitrary attributes in
4834 * the specified directory with the specified name.
4835 *
4836 * Parameters: dvp Pointer to the vnode of the directory
4837 * in which to create the object.
4838 * vpp Pointer to the area into which to
4839 * return the vnode of the created object.
4840 * cnp Component name pointer from the namei
4841 * data structure, containing the name to
4842 * use for the create object.
4843 * vap Pointer to the vnode_attr structure
4844 * describing the object to be created,
4845 * including the type of object.
4846 * flags VN_* flags controlling ACL inheritance
4847 * and whether or not authorization is to
4848 * be required for the operation.
4849 *
4850 * Returns: 0 Success
4851 * !0 errno value
4852 *
4853 * Implicit: *vpp Contains the vnode of the object that
4854 * was created, if successful.
4855 * *cnp May be modified by the underlying VFS.
4856 * *vap May be modified by the underlying VFS;
4857 * it may also be modified by either ACL
4858 * inheritance or the fallback attribute
4859 * handling in this function, and may still
4860 * be modified, even if the operation is
4861 * unsuccessful.
4862 *
4863 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
4864 *
4865 * Modification of '*cnp' and '*vap' by the underlying VFS is
4866 * strongly discouraged.
4867 *
4868 * XXX: This function is a 'vn_*' function; it belongs in vfs_vnops.c
4869 *
4870 * XXX: We should enumerate the possible errno values here, and where
4871 * in the code they originated.
4872 */
4873 errno_t
4874 vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
4875 {
4876 errno_t error, old_error;
4877 vnode_t vp = (vnode_t)0;
4878 boolean_t batched;
4879 struct componentname *cnp;
4880 uint32_t defaulted;
4881
4882 cnp = &ndp->ni_cnd;
4883 error = 0;
4884 batched = namei_compound_available(dvp, ndp) ? TRUE : FALSE;
4885
4886 KAUTH_DEBUG("%p CREATE - '%s'", dvp, cnp->cn_nameptr);
4887
4888 if (flags & VN_CREATE_NOINHERIT)
4889 vap->va_vaflags |= VA_NOINHERIT;
4890 if (flags & VN_CREATE_NOAUTH)
4891 vap->va_vaflags |= VA_NOAUTH;
4892 /*
4893 * Handle ACL inheritance, initialize vap.
4894 */
4895 error = vn_attribute_prepare(dvp, vap, &defaulted, ctx);
4896 if (error) {
4897 return error;
4898 }
4899
4900 if (vap->va_type != VREG && (fmode != 0 || (flags & VN_CREATE_DOOPEN) || statusp)) {
4901 panic("Open parameters, but not a regular file.");
4902 }
4903 if ((fmode != 0) && ((flags & VN_CREATE_DOOPEN) == 0)) {
4904 panic("Mode for open, but not trying to open...");
4905 }
4906
4907 /*
4908 * Create the requested node.
4909 */
4910 switch(vap->va_type) {
4911 case VREG:
4912 error = vn_create_reg(dvp, vpp, ndp, vap, flags, fmode, statusp, ctx);
4913 break;
4914 case VDIR:
4915 error = vn_mkdir(dvp, vpp, ndp, vap, ctx);
4916 break;
4917 case VSOCK:
4918 case VFIFO:
4919 case VBLK:
4920 case VCHR:
4921 error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
4922 break;
4923 default:
4924 panic("vnode_create: unknown vtype %d", vap->va_type);
4925 }
4926 if (error != 0) {
4927 KAUTH_DEBUG("%p CREATE - error %d returned by filesystem", dvp, error);
4928 goto out;
4929 }
4930
4931 vp = *vpp;
4932 old_error = error;
4933
4934 #if CONFIG_MACF
4935 if (!(flags & VN_CREATE_NOLABEL)) {
4936 error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
4937 if (error)
4938 goto error;
4939 }
4940 #endif
4941
4942 /*
4943 * If some of the requested attributes weren't handled by the VNOP,
4944 * use our fallback code.
4945 */
4946 if (!VATTR_ALL_SUPPORTED(vap) && *vpp) {
4947 KAUTH_DEBUG(" CREATE - doing fallback with ACL %p", vap->va_acl);
4948 error = vnode_setattr_fallback(*vpp, vap, ctx);
4949 }
4950 #if CONFIG_MACF
4951 error:
4952 #endif
4953 if ((error != 0) && (vp != (vnode_t)0)) {
4954
4955 /* If we've done a compound open, close */
4956 if (batched && (old_error == 0) && (vap->va_type == VREG)) {
4957 VNOP_CLOSE(vp, fmode, ctx);
4958 }
4959
4960 /* Need to provide notifications if a create succeeded */
4961 if (!batched) {
4962 *vpp = (vnode_t) 0;
4963 vnode_put(vp);
4964 }
4965 }
4966
4967 out:
4968 vn_attribute_cleanup(vap, defaulted);
4969
4970 return(error);
4971 }
4972
4973 static kauth_scope_t vnode_scope;
4974 static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
4975 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
4976 static int vnode_authorize_callback_int(__unused kauth_cred_t credential, __unused void *idata, kauth_action_t action,
4977 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
4978
4979 typedef struct _vnode_authorize_context {
4980 vnode_t vp;
4981 struct vnode_attr *vap;
4982 vnode_t dvp;
4983 struct vnode_attr *dvap;
4984 vfs_context_t ctx;
4985 int flags;
4986 int flags_valid;
4987 #define _VAC_IS_OWNER (1<<0)
4988 #define _VAC_IN_GROUP (1<<1)
4989 #define _VAC_IS_DIR_OWNER (1<<2)
4990 #define _VAC_IN_DIR_GROUP (1<<3)
4991 } *vauth_ctx;
4992
4993 void
4994 vnode_authorize_init(void)
4995 {
4996 vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
4997 }
4998
4999 #define VATTR_PREPARE_DEFAULTED_UID 0x1
5000 #define VATTR_PREPARE_DEFAULTED_GID 0x2
5001 #define VATTR_PREPARE_DEFAULTED_MODE 0x4
5002
5003 int
5004 vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
5005 {
5006 kauth_acl_t nacl = NULL, oacl = NULL;
5007 int error;
5008
5009 /*
5010 * Handle ACL inheritance.
5011 */
5012 if (!(vap->va_vaflags & VA_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
5013 /* save the original filesec */
5014 if (VATTR_IS_ACTIVE(vap, va_acl)) {
5015 oacl = vap->va_acl;
5016 }
5017
5018 vap->va_acl = NULL;
5019 if ((error = kauth_acl_inherit(dvp,
5020 oacl,
5021 &nacl,
5022 vap->va_type == VDIR,
5023 ctx)) != 0) {
5024 KAUTH_DEBUG("%p CREATE - error %d processing inheritance", dvp, error);
5025 return(error);
5026 }
5027
5028 /*
5029 * If the generated ACL is NULL, then we can save ourselves some effort
5030 * by clearing the active bit.
5031 */
5032 if (nacl == NULL) {
5033 VATTR_CLEAR_ACTIVE(vap, va_acl);
5034 } else {
5035 vap->va_base_acl = oacl;
5036 VATTR_SET(vap, va_acl, nacl);
5037 }
5038 }
5039
5040 error = vnode_authattr_new_internal(dvp, vap, (vap->va_vaflags & VA_NOAUTH), defaulted_fieldsp, ctx);
5041 if (error) {
5042 vn_attribute_cleanup(vap, *defaulted_fieldsp);
5043 }
5044
5045 return error;
5046 }
5047
5048 void
5049 vn_attribute_cleanup(struct vnode_attr *vap, uint32_t defaulted_fields)
5050 {
5051 /*
5052 * If the caller supplied a filesec in vap, it has been replaced
5053 * now by the post-inheritance copy. We need to put the original back
5054 * and free the inherited product.
5055 */
5056 kauth_acl_t nacl, oacl;
5057
5058 if (VATTR_IS_ACTIVE(vap, va_acl)) {
5059 nacl = vap->va_acl;
5060 oacl = vap->va_base_acl;
5061
5062 if (oacl) {
5063 VATTR_SET(vap, va_acl, oacl);
5064 vap->va_base_acl = NULL;
5065 } else {
5066 VATTR_CLEAR_ACTIVE(vap, va_acl);
5067 }
5068
5069 if (nacl != NULL) {
5070 kauth_acl_free(nacl);
5071 }
5072 }
5073
5074 if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_MODE) != 0) {
5075 VATTR_CLEAR_ACTIVE(vap, va_mode);
5076 }
5077 if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_GID) != 0) {
5078 VATTR_CLEAR_ACTIVE(vap, va_gid);
5079 }
5080 if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_UID) != 0) {
5081 VATTR_CLEAR_ACTIVE(vap, va_uid);
5082 }
5083
5084 return;
5085 }
5086
5087 int
5088 vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, __unused void *reserved)
5089 {
5090 int error = 0;
5091
5092 /*
5093 * Normally, unlinking of directories is not supported.
5094 * However, some file systems may have limited support.
5095 */
5096 if ((vp->v_type == VDIR) &&
5097 !(vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSDIRLINKS)) {
5098 return (EPERM); /* POSIX */
5099 }
5100
5101 /* authorize the delete operation */
5102 #if CONFIG_MACF
5103 if (!error)
5104 error = mac_vnode_check_unlink(ctx, dvp, vp, cnp);
5105 #endif /* MAC */
5106 if (!error)
5107 error = vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
5108
5109 return error;
5110 }
5111
5112 int
5113 vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs_context_t ctx, void *reserved)
5114 {
5115 /* Open of existing case */
5116 kauth_action_t action;
5117 int error = 0;
5118
5119 if (cnp->cn_ndp == NULL) {
5120 panic("NULL ndp");
5121 }
5122 if (reserved != NULL) {
5123 panic("reserved not NULL.");
5124 }
5125
5126 #if CONFIG_MACF
5127 /* XXX may do duplicate work here, but ignore that for now (idempotent) */
5128 if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) {
5129 error = vnode_label(vnode_mount(vp), NULL, vp, NULL, 0, ctx);
5130 if (error)
5131 return (error);
5132 }
5133 #endif
5134
5135 if ( (fmode & O_DIRECTORY) && vp->v_type != VDIR ) {
5136 return (ENOTDIR);
5137 }
5138
5139 if (vp->v_type == VSOCK && vp->v_tag != VT_FDESC) {
5140 return (EOPNOTSUPP); /* Operation not supported on socket */
5141 }
5142
5143 if (vp->v_type == VLNK && (fmode & O_NOFOLLOW) != 0) {
5144 return (ELOOP); /* O_NOFOLLOW was specified and the target is a symbolic link */
5145 }
5146
5147 /* disallow write operations on directories */
5148 if (vnode_isdir(vp) && (fmode & (FWRITE | O_TRUNC))) {
5149 return (EISDIR);
5150 }
5151
5152 if ((cnp->cn_ndp->ni_flag & NAMEI_TRAILINGSLASH)) {
5153 if (vp->v_type != VDIR) {
5154 return (ENOTDIR);
5155 }
5156 }
5157
5158 #if CONFIG_MACF
5159 /* If a file being opened is a shadow file containing
5160 * namedstream data, ignore the macf checks because it
5161 * is a kernel internal file and access should always
5162 * be allowed.
5163 */
5164 if (!(vnode_isshadow(vp) && vnode_isnamedstream(vp))) {
5165 error = mac_vnode_check_open(ctx, vp, fmode);
5166 if (error) {
5167 return (error);
5168 }
5169 }
5170 #endif
5171
5172 /* compute action to be authorized */
5173 action = 0;
5174 if (fmode & FREAD) {
5175 action |= KAUTH_VNODE_READ_DATA;
5176 }
5177 if (fmode & (FWRITE | O_TRUNC)) {
5178 /*
5179 * If we are writing, appending, and not truncating,
5180 * indicate that we are appending so that if the
5181 * UF_APPEND or SF_APPEND bits are set, we do not deny
5182 * the open.
5183 */
5184 if ((fmode & O_APPEND) && !(fmode & O_TRUNC)) {
5185 action |= KAUTH_VNODE_APPEND_DATA;
5186 } else {
5187 action |= KAUTH_VNODE_WRITE_DATA;
5188 }
5189 }
5190 return (vnode_authorize(vp, NULL, action, ctx));
5191 }
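/*
 * Worked example (illustrative only) of the fmode -> kauth action
 * mapping computed above:
 *
 *	open with O_RDWR | O_APPEND	(fmode: FREAD | FWRITE | O_APPEND)
 *		action = KAUTH_VNODE_READ_DATA | KAUTH_VNODE_APPEND_DATA
 *		so append-only files (UF_APPEND/SF_APPEND) are not refused
 *
 *	open with O_WRONLY | O_TRUNC	(fmode: FWRITE | O_TRUNC)
 *		action = KAUTH_VNODE_WRITE_DATA
 *		WRITE_DATA, unlike APPEND_DATA, is subject to the
 *		append-only file flags
 */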
5192
5193 int
5194 vn_authorize_create(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
5195 {
5196 /* Creation case */
5197 int error;
5198
5199 if (cnp->cn_ndp == NULL) {
5200 panic("NULL cn_ndp");
5201 }
5202 if (reserved != NULL) {
5203 panic("reserved not NULL.");
5204 }
5205
5206 /* Only validate path for creation if we didn't do a complete lookup */
5207 if (cnp->cn_ndp->ni_flag & NAMEI_UNFINISHED) {
5208 error = lookup_validate_creation_path(cnp->cn_ndp);
5209 if (error)
5210 return (error);
5211 }
5212
5213 #if CONFIG_MACF
5214 error = mac_vnode_check_create(ctx, dvp, cnp, vap);
5215 if (error)
5216 return (error);
5217 #endif /* CONFIG_MACF */
5218
5219 return (vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx));
5220 }
5221
5222 int
5223 vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
5224 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5225 vfs_context_t ctx, void *reserved)
5226 {
5227 int error = 0;
5228 int moving = 0;
5229
5230 if (reserved != NULL) {
5231 panic("Passed something other than NULL as reserved field!");
5232 }
5233
5234 /*
5235 * Avoid renaming "." and "..".
5236 *
5237 * XXX No need to check for this in the FS. We should always have the leaves
5238 * in VFS in this case.
5239 */
5240 if (fvp->v_type == VDIR &&
5241 ((fdvp == fvp) ||
5242 (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
5243 ((fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT)) ) {
5244 error = EINVAL;
5245 goto out;
5246 }
5247
5248 if (tvp == NULLVP && vnode_compound_rename_available(tdvp)) {
5249 error = lookup_validate_creation_path(tcnp->cn_ndp);
5250 if (error)
5251 goto out;
5252 }
5253
5254 /***** <MACF> *****/
5255 #if CONFIG_MACF
5256 error = mac_vnode_check_rename_from(ctx, fdvp, fvp, fcnp);
5257 if (error)
5258 goto out;
5259 #endif
5260
5261 #if CONFIG_MACF
5262 error = mac_vnode_check_rename_to(ctx,
5263 tdvp, tvp, fdvp == tdvp, tcnp);
5264 if (error)
5265 goto out;
5266 #endif
5267 /***** </MACF> *****/
5268
5269 /***** <MiscChecks> *****/
5270 if (tvp != NULL) {
5271 if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
5272 error = ENOTDIR;
5273 goto out;
5274 } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
5275 error = EISDIR;
5276 goto out;
5277 }
5278 }
5279
5280 if (fvp == tdvp) {
5281 error = EINVAL;
5282 goto out;
5283 }
5284
5285 /*
5286 * The following edge case is caught here:
5287 * (to cannot be a descendent of from)
5288 *
5289 * o fdvp
5290 * /
5291 * /
5292 * o fvp
5293 * \
5294 * \
5295 * o tdvp
5296 * /
5297 * /
5298 * o tvp
5299 */
5300 if (tdvp->v_parent == fvp) {
5301 error = EINVAL;
5302 goto out;
5303 }
5304 /***** </MiscChecks> *****/
5305
5306 /***** <Kauth> *****/
5307
5308 error = 0;
5309 if ((tvp != NULL) && vnode_isdir(tvp)) {
5310 if (tvp != fdvp)
5311 moving = 1;
5312 } else if (tdvp != fdvp) {
5313 moving = 1;
5314 }
5315
5316
5317 /*
5318 * must have delete rights to remove the old name even in
5319 * the simple case of fdvp == tdvp.
5320 *
5321 * If fvp is a directory, and we are changing its parent,
5322 * then we also need rights to rewrite its ".." entry as well.
5323 */
5324 if (vnode_isdir(fvp)) {
5325 if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0)
5326 goto out;
5327 } else {
5328 if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx)) != 0)
5329 goto out;
5330 }
5331 if (moving) {
5332 /* moving into tdvp or tvp, must have rights to add */
5333 if ((error = vnode_authorize(((tvp != NULL) && vnode_isdir(tvp)) ? tvp : tdvp,
5334 NULL,
5335 vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE,
5336 ctx)) != 0) {
5337 goto out;
5338 }
5339 } else {
5340 /* node staying in same directory, must be allowed to add new name */
5341 if ((error = vnode_authorize(fdvp, NULL,
5342 vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, ctx)) != 0)
5343 goto out;
5344 }
5345 /* overwriting tvp */
5346 if ((tvp != NULL) && !vnode_isdir(tvp) &&
5347 ((error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx)) != 0)) {
5348 goto out;
5349 }
5350
5351 /***** </Kauth> *****/
5352
5353 /* XXX more checks? */
5354 out:
5355 return error;
5356 }
5357
5358 int
5359 vn_authorize_mkdir(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
5360 {
5361 int error;
5362
5363 if (reserved != NULL) {
5364 panic("reserved not NULL in vn_authorize_mkdir()");
5365 }
5366
5367 /* XXX A hack for now, to make shadow files work */
5368 if (cnp->cn_ndp == NULL) {
5369 return 0;
5370 }
5371
5372 if (vnode_compound_mkdir_available(dvp)) {
5373 error = lookup_validate_creation_path(cnp->cn_ndp);
5374 if (error)
5375 goto out;
5376 }
5377
5378 #if CONFIG_MACF
5379 error = mac_vnode_check_create(ctx,
5380 dvp, cnp, vap);
5381 if (error)
5382 goto out;
5383 #endif
5384
5385 /* authorize addition of a directory to the parent */
5386 if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0)
5387 goto out;
5388
5389 out:
5390 return error;
5391 }
5392
5393 int
5394 vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved)
5395 {
5396 int error;
5397
5398 if (reserved != NULL) {
5399 panic("Non-NULL reserved argument to vn_authorize_rmdir()");
5400 }
5401
5402 if (vp->v_type != VDIR) {
5403 /*
5404 * rmdir only deals with directories
5405 */
5406 return ENOTDIR;
5407 }
5408
5409 if (dvp == vp) {
5410 /*
5411 * No rmdir "." please.
5412 */
5413 return EINVAL;
5414 }
5415
5416 #if CONFIG_MACF
5417 error = mac_vnode_check_unlink(ctx, dvp,
5418 vp, cnp);
5419 if (error)
5420 return error;
5421 #endif
5422
5423 return vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
5424 }
5425
5426 /*
5427 * Authorize an operation on a vnode.
5428 *
5429 * This is KPI, but here because it needs vnode_scope.
5430 *
5431 * Returns: 0 Success
5432 * kauth_authorize_action:EPERM ...
5433 * xlate => EACCES Permission denied
5434 * kauth_authorize_action:0 Success
5435 * kauth_authorize_action: Depends on callback return; this is
5436 * usually only vnode_authorize_callback(),
5437 * but may include other listerners, if any
5438 * exist.
5439 * EROFS
5440 * EACCES
5441 * EPERM
5442 * ???
5443 */
5444 int
5445 vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
5446 {
5447 int error, result;
5448
5449 /*
5450 * We can't authorize against a dead vnode; allow all operations through so that
5451 * the correct error can be returned.
5452 */
5453 if (vp->v_type == VBAD)
5454 return(0);
5455
5456 error = 0;
5457 result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
5458 (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
5459 if (result == EPERM) /* traditional behaviour */
5460 result = EACCES;
5461 /* did the lower layers give a better error return? */
5462 if ((result != 0) && (error != 0))
5463 return(error);
5464 return(result);
5465 }
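/*
 * Illustrative sketch (not compiled) of a typical vnode_authorize()
 * call by a caller about to read from a file:
 *
 *	vfs_context_t ctx = vfs_context_current();
 *	int error;
 *
 *	error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, ctx);
 *	if (error)			// EACCES, EROFS, ...
 *		return (error);
 *
 * The parent vnode (dvp) only needs to be supplied for actions such as
 * KAUTH_VNODE_DELETE that also evaluate the directory's attributes.
 */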
5466
5467 /*
5468 * Test for vnode immutability.
5469 *
5470 * The 'append' flag is set when the authorization request is constrained
5471 * to operations which only request the right to append to a file.
5472 *
5473 * The 'ignore' flag is set when an operation modifying the immutability flags
5474 * is being authorized. We check the system securelevel to determine which
5475 * immutability flags we can ignore.
5476 */
5477 static int
5478 vnode_immutable(struct vnode_attr *vap, int append, int ignore)
5479 {
5480 int mask;
5481
5482 /* start with all bits precluding the operation */
5483 mask = IMMUTABLE | APPEND;
5484
5485 /* if appending only, remove the append-only bits */
5486 if (append)
5487 mask &= ~APPEND;
5488
5489 /* ignore only set when authorizing flags changes */
5490 if (ignore) {
5491 if (securelevel <= 0) {
5492 /* in insecure state, flags do not inhibit changes */
5493 mask = 0;
5494 } else {
5495 /* in secure state, user flags don't inhibit */
5496 mask &= ~(UF_IMMUTABLE | UF_APPEND);
5497 }
5498 }
5499 KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
5500 if ((vap->va_flags & mask) != 0)
5501 return(EPERM);
5502 return(0);
5503 }
5504
5505 static int
5506 vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
5507 {
5508 int result;
5509
5510 /* default assumption is not-owner */
5511 result = 0;
5512
5513 /*
5514 * If the filesystem has given us a UID, we treat this as authoritative.
5515 */
5516 if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
5517 result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
5518 }
5519 /* we could test the owner UUID here if we had a policy for it */
5520
5521 return(result);
5522 }
5523
5524 /*
5525 * vauth_node_group
5526 *
5527 * Description: Ask if a cred is a member of the group owning the vnode object
5528 *
5529 * Parameters: vap vnode attribute
5530 * vap->va_gid group owner of vnode object
5531 * cred credential to check
5532 * ismember pointer to where to put the answer
5533 * idontknow Return this if we can't get an answer
5534 *
5535 * Returns: 0 Success
5536 * idontknow Can't get information
5537 * kauth_cred_ismember_gid:? Error from kauth subsystem
5538 * kauth_cred_ismember_gid:? Error from kauth subsystem
5539 */
5540 static int
5541 vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow)
5542 {
5543 int error;
5544 int result;
5545
5546 error = 0;
5547 result = 0;
5548
5549 /*
5550 * The caller is expected to have asked the filesystem for a group
5551 * at some point prior to calling this function. The answer may
5552 * have been that there is no group ownership supported for the
5553 * vnode object, in which case we simply return success without claiming membership.
5554 */
5555 if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
5556 error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
5557 /*
5558 * Credentials which are opted into external group membership
5559 * resolution which are not known to the external resolver
5560 * will result in an ENOENT error. We translate this into
5561 * the appropriate 'idontknow' response for our caller.
5562 *
5563 * XXX We do not make a distinction here between an ENOENT
5564 * XXX arising from a response from the external resolver,
5565 * XXX and an ENOENT which is internally generated. This is
5566 * XXX a deficiency of the published kauth_cred_ismember_gid()
5567 * XXX KPI which can not be overcome without new KPI. For
5568 * XXX all currently known cases, however, this will result
5569 * XXX in correct behaviour.
5570 */
5571 if (error == ENOENT)
5572 error = idontknow;
5573 }
5574 /*
5575 * XXX We could test the group UUID here if we had a policy for it,
5576 * XXX but this is problematic from the perspective of synchronizing
5577 * XXX group UUID and POSIX GID ownership of a file and keeping the
5578 * XXX values coherent over time. The problem is that the local
5579 * XXX system will vend transient group UUIDs for unknown POSIX GID
5580 * XXX values, and these are not persistent, whereas storage of values
5581 * XXX is persistent. One potential solution to this is a local
5582 * XXX (persistent) replica of remote directory entries and vended
5583 * XXX local ids in a local directory server (think in terms of a
5584 * XXX caching DNS server).
5585 */
5586
5587 if (!error)
5588 *ismember = result;
5589 return(error);
5590 }
5591
5592 static int
5593 vauth_file_owner(vauth_ctx vcp)
5594 {
5595 int result;
5596
5597 if (vcp->flags_valid & _VAC_IS_OWNER) {
5598 result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
5599 } else {
5600 result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);
5601
5602 /* cache our result */
5603 vcp->flags_valid |= _VAC_IS_OWNER;
5604 if (result) {
5605 vcp->flags |= _VAC_IS_OWNER;
5606 } else {
5607 vcp->flags &= ~_VAC_IS_OWNER;
5608 }
5609 }
5610 return(result);
5611 }
5612
5613
5614 /*
5615 * vauth_file_ingroup
5616 *
5617 * Description: Ask if a user is a member of the group owning the file
5618 *
5619 * Parameters: vcp The vnode authorization context that
5620 * contains the user and file info
5621 * vcp->flags_valid Valid flags
5622 * vcp->flags Flags values
5623 * vcp->vap File vnode attributes
5624 * vcp->ctx VFS Context (for user)
5625 * ismember pointer to where to put the answer
5626 * idontknow Return this if we can't get an answer
5627 *
5628 * Returns: 0 Success
5629 * vauth_node_group:? Error from vauth_node_group()
5630 *
5631 * Implicit returns: *ismember 0 The user is not a group member
5632 * 1 The user is a group member
5633 */
5634 static int
5635 vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
5636 {
5637 int error;
5638
5639 /* Check for a cached answer first, to avoid the check if possible */
5640 if (vcp->flags_valid & _VAC_IN_GROUP) {
5641 *ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
5642 error = 0;
5643 } else {
5644 /* Otherwise, go look for it */
5645 error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember, idontknow);
5646
5647 if (!error) {
5648 /* cache our result */
5649 vcp->flags_valid |= _VAC_IN_GROUP;
5650 if (*ismember) {
5651 vcp->flags |= _VAC_IN_GROUP;
5652 } else {
5653 vcp->flags &= ~_VAC_IN_GROUP;
5654 }
5655 }
5656
5657 }
5658 return(error);
5659 }
5660
5661 static int
5662 vauth_dir_owner(vauth_ctx vcp)
5663 {
5664 int result;
5665
5666 if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
5667 result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
5668 } else {
5669 result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);
5670
5671 /* cache our result */
5672 vcp->flags_valid |= _VAC_IS_DIR_OWNER;
5673 if (result) {
5674 vcp->flags |= _VAC_IS_DIR_OWNER;
5675 } else {
5676 vcp->flags &= ~_VAC_IS_DIR_OWNER;
5677 }
5678 }
5679 return(result);
5680 }
5681
5682 /*
5683 * vauth_dir_ingroup
5684 *
5685 * Description: Ask if a user is a member of the group owning the directory
5686 *
5687 * Parameters: vcp The vnode authorization context that
5688 * contains the user and directory info
5689 * vcp->flags_valid Valid flags
5690 * vcp->flags Flags values
5691 * vcp->dvap Dir vnode attributes
5692 * vcp->ctx VFS Context (for user)
5693 * ismember pointer to where to put the answer
5694 * idontknow Return this if we can't get an answer
5695 *
5696 * Returns: 0 Success
5697 * vauth_node_group:? Error from vauth_node_group()
5698 *
5699 * Implicit returns: *ismember 0 The user is not a group member
5700 * 1 The user is a group member
5701 */
5702 static int
5703 vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
5704 {
5705 int error;
5706
5707 /* Check for a cached answer first, to avoid the check if possible */
5708 if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
5709 *ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
5710 error = 0;
5711 } else {
5712 /* Otherwise, go look for it */
5713 error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember, idontknow);
5714
5715 if (!error) {
5716 /* cache our result */
5717 vcp->flags_valid |= _VAC_IN_DIR_GROUP;
5718 if (*ismember) {
5719 vcp->flags |= _VAC_IN_DIR_GROUP;
5720 } else {
5721 vcp->flags &= ~_VAC_IN_DIR_GROUP;
5722 }
5723 }
5724 }
5725 return(error);
5726 }
5727
5728 /*
5729 * Test the posix permissions in (vap) to determine whether (credential)
5730 * may perform (action)
5731 */
5732 static int
5733 vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
5734 {
5735 struct vnode_attr *vap;
5736 int needed, error, owner_ok, group_ok, world_ok, ismember;
5737 #ifdef KAUTH_DEBUG_ENABLE
5738 const char *where = "uninitialized";
5739 # define _SETWHERE(c) where = c;
5740 #else
5741 # define _SETWHERE(c)
5742 #endif
5743
5744 /* checking file or directory? */
5745 if (on_dir) {
5746 vap = vcp->dvap;
5747 } else {
5748 vap = vcp->vap;
5749 }
5750
5751 error = 0;
5752
5753 /*
5754 * We want to do as little work here as possible. So first we check
5755 * which sets of permissions grant us the access we need, and avoid checking
5756 * whether specific permissions grant access when more generic ones would.
5757 */
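/*
 * Worked example (illustrative only): for a file with mode 0644 and
 * action == VREAD, the owner, group and world sets all grant read,
 * so owner_ok == group_ok == world_ok below and we return without
 * ever resolving ownership or group membership.  Only when the sets
 * disagree (e.g. mode 0640 with action == VREAD) do we fall through
 * to the ownership check and, if still undecided, the more expensive
 * group membership check.
 */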
5758
5759 /* owner permissions */
5760 needed = 0;
5761 if (action & VREAD)
5762 needed |= S_IRUSR;
5763 if (action & VWRITE)
5764 needed |= S_IWUSR;
5765 if (action & VEXEC)
5766 needed |= S_IXUSR;
5767 owner_ok = (needed & vap->va_mode) == needed;
5768
5769 /* group permissions */
5770 needed = 0;
5771 if (action & VREAD)
5772 needed |= S_IRGRP;
5773 if (action & VWRITE)
5774 needed |= S_IWGRP;
5775 if (action & VEXEC)
5776 needed |= S_IXGRP;
5777 group_ok = (needed & vap->va_mode) == needed;
5778
5779 /* world permissions */
5780 needed = 0;
5781 if (action & VREAD)
5782 needed |= S_IROTH;
5783 if (action & VWRITE)
5784 needed |= S_IWOTH;
5785 if (action & VEXEC)
5786 needed |= S_IXOTH;
5787 world_ok = (needed & vap->va_mode) == needed;
5788
5789 /* If granted/denied by all three, we're done */
5790 if (owner_ok && group_ok && world_ok) {
5791 _SETWHERE("all");
5792 goto out;
5793 }
5794 if (!owner_ok && !group_ok && !world_ok) {
5795 _SETWHERE("all");
5796 error = EACCES;
5797 goto out;
5798 }
5799
5800 /* Check ownership (relatively cheap) */
5801 if ((on_dir && vauth_dir_owner(vcp)) ||
5802 (!on_dir && vauth_file_owner(vcp))) {
5803 _SETWHERE("user");
5804 if (!owner_ok)
5805 error = EACCES;
5806 goto out;
5807 }
5808
5809 /* Not owner; if group and world both grant it we're done */
5810 if (group_ok && world_ok) {
5811 _SETWHERE("group/world");
5812 goto out;
5813 }
5814 if (!group_ok && !world_ok) {
5815 _SETWHERE("group/world");
5816 error = EACCES;
5817 goto out;
5818 }
5819
5820 /* Check group membership (most expensive) */
5821 ismember = 0; /* Default to allow, if the target has no group owner */
5822
5823 /*
5824 * In the case we can't get an answer about the user from the call to
5825 * vauth_dir_ingroup() or vauth_file_ingroup(), we want to fail on
5826 * the side of caution, rather than simply granting access, or we will
5827 * fail to correctly implement exclusion groups, so we set the third
5828 * parameter on the basis of the state of 'group_ok'.
5829 */
5830 if (on_dir) {
5831 error = vauth_dir_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
5832 } else {
5833 error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
5834 }
5835 if (error) {
5836 if (!group_ok)
5837 ismember = 1;
5838 error = 0;
5839 }
5840 if (ismember) {
5841 _SETWHERE("group");
5842 if (!group_ok)
5843 error = EACCES;
5844 goto out;
5845 }
5846
5847 /* Not owner, not in group, use world result */
5848 _SETWHERE("world");
5849 if (!world_ok)
5850 error = EACCES;
5851
5852 /* FALLTHROUGH */
5853
5854 out:
5855 KAUTH_DEBUG("%p %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
5856 vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
5857 (action & VREAD) ? "r" : "-",
5858 (action & VWRITE) ? "w" : "-",
5859 (action & VEXEC) ? "x" : "-",
5860 needed,
5861 (vap->va_mode & S_IRUSR) ? "r" : "-",
5862 (vap->va_mode & S_IWUSR) ? "w" : "-",
5863 (vap->va_mode & S_IXUSR) ? "x" : "-",
5864 (vap->va_mode & S_IRGRP) ? "r" : "-",
5865 (vap->va_mode & S_IWGRP) ? "w" : "-",
5866 (vap->va_mode & S_IXGRP) ? "x" : "-",
5867 (vap->va_mode & S_IROTH) ? "r" : "-",
5868 (vap->va_mode & S_IWOTH) ? "w" : "-",
5869 (vap->va_mode & S_IXOTH) ? "x" : "-",
5870 kauth_cred_getuid(vcp->ctx->vc_ucred),
5871 on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
5872 on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
5873 return(error);
5874 }
5875
5876 /*
5877 * Authorize the deletion of the node vp from the directory dvp.
5878 *
5879 * We assume that:
5880 * - Neither the node nor the directory are immutable.
5881 * - The user is not the superuser.
5882 *
5883 * Deletion is not permitted if the directory is sticky and the caller is
5884 * not the owner of the node or directory.
5885 *
5886 * If either the node grants DELETE, or the directory grants DELETE_CHILD,
5887 * the node may be deleted. If neither denies the permission, and the
5888 * caller has Posix write access to the directory, then the node may be
5889 * deleted.
5890 *
5891 * As an optimization, we cache whether or not delete child is permitted
5892 * on directories without the sticky bit set.
5893 */
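/*
 * Worked example (illustrative only) of the sticky-bit rule described
 * above: in a directory like /tmp with mode 01777, a caller that owns
 * neither the target node nor the directory (and is not granted DELETE
 * or DELETE_CHILD by an ACL) gets EACCES even though the directory
 * mode grants world write; a caller that owns either one proceeds to
 * the normal posix write check on the directory instead.
 */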
5894 int
5895 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child);
5896 /*static*/ int
5897 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
5898 {
5899 struct vnode_attr *vap = vcp->vap;
5900 struct vnode_attr *dvap = vcp->dvap;
5901 kauth_cred_t cred = vcp->ctx->vc_ucred;
5902 struct kauth_acl_eval eval;
5903 int error, delete_denied, delete_child_denied, ismember;
5904
5905 /* check the ACL on the directory */
5906 delete_child_denied = 0;
5907 if (!cached_delete_child && VATTR_IS_NOT(dvap, va_acl, NULL)) {
5908 eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
5909 eval.ae_acl = &dvap->va_acl->acl_ace[0];
5910 eval.ae_count = dvap->va_acl->acl_entrycount;
5911 eval.ae_options = 0;
5912 if (vauth_dir_owner(vcp))
5913 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5914 /*
5915 * We use ENOENT as a marker to indicate we could not get
5916 * information in order to delay evaluation until after we
5917 * have the ACL evaluation answer. Previously, we would
5918 * always deny the operation at this point.
5919 */
5920 if ((error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT)
5921 return(error);
5922 if (error == ENOENT)
5923 eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
5924 else if (ismember)
5925 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5926 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5927 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5928 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5929 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5930
5931 /*
5932 * If there is no entry, we are going to defer to other
5933 * authorization mechanisms.
5934 */
5935 error = kauth_acl_evaluate(cred, &eval);
5936
5937 if (error != 0) {
5938 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5939 return(error);
5940 }
5941 switch(eval.ae_result) {
5942 case KAUTH_RESULT_DENY:
5943 delete_child_denied = 1;
5944 break;
5945 /* FALLSTHROUGH */
5946 case KAUTH_RESULT_ALLOW:
5947 KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp);
5948 return(0);
5949 case KAUTH_RESULT_DEFER:
5950 default:
5951 /* Effectively the same as !delete_child_denied */
5952 KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
5953 break;
5954 }
5955 }
5956
5957 /* check the ACL on the node */
5958 delete_denied = 0;
5959 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
5960 eval.ae_requested = KAUTH_VNODE_DELETE;
5961 eval.ae_acl = &vap->va_acl->acl_ace[0];
5962 eval.ae_count = vap->va_acl->acl_entrycount;
5963 eval.ae_options = 0;
5964 if (vauth_file_owner(vcp))
5965 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5966 /*
5967 * We use ENOENT as a marker to indicate we could not get
5968 * information in order to delay evaluation until after we
5969 * have the ACL evaluation answer. Previously, we would
5970 * always deny the operation at this point.
5971 */
5972 if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT)
5973 return(error);
5974 if (error == ENOENT)
5975 eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
5976 else if (ismember)
5977 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5978 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5979 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5980 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5981 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5982
5983 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
5984 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5985 return(error);
5986 }
5987
5988 switch(eval.ae_result) {
5989 case KAUTH_RESULT_DENY:
5990 delete_denied = 1;
5991 break;
5992 case KAUTH_RESULT_ALLOW:
5993 KAUTH_DEBUG("%p ALLOWED - granted by file ACL", vcp->vp);
5994 return(0);
5995 case KAUTH_RESULT_DEFER:
5996 default:
5997 /* Effectively the same as !delete_denied */
5998 KAUTH_DEBUG("%p DEFERRED%s - by file ACL", vcp->vp, delete_denied ? "(DENY)" : "");
5999 break;
6000 }
6001 }
6002
6003 /* if denied by ACL on directory or node, return denial */
6004 if (delete_denied || delete_child_denied) {
6005 KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp);
6006 return(EACCES);
6007 }
6008
6009 /*
6010 * enforce sticky bit behaviour; the cached_delete_child property will
6011 * be false and the dvap contents valid for sticky bit directories;
6012 * this makes us check the directory each time, but it's unavoidable,
6013 * as sticky bit is an exception to caching.
6014 */
6015 if (!cached_delete_child && (dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
6016 KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)",
6017 vcp->vp, cred->cr_posix.cr_uid, vap->va_uid, dvap->va_uid);
6018 return(EACCES);
6019 }
6020
6021 /* check the directory */
6022 if (!cached_delete_child && (error = vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */)) != 0) {
6023 KAUTH_DEBUG("%p DENIED - denied by posix permissions", vcp->vp);
6024 return(error);
6025 }
6026
6027 /* not denied, must be OK */
6028 return(0);
6029 }
6030
6031
6032 /*
6033 * Authorize an operation based on the node's attributes.
6034 */
6035 static int
6036 vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
6037 {
6038 struct vnode_attr *vap = vcp->vap;
6039 kauth_cred_t cred = vcp->ctx->vc_ucred;
6040 struct kauth_acl_eval eval;
6041 int error, ismember;
6042 mode_t posix_action;
6043
6044 /*
6045 * If we are the file owner, we automatically have some rights.
6046 *
6047 * Do we need to expand this to support group ownership?
6048 */
6049 if (vauth_file_owner(vcp))
6050 acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);
6051
6052 /*
6053 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
6054 * mask the latter. If TAKE_OWNERSHIP is requested the caller is about to
6055 * change ownership to themselves, and WRITE_SECURITY is implicitly
6056 * granted to the owner. We need to do this because at this point
6057 * WRITE_SECURITY may not be granted as the caller is not currently
6058 * the owner.
6059 */
6060 if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
6061 (acl_rights & KAUTH_VNODE_WRITE_SECURITY))
6062 acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;
6063
6064 if (acl_rights == 0) {
6065 KAUTH_DEBUG("%p ALLOWED - implicit or no rights required", vcp->vp);
6066 return(0);
6067 }
6068
6069 /* if we have an ACL, evaluate it */
6070 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
6071 eval.ae_requested = acl_rights;
6072 eval.ae_acl = &vap->va_acl->acl_ace[0];
6073 eval.ae_count = vap->va_acl->acl_entrycount;
6074 eval.ae_options = 0;
6075 if (vauth_file_owner(vcp))
6076 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
6077 /*
6078 * We use ENOENT as a marker to indicate we could not get
6079 * information in order to delay evaluation until after we
6080 * have the ACL evaluation answer. Previously, we would
6081 * always deny the operation at this point.
6082 */
6083 if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT)
6084 return(error);
6085 if (error == ENOENT)
6086 eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
6087 else if (ismember)
6088 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
6089 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
6090 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
6091 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
6092 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
6093
6094 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
6095 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
6096 return(error);
6097 }
6098
6099 switch(eval.ae_result) {
6100 case KAUTH_RESULT_DENY:
6101 KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp);
6102 return(EACCES); /* deny, deny, counter-allege */
6103 case KAUTH_RESULT_ALLOW:
6104 KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp);
6105 return(0);
6106 case KAUTH_RESULT_DEFER:
6107 default:
6108 /* Defer; fall through to the residual-rights checks below */
6109 KAUTH_DEBUG("%p DEFERRED - by ACL", vcp->vp);
6110 break;
6111 }
6112
6113 *found_deny = eval.ae_found_deny;
6114
6115 /* fall through and evaluate residual rights */
6116 } else {
6117 /* no ACL, everything is residual */
6118 eval.ae_residual = acl_rights;
6119 }
6120
6121 /*
6122 * Grant residual rights that have been pre-authorized.
6123 */
6124 eval.ae_residual &= ~preauth_rights;
6125
6126 /*
6127 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
6128 */
6129 if (vauth_file_owner(vcp))
6130 eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;
6131
6132 if (eval.ae_residual == 0) {
6133 KAUTH_DEBUG("%p ALLOWED - rights already authorized", vcp->vp);
6134 return(0);
6135 }
6136
6137 /*
6138 * Bail if we have residual rights that can't be granted by posix permissions,
6139 * or aren't presumed granted at this point.
6140 *
6141 * XXX these can be collapsed for performance
6142 */
6143 if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
6144 KAUTH_DEBUG("%p DENIED - CHANGE_OWNER not permitted", vcp->vp);
6145 return(EACCES);
6146 }
6147 if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
6148 KAUTH_DEBUG("%p DENIED - WRITE_SECURITY not permitted", vcp->vp);
6149 return(EACCES);
6150 }
6151
6152 #if DIAGNOSTIC
6153 if (eval.ae_residual & KAUTH_VNODE_DELETE)
6154 panic("vnode_authorize: can't be checking delete permission here");
6155 #endif
6156
6157 /*
6158 * Compute the fallback posix permissions that will satisfy the remaining
6159 * rights.
6160 */
6161 posix_action = 0;
6162 if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
6163 KAUTH_VNODE_LIST_DIRECTORY |
6164 KAUTH_VNODE_READ_EXTATTRIBUTES))
6165 posix_action |= VREAD;
6166 if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
6167 KAUTH_VNODE_ADD_FILE |
6168 KAUTH_VNODE_ADD_SUBDIRECTORY |
6169 KAUTH_VNODE_DELETE_CHILD |
6170 KAUTH_VNODE_WRITE_ATTRIBUTES |
6171 KAUTH_VNODE_WRITE_EXTATTRIBUTES))
6172 posix_action |= VWRITE;
6173 if (eval.ae_residual & (KAUTH_VNODE_EXECUTE |
6174 KAUTH_VNODE_SEARCH))
6175 posix_action |= VEXEC;
6176
6177 if (posix_action != 0) {
6178 return(vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */));
6179 } else {
6180 KAUTH_DEBUG("%p ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
6181 vcp->vp,
6182 (eval.ae_residual & KAUTH_VNODE_READ_DATA)
6183 ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
6184 (eval.ae_residual & KAUTH_VNODE_WRITE_DATA)
6185 ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
6186 (eval.ae_residual & KAUTH_VNODE_EXECUTE)
6187 ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
6188 (eval.ae_residual & KAUTH_VNODE_DELETE)
6189 ? " DELETE" : "",
6190 (eval.ae_residual & KAUTH_VNODE_APPEND_DATA)
6191 ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
6192 (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD)
6193 ? " DELETE_CHILD" : "",
6194 (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES)
6195 ? " READ_ATTRIBUTES" : "",
6196 (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES)
6197 ? " WRITE_ATTRIBUTES" : "",
6198 (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES)
6199 ? " READ_EXTATTRIBUTES" : "",
6200 (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES)
6201 ? " WRITE_EXTATTRIBUTES" : "",
6202 (eval.ae_residual & KAUTH_VNODE_READ_SECURITY)
6203 ? " READ_SECURITY" : "",
6204 (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY)
6205 ? " WRITE_SECURITY" : "",
6206 (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE)
6207 ? " CHECKIMMUTABLE" : "",
6208 (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER)
6209 ? " CHANGE_OWNER" : "");
6210 }
6211
6212 /*
6213 * Lack of required Posix permissions implies no reason to deny access.
6214 */
6215 return(0);
6216 }
6217
6218 /*
6219 * Check for file immutability.
6220 */
6221 static int
6222 vnode_authorize_checkimmutable(vnode_t vp, struct vnode_attr *vap, int rights, int ignore)
6223 {
6224 mount_t mp;
6225 int error;
6226 int append;
6227
6228 /*
6229 * Perform immutability checks for operations that change data.
6230 *
6231 * Sockets, fifos and devices require special handling.
6232 */
6233 switch(vp->v_type) {
6234 case VSOCK:
6235 case VFIFO:
6236 case VBLK:
6237 case VCHR:
6238 /*
6239 * Writing to these nodes does not change the filesystem data,
6240 * so forget that it's being tried.
6241 */
6242 rights &= ~KAUTH_VNODE_WRITE_DATA;
6243 break;
6244 default:
6245 break;
6246 }
6247
6248 error = 0;
6249 if (rights & KAUTH_VNODE_WRITE_RIGHTS) {
6250
6251 /* check per-filesystem options if possible */
6252 mp = vp->v_mount;
6253 if (mp != NULL) {
6254
6255 /* check for no-EA filesystems */
6256 if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) &&
6257 (vfs_flags(mp) & MNT_NOUSERXATTR)) {
6258 KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vp);
6259 error = EACCES; /* User attributes disabled */
6260 goto out;
6261 }
6262 }
6263
6264 /*
6265 * check for file immutability. first, check if the requested rights are
6266 * allowable for a UF_APPEND file.
6267 */
6268 append = 0;
6269 if (vp->v_type == VDIR) {
6270 if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
6271 append = 1;
6272 } else {
6273 if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
6274 append = 1;
6275 }
6276 if ((error = vnode_immutable(vap, append, ignore)) != 0) {
6277 KAUTH_DEBUG("%p DENIED - file is immutable", vp);
6278 goto out;
6279 }
6280 }
6281 out:
6282 return(error);
6283 }
6284
6285 /*
6286 * Handle authorization actions for filesystems that advertise that the
6287 * server will be enforcing.
6288 *
6289 * Returns: 0 Authorization should be handled locally
6290 * 1 Authorization was handled by the FS
6291 *
6292 * Note: Imputed returns will only occur if the authorization request
6293 * was handled by the FS.
6294 *
6295 * Imputed: *resultp, modified Return code from FS when the request is
6296 * handled by the FS.
6297 * VNOP_ACCESS:???
6298 * VNOP_OPEN:???
6299 */
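/*
 * Illustrative note (an editorial addition): a filesystem whose server
 * enforces permissions opts into this path by marking its mount opaque,
 * i.e. setting MNTK_AUTH_OPAQUE (and MNTK_AUTH_OPAQUE_ACCESS when its
 * advisory-access answers are reliable); the kernel exports setters for
 * this (vfs_setauthopaque()/vfs_setauthopaqueaccess() in this era's KPI,
 * though the exact names are stated here as an assumption).  When the
 * flag is set, vnode_authorize_callback_int() routes requests through
 * this function instead of the local owner/mode/ACL evaluation.
 */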
6300 static int
6301 vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
6302 {
6303 int error;
6304
6305 /*
6306 * If the vp is a device node, socket or FIFO it actually represents a local
6307 * endpoint, so we need to handle it locally.
6308 */
6309 switch(vp->v_type) {
6310 case VBLK:
6311 case VCHR:
6312 case VSOCK:
6313 case VFIFO:
6314 return(0);
6315 default:
6316 break;
6317 }
6318
6319 /*
6320 * In the advisory request case, if the filesystem doesn't think it's reliable
6321 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
6322 */
6323 if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount))
6324 return(0);
6325
6326 /*
6327 * Let the filesystem have a say in the matter. It's OK for it to not implement
6328 * VNOP_ACCESS, as most will authorise inline with the actual request.
6329 */
6330 if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
6331 *resultp = error;
6332 KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
6333 return(1);
6334 }
6335
6336 /*
6337 * Typically opaque filesystems do authorisation in-line, but exec is a special case. In
6338 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
6339 */
6340 if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
6341 /* try a VNOP_OPEN for readonly access */
6342 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
6343 *resultp = error;
6344 KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp);
6345 return(1);
6346 }
6347 VNOP_CLOSE(vp, FREAD, ctx);
6348 }
6349
6350 /*
6351 * We don't have any reason to believe that the request has to be denied at this point,
6352 * so go ahead and allow it.
6353 */
6354 *resultp = 0;
6355 KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp);
6356 return(1);
6357 }
6358
6359
6360
6361
6362 /*
6363 * Returns: KAUTH_RESULT_ALLOW
6364 * KAUTH_RESULT_DENY
6365 *
6366 * Imputed: *arg3, modified Error code in the deny case
6367 * EROFS Read-only file system
6368 * EACCES Permission denied
6369 * EPERM Operation not permitted [no execute]
6370 * vnode_getattr:ENOMEM Not enough space [only if has filesec]
6371 * vnode_getattr:???
6372 * vnode_authorize_opaque:*arg2 ???
6373 * vnode_authorize_checkimmutable:???
6374 * vnode_authorize_delete:???
6375 * vnode_authorize_simple:???
6376 */
6377
6378
6379 static int
6380 vnode_authorize_callback(kauth_cred_t cred, void *idata, kauth_action_t action,
6381 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
6382 {
6383 vfs_context_t ctx;
6384 vnode_t cvp = NULLVP;
6385 vnode_t vp, dvp;
6386 int result = KAUTH_RESULT_DENY;
6387 int parent_iocount = 0;
6388 int parent_action; /* In case we need to use namedstream's data fork for cached rights*/
6389
6390 ctx = (vfs_context_t)arg0;
6391 vp = (vnode_t)arg1;
6392 dvp = (vnode_t)arg2;
6393
6394 /*
6395 * if there are 2 vnodes passed in, we don't know at
6396 * this point which rights to look at based on the
6397 * combined action being passed in... defer until later...
6398 * otherwise check the kauth 'rights' cache hung
6399 * off of the vnode we're interested in... if we've already
6400 * been granted the right we're currently interested in,
6401 * we can just return success... otherwise we'll go through
6402 * the process of authorizing the requested right(s)... if that
6403 * succeeds, we'll add the right(s) to the cache.
6404 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
6405 */
6406 if (dvp && vp)
6407 goto defer;
6408 if (dvp) {
6409 cvp = dvp;
6410 } else {
6411 /*
6412 * For named streams on local-authorization volumes, rights are cached on the parent;
6413 * authorization is determined by looking at the parent's properties anyway, so storing
6414 * on the parent means that we don't recompute for the named stream and that if
6415 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
6416 * stream to flush its cache separately. If we miss in the cache, then we authorize
6417 * as if there were no cached rights (passing the named stream vnode and desired rights to
6418 * vnode_authorize_callback_int()).
6419 *
6420 * On an opaquely authorized volume, we don't know the relationship between the
6421 * data fork's properties and the rights granted on a stream. Thus, named stream vnodes
6422 * on such a volume are authorized directly (rather than using the parent) and have their
6423 * own caches. When a named stream vnode is created, we mark the parent as having a named
6424 * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
6425 * find the stream and flush its cache.
6426 */
6427 if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
6428 cvp = vnode_getparent(vp);
6429 if (cvp != NULLVP) {
6430 parent_iocount = 1;
6431 } else {
6432 cvp = NULL;
6433 goto defer; /* If we can't use the parent, take the slow path */
6434 }
6435
6436 /* Have to translate some actions */
6437 parent_action = action;
6438 if (parent_action & KAUTH_VNODE_READ_DATA) {
6439 parent_action &= ~KAUTH_VNODE_READ_DATA;
6440 parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
6441 }
6442 if (parent_action & KAUTH_VNODE_WRITE_DATA) {
6443 parent_action &= ~KAUTH_VNODE_WRITE_DATA;
6444 parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
6445 }
6446
6447 } else {
6448 cvp = vp;
6449 }
6450 }
6451
6452 if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
6453 result = KAUTH_RESULT_ALLOW;
6454 goto out;
6455 }
6456 defer:
6457 result = vnode_authorize_callback_int(cred, idata, action, arg0, arg1, arg2, arg3);
6458
6459 if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP) {
6460 KAUTH_DEBUG("%p - caching action = %x", cvp, action);
6461 vnode_cache_authorized_action(cvp, ctx, action);
6462 }
6463
6464 out:
6465 if (parent_iocount) {
6466 vnode_put(cvp);
6467 }
6468
6469 return result;
6470 }
6471
6472
6473 static int
6474 vnode_authorize_callback_int(__unused kauth_cred_t unused_cred, __unused void *idata, kauth_action_t action,
6475 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
6476 {
6477 struct _vnode_authorize_context auth_context;
6478 vauth_ctx vcp;
6479 vfs_context_t ctx;
6480 vnode_t vp, dvp;
6481 kauth_cred_t cred;
6482 kauth_ace_rights_t rights;
6483 struct vnode_attr va, dva;
6484 int result;
6485 int *errorp;
6486 int noimmutable;
6487 boolean_t parent_authorized_for_delete_child = FALSE;
6488 boolean_t found_deny = FALSE;
6489 boolean_t parent_ref= FALSE;
6490
6491 vcp = &auth_context;
6492 ctx = vcp->ctx = (vfs_context_t)arg0;
6493 vp = vcp->vp = (vnode_t)arg1;
6494 dvp = vcp->dvp = (vnode_t)arg2;
6495 errorp = (int *)arg3;
6496 /*
6497 * Note that we authorize against the context, not the passed cred
6498 * (the same thing anyway)
6499 */
6500 cred = ctx->vc_ucred;
6501
6502 VATTR_INIT(&va);
6503 vcp->vap = &va;
6504 VATTR_INIT(&dva);
6505 vcp->dvap = &dva;
6506
6507 vcp->flags = vcp->flags_valid = 0;
6508
6509 #if DIAGNOSTIC
6510 if ((ctx == NULL) || (vp == NULL) || (cred == NULL))
6511 panic("vnode_authorize: bad arguments (context %p vp %p cred %p)", ctx, vp, cred);
6512 #endif
6513
6514 KAUTH_DEBUG("%p AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
6515 vp, vfs_context_proc(ctx)->p_comm,
6516 (action & KAUTH_VNODE_ACCESS) ? "access" : "auth",
6517 (action & KAUTH_VNODE_READ_DATA) ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
6518 (action & KAUTH_VNODE_WRITE_DATA) ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
6519 (action & KAUTH_VNODE_EXECUTE) ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
6520 (action & KAUTH_VNODE_DELETE) ? " DELETE" : "",
6521 (action & KAUTH_VNODE_APPEND_DATA) ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
6522 (action & KAUTH_VNODE_DELETE_CHILD) ? " DELETE_CHILD" : "",
6523 (action & KAUTH_VNODE_READ_ATTRIBUTES) ? " READ_ATTRIBUTES" : "",
6524 (action & KAUTH_VNODE_WRITE_ATTRIBUTES) ? " WRITE_ATTRIBUTES" : "",
6525 (action & KAUTH_VNODE_READ_EXTATTRIBUTES) ? " READ_EXTATTRIBUTES" : "",
6526 (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES) ? " WRITE_EXTATTRIBUTES" : "",
6527 (action & KAUTH_VNODE_READ_SECURITY) ? " READ_SECURITY" : "",
6528 (action & KAUTH_VNODE_WRITE_SECURITY) ? " WRITE_SECURITY" : "",
6529 (action & KAUTH_VNODE_CHANGE_OWNER) ? " CHANGE_OWNER" : "",
6530 (action & KAUTH_VNODE_NOIMMUTABLE) ? " (noimmutable)" : "",
6531 vnode_isdir(vp) ? "directory" : "file",
6532 vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);
6533
6534 /*
6535 * Extract the control bits from the action, everything else is
6536 * requested rights.
6537 */
6538 noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
6539 rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);
6540
6541 if (rights & KAUTH_VNODE_DELETE) {
6542 #if DIAGNOSTIC
6543 if (dvp == NULL)
6544 panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
6545 #endif
6546 /*
6547 * check to see if we've already authorized the parent
6548 * directory for deletion of its children... if so, we
6549 * can skip a whole bunch of work... we will still have to
6550 * authorize that this specific child can be removed
6551 */
6552 if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE)
6553 parent_authorized_for_delete_child = TRUE;
6554 } else {
6555 dvp = NULL;
6556 }
6557
6558 /*
6559 * Check for read-only filesystems.
6560 */
6561 if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
6562 (vp->v_mount->mnt_flag & MNT_RDONLY) &&
6563 ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
6564 (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
6565 (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) {
6566 result = EROFS;
6567 goto out;
6568 }
6569
6570 /*
6571 * Check for noexec filesystems.
6572 */
6573 if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
6574 result = EACCES;
6575 goto out;
6576 }
6577
6578 /*
6579 * Handle cases related to filesystems with non-local enforcement.
6580 * This call can return 0, in which case we will fall through to perform a
6581 * check based on VNOP_GETATTR data. Otherwise it returns 1 and sets
6582 * an appropriate result, at which point we can return immediately.
6583 */
6584 if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx))
6585 goto out;
6586
6587 /*
6588 * Get vnode attributes and extended security information for the vnode
6589 * and directory if required.
6590 */
6591 VATTR_WANTED(&va, va_mode);
6592 VATTR_WANTED(&va, va_uid);
6593 VATTR_WANTED(&va, va_gid);
6594 VATTR_WANTED(&va, va_flags);
6595 VATTR_WANTED(&va, va_acl);
6596 if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
6597 KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result);
6598 goto out;
6599 }
6600 if (dvp && parent_authorized_for_delete_child == FALSE) {
6601 VATTR_WANTED(&dva, va_mode);
6602 VATTR_WANTED(&dva, va_uid);
6603 VATTR_WANTED(&dva, va_gid);
6604 VATTR_WANTED(&dva, va_flags);
6605 VATTR_WANTED(&dva, va_acl);
6606 if ((result = vnode_getattr(dvp, &dva, ctx)) != 0) {
6607 KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp, result);
6608 goto out;
6609 }
6610 }
6611
6612 /*
6613 * If the vnode is an extended attribute data vnode (eg. a resource fork), *_DATA becomes
6614 * *_EXTATTRIBUTES.
6615 */
6616 if (vnode_isnamedstream(vp)) {
6617 if (rights & KAUTH_VNODE_READ_DATA) {
6618 rights &= ~KAUTH_VNODE_READ_DATA;
6619 rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
6620 }
6621 if (rights & KAUTH_VNODE_WRITE_DATA) {
6622 rights &= ~KAUTH_VNODE_WRITE_DATA;
6623 rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
6624 }
6625 }
6626
6627 /*
6628 * Point 'vp' to the resource fork's parent for ACL checking
6629 */
6630 if (vnode_isnamedstream(vp) &&
6631 (vp->v_parent != NULL) &&
6632 (vget_internal(vp->v_parent, 0, VNODE_NODEAD | VNODE_DRAINO) == 0)) {
6633 parent_ref = TRUE;
6634 vcp->vp = vp = vp->v_parent;
6635 if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
6636 kauth_acl_free(va.va_acl);
6637 VATTR_INIT(&va);
6638 VATTR_WANTED(&va, va_mode);
6639 VATTR_WANTED(&va, va_uid);
6640 VATTR_WANTED(&va, va_gid);
6641 VATTR_WANTED(&va, va_flags);
6642 VATTR_WANTED(&va, va_acl);
6643 if ((result = vnode_getattr(vp, &va, ctx)) != 0)
6644 goto out;
6645 }
6646
6647 /*
6648 * Check for immutability.
6649 *
6650 * In the deletion case, parent directory immutability vetoes specific
6651 * file rights.
6652 */
6653 if ((result = vnode_authorize_checkimmutable(vp, &va, rights, noimmutable)) != 0)
6654 goto out;
6655 if ((rights & KAUTH_VNODE_DELETE) &&
6656 parent_authorized_for_delete_child == FALSE &&
6657 ((result = vnode_authorize_checkimmutable(dvp, &dva, KAUTH_VNODE_DELETE_CHILD, 0)) != 0))
6658 goto out;
6659
6660 /*
6661 * Clear rights that have been authorized by reaching this point, bail if nothing left to
6662 * check.
6663 */
6664 rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE);
6665 if (rights == 0)
6666 goto out;
6667
6668 /*
6669 * If we're not the superuser, authorize based on file properties;
6670 * note that even if parent_authorized_for_delete_child is TRUE, we
6671 * need to check on the node itself.
6672 */
6673 if (!vfs_context_issuser(ctx)) {
6674 /* process delete rights */
6675 if ((rights & KAUTH_VNODE_DELETE) &&
6676 ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0))
6677 goto out;
6678
6679 /* process remaining rights */
6680 if ((rights & ~KAUTH_VNODE_DELETE) &&
6681 (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, &found_deny)) != 0)
6682 goto out;
6683 } else {
6684
6685 /*
6686 * Execute is only granted to root if one of the x bits is set. This check only
6687 * makes sense if the posix mode bits are actually supported.
6688 */
6689 if ((rights & KAUTH_VNODE_EXECUTE) &&
6690 (vp->v_type == VREG) &&
6691 VATTR_IS_SUPPORTED(&va, va_mode) &&
6692 !(va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
6693 result = EPERM;
6694 KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vp, va.va_mode);
6695 goto out;
6696 }
6697
6698 KAUTH_DEBUG("%p ALLOWED - caller is superuser", vp);
6699 }
6700 out:
6701 if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
6702 kauth_acl_free(va.va_acl);
6703 if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL))
6704 kauth_acl_free(dva.va_acl);
6705
6706 if (result) {
6707 if (parent_ref)
6708 vnode_put(vp);
6709 *errorp = result;
6710 KAUTH_DEBUG("%p DENIED - auth denied", vp);
6711 return(KAUTH_RESULT_DENY);
6712 }
6713 if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
6714 /*
6715 * if we were successfully granted the right to search this directory
6716 * and there were NO ACL DENYs for search and the posix permissions also don't
6717 * deny execute, we can synthesize a global right that allows anyone to
6718 * traverse this directory during a pathname lookup without having to
6719 * match the credential associated with this cache of rights.
6720 */
6721 if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
6722 ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
6723 (S_IXUSR | S_IXGRP | S_IXOTH))) {
6724 vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
6725 }
6726 }
6727 if ((rights & KAUTH_VNODE_DELETE) && parent_authorized_for_delete_child == FALSE) {
6728 /*
6729 * parent was successfully and newly authorized for content deletions
6730 * add it to the cache, but only if it doesn't have the sticky
6731 * bit set on it. This same check is done earlier guarding
6732 * fetching of dva, and if we jumped to out without having done
6733 * this, we will have returned already because of a non-zero
6734 * 'result' value.
6735 */
6736 if (VATTR_IS_SUPPORTED(&dva, va_mode) &&
6737 !(dva.va_mode & (S_ISVTX))) {
6738 /* OK to cache delete rights */
6739 KAUTH_DEBUG("%p - caching DELETE_CHILD rights", dvp);
6740 vnode_cache_authorized_action(dvp, ctx, KAUTH_VNODE_DELETE_CHILD);
6741 }
6742 }
6743 if (parent_ref)
6744 vnode_put(vp);
6745 /*
6746 * Note that this implies that we will allow requests for no rights, as well as
6747 * for rights that we do not recognise. There should be none of these.
6748 */
6749 KAUTH_DEBUG("%p ALLOWED - auth granted", vp);
6750 return(KAUTH_RESULT_ALLOW);
6751 }
6752
6753 int
6754 vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
6755 {
6756 return vnode_authattr_new_internal(dvp, vap, noauth, NULL, ctx);
6757 }
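/*
 * Illustrative sketch of a typical caller (an editorial addition; the
 * surrounding create path is an assumption, not taken from this file):
 *
 *	struct vnode_attr va;
 *
 *	VATTR_INIT(&va);
 *	VATTR_SET(&va, va_type, VREG);
 *	VATTR_SET(&va, va_mode, 0644);
 *	if ((error = vnode_authattr_new(dvp, &va, 0, ctx)) != 0)
 *		return (error);
 *	... then hand &va to the filesystem, e.g. via VNOP_CREATE()
 */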
6758
6759 /*
6760 * Check that the attribute information in vattr can be legally applied to
6761 * a new file by the context.
6762 */
6763 static int
6764 vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
6765 {
6766 int error;
6767 int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode;
6768 kauth_cred_t cred;
6769 guid_t changer;
6770 mount_t dmp;
6771
6772 error = 0;
6773
6774 if (defaulted_fieldsp) {
6775 *defaulted_fieldsp = 0;
6776 }
6777
6778 defaulted_owner = defaulted_group = defaulted_mode = 0;
6779
6780 /*
6781 * Require that the filesystem support extended security to apply any.
6782 */
6783 if (!vfs_extendedsecurity(dvp->v_mount) &&
6784 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
6785 error = EINVAL;
6786 goto out;
6787 }
6788
6789 /*
6790 * Default some fields.
6791 */
6792 dmp = dvp->v_mount;
6793
6794 /*
6795 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
6796 * owner takes ownership of all new files.
6797 */
6798 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) {
6799 VATTR_SET(vap, va_uid, dmp->mnt_fsowner);
6800 defaulted_owner = 1;
6801 } else {
6802 if (!VATTR_IS_ACTIVE(vap, va_uid)) {
6803 /* default owner is current user */
6804 VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx)));
6805 defaulted_owner = 1;
6806 }
6807 }
6808
6809 /*
6810 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that
6811 * group takes ownership of all new files.
6812 */
6813 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) {
6814 VATTR_SET(vap, va_gid, dmp->mnt_fsgroup);
6815 defaulted_group = 1;
6816 } else {
6817 if (!VATTR_IS_ACTIVE(vap, va_gid)) {
6818 /* default group comes from parent object, fallback to current user */
6819 struct vnode_attr dva;
6820 VATTR_INIT(&dva);
6821 VATTR_WANTED(&dva, va_gid);
6822 if ((error = vnode_getattr(dvp, &dva, ctx)) != 0)
6823 goto out;
6824 if (VATTR_IS_SUPPORTED(&dva, va_gid)) {
6825 VATTR_SET(vap, va_gid, dva.va_gid);
6826 } else {
6827 VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx)));
6828 }
6829 defaulted_group = 1;
6830 }
6831 }
6832
6833 if (!VATTR_IS_ACTIVE(vap, va_flags))
6834 VATTR_SET(vap, va_flags, 0);
6835
6836 /* default mode is everything, masked with current umask */
6837 if (!VATTR_IS_ACTIVE(vap, va_mode)) {
6838 VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd->fd_cmask);
6839 KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o", vap->va_mode, vfs_context_proc(ctx)->p_fd->fd_cmask);
6840 defaulted_mode = 1;
6841 }
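/*
 * Worked example of the defaulting above (an editorial addition): with
 * ACCESSPERMS == 0777 and a process umask of 022, a create that supplies
 * no explicit va_mode defaults to 0777 & ~022 == 0755.
 */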
6842 /* set timestamps to now */
6843 if (!VATTR_IS_ACTIVE(vap, va_create_time)) {
6844 nanotime(&vap->va_create_time);
6845 VATTR_SET_ACTIVE(vap, va_create_time);
6846 }
6847
6848 /*
6849 * Check for attempts to set nonsensical fields.
6850 */
6851 if (vap->va_active & ~VNODE_ATTR_NEWOBJ) {
6852 error = EINVAL;
6853 KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
6854 vap->va_active & ~VNODE_ATTR_NEWOBJ);
6855 goto out;
6856 }
6857
6858 /*
6859 * Quickly check for the applicability of any enforcement here.
6860 * Tests below maintain the integrity of the local security model.
6861 */
6862 if (vfs_authopaque(dvp->v_mount))
6863 goto out;
6864
6865 /*
6866 * We need to know if the caller is the superuser, or if the work is
6867 * otherwise already authorised.
6868 */
6869 cred = vfs_context_ucred(ctx);
6870 if (noauth) {
6871 /* doing work for the kernel */
6872 has_priv_suser = 1;
6873 } else {
6874 has_priv_suser = vfs_context_issuser(ctx);
6875 }
6876
6877
6878 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6879 if (has_priv_suser) {
6880 if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) {
6881 error = EPERM;
6882 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
6883 goto out;
6884 }
6885 } else {
6886 if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) {
6887 error = EPERM;
6888 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
6889 goto out;
6890 }
6891 }
6892 }
6893
6894 /* if not superuser, validate legality of new-item attributes */
6895 if (!has_priv_suser) {
6896 if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) {
6897 /* setgid? */
6898 if (vap->va_mode & S_ISGID) {
6899 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6900 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
6901 goto out;
6902 }
6903 if (!ismember) {
6904 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", vap->va_gid);
6905 error = EPERM;
6906 goto out;
6907 }
6908 }
6909
6910 /* setuid? */
6911 if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) {
6912 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
6913 error = EPERM;
6914 goto out;
6915 }
6916 }
6917 if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) {
6918 KAUTH_DEBUG(" DENIED - cannot create new item owned by %d", vap->va_uid);
6919 error = EPERM;
6920 goto out;
6921 }
6922 if (!defaulted_group) {
6923 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6924 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
6925 goto out;
6926 }
6927 if (!ismember) {
6928 KAUTH_DEBUG(" DENIED - cannot create new item with group %d - not a member", vap->va_gid);
6929 error = EPERM;
6930 goto out;
6931 }
6932 }
6933
6934 /* initialising owner/group UUID */
6935 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
6936 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
6937 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
6938 /* XXX ENOENT here - no GUID - should perhaps become EPERM */
6939 goto out;
6940 }
6941 if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
6942 KAUTH_DEBUG(" ERROR - cannot create item with supplied owner UUID - not us");
6943 error = EPERM;
6944 goto out;
6945 }
6946 }
6947 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
6948 if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
6949 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
6950 goto out;
6951 }
6952 if (!ismember) {
6953 KAUTH_DEBUG(" ERROR - cannot create item with supplied group UUID - not a member");
6954 error = EPERM;
6955 goto out;
6956 }
6957 }
6958 }
6959 out:
6960 if (defaulted_fieldsp) {
6961 if (defaulted_mode) {
6962 *defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_MODE;
6963 }
6964 if (defaulted_group) {
6965 *defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_GID;
6966 }
6967 if (defaulted_owner) {
6968 *defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_UID;
6969 }
6970 }
6971 return(error);
6972 }
6973
6974 /*
6975 * Check that the attribute information in vap can be legally written by the
6976 * context.
6977 *
6978 * Call this when you're not sure about the vnode_attr; either its contents
6979 * have come from an unknown source, or when they are variable.
6980 *
6981 * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
6982 * must be authorized to be permitted to write the vattr.
6983 */
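/*
 * Illustrative usage sketch (an editorial addition; the setattr sequence
 * shown is an assumption about a typical caller, not taken from this file):
 *
 *	struct vnode_attr va;
 *	kauth_action_t action;
 *
 *	VATTR_INIT(&va);
 *	VATTR_SET(&va, va_mode, 0644);
 *	if ((error = vnode_authattr(vp, &va, &action, ctx)) != 0)
 *		return (error);
 *	if ((action != 0) && ((error = vnode_authorize(vp, NULLVP, action, ctx)) != 0))
 *		return (error);
 *	error = vnode_setattr(vp, &va, ctx);
 */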
6984 int
6985 vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
6986 {
6987 struct vnode_attr ova;
6988 kauth_action_t required_action;
6989 int error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid;
6990 guid_t changer;
6991 gid_t group;
6992 uid_t owner;
6993 mode_t newmode;
6994 kauth_cred_t cred;
6995 uint32_t fdelta;
6996
6997 VATTR_INIT(&ova);
6998 required_action = 0;
6999 error = 0;
7000
7001 /*
7002 * Quickly check for enforcement applicability.
7003 */
7004 if (vfs_authopaque(vp->v_mount))
7005 goto out;
7006
7007 /*
7008 * Check for attempts to set nonsensical fields.
7009 */
7010 if (vap->va_active & VNODE_ATTR_RDONLY) {
7011 KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
7012 error = EINVAL;
7013 goto out;
7014 }
7015
7016 /*
7017 * We need to know if the caller is the superuser.
7018 */
7019 cred = vfs_context_ucred(ctx);
7020 has_priv_suser = kauth_cred_issuser(cred);
7021
7022 /*
7023 * If any of the following are changing, we need information from the old file:
7024 * va_uid
7025 * va_gid
7026 * va_mode
7027 * va_uuuid
7028 * va_guuid
7029 */
7030 if (VATTR_IS_ACTIVE(vap, va_uid) ||
7031 VATTR_IS_ACTIVE(vap, va_gid) ||
7032 VATTR_IS_ACTIVE(vap, va_mode) ||
7033 VATTR_IS_ACTIVE(vap, va_uuuid) ||
7034 VATTR_IS_ACTIVE(vap, va_guuid)) {
7035 VATTR_WANTED(&ova, va_mode);
7036 VATTR_WANTED(&ova, va_uid);
7037 VATTR_WANTED(&ova, va_gid);
7038 VATTR_WANTED(&ova, va_uuuid);
7039 VATTR_WANTED(&ova, va_guuid);
7040 KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
7041 }
7042
7043 /*
7044 * If timestamps are being changed, we need to know who the file is owned
7045 * by.
7046 */
7047 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
7048 VATTR_IS_ACTIVE(vap, va_change_time) ||
7049 VATTR_IS_ACTIVE(vap, va_modify_time) ||
7050 VATTR_IS_ACTIVE(vap, va_access_time) ||
7051 VATTR_IS_ACTIVE(vap, va_backup_time)) {
7052
7053 VATTR_WANTED(&ova, va_uid);
7054 #if 0 /* enable this when we support UUIDs as official owners */
7055 VATTR_WANTED(&ova, va_uuuid);
7056 #endif
7057 KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
7058 }
7059
7060 /*
7061 * If flags are being changed, we need the old flags.
7062 */
7063 if (VATTR_IS_ACTIVE(vap, va_flags)) {
7064 KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
7065 VATTR_WANTED(&ova, va_flags);
7066 }
7067
7068 /*
7069 * If ACLs are being changed, we need the old ACLs.
7070 */
7071 if (VATTR_IS_ACTIVE(vap, va_acl)) {
7072 KAUTH_DEBUG("ATTR - acl changing, fetching old acl");
7073 VATTR_WANTED(&ova, va_acl);
7074 }
7075
7076 /*
7077 * If the size is being set, make sure it's not a directory.
7078 */
7079 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
7080 /* size is meaningless on a directory, don't permit this */
7081 if (vnode_isdir(vp)) {
7082 KAUTH_DEBUG("ATTR - ERROR: size change requested on a directory");
7083 error = EISDIR;
7084 goto out;
7085 }
7086 }
7087
7088 /*
7089 * Get old data.
7090 */
7091 KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active);
7092 if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
7093 KAUTH_DEBUG(" ERROR - got %d trying to get attributes", error);
7094 goto out;
7095 }
7096
7097 /*
7098 * Size changes require write access to the file data.
7099 */
7100 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
7101 /* if we can't get the size, or it's different, we need write access */
7102 KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
7103 required_action |= KAUTH_VNODE_WRITE_DATA;
7104 }
7105
7106 /*
7107 * Changing timestamps?
7108 *
7109 * Note that we are only called to authorize user-requested time changes;
7110 * side-effect time changes are not authorized. Authorisation is only
7111 * required for existing files.
7112 *
7113 * Non-owners are not permitted to change the time on an existing
7114 * file to anything other than the current time.
7115 */
7116 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
7117 VATTR_IS_ACTIVE(vap, va_change_time) ||
7118 VATTR_IS_ACTIVE(vap, va_modify_time) ||
7119 VATTR_IS_ACTIVE(vap, va_access_time) ||
7120 VATTR_IS_ACTIVE(vap, va_backup_time)) {
7121 /*
7122 * The owner and root may set any timestamps they like,
7123 * provided that the file is not immutable. The owner still needs
7124 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
7125 */
7126 if (has_priv_suser || vauth_node_owner(&ova, cred)) {
7127 KAUTH_DEBUG("ATTR - root or owner changing timestamps");
7128 required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES;
7129 } else {
7130 /* just setting the current time? */
7131 if (vap->va_vaflags & VA_UTIMES_NULL) {
7132 KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
7133 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
7134 } else {
7135 KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
7136 error = EACCES;
7137 goto out;
7138 }
7139 }
7140 }
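/*
 * Example of the distinction above (an editorial addition): utimes(2)
 * with a NULL times pointer - i.e. a plain "touch" - sets VA_UTIMES_NULL
 * and so only requires WRITE_ATTRIBUTES, whereas setting an arbitrary
 * timestamp is reserved for the file's owner or the superuser.
 */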
7141
7142 /*
7143 * Changing file mode?
7144 */
7145 if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) {
7146 KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode);
7147
7148 /*
7149 * Mode changes always have the same basic auth requirements.
7150 */
7151 if (has_priv_suser) {
7152 KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
7153 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
7154 } else {
7155 /* need WRITE_SECURITY */
7156 KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
7157 required_action |= KAUTH_VNODE_WRITE_SECURITY;
7158 }
7159
7160 /*
7161 * Can't set the setgid bit if you're not in the group and not root. Have to have
7162 * existing group information in the case we're not setting it right now.
7163 */
7164 if (vap->va_mode & S_ISGID) {
7165 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
7166 if (!has_priv_suser) {
7167 if (VATTR_IS_ACTIVE(vap, va_gid)) {
7168 group = vap->va_gid;
7169 } else if (VATTR_IS_SUPPORTED(&ova, va_gid)) {
7170 group = ova.va_gid;
7171 } else {
7172 KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
7173 error = EINVAL;
7174 goto out;
7175 }
7176 /*
7177 * This might be too restrictive; WRITE_SECURITY might be implied by
7178 * membership in this case, rather than being an additional requirement.
7179 */
7180 if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) {
7181 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, group);
7182 goto out;
7183 }
7184 if (!ismember) {
7185 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", group);
7186 error = EPERM;
7187 goto out;
7188 }
7189 }
7190 }
7191
7192 /*
7193 * Can't set the setuid bit unless you're root or the file's owner.
7194 */
7195 if (vap->va_mode & S_ISUID) {
7196 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
7197 if (!has_priv_suser) {
7198 if (VATTR_IS_ACTIVE(vap, va_uid)) {
7199 owner = vap->va_uid;
7200 } else if (VATTR_IS_SUPPORTED(&ova, va_uid)) {
7201 owner = ova.va_uid;
7202 } else {
7203 KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
7204 error = EINVAL;
7205 goto out;
7206 }
7207 if (owner != kauth_cred_getuid(cred)) {
7208 /*
7209 * We could allow this if WRITE_SECURITY is permitted, perhaps.
7210 */
7211 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
7212 error = EPERM;
7213 goto out;
7214 }
7215 }
7216 }
7217 }
7218
7219 /*
7220 * Validate/mask flags changes. This checks that only the flags in
7221 * the UF_SETTABLE mask are being set, and preserves the flags in
7222 * the SF_SETTABLE case.
7223 *
7224 * Since flags changes may be made in conjunction with other changes,
7225 * we will ask the auth code to ignore immutability in the case that
7226 * the SF_* flags are not set and we are only manipulating the file flags.
7227 *
7228 */
7229 if (VATTR_IS_ACTIVE(vap, va_flags)) {
7230 /* compute changing flags bits */
7231 if (VATTR_IS_SUPPORTED(&ova, va_flags)) {
7232 fdelta = vap->va_flags ^ ova.va_flags;
7233 } else {
7234 fdelta = vap->va_flags;
7235 }
7236
7237 if (fdelta != 0) {
7238 KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
7239 required_action |= KAUTH_VNODE_WRITE_SECURITY;
7240
7241 /* check that changing bits are legal */
7242 if (has_priv_suser) {
7243 /*
7244 * The immutability check will prevent us from clearing the SF_*
7245 * flags unless the system securelevel permits it, so just check
7246 * for legal flags here.
7247 */
7248 if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) {
7249 error = EPERM;
7250 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
7251 goto out;
7252 }
7253 } else {
7254 if (fdelta & ~UF_SETTABLE) {
7255 error = EPERM;
7256 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
7257 goto out;
7258 }
7259 }
7260 /*
7261 * If the caller has the ability to manipulate file flags,
7262 * security is not reduced by ignoring them for this operation.
7263 *
7264 * A more complete test here would consider the 'after' states of the flags
7265 * to determine whether it would permit the operation, but this becomes
7266 * very complex.
7267 *
7268 * Ignoring immutability is conditional on securelevel; this does not bypass
7269 * the SF_* flags if securelevel > 0.
7270 */
7271 required_action |= KAUTH_VNODE_NOIMMUTABLE;
7272 }
7273 }
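/*
 * Example of the split above (an editorial addition): an unprivileged
 * owner may toggle user flags such as UF_HIDDEN, since they fall within
 * UF_SETTABLE, but changing a super-user flag such as SF_IMMUTABLE is
 * rejected with EPERM; even the superuser remains subject to the
 * securelevel-dependent immutability check.
 */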
7274
7275 /*
7276 * Validate ownership information.
7277 */
7278 chowner = 0;
7279 chgroup = 0;
7280 clear_suid = 0;
7281 clear_sgid = 0;
7282
7283 /*
7284 * uid changing
7285 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
7286 * support them in general, and will ignore it if/when we try to set it.
7287 * We might want to clear the uid out of vap completely here.
7288 */
7289 if (VATTR_IS_ACTIVE(vap, va_uid)) {
7290 if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) {
7291 if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) {
7292 KAUTH_DEBUG(" DENIED - non-superuser cannot change ownership to a third party");
7293 error = EPERM;
7294 goto out;
7295 }
7296 chowner = 1;
7297 }
7298 clear_suid = 1;
7299 }
7300
7301 /*
7302 * gid changing
7303 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
7304 * support them in general, and will ignore it if/when we try to set it.
7305 * We might want to clear the gid out of vap completely here.
7306 */
7307 if (VATTR_IS_ACTIVE(vap, va_gid)) {
7308 if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) {
7309 if (!has_priv_suser) {
7310 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
7311 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
7312 goto out;
7313 }
7314 if (!ismember) {
7315 KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group",
7316 ova.va_gid, vap->va_gid);
7317 error = EPERM;
7318 goto out;
7319 }
7320 }
7321 chgroup = 1;
7322 }
7323 clear_sgid = 1;
7324 }
7325
7326 /*
7327 * Owner UUID being set or changed.
7328 */
7329 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
7330 /* if the owner UUID is not actually changing ... */
7331 if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
7332 if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid))
7333 goto no_uuuid_change;
7334
7335 /*
7336 * If the current owner UUID is a null GUID, check
7337 * it against the UUID corresponding to the owner UID.
7338 */
7339 if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
7340 VATTR_IS_SUPPORTED(&ova, va_uid)) {
7341 guid_t uid_guid;
7342
7343 if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
7344 kauth_guid_equal(&vap->va_uuuid, &uid_guid))
7345 goto no_uuuid_change;
7346 }
7347 }
7348
7349 /*
7350 * The owner UUID cannot be set by a non-superuser to anything other than
7351 * their own or a null GUID (to "unset" the owner UUID).
7352 * Note that file systems must be prepared to handle the
7353 * null UUID case in a manner appropriate for that file
7354 * system.
7355 */
7356 if (!has_priv_suser) {
7357 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
7358 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
7359 /* XXX ENOENT here - no UUID - should perhaps become EPERM */
7360 goto out;
7361 }
7362 if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
7363 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
7364 KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us / null");
7365 error = EPERM;
7366 goto out;
7367 }
7368 }
7369 chowner = 1;
7370 clear_suid = 1;
7371 }
7372 no_uuuid_change:
7373 /*
7374 * Group UUID being set or changed.
7375 */
7376 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
7377 /* if the group UUID is not actually changing ... */
7378 if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
7379 if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid))
7380 goto no_guuid_change;
7381
7382 /*
7383 * If the current group UUID is a null UUID, check
7384 * it against the UUID corresponding to the group GID.
7385 */
7386 if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
7387 VATTR_IS_SUPPORTED(&ova, va_gid)) {
7388 guid_t gid_guid;
7389
7390 if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
7391 kauth_guid_equal(&vap->va_guuid, &gid_guid))
7392 goto no_guuid_change;
7393 }
7394 }
7395
7396 /*
7397 * The group UUID cannot be set by a non-superuser to anything other than
7398 * one of which they are a member or a null GUID (to "unset"
7399 * the group UUID).
7400 * Note that file systems must be prepared to handle the
7401 * null UUID case in a manner appropriate for that file
7402 * system.
7403 */
7404 if (!has_priv_suser) {
7405 if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid))
7406 ismember = 1;
7407 else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
7408 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
7409 goto out;
7410 }
7411 if (!ismember) {
7412 KAUTH_DEBUG(" ERROR - cannot set supplied group UUID - not a member / null");
7413 error = EPERM;
7414 goto out;
7415 }
7416 }
7417 chgroup = 1;
7418 }
7419 no_guuid_change:
7420
7421 /*
7422 * Compute authorisation for group/ownership changes.
7423 */
7424 if (chowner || chgroup || clear_suid || clear_sgid) {
7425 if (has_priv_suser) {
7426 KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
7427 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
7428 } else {
7429 if (chowner) {
7430 KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
7431 required_action |= KAUTH_VNODE_TAKE_OWNERSHIP;
7432 }
7433 if (chgroup && !chowner) {
7434 KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
7435 required_action |= KAUTH_VNODE_WRITE_SECURITY;
7436 }
7437
7438 /* clear set-uid and set-gid bits as required by Posix */
7439 if (VATTR_IS_ACTIVE(vap, va_mode)) {
7440 newmode = vap->va_mode;
7441 } else if (VATTR_IS_SUPPORTED(&ova, va_mode)) {
7442 newmode = ova.va_mode;
7443 } else {
7444 KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
7445 newmode = 0;
7446 }
7447 if (newmode & (S_ISUID | S_ISGID)) {
7448 VATTR_SET(vap, va_mode, newmode & ~(S_ISUID | S_ISGID));
7449 KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o", newmode, vap->va_mode);
7450 }
7451 }
7452 }
7453
7454 /*
7455 * Authorise changes in the ACL.
7456 */
7457 if (VATTR_IS_ACTIVE(vap, va_acl)) {
7458
7459 /* no existing ACL */
7460 if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) {
7461
7462 /* adding an ACL */
7463 if (vap->va_acl != NULL) {
7464 required_action |= KAUTH_VNODE_WRITE_SECURITY;
7465 KAUTH_DEBUG("CHMOD - adding ACL");
7466 }
7467
7468 /* removing an existing ACL */
7469 } else if (vap->va_acl == NULL) {
7470 required_action |= KAUTH_VNODE_WRITE_SECURITY;
7471 KAUTH_DEBUG("CHMOD - removing ACL");
7472
7473 /* updating an existing ACL */
7474 } else {
7475 if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) {
7476 /* entry count changed, must be different */
7477 required_action |= KAUTH_VNODE_WRITE_SECURITY;
7478 KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
7479 } else if (vap->va_acl->acl_entrycount > 0) {
7480 /* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
7481 if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0],
7482 sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount)) {
7483 required_action |= KAUTH_VNODE_WRITE_SECURITY;
7484 KAUTH_DEBUG("CHMOD - changing ACL entries");
7485 }
7486 }
7487 }
7488 }
7489
7490 /*
7491 * Other attributes that require authorisation.
7492 */
7493 if (VATTR_IS_ACTIVE(vap, va_encoding))
7494 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
7495
7496 out:
7497 if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL))
7498 kauth_acl_free(ova.va_acl);
7499 if (error == 0)
7500 *actionp = required_action;
7501 return(error);
7502 }
7503
7504 static int
7505 setlocklocal_callback(struct vnode *vp, __unused void *cargs)
7506 {
7507 vnode_lock_spin(vp);
7508 vp->v_flag |= VLOCKLOCAL;
7509 vnode_unlock(vp);
7510
7511 return (VNODE_RETURNED);
7512 }
7513
7514 void
7515 vfs_setlocklocal(mount_t mp)
7516 {
7517 mount_lock_spin(mp);
7518 mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
7519 mount_unlock(mp);
7520
7521 /*
7522 * The number of active vnodes is expected to be
7523 * very small when vfs_setlocklocal is invoked.
7524 */
7525 vnode_iterate(mp, 0, setlocklocal_callback, NULL);
7526 }
7527
7528 void
7529 vfs_setunmountpreflight(mount_t mp)
7530 {
7531 mount_lock_spin(mp);
7532 mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
7533 mount_unlock(mp);
7534 }
7535
7536 void
7537 vfs_setcompoundopen(mount_t mp)
7538 {
7539 mount_lock_spin(mp);
7540 mp->mnt_compound_ops |= COMPOUND_VNOP_OPEN;
7541 mount_unlock(mp);
7542 }
7543
7544 void
7545 vn_setunionwait(vnode_t vp)
7546 {
7547 vnode_lock_spin(vp);
7548 vp->v_flag |= VISUNION;
7549 vnode_unlock(vp);
7550 }
7551
7552
7553 void
7554 vn_checkunionwait(vnode_t vp)
7555 {
7556 vnode_lock_spin(vp);
7557 while ((vp->v_flag & VISUNION) == VISUNION)
7558 msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
7559 vnode_unlock(vp);
7560 }
7561
7562 void
7563 vn_clearunionwait(vnode_t vp, int locked)
7564 {
7565 if (!locked)
7566 vnode_lock_spin(vp);
7567 if((vp->v_flag & VISUNION) == VISUNION) {
7568 vp->v_flag &= ~VISUNION;
7569 wakeup((caddr_t)&vp->v_flag);
7570 }
7571 if (!locked)
7572 vnode_unlock(vp);
7573 }
7574
7575 /*
7576 * XXX - get "don't trigger mounts" flag for thread; used by autofs.
7577 */
7578 extern int thread_notrigger(void);
7579
7580 int
7581 thread_notrigger(void)
7582 {
7583 struct uthread *uth = (struct uthread *)get_bsdthread_info(current_thread());
7584 return (uth->uu_notrigger);
7585 }
7586
7587 /*
7588 * Removes orphaned apple double files during a rmdir
7589 * Works by:
7590 * 1. vnode_suspend().
7591 * 2. Call VNOP_READDIR() till the end of directory is reached.
7592 * 3. Check if the directory entries returned are regular files with name starting with "._". If not, return ENOTEMPTY.
7593 * 4. Continue (2) and (3) till end of directory is reached.
7594 * 5. If all the entries in the directory were files with "._" name, delete all the files.
7595 * 6. vnode_resume()
7596 * 7. If deletion of all files succeeded, call VNOP_RMDIR() again.
7597 */
7598 */
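/*
 * Example of what the scan below accepts (an editorial illustration of
 * the name checks in the READDIR loops): a directory holding only ".",
 * ".." and entries such as "._foo" qualifies and those "._" files are
 * unlinked; any other name, or a doubled prefix such as "._._foo",
 * aborts the operation with ENOTEMPTY.
 */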
7599 errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * restart_flag)
7600 {
7601
7602 #define UIO_BUFF_SIZE 2048
7603 uio_t auio = NULL;
7604 int eofflag, siz = UIO_BUFF_SIZE, nentries = 0;
7605 int open_flag = 0, full_erase_flag = 0;
7606 char uio_buf[ UIO_SIZEOF(1) ];
7607 char *rbuf = NULL, *cpos, *cend;
7608 struct nameidata nd_temp;
7609 struct dirent *dp;
7610 errno_t error;
7611
7612 error = vnode_suspend(vp);
7613
7614 /*
7615 * restart_flag is set so that the calling rmdir sleeps and resets
7616 */
7617 if (error == EBUSY)
7618 *restart_flag = 1;
7619 if (error != 0)
7620 goto outsc;
7621
7622 /*
7623 * set up UIO
7624 */
7625 MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK);
7626 if (rbuf)
7627 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
7628 &uio_buf[0], sizeof(uio_buf));
7629 if (!rbuf || !auio) {
7630 error = ENOMEM;
7631 goto outsc;
7632 }
7633
7634 uio_setoffset(auio,0);
7635
7636 eofflag = 0;
7637
7638 if ((error = VNOP_OPEN(vp, FREAD, ctx)))
7639 goto outsc;
7640 else
7641 open_flag = 1;
7642
7643 /*
7644 * First pass checks if all files are appleDouble files.
7645 */
7646
7647 do {
7648 siz = UIO_BUFF_SIZE;
7649 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
7650 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
7651
7652 if((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx)))
7653 goto outsc;
7654
7655 if (uio_resid(auio) != 0)
7656 siz -= uio_resid(auio);
7657
7658 /*
7659 * Iterate through directory
7660 */
7661 cpos = rbuf;
7662 cend = rbuf + siz;
7663 dp = (struct dirent*) cpos;
7664
7665 if (cpos == cend)
7666 eofflag = 1;
7667
7668 while ((cpos < cend)) {
7669 /*
7670 * Check for . and .. as well as directories
7671 */
7672 if (dp->d_ino != 0 &&
7673 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
7674 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) {
7675 /*
7676 * Check for irregular files and ._ files
7677 * If there is a ._._ file abort the op
7678 */
7679 if ( dp->d_namlen < 2 ||
7680 strncmp(dp->d_name,"._",2) ||
7681 (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._",2))) {
7682 error = ENOTEMPTY;
7683 goto outsc;
7684 }
7685 }
7686 cpos += dp->d_reclen;
7687 dp = (struct dirent*)cpos;
7688 }
7689
7690 /*
7691 * workaround for HFS/NFS setting eofflag before end of file
7692 */
7693 if (vp->v_tag == VT_HFS && nentries > 2)
7694 eofflag=0;
7695
7696 if (vp->v_tag == VT_NFS) {
7697 if (eofflag && !full_erase_flag) {
7698 full_erase_flag = 1;
7699 eofflag = 0;
7700 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
7701 }
7702 else if (!eofflag && full_erase_flag)
7703 full_erase_flag = 0;
7704 }
7705
7706 } while (!eofflag);
7707 /*
7708 * If we've made it here all the files in the dir are ._ files.
7709 * We can delete the files even though the node is suspended
7710 * because we are the owner of the file.
7711 */
7712
7713 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
7714 eofflag = 0;
7715 full_erase_flag = 0;
7716
7717 do {
7718 siz = UIO_BUFF_SIZE;
7719 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
7720 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
7721
7722 error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);
7723
7724 if (error != 0)
7725 goto outsc;
7726
7727 if (uio_resid(auio) != 0)
7728 siz -= uio_resid(auio);
7729
7730 /*
7731 * Iterate through directory
7732 */
7733 cpos = rbuf;
7734 cend = rbuf + siz;
7735 dp = (struct dirent*) cpos;
7736
7737 if (cpos == cend)
7738 eofflag = 1;
7739
7740 while ((cpos < cend)) {
7741 /*
7742 * Check for . and .. as well as directories
7743 */
7744 if (dp->d_ino != 0 &&
7745 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
7746 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
7747 ) {
7748
7749 NDINIT(&nd_temp, DELETE, OP_UNLINK, USEDVP,
7750 UIO_SYSSPACE, CAST_USER_ADDR_T(dp->d_name),
7751 ctx);
7752 nd_temp.ni_dvp = vp;
7753 error = unlink1(ctx, &nd_temp, 0);
7754
7755 if (error && error != ENOENT) {
7756 goto outsc;
7757 }
7758
7759 }
7760 cpos += dp->d_reclen;
7761 dp = (struct dirent*)cpos;
7762 }
7763
7764 /*
7765 * workaround for HFS/NFS setting eofflag before end of file
7766 */
7767 if (vp->v_tag == VT_HFS && nentries > 2)
7768 eofflag=0;
7769
7770 if (vp->v_tag == VT_NFS) {
7771 if (eofflag && !full_erase_flag) {
7772 full_erase_flag = 1;
7773 eofflag = 0;
7774 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
7775 }
7776 else if (!eofflag && full_erase_flag)
7777 full_erase_flag = 0;
7778 }
7779
7780 } while (!eofflag);
7781
7782
7783 error = 0;
7784
7785 outsc:
7786 if (open_flag)
7787 VNOP_CLOSE(vp, FREAD, ctx);
7788
7789 uio_free(auio);
7790 FREE(rbuf, M_TEMP);
7791
7792 vnode_resume(vp);
7793
7794
7795 return(error);
7796
7797 }
7798
7799
7800 void
7801 lock_vnode_and_post(vnode_t vp, int kevent_num)
7802 {
7803 /* Only take the lock if there's something there! */
7804 if (vp->v_knotes.slh_first != NULL) {
7805 vnode_lock(vp);
7806 KNOTE(&vp->v_knotes, kevent_num);
7807 vnode_unlock(vp);
7808 }
7809 }
7810
7811 #ifdef JOE_DEBUG
7812 static void record_vp(vnode_t vp, int count) {
7813 struct uthread *ut;
7814
7815 #if CONFIG_TRIGGERS
7816 if (vp->v_resolve)
7817 return;
7818 #endif
7819 if ((vp->v_flag & VSYSTEM))
7820 return;
7821
7822 ut = get_bsdthread_info(current_thread());
7823 ut->uu_iocount += count;
7824
7825 if (count == 1) {
7826 if (ut->uu_vpindex < 32) {
7827 OSBacktrace((void **)&ut->uu_pcs[ut->uu_vpindex][0], 10);
7828
7829 ut->uu_vps[ut->uu_vpindex] = vp;
7830 ut->uu_vpindex++;
7831 }
7832 }
7833 }
7834 #endif
7835
7836
7837 #if CONFIG_TRIGGERS
7838
7839 #define TRIG_DEBUG 0
7840
7841 #if TRIG_DEBUG
7842 #define TRIG_LOG(...) do { printf("%s: ", __FUNCTION__); printf(__VA_ARGS__); } while (0)
7843 #else
7844 #define TRIG_LOG(...)
7845 #endif
7846
7847 /*
7848 * Resolver result functions
7849 */
7850
7851 resolver_result_t
7852 vfs_resolver_result(uint32_t seq, enum resolver_status stat, int aux)
7853 {
7854 /*
7855 * |<--- 32 --->|<--- 28 --->|<- 4 ->|
7856 * sequence auxiliary status
7857 */
7858 return (((uint64_t)seq) << 32) |
7859 (((uint64_t)(aux & 0x0fffffff)) << 4) |
7860 (uint64_t)(stat & 0x0000000F);
7861 }
7862
7863 enum resolver_status
7864 vfs_resolver_status(resolver_result_t result)
7865 {
7866 /* lower 4 bits is status */
7867 return (result & 0x0000000F);
7868 }
7869
7870 uint32_t
7871 vfs_resolver_sequence(resolver_result_t result)
7872 {
7873 /* upper 32 bits is sequence */
7874 return (uint32_t)(result >> 32);
7875 }
7876
7877 int
7878 vfs_resolver_auxiliary(resolver_result_t result)
7879 {
7880 /* 28 bits of auxiliary */
7881 return (int)(((uint32_t)(result & 0xFFFFFFF0)) >> 4);
7882 }
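/*
 * Round-trip example for the packing above (an editorial addition):
 *
 *	resolver_result_t r = vfs_resolver_result(42, RESOLVER_RESOLVED, 0);
 *
 *	vfs_resolver_sequence(r)  == 42
 *	vfs_resolver_status(r)    == RESOLVER_RESOLVED
 *	vfs_resolver_auxiliary(r) == 0
 *
 * Note that vfs_resolver_result() truncates the auxiliary value to its
 * low 28 bits and the status to its low 4 bits.
 */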
7883
7884 /*
7885 * SPI
7886 * Call in for resolvers to update vnode trigger state
7887 */
7888 int
7889 vnode_trigger_update(vnode_t vp, resolver_result_t result)
7890 {
7891 vnode_resolve_t rp;
7892 uint32_t seq;
7893 enum resolver_status stat;
7894
7895 if (vp->v_resolve == NULL) {
7896 return (EINVAL);
7897 }
7898
7899 stat = vfs_resolver_status(result);
7900 seq = vfs_resolver_sequence(result);
7901
7902 if ((stat != RESOLVER_RESOLVED) && (stat != RESOLVER_UNRESOLVED)) {
7903 return (EINVAL);
7904 }
7905
7906 rp = vp->v_resolve;
7907 lck_mtx_lock(&rp->vr_lock);
7908
7909 if (seq > rp->vr_lastseq) {
7910 if (stat == RESOLVER_RESOLVED)
7911 rp->vr_flags |= VNT_RESOLVED;
7912 else
7913 rp->vr_flags &= ~VNT_RESOLVED;
7914
7915 rp->vr_lastseq = seq;
7916 }
7917
7918 lck_mtx_unlock(&rp->vr_lock);
7919
7920 return (0);
7921 }
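/*
 * Illustrative sketch (hypothetical resolver code, not from the original
 * source): a resolver that completes a mount asynchronously could report the
 * new state through this SPI; only RESOLVED/UNRESOLVED transitions are
 * accepted, and stale sequence numbers are ignored.  Here my_seq and
 * trigger_vp are assumed to be the resolver's own bookkeeping:
 *
 *	resolver_result_t res;
 *
 *	res = vfs_resolver_result(++my_seq, RESOLVER_RESOLVED, 0);
 *	(void) vnode_trigger_update(trigger_vp, res);
 */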
7922
7923 static int
7924 vnode_resolver_attach(vnode_t vp, vnode_resolve_t rp, boolean_t ref)
7925 {
7926 int error;
7927
7928 vnode_lock_spin(vp);
7929 if (vp->v_resolve != NULL) {
7930 vnode_unlock(vp);
7931 return EINVAL;
7932 } else {
7933 vp->v_resolve = rp;
7934 }
7935 vnode_unlock(vp);
7936
7937 if (ref) {
7938 error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE);
7939 if (error != 0) {
7940 panic("VNODE_REF_FORCE didn't help...");
7941 }
7942 }
7943
7944 return 0;
7945 }
7946
7947 /*
7948 * VFS internal interfaces for vnode triggers
7949 *
7950 * vnode must already have an io count on entry
7951 * v_resolve is stable when io count is non-zero
7952 */
7953 static int
7954 vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, boolean_t external)
7955 {
7956 vnode_resolve_t rp;
7957 int result;
7958 char byte;
7959
7960 #if 1
7961 /* minimum pointer test (debugging) */
7962 if (tinfo->vnt_data)
7963 byte = *((char *)tinfo->vnt_data);
7964 #endif
7965 MALLOC(rp, vnode_resolve_t, sizeof(*rp), M_TEMP, M_WAITOK);
7966 if (rp == NULL)
7967 return (ENOMEM);
7968
7969 lck_mtx_init(&rp->vr_lock, trigger_vnode_lck_grp, trigger_vnode_lck_attr);
7970
7971 rp->vr_resolve_func = tinfo->vnt_resolve_func;
7972 rp->vr_unresolve_func = tinfo->vnt_unresolve_func;
7973 rp->vr_rearm_func = tinfo->vnt_rearm_func;
7974 rp->vr_reclaim_func = tinfo->vnt_reclaim_func;
7975 rp->vr_data = tinfo->vnt_data;
7976 rp->vr_lastseq = 0;
7977 rp->vr_flags = tinfo->vnt_flags & VNT_VALID_MASK;
7978 if (external) {
7979 rp->vr_flags |= VNT_EXTERNAL;
7980 }
7981
7982 result = vnode_resolver_attach(vp, rp, external);
7983 if (result != 0) {
7984 goto out;
7985 }
7986
7987 if (mp) {
7988 OSAddAtomic(1, &mp->mnt_numtriggers);
7989 }
7990
7991 return (result);
7992
7993 out:
7994 FREE(rp, M_TEMP);
7995 return result;
7996 }
7997
7998 static void
7999 vnode_resolver_release(vnode_resolve_t rp)
8000 {
8001 /*
8002 * Give them a chance to free any private data
8003 */
8004 if (rp->vr_data && rp->vr_reclaim_func) {
8005 rp->vr_reclaim_func(NULLVP, rp->vr_data);
8006 }
8007
8008 lck_mtx_destroy(&rp->vr_lock, trigger_vnode_lck_grp);
8009 FREE(rp, M_TEMP);
8010
8011 }
8012
8013 /* Called after the vnode has been drained */
8014 static void
8015 vnode_resolver_detach(vnode_t vp)
8016 {
8017 vnode_resolve_t rp;
8018 mount_t mp;
8019
8020 mp = vnode_mount(vp);
8021
8022 vnode_lock(vp);
8023 rp = vp->v_resolve;
8024 vp->v_resolve = NULL;
8025 vnode_unlock(vp);
8026
8027 if ((rp->vr_flags & VNT_EXTERNAL) != 0) {
8028 vnode_rele_ext(vp, O_EVTONLY, 1);
8029 }
8030
8031 vnode_resolver_release(rp);
8032
8033 /* Keep count of active trigger vnodes per mount */
8034 OSAddAtomic(-1, &mp->mnt_numtriggers);
8035 }
8036
8037 /*
8038 * Pathname operations that don't trigger a mount for trigger vnodes
8039 */
8040 static const u_int64_t ignorable_pathops_mask =
8041 1LL << OP_MOUNT |
8042 1LL << OP_UNMOUNT |
8043 1LL << OP_STATFS |
8044 1LL << OP_ACCESS |
8045 1LL << OP_GETATTR |
8046 1LL << OP_LISTXATTR;
8047
8048 int
8049 vfs_istraditionaltrigger(enum path_operation op, const struct componentname *cnp)
8050 {
8051 if (cnp->cn_flags & ISLASTCN)
8052 return ((1LL << op) & ignorable_pathops_mask) == 0;
8053 else
8054 return (1);
8055 }
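/*
 * Illustrative example (not from the original source): with the mask above,
 * an operation from the ignorable set that names the trigger vnode itself
 * (ISLASTCN set) does not fire the trigger, while other operations on it, or
 * traversal through it as an intermediate path component, do:
 *
 *	vfs_istraditionaltrigger(OP_GETATTR, cnp);	-> 0 (ignorable when ISLASTCN is set)
 *	vfs_istraditionaltrigger(OP_UNLINK, cnp);	-> 1 (would trigger a mount)
 *	any op with ISLASTCN clear			-> 1 (intermediate components always trigger)
 */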
8056
8057 __private_extern__
8058 void
8059 vnode_trigger_rearm(vnode_t vp, vfs_context_t ctx)
8060 {
8061 vnode_resolve_t rp;
8062 resolver_result_t result;
8063 enum resolver_status status;
8064 uint32_t seq;
8065
8066 if ((vp->v_resolve == NULL) ||
8067 (vp->v_resolve->vr_rearm_func == NULL) ||
8068 (vp->v_resolve->vr_flags & VNT_AUTO_REARM) == 0) {
8069 return;
8070 }
8071
8072 rp = vp->v_resolve;
8073 lck_mtx_lock(&rp->vr_lock);
8074
8075 /*
8076 * Check if VFS initiated this unmount. If so, we'll catch it after the unresolve completes.
8077 */
8078 if (rp->vr_flags & VNT_VFS_UNMOUNTED) {
8079 lck_mtx_unlock(&rp->vr_lock);
8080 return;
8081 }
8082
8083 /* Check if this vnode is already armed */
8084 if ((rp->vr_flags & VNT_RESOLVED) == 0) {
8085 lck_mtx_unlock(&rp->vr_lock);
8086 return;
8087 }
8088
8089 lck_mtx_unlock(&rp->vr_lock);
8090
8091 result = rp->vr_rearm_func(vp, 0, rp->vr_data, ctx);
8092 status = vfs_resolver_status(result);
8093 seq = vfs_resolver_sequence(result);
8094
8095 lck_mtx_lock(&rp->vr_lock);
8096 if (seq > rp->vr_lastseq) {
8097 if (status == RESOLVER_UNRESOLVED)
8098 rp->vr_flags &= ~VNT_RESOLVED;
8099 rp->vr_lastseq = seq;
8100 }
8101 lck_mtx_unlock(&rp->vr_lock);
8102 }
8103
8104 __private_extern__
8105 int
8106 vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx)
8107 {
8108 vnode_resolve_t rp;
8109 enum path_operation op;
8110 resolver_result_t result;
8111 enum resolver_status status;
8112 uint32_t seq;
8113
8114 /* Only trigger on topmost vnodes */
8115 if ((vp->v_resolve == NULL) ||
8116 (vp->v_resolve->vr_resolve_func == NULL) ||
8117 (vp->v_mountedhere != NULL)) {
8118 return (0);
8119 }
8120
8121 rp = vp->v_resolve;
8122 lck_mtx_lock(&rp->vr_lock);
8123
8124 /* Check if this vnode is already resolved */
8125 if (rp->vr_flags & VNT_RESOLVED) {
8126 lck_mtx_unlock(&rp->vr_lock);
8127 return (0);
8128 }
8129
8130 lck_mtx_unlock(&rp->vr_lock);
8131
8132 /*
8133 * XXX
8134 	 * assumes that the resolver will not access this trigger vnode (otherwise the kernel will deadlock)
8135 	 * is there any way to know this???
8136 * there can also be other legitimate lookups in parallel
8137 *
8138 * XXX - should we call this on a separate thread with a timeout?
8139 *
8140 	 * XXX - should we use ISLASTCN to pick the op value??? Perhaps only leaves should
8141 	 * get the richer set and non-leaves should get generic OP_LOOKUP?  TBD
8142 */
8143 op = (ndp->ni_op < OP_MAXOP) ? ndp->ni_op: OP_LOOKUP;
8144
8145 result = rp->vr_resolve_func(vp, &ndp->ni_cnd, op, 0, rp->vr_data, ctx);
8146 status = vfs_resolver_status(result);
8147 seq = vfs_resolver_sequence(result);
8148
8149 lck_mtx_lock(&rp->vr_lock);
8150 if (seq > rp->vr_lastseq) {
8151 if (status == RESOLVER_RESOLVED)
8152 rp->vr_flags |= VNT_RESOLVED;
8153 rp->vr_lastseq = seq;
8154 }
8155 lck_mtx_unlock(&rp->vr_lock);
8156
8157 /* On resolver errors, propagate the error back up */
8158 return (status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0);
8159 }
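/*
 * Illustrative sketch (hypothetical resolver, not from the original source):
 * judging from the call site above, a vnt_resolve_func receives the trigger
 * vnode, the component name, the path operation, flags, the registered
 * private data and the context; it is expected to perform the mount and
 * report the outcome as a packed result with a monotonically increasing
 * sequence number, e.g.:
 *
 *	static resolver_result_t
 *	my_resolve(vnode_t vp, struct componentname *cnp,
 *	    enum path_operation op, int flags, void *data, vfs_context_t ctx)
 *	{
 *		struct my_state *sp = data;		// hypothetical private state
 *		int error = my_do_mount(vp, ctx);	// hypothetical helper
 *
 *		sp->seq++;
 *		if (error)
 *			return vfs_resolver_result(sp->seq, RESOLVER_ERROR, error);
 *		return vfs_resolver_result(sp->seq, RESOLVER_RESOLVED, 0);
 *	}
 */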
8160
8161 static int
8162 vnode_trigger_unresolve(vnode_t vp, int flags, vfs_context_t ctx)
8163 {
8164 vnode_resolve_t rp;
8165 resolver_result_t result;
8166 enum resolver_status status;
8167 uint32_t seq;
8168
8169 if ((vp->v_resolve == NULL) || (vp->v_resolve->vr_unresolve_func == NULL)) {
8170 return (0);
8171 }
8172
8173 rp = vp->v_resolve;
8174 lck_mtx_lock(&rp->vr_lock);
8175
8176 /* Check if this vnode is already resolved */
8177 if ((rp->vr_flags & VNT_RESOLVED) == 0) {
8178 printf("vnode_trigger_unresolve: not currently resolved\n");
8179 lck_mtx_unlock(&rp->vr_lock);
8180 return (0);
8181 }
8182
8183 rp->vr_flags |= VNT_VFS_UNMOUNTED;
8184
8185 lck_mtx_unlock(&rp->vr_lock);
8186
8187 /*
8188 * XXX
8189 	 * assumes that the resolver will not access this trigger vnode (otherwise the kernel will deadlock)
8190 * there can also be other legitimate lookups in parallel
8191 *
8192 * XXX - should we call this on a separate thread with a timeout?
8193 */
8194
8195 result = rp->vr_unresolve_func(vp, flags, rp->vr_data, ctx);
8196 status = vfs_resolver_status(result);
8197 seq = vfs_resolver_sequence(result);
8198
8199 lck_mtx_lock(&rp->vr_lock);
8200 if (seq > rp->vr_lastseq) {
8201 if (status == RESOLVER_UNRESOLVED)
8202 rp->vr_flags &= ~VNT_RESOLVED;
8203 rp->vr_lastseq = seq;
8204 }
8205 rp->vr_flags &= ~VNT_VFS_UNMOUNTED;
8206 lck_mtx_unlock(&rp->vr_lock);
8207
8208 /* On resolver errors, propagate the error back up */
8209 return (status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0);
8210 }
8211
8212 static int
8213 triggerisdescendant(mount_t mp, mount_t rmp)
8214 {
8215 int match = FALSE;
8216
8217 /*
8218 * walk up vnode covered chain looking for a match
8219 */
8220 name_cache_lock_shared();
8221
8222 while (1) {
8223 vnode_t vp;
8224
8225 /* did we encounter "/" ? */
8226 if (mp->mnt_flag & MNT_ROOTFS)
8227 break;
8228
8229 vp = mp->mnt_vnodecovered;
8230 if (vp == NULLVP)
8231 break;
8232
8233 mp = vp->v_mount;
8234 if (mp == rmp) {
8235 match = TRUE;
8236 break;
8237 }
8238 }
8239
8240 name_cache_unlock();
8241
8242 return (match);
8243 }
8244
8245 struct trigger_unmount_info {
8246 vfs_context_t ctx;
8247 mount_t top_mp;
8248 vnode_t trigger_vp;
8249 mount_t trigger_mp;
8250 uint32_t trigger_vid;
8251 int flags;
8252 };
8253
8254 static int
8255 trigger_unmount_callback(mount_t mp, void * arg)
8256 {
8257 struct trigger_unmount_info * infop = (struct trigger_unmount_info *)arg;
8258 boolean_t mountedtrigger = FALSE;
8259
8260 /*
8261 * When we encounter the top level mount we're done
8262 */
8263 if (mp == infop->top_mp)
8264 return (VFS_RETURNED_DONE);
8265
8266 if ((mp->mnt_vnodecovered == NULL) ||
8267 (vnode_getwithref(mp->mnt_vnodecovered) != 0)) {
8268 return (VFS_RETURNED);
8269 }
8270
8271 if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
8272 (mp->mnt_vnodecovered->v_resolve != NULL) &&
8273 (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_RESOLVED)) {
8274 mountedtrigger = TRUE;
8275 }
8276 vnode_put(mp->mnt_vnodecovered);
8277
8278 /*
8279 	 * When we encounter a mounted trigger, check whether it's under the top level mount
8280 */
8281 if ( !mountedtrigger || !triggerisdescendant(mp, infop->top_mp) )
8282 return (VFS_RETURNED);
8283
8284 /*
8285 	 * Process any pending nested mount (now that it's no longer referenced)
8286 */
8287 if ((infop->trigger_vp != NULLVP) &&
8288 (vnode_getwithvid(infop->trigger_vp, infop->trigger_vid) == 0)) {
8289 vnode_t vp = infop->trigger_vp;
8290 int error;
8291
8292 infop->trigger_vp = NULLVP;
8293
8294 if (mp == vp->v_mountedhere) {
8295 vnode_put(vp);
8296 printf("trigger_unmount_callback: unexpected match '%s'\n",
8297 mp->mnt_vfsstat.f_mntonname);
8298 return (VFS_RETURNED);
8299 }
8300 if (infop->trigger_mp != vp->v_mountedhere) {
8301 vnode_put(vp);
8302 printf("trigger_unmount_callback: trigger mnt changed! (%p != %p)\n",
8303 infop->trigger_mp, vp->v_mountedhere);
8304 goto savenext;
8305 }
8306
8307 error = vnode_trigger_unresolve(vp, infop->flags, infop->ctx);
8308 vnode_put(vp);
8309 if (error) {
8310 printf("unresolving: '%s', err %d\n",
8311 vp->v_mountedhere ? vp->v_mountedhere->mnt_vfsstat.f_mntonname :
8312 "???", error);
8313 return (VFS_RETURNED_DONE); /* stop iteration on errors */
8314 }
8315 }
8316 savenext:
8317 /*
8318 	 * We can't call the resolver here since we hold a mount iteration
8319 	 * ref on mp, so save its covered vnode for later processing
8320 */
8321 infop->trigger_vp = mp->mnt_vnodecovered;
8322 if ((infop->trigger_vp != NULLVP) &&
8323 (vnode_getwithref(infop->trigger_vp) == 0)) {
8324 if (infop->trigger_vp->v_mountedhere == mp) {
8325 infop->trigger_vid = infop->trigger_vp->v_id;
8326 infop->trigger_mp = mp;
8327 }
8328 vnode_put(infop->trigger_vp);
8329 }
8330
8331 return (VFS_RETURNED);
8332 }
8333
8334 /*
8335 * Attempt to unmount any trigger mounts nested underneath a mount.
8336 * This is a best effort attempt and no retries are performed here.
8337 *
8338 	 * Note: mp->mnt_rwlock is held exclusively on entry (so be careful)
8339 */
8340 __private_extern__
8341 void
8342 vfs_nested_trigger_unmounts(mount_t mp, int flags, vfs_context_t ctx)
8343 {
8344 struct trigger_unmount_info info;
8345
8346 /* Must have trigger vnodes */
8347 if (mp->mnt_numtriggers == 0) {
8348 return;
8349 }
8350 /* Avoid recursive requests (by checking covered vnode) */
8351 if ((mp->mnt_vnodecovered != NULL) &&
8352 (vnode_getwithref(mp->mnt_vnodecovered) == 0)) {
8353 boolean_t recursive = FALSE;
8354
8355 if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
8356 (mp->mnt_vnodecovered->v_resolve != NULL) &&
8357 (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_VFS_UNMOUNTED)) {
8358 recursive = TRUE;
8359 }
8360 vnode_put(mp->mnt_vnodecovered);
8361 if (recursive)
8362 return;
8363 }
8364
8365 /*
8366 * Attempt to unmount any nested trigger mounts (best effort)
8367 */
8368 info.ctx = ctx;
8369 info.top_mp = mp;
8370 info.trigger_vp = NULLVP;
8371 info.trigger_vid = 0;
8372 info.trigger_mp = NULL;
8373 info.flags = flags;
8374
8375 (void) vfs_iterate(VFS_ITERATE_TAIL_FIRST, trigger_unmount_callback, &info);
8376
8377 /*
8378 	 * Process the remaining nested mount (now that it's no longer referenced)
8379 */
8380 if ((info.trigger_vp != NULLVP) &&
8381 (vnode_getwithvid(info.trigger_vp, info.trigger_vid) == 0)) {
8382 vnode_t vp = info.trigger_vp;
8383
8384 if (info.trigger_mp == vp->v_mountedhere) {
8385 (void) vnode_trigger_unresolve(vp, flags, ctx);
8386 }
8387 vnode_put(vp);
8388 }
8389 }
8390
8391 int
8392 vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, vfs_context_t ctx)
8393 {
8394 struct nameidata nd;
8395 int res;
8396 vnode_t rvp, vp;
8397 struct vnode_trigger_param vtp;
8398
8399 /*
8400 	 * Must be called from a trigger callback, with mp->mnt_rwlock already held
8401 */
8402 lck_rw_assert(&mp->mnt_rwlock, LCK_RW_ASSERT_HELD);
8403
8404 TRIG_LOG("Adding trigger at %s\n", relpath);
8405 TRIG_LOG("Trying VFS_ROOT\n");
8406
8407 /*
8408 * We do a lookup starting at the root of the mountpoint, unwilling
8409 * to cross into other mountpoints.
8410 */
8411 res = VFS_ROOT(mp, &rvp, ctx);
8412 if (res != 0) {
8413 goto out;
8414 }
8415
8416 TRIG_LOG("Trying namei\n");
8417
8418 NDINIT(&nd, LOOKUP, OP_LOOKUP, USEDVP | NOCROSSMOUNT | FOLLOW, UIO_SYSSPACE,
8419 CAST_USER_ADDR_T(relpath), ctx);
8420 nd.ni_dvp = rvp;
8421 res = namei(&nd);
8422 if (res != 0) {
8423 vnode_put(rvp);
8424 goto out;
8425 }
8426
8427 vp = nd.ni_vp;
8428 nameidone(&nd);
8429 vnode_put(rvp);
8430
8431 TRIG_LOG("Trying vnode_resolver_create()\n");
8432
8433 /*
8434 	 * Set up the trigger param blob.  vnode_create() takes a larger structure
8435 	 * with creation info, and we needed something different for this case.
8436 	 * One of the two formats has to win (or we'd have to munge both);
8437 	 * vnode_create() wins.
8438 */
8439 bzero(&vtp, sizeof(vtp));
8440 vtp.vnt_resolve_func = vtip->vti_resolve_func;
8441 vtp.vnt_unresolve_func = vtip->vti_unresolve_func;
8442 vtp.vnt_rearm_func = vtip->vti_rearm_func;
8443 	vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
8445 vtp.vnt_data = vtip->vti_data;
8446 vtp.vnt_flags = vtip->vti_flags;
8447
8448 res = vnode_resolver_create(mp, vp, &vtp, TRUE);
8449 vnode_put(vp);
8450 out:
8451 TRIG_LOG("Returning %d\n", res);
8452 return res;
8453 }
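/*
 * Illustrative sketch (hypothetical caller, not from the original source):
 * a filesystem wanting a trigger at "sub/dir" relative to its own root,
 * called from a context where mp->mnt_rwlock is already held (e.g. another
 * trigger callback), might fill in a vnode_trigger_info like this; the my_*
 * names are placeholders:
 *
 *	struct vnode_trigger_info vti;
 *	int error;
 *
 *	bzero(&vti, sizeof(vti));
 *	vti.vti_resolve_func = my_resolve;
 *	vti.vti_unresolve_func = my_unresolve;
 *	vti.vti_reclaim_func = my_reclaim;
 *	vti.vti_data = my_private_data;
 *	error = vfs_addtrigger(mp, "sub/dir", &vti, ctx);
 */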
8454
8455 #endif /* CONFIG_TRIGGERS */