/* bsd/vfs/vfs_subr.c (xnu-1504.3.12) */
1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount_internal.h>
85 #include <sys/time.h>
86 #include <sys/lock.h>
87 #include <sys/vnode.h>
88 #include <sys/vnode_internal.h>
89 #include <sys/stat.h>
90 #include <sys/namei.h>
91 #include <sys/ucred.h>
92 #include <sys/buf_internal.h>
93 #include <sys/errno.h>
94 #include <sys/malloc.h>
95 #include <sys/uio_internal.h>
96 #include <sys/uio.h>
97 #include <sys/domain.h>
98 #include <sys/mbuf.h>
99 #include <sys/syslog.h>
100 #include <sys/ubc_internal.h>
101 #include <sys/vm.h>
102 #include <sys/sysctl.h>
103 #include <sys/filedesc.h>
104 #include <sys/event.h>
105 #include <sys/kdebug.h>
106 #include <sys/kauth.h>
107 #include <sys/user.h>
108 #include <miscfs/fifofs/fifo.h>
109
110 #include <string.h>
111 #include <machine/spl.h>
112
113
114 #include <kern/assert.h>
115
116 #include <miscfs/specfs/specdev.h>
117
118 #include <mach/mach_types.h>
119 #include <mach/memory_object_types.h>
120
121 #include <kern/kalloc.h> /* kalloc()/kfree() */
122 #include <kern/clock.h> /* delay_for_interval() */
123 #include <libkern/OSAtomic.h> /* OSAddAtomic() */
124
125
126 #include <vm/vm_protos.h> /* vnode_pager_vrele() */
127
128 #if CONFIG_MACF
129 #include <security/mac_framework.h>
130 #endif
131
132 extern lck_grp_t *vnode_lck_grp;
133 extern lck_attr_t *vnode_lck_attr;
134
135
136 extern lck_mtx_t * mnt_list_mtx_lock;
137
138 enum vtype iftovt_tab[16] = {
139 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
140 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
141 };
142 int vttoif_tab[9] = {
143 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
144 S_IFSOCK, S_IFIFO, S_IFMT,
145 };
146
147 /* XXX next prototype should be from <nfs/nfs.h> */
148 extern int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int);
149
150 /* XXX next prototype should be from <libsa/stdlib.h> but conflicts with libkern */
151 __private_extern__ void qsort(
152 void * array,
153 size_t nmembers,
154 size_t member_size,
155 int (*)(const void *, const void *));
156
157 extern kern_return_t adjust_vm_object_cache(vm_size_t oval, vm_size_t nval);
158 __private_extern__ void vntblinit(void);
159 __private_extern__ kern_return_t reset_vmobjectcache(unsigned int val1,
160 unsigned int val2);
161 __private_extern__ int unlink1(vfs_context_t, struct nameidata *, int);
162
163 extern int system_inshutdown;
164
165 static void vnode_list_add(vnode_t);
166 static void vnode_list_remove(vnode_t);
167 static void vnode_list_remove_locked(vnode_t);
168
169 static errno_t vnode_drain(vnode_t);
170 static void vgone(vnode_t, int flags);
171 static void vclean(vnode_t vp, int flag);
172 static void vnode_reclaim_internal(vnode_t, int, int, int);
173
174 static void vnode_dropiocount (vnode_t);
175 static errno_t vnode_getiocount(vnode_t vp, unsigned int vid, int vflags);
176
177 static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
178 static int vnode_reload(vnode_t);
179 static int vnode_isinuse_locked(vnode_t, int, int);
180
181 static void insmntque(vnode_t vp, mount_t mp);
182 static int mount_getvfscnt(void);
183 static int mount_fillfsids(fsid_t *, int );
184 static void vnode_iterate_setup(mount_t);
185 static int vnode_umount_preflight(mount_t, vnode_t, int);
186 static int vnode_iterate_prepare(mount_t);
187 static int vnode_iterate_reloadq(mount_t);
188 static void vnode_iterate_clear(mount_t);
189 static mount_t vfs_getvfs_locked(fsid_t *);
190
191 errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);
192
193 #ifdef JOE_DEBUG
194 static void record_vp(vnode_t vp, int count);
195 #endif
196
197 TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
198 TAILQ_HEAD(deadlst, vnode) vnode_dead_list; /* vnode dead list */
199
200 TAILQ_HEAD(ragelst, vnode) vnode_rage_list; /* vnode rapid age list */
201 struct timeval rage_tv;
202 int rage_limit = 0;
203 int ragevnodes = 0;
204
205 #define RAGE_LIMIT_MIN 100
206 #define RAGE_TIME_LIMIT 5
207
208 struct mntlist mountlist; /* mounted filesystem list */
209 static int nummounts = 0;
210
211 #if DIAGNOSTIC
212 #define VLISTCHECK(fun, vp, list) \
213 if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
214 panic("%s: %s vnode not on %slist", (fun), (list), (list));
215 #else
216 #define VLISTCHECK(fun, vp, list)
217 #endif /* DIAGNOSTIC */
218
219 #define VLISTNONE(vp) \
220 do { \
221 (vp)->v_freelist.tqe_next = (struct vnode *)0; \
222 (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb; \
223 } while(0)
224
225 #define VONLIST(vp) \
226 ((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
227
228 /* remove a vnode from free vnode list */
229 #define VREMFREE(fun, vp) \
230 do { \
231 VLISTCHECK((fun), (vp), "free"); \
232 TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist); \
233 VLISTNONE((vp)); \
234 freevnodes--; \
235 } while(0)
236
237
238
239 /* remove a vnode from dead vnode list */
240 #define VREMDEAD(fun, vp) \
241 do { \
242 VLISTCHECK((fun), (vp), "dead"); \
243 TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist); \
244 VLISTNONE((vp)); \
245 vp->v_listflag &= ~VLIST_DEAD; \
246 deadvnodes--; \
247 } while(0)
248
249
250 /* remove a vnode from rage vnode list */
251 #define VREMRAGE(fun, vp) \
252 do { \
253 if ( !(vp->v_listflag & VLIST_RAGE)) \
254 panic("VREMRAGE: vp not on rage list"); \
255 VLISTCHECK((fun), (vp), "rage"); \
256 TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist); \
257 VLISTNONE((vp)); \
258 vp->v_listflag &= ~VLIST_RAGE; \
259 ragevnodes--; \
260 } while(0)
261
262
263 /*
264 * vnodetarget hasn't been used in a long time, but
265 * it was exported for some reason... I'm leaving it in
266 * place for now... it should be deprecated out of the
267 * exports and removed eventually.
268 */
269 u_int32_t vnodetarget; /* target for vnreclaim() */
270 #define VNODE_FREE_TARGET 20 /* Default value for vnodetarget */
271
272 /*
273 * We need quite a few vnodes on the free list to sustain the
274 * rapid stat() activity a compilation process generates, and still benefit from the name
275 * cache. Having too few vnodes on the free list causes serious disk
276 * thrashing as we cycle through them.
277 */
278 #define VNODE_FREE_MIN CONFIG_VNODE_FREE_MIN /* freelist should have at least this many */
279
280 /*
281 * Initialize the vnode management data structures.
282 */
283 __private_extern__ void
284 vntblinit(void)
285 {
286 TAILQ_INIT(&vnode_free_list);
287 TAILQ_INIT(&vnode_rage_list);
288 TAILQ_INIT(&vnode_dead_list);
289 TAILQ_INIT(&mountlist);
290
291 if (!vnodetarget)
292 vnodetarget = VNODE_FREE_TARGET;
293
294 microuptime(&rage_tv);
295 rage_limit = desiredvnodes / 100;
296
297 if (rage_limit < RAGE_LIMIT_MIN)
298 rage_limit = RAGE_LIMIT_MIN;
299
300 /*
301 * Scale the vm_object_cache to accommodate the vnodes
302 * we want to cache
303 */
304 (void) adjust_vm_object_cache(0, desiredvnodes - VNODE_FREE_MIN);
305 }
306
307 /* Reset the VM Object Cache with the values passed in */
308 __private_extern__ kern_return_t
309 reset_vmobjectcache(unsigned int val1, unsigned int val2)
310 {
311 vm_size_t oval = val1 - VNODE_FREE_MIN;
312 vm_size_t nval;
313
314 if (val1 == val2) {
315 return KERN_SUCCESS;
316 }
317
318 if(val2 < VNODE_FREE_MIN)
319 nval = 0;
320 else
321 nval = val2 - VNODE_FREE_MIN;
322
323 return(adjust_vm_object_cache(oval, nval));
324 }
325
326
327 /* the slptimeout argument is expressed in units of 10 msecs */
328 int
329 vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg) {
330 int error = 0;
331 struct timespec ts;
332
333 KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);
334
335 if (vp->v_numoutput > output_target) {
336
337 slpflag |= PDROP;
338
339 vnode_lock_spin(vp);
340
341 while ((vp->v_numoutput > output_target) && error == 0) {
342 if (output_target)
343 vp->v_flag |= VTHROTTLED;
344 else
345 vp->v_flag |= VBWAIT;
346
347 ts.tv_sec = (slptimeout/100);
348 ts.tv_nsec = (slptimeout % 100) * 10 * NSEC_PER_USEC * 1000; /* remaining 10-msec ticks */
349 error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);
350
351 vnode_lock_spin(vp);
352 }
353 vnode_unlock(vp);
354 }
355 KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);
356
357 return error;
358 }
359
360
361 void
362 vnode_startwrite(vnode_t vp) {
363
364 OSAddAtomic(1, &vp->v_numoutput);
365 }
366
367
368 void
369 vnode_writedone(vnode_t vp)
370 {
371 if (vp) {
372 OSAddAtomic(-1, &vp->v_numoutput);
373
374 if (vp->v_numoutput <= 1) {
375 int need_wakeup = 0;
376
377 vnode_lock_spin(vp);
378
379 if (vp->v_numoutput < 0)
380 panic("vnode_writedone: numoutput < 0");
381
382 if ((vp->v_flag & VTHROTTLED) && (vp->v_numoutput <= 1)) {
383 vp->v_flag &= ~VTHROTTLED;
384 need_wakeup = 1;
385 }
386 if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
387 vp->v_flag &= ~VBWAIT;
388 need_wakeup = 1;
389 }
390 vnode_unlock(vp);
391
392 if (need_wakeup)
393 wakeup((caddr_t)&vp->v_numoutput);
394 }
395 }
396 }
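/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a filesystem's asynchronous write path might pair vnode_startwrite(),
 * vnode_writedone() and vnode_waitforwrites().  Guarded with #if 0 so it is
 * never compiled; my_fs_issue_async_write() and MY_FS_MAX_INFLIGHT are
 * hypothetical names, not real interfaces.
 */
#if 0
#define MY_FS_MAX_INFLIGHT	128

static int
my_fs_write_example(vnode_t vp, buf_t bp)
{
	int error;

	/* wait up to ~1 second (100 ticks of 10 msec) for the backlog to drain */
	error = vnode_waitforwrites(vp, MY_FS_MAX_INFLIGHT, 0, 100, "my_fs_write");
	if (error)
		return (error);

	vnode_startwrite(vp);			/* account for the I/O about to be issued */

	error = my_fs_issue_async_write(vp, bp);	/* hypothetical helper */
	if (error)
		vnode_writedone(vp);		/* undo the accounting on failure */

	return (error);
}
/* the I/O completion handler would call vnode_writedone(vp) on success */
#endif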
397
398
399
400 int
401 vnode_hasdirtyblks(vnode_t vp)
402 {
403 struct cl_writebehind *wbp;
404
405 /*
406 * Not taking the buf_mtxp as there is little
407 * point doing it. Even if the lock is taken the
408 * state can change right after that. If there
409 * needs to be synchronization, it must be driven
410 * by the caller
411 */
412 if (vp->v_dirtyblkhd.lh_first)
413 return (1);
414
415 if (!UBCINFOEXISTS(vp))
416 return (0);
417
418 wbp = vp->v_ubcinfo->cl_wbehind;
419
420 if (wbp && (wbp->cl_number || wbp->cl_scmap))
421 return (1);
422
423 return (0);
424 }
425
426 int
427 vnode_hascleanblks(vnode_t vp)
428 {
429 /*
430 * Not taking the buf_mtxp as there is little
431 * point doing it. Even if the lock is taken the
432 * state can change right after that. If there
433 * needs to be synchronization, it must be driven
434 * by the caller
435 */
436 if (vp->v_cleanblkhd.lh_first)
437 return (1);
438 return (0);
439 }
440
441 void
442 vnode_iterate_setup(mount_t mp)
443 {
444 while (mp->mnt_lflag & MNT_LITER) {
445 mp->mnt_lflag |= MNT_LITERWAIT;
446 msleep((caddr_t)mp, &mp->mnt_mlock, PVFS, "vnode_iterate_setup", NULL);
447 }
448
449 mp->mnt_lflag |= MNT_LITER;
450
451 }
452
453 static int
454 vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
455 {
456 vnode_t vp;
457
458 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
459 /* disable preflight only for udf, a hack to be removed after 4073176 is fixed */
460 if (vp->v_tag == VT_UDF)
461 return 0;
462 if (vp->v_type == VDIR)
463 continue;
464 if (vp == skipvp)
465 continue;
466 if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
467 (vp->v_flag & VNOFLUSH)))
468 continue;
469 if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP))
470 continue;
471 if ((flags & WRITECLOSE) &&
472 (vp->v_writecount == 0 || vp->v_type != VREG))
473 continue;
474 /* Look for busy vnode */
475 if (((vp->v_usecount != 0) &&
476 ((vp->v_usecount - vp->v_kusecount) != 0)))
477 return(1);
478 }
479
480 return(0);
481 }
482
483 /*
484 * This routine prepares for iteration by moving all the vnodes to the worker queue;
485 * called with the mount lock held
486 */
487 int
488 vnode_iterate_prepare(mount_t mp)
489 {
490 vnode_t vp;
491
492 if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
493 /* nothing to do */
494 return (0);
495 }
496
497 vp = TAILQ_FIRST(&mp->mnt_vnodelist);
498 vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
499 mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
500 mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;
501
502 TAILQ_INIT(&mp->mnt_vnodelist);
503 if (mp->mnt_newvnodes.tqh_first != NULL)
504 panic("vnode_iterate_prepare: newvnode when entering vnode");
505 TAILQ_INIT(&mp->mnt_newvnodes);
506
507 return (1);
508 }
509
510
511 /* called with mount lock held */
512 int
513 vnode_iterate_reloadq(mount_t mp)
514 {
515 int moved = 0;
516
517 /* add the remaining entries in workerq to the end of mount vnode list */
518 if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
519 struct vnode * mvp;
520 mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);
521
522 /* Join the workerqueue entries to the mount vnode list */
523 if (mvp)
524 mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
525 else
526 mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
527 mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
528 mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
529 TAILQ_INIT(&mp->mnt_workerqueue);
530 }
531
532 /* add the newvnodes to the head of mount vnode list */
533 if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
534 struct vnode * nlvp;
535 nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);
536
537 mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
538 nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
539 if(mp->mnt_vnodelist.tqh_first)
540 mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
541 else
542 mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
543 mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
544 TAILQ_INIT(&mp->mnt_newvnodes);
545 moved = 1;
546 }
547
548 return(moved);
549 }
550
551
552 void
553 vnode_iterate_clear(mount_t mp)
554 {
555 mp->mnt_lflag &= ~MNT_LITER;
556 if (mp->mnt_lflag & MNT_LITERWAIT) {
557 mp->mnt_lflag &= ~MNT_LITERWAIT;
558 wakeup(mp);
559 }
560 }
561
562
563 int
564 vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
565 void *arg)
566 {
567 struct vnode *vp;
568 int vid, retval;
569 int ret = 0;
570
571 mount_lock(mp);
572
573 vnode_iterate_setup(mp);
574
575 /* if it returns 0 then there is nothing to do */
576 retval = vnode_iterate_prepare(mp);
577
578 if (retval == 0) {
579 vnode_iterate_clear(mp);
580 mount_unlock(mp);
581 return(ret);
582 }
583
584 /* iterate over all the vnodes */
585 while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
586 vp = TAILQ_FIRST(&mp->mnt_workerqueue);
587 TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
588 TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
589 vid = vp->v_id;
590 if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
591 continue;
592 }
593 mount_unlock(mp);
594
595 if ( vget_internal(vp, vid, (flags | VNODE_NODEAD| VNODE_WITHID | VNODE_NOSUSPEND))) {
596 mount_lock(mp);
597 continue;
598 }
599 if (flags & VNODE_RELOAD) {
600 /*
601 * we're reloading the filesystem
602 * cast out any inactive vnodes...
603 */
604 if (vnode_reload(vp)) {
605 /* vnode will be recycled on the refcount drop */
606 vnode_put(vp);
607 mount_lock(mp);
608 continue;
609 }
610 }
611
612 retval = callout(vp, arg);
613
614 switch (retval) {
615 case VNODE_RETURNED:
616 case VNODE_RETURNED_DONE:
617 vnode_put(vp);
618 if (retval == VNODE_RETURNED_DONE) {
619 mount_lock(mp);
620 ret = 0;
621 goto out;
622 }
623 break;
624
625 case VNODE_CLAIMED_DONE:
626 mount_lock(mp);
627 ret = 0;
628 goto out;
629 case VNODE_CLAIMED:
630 default:
631 break;
632 }
633 mount_lock(mp);
634 }
635
636 out:
637 (void)vnode_iterate_reloadq(mp);
638 vnode_iterate_clear(mp);
639 mount_unlock(mp);
640 return (ret);
641 }
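/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a per-mount flush built on vnode_iterate().  The callout runs with an
 * iocount already held on vp; returning VNODE_RETURNED lets vnode_iterate()
 * drop that iocount and continue, while the *_DONE values end the walk.
 * Guarded with #if 0; my_fs_flush_one() is a hypothetical helper.
 */
#if 0
static int
my_fs_flush_callout(vnode_t vp, void *arg)
{
	vfs_context_t ctx = (vfs_context_t)arg;

	if (vnode_hasdirtyblks(vp))
		(void) my_fs_flush_one(vp, ctx);	/* hypothetical helper */

	return (VNODE_RETURNED);	/* iocount is dropped by vnode_iterate() */
}

static int
my_fs_sync_example(mount_t mp, vfs_context_t ctx)
{
	/* skip vnodes in the dead state and don't block on suspended ones */
	return (vnode_iterate(mp, VNODE_NODEAD | VNODE_NOSUSPEND,
	    my_fs_flush_callout, ctx));
}
#endif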
642
643 void
644 mount_lock_renames(mount_t mp)
645 {
646 lck_mtx_lock(&mp->mnt_renamelock);
647 }
648
649 void
650 mount_unlock_renames(mount_t mp)
651 {
652 lck_mtx_unlock(&mp->mnt_renamelock);
653 }
654
655 void
656 mount_lock(mount_t mp)
657 {
658 lck_mtx_lock(&mp->mnt_mlock);
659 }
660
661 void
662 mount_lock_spin(mount_t mp)
663 {
664 lck_mtx_lock_spin(&mp->mnt_mlock);
665 }
666
667 void
668 mount_unlock(mount_t mp)
669 {
670 lck_mtx_unlock(&mp->mnt_mlock);
671 }
672
673
674 void
675 mount_ref(mount_t mp, int locked)
676 {
677 if ( !locked)
678 mount_lock_spin(mp);
679
680 mp->mnt_count++;
681
682 if ( !locked)
683 mount_unlock(mp);
684 }
685
686
687 void
688 mount_drop(mount_t mp, int locked)
689 {
690 if ( !locked)
691 mount_lock_spin(mp);
692
693 mp->mnt_count--;
694
695 if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN))
696 wakeup(&mp->mnt_lflag);
697
698 if ( !locked)
699 mount_unlock(mp);
700 }
701
702
703 int
704 mount_iterref(mount_t mp, int locked)
705 {
706 int retval = 0;
707
708 if (!locked)
709 mount_list_lock();
710 if (mp->mnt_iterref < 0) {
711 retval = 1;
712 } else {
713 mp->mnt_iterref++;
714 }
715 if (!locked)
716 mount_list_unlock();
717 return(retval);
718 }
719
720 int
721 mount_isdrained(mount_t mp, int locked)
722 {
723 int retval;
724
725 if (!locked)
726 mount_list_lock();
727 if (mp->mnt_iterref < 0)
728 retval = 1;
729 else
730 retval = 0;
731 if (!locked)
732 mount_list_unlock();
733 return(retval);
734 }
735
736 void
737 mount_iterdrop(mount_t mp)
738 {
739 mount_list_lock();
740 mp->mnt_iterref--;
741 wakeup(&mp->mnt_iterref);
742 mount_list_unlock();
743 }
744
745 void
746 mount_iterdrain(mount_t mp)
747 {
748 mount_list_lock();
749 while (mp->mnt_iterref)
750 msleep((caddr_t)&mp->mnt_iterref, mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL);
751 /* mount iterations drained */
752 mp->mnt_iterref = -1;
753 mount_list_unlock();
754 }
755 void
756 mount_iterreset(mount_t mp)
757 {
758 mount_list_lock();
759 if (mp->mnt_iterref == -1)
760 mp->mnt_iterref = 0;
761 mount_list_unlock();
762 }
763
764 /* always called with mount lock held */
765 int
766 mount_refdrain(mount_t mp)
767 {
768 if (mp->mnt_lflag & MNT_LDRAIN)
769 panic("already in drain");
770 mp->mnt_lflag |= MNT_LDRAIN;
771
772 while (mp->mnt_count)
773 msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL);
774
775 if (mp->mnt_vnodelist.tqh_first != NULL)
776 panic("mount_refdrain: dangling vnode");
777
778 mp->mnt_lflag &= ~MNT_LDRAIN;
779
780 return(0);
781 }
782
783
784 /*
785 * Mark a mount point as busy. Used to synchronize access and to delay
786 * unmounting.
787 */
788 int
789 vfs_busy(mount_t mp, int flags)
790 {
791
792 restart:
793 if (mp->mnt_lflag & MNT_LDEAD)
794 return(ENOENT);
795
796 if (mp->mnt_lflag & MNT_LUNMOUNT) {
797 if (flags & LK_NOWAIT)
798 return (ENOENT);
799
800 mount_lock(mp);
801
802 if (mp->mnt_lflag & MNT_LDEAD) {
803 mount_unlock(mp);
804 return(ENOENT);
805 }
806 if (mp->mnt_lflag & MNT_LUNMOUNT) {
807 mp->mnt_lflag |= MNT_LWAIT;
808 /*
809 * Since all busy locks are shared except the exclusive
810 * lock granted when unmounting, the only place that a
811 * wakeup needs to be done is at the release of the
812 * exclusive lock at the end of dounmount.
813 */
814 msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
815 return (ENOENT);
816 }
817 mount_unlock(mp);
818 }
819
820 lck_rw_lock_shared(&mp->mnt_rwlock);
821
822 /*
823 * until we are granted the rwlock, it's possible for the mount point to
824 * change state, so reevaluate before granting the vfs_busy
825 */
826 if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
827 lck_rw_done(&mp->mnt_rwlock);
828 goto restart;
829 }
830 return (0);
831 }
832
833 /*
834 * Free a busy filesystem.
835 */
836
837 void
838 vfs_unbusy(mount_t mp)
839 {
840 lck_rw_done(&mp->mnt_rwlock);
841 }
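/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the usual vfs_busy()/vfs_unbusy() pairing used to keep a mount point from
 * being unmounted while it is being worked on.  Guarded with #if 0;
 * my_fs_do_mount_work() is a hypothetical helper.
 */
#if 0
static int
my_fs_with_busy_mount(mount_t mp, vfs_context_t ctx)
{
	int error;

	/* LK_NOWAIT: fail with ENOENT instead of sleeping if an unmount is in progress */
	if ((error = vfs_busy(mp, LK_NOWAIT)))
		return (error);

	error = my_fs_do_mount_work(mp, ctx);	/* hypothetical helper */

	vfs_unbusy(mp);
	return (error);
}
#endif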
842
843
844
845 static void
846 vfs_rootmountfailed(mount_t mp) {
847
848 mount_list_lock();
849 mp->mnt_vtable->vfc_refcount--;
850 mount_list_unlock();
851
852 vfs_unbusy(mp);
853
854 mount_lock_destroy(mp);
855
856 #if CONFIG_MACF
857 mac_mount_label_destroy(mp);
858 #endif
859
860 FREE_ZONE(mp, sizeof(struct mount), M_MOUNT);
861 }
862
863 /*
864 * Look up a filesystem type, and if found allocate and initialize
865 * a mount structure for it.
866 *
867 * Devname is usually updated by mount(8) after booting.
868 */
869 static mount_t
870 vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname)
871 {
872 mount_t mp;
873
874 mp = _MALLOC_ZONE(sizeof(struct mount), M_MOUNT, M_WAITOK);
875 bzero((char *)mp, sizeof(struct mount));
876
877 /* Initialize the default IO constraints */
878 mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
879 mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
880 mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
881 mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
882 mp->mnt_devblocksize = DEV_BSIZE;
883 mp->mnt_alignmentmask = PAGE_MASK;
884 mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
885 mp->mnt_ioscale = 1;
886 mp->mnt_ioflags = 0;
887 mp->mnt_realrootvp = NULLVP;
888 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
889
890 mount_lock_init(mp);
891 (void)vfs_busy(mp, LK_NOWAIT);
892
893 TAILQ_INIT(&mp->mnt_vnodelist);
894 TAILQ_INIT(&mp->mnt_workerqueue);
895 TAILQ_INIT(&mp->mnt_newvnodes);
896
897 mp->mnt_vtable = vfsp;
898 mp->mnt_op = vfsp->vfc_vfsops;
899 mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS;
900 mp->mnt_vnodecovered = NULLVP;
901 //mp->mnt_stat.f_type = vfsp->vfc_typenum;
902 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
903
904 mount_list_lock();
905 vfsp->vfc_refcount++;
906 mount_list_unlock();
907
908 strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
909 mp->mnt_vfsstat.f_mntonname[0] = '/';
910 /* XXX const poisoning layering violation */
911 (void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);
912
913 #if CONFIG_MACF
914 mac_mount_label_init(mp);
915 mac_mount_label_associate(vfs_context_kernel(), mp);
916 #endif
917 return (mp);
918 }
919
920 errno_t
921 vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp)
922 {
923 struct vfstable *vfsp;
924
925 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
926 if (!strncmp(vfsp->vfc_name, fstypename,
927 sizeof(vfsp->vfc_name)))
928 break;
929 if (vfsp == NULL)
930 return (ENODEV);
931
932 *mpp = vfs_rootmountalloc_internal(vfsp, devname);
933
934 if (*mpp)
935 return (0);
936
937 return (ENOMEM);
938 }
939
940
941 /*
942 * Find an appropriate filesystem to use for the root. If a filesystem
943 * has not been preselected, walk through the list of known filesystems
944 * trying those that have mountroot routines, and try them until one
945 * works or we have tried them all.
946 */
947 extern int (*mountroot)(void);
948
949 int
950 vfs_mountroot(void)
951 {
952 #if CONFIG_MACF
953 struct vnode *vp;
954 #endif
955 struct vfstable *vfsp;
956 vfs_context_t ctx = vfs_context_kernel();
957 struct vfs_attr vfsattr;
958 int error;
959 mount_t mp;
960 vnode_t bdevvp_rootvp;
961
962 if (mountroot != NULL) {
963 /*
964 * used for netboot which follows a different set of rules
965 */
966 error = (*mountroot)();
967 return (error);
968 }
969 if ((error = bdevvp(rootdev, &rootvp))) {
970 printf("vfs_mountroot: can't setup bdevvp\n");
971 return (error);
972 }
973 /*
974 * 4951998 - code we call in vfc_mountroot may replace rootvp
975 * so keep a local copy for some housekeeping.
976 */
977 bdevvp_rootvp = rootvp;
978
979 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
980 if (vfsp->vfc_mountroot == NULL)
981 continue;
982
983 mp = vfs_rootmountalloc_internal(vfsp, "root_device");
984 mp->mnt_devvp = rootvp;
985
986 if ((error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx)) == 0) {
987 if ( bdevvp_rootvp != rootvp ) {
988 /*
989 * rootvp changed...
990 * bump the iocount and fix up mnt_devvp for the
991 * new rootvp (it will already have a usecount taken)...
992 * drop the iocount and the usecount on the original
993 * since we are no longer going to use it...
994 */
995 vnode_getwithref(rootvp);
996 mp->mnt_devvp = rootvp;
997
998 vnode_rele(bdevvp_rootvp);
999 vnode_put(bdevvp_rootvp);
1000 }
1001 mp->mnt_devvp->v_specflags |= SI_MOUNTEDON;
1002
1003 vfs_unbusy(mp);
1004
1005 mount_list_add(mp);
1006
1007 /*
1008 * cache the IO attributes for the underlying physical media...
1009 * an error return indicates the underlying driver doesn't
1010 * support all the queries necessary... however, reasonable
1011 * defaults will have been set, so no reason to bail or care
1012 */
1013 vfs_init_io_attributes(rootvp, mp);
1014
1015 /*
1016 * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
1017 */
1018 if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
1019 mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
1020 }
1021 if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) {
1022 mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
1023 }
1024
1025 /*
1026 * Probe root file system for additional features.
1027 */
1028 (void)VFS_START(mp, 0, ctx);
1029
1030 VFSATTR_INIT(&vfsattr);
1031 VFSATTR_WANTED(&vfsattr, f_capabilities);
1032 if (vfs_getattr(mp, &vfsattr, ctx) == 0 &&
1033 VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
1034 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
1035 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
1036 mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
1037 }
1038 #if NAMEDSTREAMS
1039 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) &&
1040 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) {
1041 mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
1042 }
1043 #endif
1044 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) &&
1045 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) {
1046 mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
1047 }
1048 }
1049
1050 /*
1051 * get rid of iocount reference returned
1052 * by bdevvp (or picked up by us on the substituted
1053 * rootvp)... it (or we) will have also taken
1054 * a usecount reference which we want to keep
1055 */
1056 vnode_put(rootvp);
1057
1058 #if CONFIG_MACF
1059 if ((vfs_flags(mp) & MNT_MULTILABEL) == 0)
1060 return (0);
1061
1062 error = VFS_ROOT(mp, &vp, ctx);
1063 if (error) {
1064 printf("%s() VFS_ROOT() returned %d\n",
1065 __func__, error);
1066 dounmount(mp, MNT_FORCE, 0, ctx);
1067 goto fail;
1068 }
1069 error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
1070 /*
1071 * get rid of reference provided by VFS_ROOT
1072 */
1073 vnode_put(vp);
1074
1075 if (error) {
1076 printf("%s() vnode_label() returned %d\n",
1077 __func__, error);
1078 dounmount(mp, MNT_FORCE, 0, ctx);
1079 goto fail;
1080 }
1081 #endif
1082 return (0);
1083 }
1084 #if CONFIG_MACF
1085 fail:
1086 #endif
1087 vfs_rootmountfailed(mp);
1088
1089 if (error != EINVAL)
1090 printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
1091 }
1092 return (ENODEV);
1093 }
1094
1095 /*
1096 * Lookup a mount point by filesystem identifier.
1097 */
1098
1099 struct mount *
1100 vfs_getvfs(fsid_t *fsid)
1101 {
1102 return (mount_list_lookupby_fsid(fsid, 0, 0));
1103 }
1104
1105 static struct mount *
1106 vfs_getvfs_locked(fsid_t *fsid)
1107 {
1108 return(mount_list_lookupby_fsid(fsid, 1, 0));
1109 }
1110
1111 struct mount *
1112 vfs_getvfs_by_mntonname(char *path)
1113 {
1114 mount_t retmp = (mount_t)0;
1115 mount_t mp;
1116
1117 mount_list_lock();
1118 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1119 if (!strncmp(mp->mnt_vfsstat.f_mntonname, path,
1120 sizeof(mp->mnt_vfsstat.f_mntonname))) {
1121 retmp = mp;
1122 goto out;
1123 }
1124 }
1125 out:
1126 mount_list_unlock();
1127 return (retmp);
1128 }
1129
1130 /* generation number for creation of new fsids */
1131 u_short mntid_gen = 0;
1132 /*
1133 * Get a new unique fsid
1134 */
1135 void
1136 vfs_getnewfsid(struct mount *mp)
1137 {
1138
1139 fsid_t tfsid;
1140 int mtype;
1141 mount_t nmp;
1142
1143 mount_list_lock();
1144
1145 /* generate a new fsid */
1146 mtype = mp->mnt_vtable->vfc_typenum;
1147 if (++mntid_gen == 0)
1148 mntid_gen++;
1149 tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
1150 tfsid.val[1] = mtype;
1151
1152 TAILQ_FOREACH(nmp, &mountlist, mnt_list) {
1153 while (vfs_getvfs_locked(&tfsid)) {
1154 if (++mntid_gen == 0)
1155 mntid_gen++;
1156 tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
1157 }
1158 }
1159 mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0];
1160 mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1];
1161 mount_list_unlock();
1162 }
1163
1164 /*
1165 * Routines having to do with the management of the vnode table.
1166 */
1167 extern int (**dead_vnodeop_p)(void *);
1168 long numvnodes, freevnodes, deadvnodes;
1169
1170
1171 /*
1172 * Move a vnode from one mount queue to another.
1173 */
1174 static void
1175 insmntque(vnode_t vp, mount_t mp)
1176 {
1177 mount_t lmp;
1178 /*
1179 * Delete from old mount point vnode list, if on one.
1180 */
1181 if ( (lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
1182 if ((vp->v_lflag & VNAMED_MOUNT) == 0)
1183 panic("insmntque: vp not in mount vnode list");
1184 vp->v_lflag &= ~VNAMED_MOUNT;
1185
1186 mount_lock_spin(lmp);
1187
1188 mount_drop(lmp, 1);
1189
1190 if (vp->v_mntvnodes.tqe_next == NULL) {
1191 if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp)
1192 TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
1193 else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp)
1194 TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
1195 else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp)
1196 TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
1197 } else {
1198 vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
1199 *vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
1200 }
1201 vp->v_mntvnodes.tqe_next = NULL;
1202 vp->v_mntvnodes.tqe_prev = NULL;
1203 mount_unlock(lmp);
1204 return;
1205 }
1206
1207 /*
1208 * Insert into list of vnodes for the new mount point, if available.
1209 */
1210 if ((vp->v_mount = mp) != NULL) {
1211 mount_lock_spin(mp);
1212 if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0))
1213 panic("vp already in mount list");
1214 if (mp->mnt_lflag & MNT_LITER)
1215 TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
1216 else
1217 TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
1218 if (vp->v_lflag & VNAMED_MOUNT)
1219 panic("insmntque: vp already in mount vnode list");
1220 vp->v_lflag |= VNAMED_MOUNT;
1221 mount_ref(mp, 1);
1222 mount_unlock(mp);
1223 }
1224 }
1225
1226
1227 /*
1228 * Create a vnode for a block device.
1229 * Used for root filesystem, argdev, and swap areas.
1230 * Also used for memory file system special devices.
1231 */
1232 int
1233 bdevvp(dev_t dev, vnode_t *vpp)
1234 {
1235 vnode_t nvp;
1236 int error;
1237 struct vnode_fsparam vfsp;
1238 struct vfs_context context;
1239
1240 if (dev == NODEV) {
1241 *vpp = NULLVP;
1242 return (ENODEV);
1243 }
1244
1245 context.vc_thread = current_thread();
1246 context.vc_ucred = FSCRED;
1247
1248 vfsp.vnfs_mp = (struct mount *)0;
1249 vfsp.vnfs_vtype = VBLK;
1250 vfsp.vnfs_str = "bdevvp";
1251 vfsp.vnfs_dvp = NULL;
1252 vfsp.vnfs_fsnode = NULL;
1253 vfsp.vnfs_cnp = NULL;
1254 vfsp.vnfs_vops = spec_vnodeop_p;
1255 vfsp.vnfs_rdev = dev;
1256 vfsp.vnfs_filesize = 0;
1257
1258 vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;
1259
1260 vfsp.vnfs_marksystem = 0;
1261 vfsp.vnfs_markroot = 0;
1262
1263 if ( (error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp)) ) {
1264 *vpp = NULLVP;
1265 return (error);
1266 }
1267 vnode_lock_spin(nvp);
1268 nvp->v_flag |= VBDEVVP;
1269 nvp->v_tag = VT_NON; /* set this to VT_NON so during aliasing it can be replaced */
1270 vnode_unlock(nvp);
1271 if ( (error = vnode_ref(nvp)) ) {
1272 panic("bdevvp failed: vnode_ref");
1273 return (error);
1274 }
1275 if ( (error = VNOP_FSYNC(nvp, MNT_WAIT, &context)) ) {
1276 panic("bdevvp failed: fsync");
1277 return (error);
1278 }
1279 if ( (error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0)) ) {
1280 panic("bdevvp failed: invalidateblks");
1281 return (error);
1282 }
1283
1284 #if CONFIG_MACF
1285 /*
1286 * XXXMAC: We can't put a MAC check here, the system will
1287 * panic without this vnode.
1288 */
1289 #endif /* MAC */
1290
1291 if ( (error = VNOP_OPEN(nvp, FREAD, &context)) ) {
1292 panic("bdevvp failed: open");
1293 return (error);
1294 }
1295 *vpp = nvp;
1296
1297 return (0);
1298 }
1299
1300 /*
1301 * Check to see if the new vnode represents a special device
1302 * for which we already have a vnode (either because of
1303 * bdevvp() or because of a different vnode representing
1304 * the same block device). If such an alias exists, deallocate
1305 * the existing contents and return the aliased vnode. The
1306 * caller is responsible for filling it with its new contents.
1307 */
1308 static vnode_t
1309 checkalias(struct vnode *nvp, dev_t nvp_rdev)
1310 {
1311 struct vnode *vp;
1312 struct vnode **vpp;
1313 struct specinfo *sin = NULL;
1314 int vid = 0;
1315
1316 vpp = &speclisth[SPECHASH(nvp_rdev)];
1317 loop:
1318 SPECHASH_LOCK();
1319
1320 for (vp = *vpp; vp; vp = vp->v_specnext) {
1321 if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
1322 vid = vp->v_id;
1323 break;
1324 }
1325 }
1326 SPECHASH_UNLOCK();
1327
1328 if (vp) {
1329 found_alias:
1330 if (vnode_getwithvid(vp,vid)) {
1331 goto loop;
1332 }
1333 /*
1334 * Termination state is checked in vnode_getwithvid
1335 */
1336 vnode_lock(vp);
1337
1338 /*
1339 * Alias, but not in use, so flush it out.
1340 */
1341 if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
1342 vnode_reclaim_internal(vp, 1, 1, 0);
1343 vnode_put_locked(vp);
1344 vnode_unlock(vp);
1345 goto loop;
1346 }
1347
1348 }
1349 if (vp == NULL || vp->v_tag != VT_NON) {
1350 if (sin == NULL) {
1351 MALLOC_ZONE(sin, struct specinfo *, sizeof(struct specinfo),
1352 M_SPECINFO, M_WAITOK);
1353 }
1354
1355 nvp->v_specinfo = sin;
1356 bzero(nvp->v_specinfo, sizeof(struct specinfo));
1357 nvp->v_rdev = nvp_rdev;
1358 nvp->v_specflags = 0;
1359 nvp->v_speclastr = -1;
1360
1361 SPECHASH_LOCK();
1362
1363 /* We dropped the lock; someone could have added an alias in the meantime */
1364 if (vp == NULLVP) {
1365 for (vp = *vpp; vp; vp = vp->v_specnext) {
1366 if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
1367 vid = vp->v_id;
1368 SPECHASH_UNLOCK();
1369 goto found_alias;
1370 }
1371 }
1372 }
1373
1374 nvp->v_hashchain = vpp;
1375 nvp->v_specnext = *vpp;
1376 *vpp = nvp;
1377
1378 if (vp != NULLVP) {
1379 nvp->v_specflags |= SI_ALIASED;
1380 vp->v_specflags |= SI_ALIASED;
1381 SPECHASH_UNLOCK();
1382 vnode_put_locked(vp);
1383 vnode_unlock(vp);
1384 } else {
1385 SPECHASH_UNLOCK();
1386 }
1387
1388 return (NULLVP);
1389 }
1390
1391 if (sin) {
1392 FREE_ZONE(sin, sizeof(struct specinfo), M_SPECINFO);
1393 }
1394
1395 if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0)
1396 return(vp);
1397
1398 panic("checkalias with VT_NON vp that shouldn't: %p", vp);
1399
1400 return (vp);
1401 }
1402
1403
1404 /*
1405 * Get a reference on a particular vnode and lock it if requested.
1406 * If the vnode was on the inactive list, remove it from the list.
1407 * If the vnode was on the free list, remove it from the list and
1408 * move it to inactive list as needed.
1409 * The vnode lock bit is set if the vnode is being eliminated in
1410 * vgone. The process is awakened when the transition is completed,
1411 * and an error returned to indicate that the vnode is no longer
1412 * usable (possibly having been changed to a new file system type).
1413 */
1414 int
1415 vget_internal(vnode_t vp, int vid, int vflags)
1416 {
1417 int error = 0;
1418 int vpid;
1419
1420 vnode_lock_spin(vp);
1421
1422 if (vflags & VNODE_WITHID)
1423 vpid = vid;
1424 else
1425 vpid = vp->v_id; // save off the original v_id
1426
1427 if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0))
1428 /*
1429 * vnode to be returned only if it has writers opened
1430 */
1431 error = EINVAL;
1432 else
1433 error = vnode_getiocount(vp, vpid, vflags);
1434
1435 vnode_unlock(vp);
1436
1437 return (error);
1438 }
1439
1440 /*
1441 * Returns: 0 Success
1442 * ENOENT No such file or directory [terminating]
1443 */
1444 int
1445 vnode_ref(vnode_t vp)
1446 {
1447
1448 return (vnode_ref_ext(vp, 0));
1449 }
1450
1451 /*
1452 * Returns: 0 Success
1453 * ENOENT No such file or directory [terminating]
1454 */
1455 int
1456 vnode_ref_ext(vnode_t vp, int fmode)
1457 {
1458 int error = 0;
1459
1460 vnode_lock_spin(vp);
1461
1462 /*
1463 * once all the current call sites have been fixed to ensure they have
1464 * taken an iocount, we can toughen this assert up and insist that the
1465 * iocount is non-zero... a non-zero usecount doesn't ensure correctness
1466 */
1467 if (vp->v_iocount <= 0 && vp->v_usecount <= 0)
1468 panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);
1469
1470 /*
1471 * if you are the owner of drain/termination, can acquire usecount
1472 */
1473 if ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) {
1474 if (vp->v_owner != current_thread()) {
1475 error = ENOENT;
1476 goto out;
1477 }
1478 }
1479 vp->v_usecount++;
1480
1481 if (fmode & FWRITE) {
1482 if (++vp->v_writecount <= 0)
1483 panic("vnode_ref_ext: v_writecount");
1484 }
1485 if (fmode & O_EVTONLY) {
1486 if (++vp->v_kusecount <= 0)
1487 panic("vnode_ref_ext: v_kusecount");
1488 }
1489 if (vp->v_flag & VRAGE) {
1490 struct uthread *ut;
1491
1492 ut = get_bsdthread_info(current_thread());
1493
1494 if ( !(current_proc()->p_lflag & P_LRAGE_VNODES) &&
1495 !(ut->uu_flag & UT_RAGE_VNODES)) {
1496 /*
1497 * a 'normal' process accessed this vnode
1498 * so make sure it's no longer marked
1499 * for rapid aging... also, make sure
1500 * it gets removed from the rage list...
1501 * when v_usecount drops back to 0, it
1502 * will be put back on the real free list
1503 */
1504 vp->v_flag &= ~VRAGE;
1505 vp->v_references = 0;
1506 vnode_list_remove(vp);
1507 }
1508 }
1509 out:
1510 vnode_unlock(vp);
1511
1512 return (error);
1513 }
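/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * taking a long-term usecount on a vnode while an iocount is held, and
 * balancing it later with the same fmode bits so v_writecount and
 * v_kusecount stay consistent.  Guarded with #if 0; the my_fs_* names are
 * hypothetical.
 */
#if 0
static int
my_fs_hold_for_open(vnode_t vp, int fmode)
{
	/* caller is assumed to hold an iocount on vp (e.g. from a lookup) */
	return (vnode_ref_ext(vp, fmode));
}

static void
my_fs_drop_for_close(vnode_t vp, int fmode)
{
	/* third argument 0: allow VNOP_INACTIVE to be sent if this was the last use */
	vnode_rele_ext(vp, fmode, 0);
}
#endif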
1514
1515
1516 /*
1517 * put the vnode on appropriate free list.
1518 * called with vnode LOCKED
1519 */
1520 static void
1521 vnode_list_add(vnode_t vp)
1522 {
1523 #if DIAGNOSTIC
1524 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1525 #endif
1526 /*
1527 * if it is already on a list or has non-zero references, return
1528 */
1529 if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE))
1530 return;
1531
1532 vnode_list_lock();
1533
1534 if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
1535 /*
1536 * add the new guy to the appropriate end of the RAGE list
1537 */
1538 if ((vp->v_flag & VAGE))
1539 TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
1540 else
1541 TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);
1542
1543 vp->v_listflag |= VLIST_RAGE;
1544 ragevnodes++;
1545
1546 /*
1547 * reset the timestamp for the last inserted vp on the RAGE
1548 * queue to let new_vnode know that it's not ok to start stealing
1549 * from this list... as long as we're actively adding to this list
1550 * we'll push out the vnodes we want to donate to the real free list
1551 * once we stop pushing, we'll let some time elapse before we start
1552 * stealing them in the new_vnode routine
1553 */
1554 microuptime(&rage_tv);
1555 } else {
1556 /*
1557 * if VL_DEAD, insert it at head of the dead list
1558 * else insert at tail of LRU list or at head if VAGE is set
1559 */
1560 if ( (vp->v_lflag & VL_DEAD)) {
1561 TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
1562 vp->v_listflag |= VLIST_DEAD;
1563 deadvnodes++;
1564 } else if ((vp->v_flag & VAGE)) {
1565 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1566 vp->v_flag &= ~VAGE;
1567 freevnodes++;
1568 } else {
1569 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1570 freevnodes++;
1571 }
1572 }
1573 vnode_list_unlock();
1574 }
1575
1576
1577 /*
1578 * remove the vnode from appropriate free list.
1579 * called with vnode LOCKED and
1580 * the list lock held
1581 */
1582 static void
1583 vnode_list_remove_locked(vnode_t vp)
1584 {
1585 if (VONLIST(vp)) {
1586 /*
1587 * the v_listflag field is
1588 * protected by the vnode_list_lock
1589 */
1590 if (vp->v_listflag & VLIST_RAGE)
1591 VREMRAGE("vnode_list_remove", vp);
1592 else if (vp->v_listflag & VLIST_DEAD)
1593 VREMDEAD("vnode_list_remove", vp);
1594 else
1595 VREMFREE("vnode_list_remove", vp);
1596 }
1597 }
1598
1599
1600 /*
1601 * remove the vnode from appropriate free list.
1602 * called with vnode LOCKED
1603 */
1604 static void
1605 vnode_list_remove(vnode_t vp)
1606 {
1607 #if DIAGNOSTIC
1608 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1609 #endif
1610 /*
1611 * we want to avoid taking the list lock
1612 * in the case where we're not on the free
1613 * list... this will be true for most
1614 * directories and any currently in use files
1615 *
1616 * we're guaranteed that we can't go from
1617 * the not-on-list state to the on-list
1618 * state since we hold the vnode lock...
1619 * all calls to vnode_list_add are done
1620 * under the vnode lock... so we can
1621 * check for that condition (the prevalent one)
1622 * without taking the list lock
1623 */
1624 if (VONLIST(vp)) {
1625 vnode_list_lock();
1626 /*
1627 * however, we're not guaranteed that
1628 * we won't go from the on-list state
1629 * to the not-on-list state until we
1630 * hold the vnode_list_lock... this
1631 * is due to "new_vnode" removing vnodes
1632 * from the free list under the list_lock
1633 * w/o the vnode lock... so we need to
1634 * check again whether we're currently
1635 * on the free list
1636 */
1637 vnode_list_remove_locked(vp);
1638
1639 vnode_list_unlock();
1640 }
1641 }
1642
1643
1644 void
1645 vnode_rele(vnode_t vp)
1646 {
1647 vnode_rele_internal(vp, 0, 0, 0);
1648 }
1649
1650
1651 void
1652 vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
1653 {
1654 vnode_rele_internal(vp, fmode, dont_reenter, 0);
1655 }
1656
1657
1658 void
1659 vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
1660 {
1661 if ( !locked)
1662 vnode_lock_spin(vp);
1663 #if DIAGNOSTIC
1664 else
1665 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1666 #endif
1667 if (--vp->v_usecount < 0)
1668 panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
1669
1670 if (fmode & FWRITE) {
1671 if (--vp->v_writecount < 0)
1672 panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
1673 }
1674 if (fmode & O_EVTONLY) {
1675 if (--vp->v_kusecount < 0)
1676 panic("vnode_rele_ext: vp %p kusecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
1677 }
1678 if (vp->v_kusecount > vp->v_usecount)
1679 panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). v_tag = %d, v_type = %d, v_flag = %x.",vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
1680
1681 if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
1682 /*
1683 * vnode is still busy... if we're the last
1684 * usecount, mark for a future call to VNOP_INACTIVE
1685 * when the iocount finally drops to 0
1686 */
1687 if (vp->v_usecount == 0) {
1688 vp->v_lflag |= VL_NEEDINACTIVE;
1689 vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
1690 }
1691 if ( !locked)
1692 vnode_unlock(vp);
1693 return;
1694 }
1695 vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
1696
1697 if ( (vp->v_lflag & (VL_TERMINATE | VL_DEAD)) || dont_reenter) {
1698 /*
1699 * vnode is being cleaned, or
1700 * we've requested that we don't reenter
1701 * the filesystem on this release... in
1702 * this case, we'll mark the vnode aged
1703 * if it's been marked for termination
1704 */
1705 if (dont_reenter) {
1706 if ( !(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM)) )
1707 vp->v_lflag |= VL_NEEDINACTIVE;
1708 vp->v_flag |= VAGE;
1709 }
1710 vnode_list_add(vp);
1711 if ( !locked)
1712 vnode_unlock(vp);
1713 return;
1714 }
1715 /*
1716 * at this point both the iocount and usecount
1717 * are zero
1718 * pick up an iocount so that we can call
1719 * VNOP_INACTIVE with the vnode lock unheld
1720 */
1721 vp->v_iocount++;
1722 #ifdef JOE_DEBUG
1723 record_vp(vp, 1);
1724 #endif
1725 vp->v_lflag &= ~VL_NEEDINACTIVE;
1726 vnode_unlock(vp);
1727
1728 VNOP_INACTIVE(vp, vfs_context_current());
1729
1730 vnode_lock_spin(vp);
1731 /*
1732 * because we dropped the vnode lock to call VNOP_INACTIVE
1733 * the state of the vnode may have changed... we may have
1734 * picked up an iocount, usecount or the MARKTERM may have
1735 * been set... we need to reevaluate the reference counts
1736 * to determine if we can call vnode_reclaim_internal at
1737 * this point... if the reference counts are up, we'll pick
1738 * up the MARKTERM state when they get subsequently dropped
1739 */
1740 if ( (vp->v_iocount == 1) && (vp->v_usecount == 0) &&
1741 ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
1742 struct uthread *ut;
1743
1744 ut = get_bsdthread_info(current_thread());
1745
1746 if (ut->uu_defer_reclaims) {
1747 vp->v_defer_reclaimlist = ut->uu_vreclaims;
1748 ut->uu_vreclaims = vp;
1749 goto defer_reclaim;
1750 }
1751 vnode_lock_convert(vp);
1752 vnode_reclaim_internal(vp, 1, 1, 0);
1753 }
1754 vnode_dropiocount(vp);
1755 vnode_list_add(vp);
1756 defer_reclaim:
1757 if ( !locked)
1758 vnode_unlock(vp);
1759 return;
1760 }
1761
1762 /*
1763 * Remove any vnodes in the vnode table belonging to mount point mp.
1764 *
1765 * If MNT_NOFORCE is specified, there should not be any active ones,
1766 * return error if any are found (nb: this is a user error, not a
1767 * system error). If MNT_FORCE is specified, detach any active vnodes
1768 * that are found.
1769 */
1770 #if DIAGNOSTIC
1771 int busyprt = 0; /* print out busy vnodes */
1772 #if 0
1773 struct ctldebug debug1 = { "busyprt", &busyprt };
1774 #endif /* 0 */
1775 #endif
1776
1777 int
1778 vflush(struct mount *mp, struct vnode *skipvp, int flags)
1779 {
1780 struct vnode *vp;
1781 int busy = 0;
1782 int reclaimed = 0;
1783 int retval;
1784 unsigned int vid;
1785
1786 mount_lock(mp);
1787 vnode_iterate_setup(mp);
1788 /*
1789 * On regular unmounts (not forced) do a
1790 * quick check for vnodes in use. This
1791 * preserves the caching of vnodes. The automounter
1792 * tries unmounting every so often to see whether
1793 * it is still busy or not.
1794 */
1795 if (((flags & FORCECLOSE)==0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) {
1796 if (vnode_umount_preflight(mp, skipvp, flags)) {
1797 vnode_iterate_clear(mp);
1798 mount_unlock(mp);
1799 return(EBUSY);
1800 }
1801 }
1802 loop:
1803 /* if it returns 0 then there is nothing to do */
1804 retval = vnode_iterate_prepare(mp);
1805
1806 if (retval == 0) {
1807 vnode_iterate_clear(mp);
1808 mount_unlock(mp);
1809 return(retval);
1810 }
1811
1812 /* iterate over all the vnodes */
1813 while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
1814
1815 vp = TAILQ_FIRST(&mp->mnt_workerqueue);
1816 TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
1817 TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
1818
1819 if ( (vp->v_mount != mp) || (vp == skipvp)) {
1820 continue;
1821 }
1822 vid = vp->v_id;
1823 mount_unlock(mp);
1824
1825 vnode_lock_spin(vp);
1826
1827 if ((vp->v_id != vid) || ((vp->v_lflag & (VL_DEAD | VL_TERMINATE)))) {
1828 vnode_unlock(vp);
1829 mount_lock(mp);
1830 continue;
1831 }
1832
1833 /*
1834 * If requested, skip over vnodes marked VSYSTEM.
1835 * Skip over all vnodes marked VNOFLUSH.
1836 */
1837 if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
1838 (vp->v_flag & VNOFLUSH))) {
1839 vnode_unlock(vp);
1840 mount_lock(mp);
1841 continue;
1842 }
1843 /*
1844 * If requested, skip over vnodes marked VSWAP.
1845 */
1846 if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
1847 vnode_unlock(vp);
1848 mount_lock(mp);
1849 continue;
1850 }
1851 /*
1852 * If requested, skip over vnodes marked VROOT.
1853 */
1854 if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
1855 vnode_unlock(vp);
1856 mount_lock(mp);
1857 continue;
1858 }
1859 /*
1860 * If WRITECLOSE is set, only flush out regular file
1861 * vnodes open for writing.
1862 */
1863 if ((flags & WRITECLOSE) &&
1864 (vp->v_writecount == 0 || vp->v_type != VREG)) {
1865 vnode_unlock(vp);
1866 mount_lock(mp);
1867 continue;
1868 }
1869 /*
1870 * If the real usecount is 0, all we need to do is clear
1871 * out the vnode data structures and we are done.
1872 */
1873 if (((vp->v_usecount == 0) ||
1874 ((vp->v_usecount - vp->v_kusecount) == 0))) {
1875
1876 vnode_lock_convert(vp);
1877 vp->v_iocount++; /* so that drain waits for other iocounts */
1878 #ifdef JOE_DEBUG
1879 record_vp(vp, 1);
1880 #endif
1881 vnode_reclaim_internal(vp, 1, 1, 0);
1882 vnode_dropiocount(vp);
1883 vnode_list_add(vp);
1884 vnode_unlock(vp);
1885
1886 reclaimed++;
1887 mount_lock(mp);
1888 continue;
1889 }
1890 /*
1891 * If FORCECLOSE is set, forcibly close the vnode.
1892 * For block or character devices, revert to an
1893 * anonymous device. For all other files, just kill them.
1894 */
1895 if (flags & FORCECLOSE) {
1896 vnode_lock_convert(vp);
1897
1898 if (vp->v_type != VBLK && vp->v_type != VCHR) {
1899 vp->v_iocount++; /* so that drain waits for other iocounts */
1900 #ifdef JOE_DEBUG
1901 record_vp(vp, 1);
1902 #endif
1903 vnode_reclaim_internal(vp, 1, 1, 0);
1904 vnode_dropiocount(vp);
1905 vnode_list_add(vp);
1906 vnode_unlock(vp);
1907 } else {
1908 vclean(vp, 0);
1909 vp->v_lflag &= ~VL_DEAD;
1910 vp->v_op = spec_vnodeop_p;
1911 vp->v_flag |= VDEVFLUSH;
1912 vnode_unlock(vp);
1913 }
1914 mount_lock(mp);
1915 continue;
1916 }
1917 #if DIAGNOSTIC
1918 if (busyprt)
1919 vprint("vflush: busy vnode", vp);
1920 #endif
1921 vnode_unlock(vp);
1922 mount_lock(mp);
1923 busy++;
1924 }
1925
1926 /* At this point the worker queue is completed */
1927 if (busy && ((flags & FORCECLOSE)==0) && reclaimed) {
1928 busy = 0;
1929 reclaimed = 0;
1930 (void)vnode_iterate_reloadq(mp);
1931 /* returned with mount lock held */
1932 goto loop;
1933 }
1934
1935 /* if new vnodes were created in between, retry the reclaim */
1936 if ( vnode_iterate_reloadq(mp) != 0) {
1937 if (!(busy && ((flags & FORCECLOSE)==0)))
1938 goto loop;
1939 }
1940 vnode_iterate_clear(mp);
1941 mount_unlock(mp);
1942
1943 if (busy && ((flags & FORCECLOSE)==0))
1944 return (EBUSY);
1945 return (0);
1946 }
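/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a filesystem's unmount path might call vflush(), forcing reclaim only
 * when MNT_FORCE was requested and skipping its root vnode so it can release
 * that one itself.  Guarded with #if 0; the function name is hypothetical.
 */
#if 0
static int
my_fs_flush_vnodes(mount_t mp, vnode_t my_rootvp, int mntflags)
{
	int flags = SKIPSYSTEM | SKIPSWAP;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/* returns EBUSY if vnodes remain in use and FORCECLOSE was not set */
	return (vflush(mp, my_rootvp, flags));
}
#endif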
1947
1948 long num_recycledvnodes = 0;
1949 /*
1950 * Disassociate the underlying file system from a vnode.
1951 * The vnode lock is held on entry.
1952 */
1953 static void
1954 vclean(vnode_t vp, int flags)
1955 {
1956 vfs_context_t ctx = vfs_context_current();
1957 int active;
1958 int need_inactive;
1959 int already_terminating;
1960 int clflags = 0;
1961 #if NAMEDSTREAMS
1962 int is_namedstream;
1963 #endif
1964
1965 /*
1966 * Check to see if the vnode is in use.
1967 * If so we have to reference it before we clean it out
1968 * so that its count cannot fall to zero and generate a
1969 * race against ourselves to recycle it.
1970 */
1971 active = vp->v_usecount;
1972
1973 /*
1974 * just in case we missed sending a needed
1975 * VNOP_INACTIVE, we'll do it now
1976 */
1977 need_inactive = (vp->v_lflag & VL_NEEDINACTIVE);
1978
1979 vp->v_lflag &= ~VL_NEEDINACTIVE;
1980
1981 /*
1982 * Prevent the vnode from being recycled or
1983 * brought into use while we clean it out.
1984 */
1985 already_terminating = (vp->v_lflag & VL_TERMINATE);
1986
1987 vp->v_lflag |= VL_TERMINATE;
1988
1989 /*
1990 * remove the vnode from any mount list
1991 * it might be on...
1992 */
1993 insmntque(vp, (struct mount *)0);
1994
1995 #if NAMEDSTREAMS
1996 is_namedstream = vnode_isnamedstream(vp);
1997 #endif
1998
1999 vnode_unlock(vp);
2000
2001 OSAddAtomicLong(1, &num_recycledvnodes);
2002
2003 if (flags & DOCLOSE)
2004 clflags |= IO_NDELAY;
2005 if (flags & REVOKEALL)
2006 clflags |= IO_REVOKE;
2007
2008 if (active && (flags & DOCLOSE))
2009 VNOP_CLOSE(vp, clflags, ctx);
2010
2011 /*
2012 * Clean out any buffers associated with the vnode.
2013 */
2014 if (flags & DOCLOSE) {
2015 #if NFSCLIENT
2016 if (vp->v_tag == VT_NFS)
2017 nfs_vinvalbuf(vp, V_SAVE, ctx, 0);
2018 else
2019 #endif
2020 {
2021 VNOP_FSYNC(vp, MNT_WAIT, ctx);
2022 buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
2023 }
2024 if (UBCINFOEXISTS(vp))
2025 /*
2026 * Clean the pages in VM.
2027 */
2028 (void)ubc_sync_range(vp, (off_t)0, ubc_getsize(vp), UBC_PUSHALL);
2029 }
2030 if (active || need_inactive)
2031 VNOP_INACTIVE(vp, ctx);
2032
2033 #if NAMEDSTREAMS
2034 if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
2035 vnode_t pvp = vp->v_parent;
2036
2037 /* Delete the shadow stream file before we reclaim its vnode */
2038 if (vnode_isshadow(vp)) {
2039 vnode_relenamedstream(pvp, vp, ctx);
2040 }
2041
2042 /*
2043 * No more streams associated with the parent. We
2044 * have a ref on it, so its identity is stable.
2045 * If the parent is on an opaque volume, then we need to know
2046 * whether it has associated named streams.
2047 */
2048 if (vfs_authopaque(pvp->v_mount)) {
2049 vnode_lock_spin(pvp);
2050 pvp->v_lflag &= ~VL_HASSTREAMS;
2051 vnode_unlock(pvp);
2052 }
2053 }
2054 #endif
2055
2056 /*
2057 * Destroy ubc named reference
2058 * cluster_release is done on this path
2059 * along with dropping the reference on the ucred
2060 */
2061 ubc_destroy_named(vp);
2062
2063 /*
2064 * Reclaim the vnode.
2065 */
2066 if (VNOP_RECLAIM(vp, ctx))
2067 panic("vclean: cannot reclaim");
2068
2069 // make sure the name & parent ptrs get cleaned out!
2070 vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE);
2071
2072 vnode_lock(vp);
2073
2074 vp->v_mount = dead_mountp;
2075 vp->v_op = dead_vnodeop_p;
2076 vp->v_tag = VT_NON;
2077 vp->v_data = NULL;
2078
2079 vp->v_lflag |= VL_DEAD;
2080
2081 if (already_terminating == 0) {
2082 vp->v_lflag &= ~VL_TERMINATE;
2083 /*
2084 * Done with purge, notify sleepers of the grim news.
2085 */
2086 if (vp->v_lflag & VL_TERMWANT) {
2087 vp->v_lflag &= ~VL_TERMWANT;
2088 wakeup(&vp->v_lflag);
2089 }
2090 }
2091 }
2092
2093 /*
2094 * Eliminate all activity associated with the requested vnode
2095 * and with all vnodes aliased to the requested vnode.
2096 */
2097 int
2098 #if DIAGNOSTIC
2099 vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
2100 #else
2101 vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
2102 #endif
2103 {
2104 struct vnode *vq;
2105 int vid;
2106
2107 #if DIAGNOSTIC
2108 if ((flags & REVOKEALL) == 0)
2109 panic("vnop_revoke");
2110 #endif
2111
2112 if (vnode_isaliased(vp)) {
2113 /*
2114 * If a vgone (or vclean) is already in progress,
2115 * return an immediate error
2116 */
2117 if (vp->v_lflag & VL_TERMINATE)
2118 return(ENOENT);
2119
2120 /*
2121 * Ensure that vp will not be vgone'd while we
2122 * are eliminating its aliases.
2123 */
2124 SPECHASH_LOCK();
2125 while ((vp->v_specflags & SI_ALIASED)) {
2126 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2127 if (vq->v_rdev != vp->v_rdev ||
2128 vq->v_type != vp->v_type || vp == vq)
2129 continue;
2130 vid = vq->v_id;
2131 SPECHASH_UNLOCK();
2132 if (vnode_getwithvid(vq,vid)){
2133 SPECHASH_LOCK();
2134 break;
2135 }
2136 vnode_reclaim_internal(vq, 0, 1, 0);
2137 vnode_put(vq);
2138 SPECHASH_LOCK();
2139 break;
2140 }
2141 }
2142 SPECHASH_UNLOCK();
2143 }
2144 vnode_reclaim_internal(vp, 0, 0, REVOKEALL);
2145
2146 return (0);
2147 }
2148
2149 /*
2150 * Recycle an unused vnode to the front of the free list.
2151 * If the vnode is busy, just mark it for termination instead.
2152 */
2153 int
2154 vnode_recycle(struct vnode *vp)
2155 {
2156 vnode_lock_spin(vp);
2157
2158 if (vp->v_iocount || vp->v_usecount) {
2159 vp->v_lflag |= VL_MARKTERM;
2160 vnode_unlock(vp);
2161 return(0);
2162 }
2163 vnode_lock_convert(vp);
2164 vnode_reclaim_internal(vp, 1, 0, 0);
2165
2166 vnode_unlock(vp);
2167
2168 return (1);
2169 }
2170
2171 static int
2172 vnode_reload(vnode_t vp)
2173 {
2174 vnode_lock_spin(vp);
2175
2176 if ((vp->v_iocount > 1) || vp->v_usecount) {
2177 vnode_unlock(vp);
2178 return(0);
2179 }
2180 if (vp->v_iocount <= 0)
2181 panic("vnode_reload with no iocount %d", vp->v_iocount);
2182
2183 /* mark for release when iocount is dropped */
2184 vp->v_lflag |= VL_MARKTERM;
2185 vnode_unlock(vp);
2186
2187 return (1);
2188 }
2189
2190
2191 static void
2192 vgone(vnode_t vp, int flags)
2193 {
2194 struct vnode *vq;
2195 struct vnode *vx;
2196
2197 /*
2198 * Clean out the filesystem specific data.
2199 * vclean also takes care of removing the
2200 * vnode from any mount list it might be on
2201 */
2202 vclean(vp, flags | DOCLOSE);
2203
2204 /*
2205 * If special device, remove it from special device alias list
2206 * if it is on one.
2207 */
2208 if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
2209 SPECHASH_LOCK();
2210 if (*vp->v_hashchain == vp) {
2211 *vp->v_hashchain = vp->v_specnext;
2212 } else {
2213 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2214 if (vq->v_specnext != vp)
2215 continue;
2216 vq->v_specnext = vp->v_specnext;
2217 break;
2218 }
2219 if (vq == NULL)
2220 panic("missing bdev");
2221 }
2222 if (vp->v_specflags & SI_ALIASED) {
2223 vx = NULL;
2224 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2225 if (vq->v_rdev != vp->v_rdev ||
2226 vq->v_type != vp->v_type)
2227 continue;
2228 if (vx)
2229 break;
2230 vx = vq;
2231 }
2232 if (vx == NULL)
2233 panic("missing alias");
2234 if (vq == NULL)
2235 vx->v_specflags &= ~SI_ALIASED;
2236 vp->v_specflags &= ~SI_ALIASED;
2237 }
2238 SPECHASH_UNLOCK();
2239 {
2240 struct specinfo *tmp = vp->v_specinfo;
2241 vp->v_specinfo = NULL;
2242 FREE_ZONE((void *)tmp, sizeof(struct specinfo), M_SPECINFO);
2243 }
2244 }
2245 }
2246
2247 /*
2248 * Look up a vnode by device number and check whether a filesystem is mounted on it.
2249 */
2250 int
2251 check_mountedon(dev_t dev, enum vtype type, int *errorp)
2252 {
2253 vnode_t vp;
2254 int rc = 0;
2255 int vid;
2256
2257 loop:
2258 SPECHASH_LOCK();
2259 for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
2260 if (dev != vp->v_rdev || type != vp->v_type)
2261 continue;
2262 vid = vp->v_id;
2263 SPECHASH_UNLOCK();
2264 if (vnode_getwithvid(vp,vid))
2265 goto loop;
2266 vnode_lock_spin(vp);
2267 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
2268 vnode_unlock(vp);
2269 if ((*errorp = vfs_mountedon(vp)) != 0)
2270 rc = 1;
2271 } else
2272 vnode_unlock(vp);
2273 vnode_put(vp);
2274 return(rc);
2275 }
2276 SPECHASH_UNLOCK();
2277 return (0);
2278 }
2279
2280 /*
2281 * Calculate the total number of references to a special device.
2282 */
2283 int
2284 vcount(vnode_t vp)
2285 {
2286 vnode_t vq, vnext;
2287 int count;
2288 int vid;
2289
2290 loop:
2291 if (!vnode_isaliased(vp))
2292 return (vp->v_usecount - vp->v_kusecount);
2293 count = 0;
2294
2295 SPECHASH_LOCK();
2296 /*
2297 * Grab first vnode and its vid.
2298 */
2299 vq = *vp->v_hashchain;
2300 vid = vq ? vq->v_id : 0;
2301
2302 SPECHASH_UNLOCK();
2303
2304 while (vq) {
2305 /*
2306 * Attempt to get the vnode outside the SPECHASH lock.
2307 */
2308 if (vnode_getwithvid(vq, vid)) {
2309 goto loop;
2310 }
2311 vnode_lock(vq);
2312
2313 if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) {
2314 if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) {
2315 /*
2316 * Alias, but not in use, so flush it out.
2317 */
2318 vnode_reclaim_internal(vq, 1, 1, 0);
2319 vnode_put_locked(vq);
2320 vnode_unlock(vq);
2321 goto loop;
2322 }
2323 count += (vq->v_usecount - vq->v_kusecount);
2324 }
2325 vnode_unlock(vq);
2326
2327 SPECHASH_LOCK();
2328 /*
2329 * must do this with the reference still held on 'vq'
2330 * so that it can't be destroyed while we're poking
2331 * through v_specnext
2332 */
2333 vnext = vq->v_specnext;
2334 vid = vnext ? vnext->v_id : 0;
2335
2336 SPECHASH_UNLOCK();
2337
2338 vnode_put(vq);
2339
2340 vq = vnext;
2341 }
2342
2343 return (count);
2344 }
2345
2346 int prtactive = 0; /* 1 => print out reclaim of active vnodes */
2347
2348 /*
2349 * Print out a description of a vnode.
2350 */
2351 static const char *typename[] =
2352 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
2353
2354 void
2355 vprint(const char *label, struct vnode *vp)
2356 {
2357 char sbuf[64];
2358
2359 if (label != NULL)
2360 printf("%s: ", label);
2361 printf("type %s, usecount %d, writecount %d",
2362 typename[vp->v_type], vp->v_usecount, vp->v_writecount);
2363 sbuf[0] = '\0';
2364 if (vp->v_flag & VROOT)
2365 strlcat(sbuf, "|VROOT", sizeof(sbuf));
2366 if (vp->v_flag & VTEXT)
2367 strlcat(sbuf, "|VTEXT", sizeof(sbuf));
2368 if (vp->v_flag & VSYSTEM)
2369 strlcat(sbuf, "|VSYSTEM", sizeof(sbuf));
2370 if (vp->v_flag & VNOFLUSH)
2371 strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf));
2372 if (vp->v_flag & VBWAIT)
2373 strlcat(sbuf, "|VBWAIT", sizeof(sbuf));
2374 if (vnode_isaliased(vp))
2375 strlcat(sbuf, "|VALIASED", sizeof(sbuf));
2376 if (sbuf[0] != '\0')
2377 printf(" flags (%s)", &sbuf[1]);
2378 }
2379
2380
2381 int
2382 vn_getpath(struct vnode *vp, char *pathbuf, int *len)
2383 {
2384 return build_path(vp, pathbuf, *len, len, BUILDPATH_NO_FS_ENTER, vfs_context_current());
2385 }
2386
2387 int
2388 vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
2389 {
2390 return build_path(vp, pathbuf, *len, len, 0, vfs_context_current());
2391 }
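/*
 * Illustrative caller sketch (not part of the original source): both
 * vn_getpath() variants take a buffer plus an in/out length, so a
 * typical use, assuming a MAXPATHLEN-sized scratch buffer, looks like:
 *
 *	char path[MAXPATHLEN];
 *	int  len = MAXPATHLEN;
 *
 *	if (vn_getpath(vp, path, &len) == 0)
 *		printf("vnode path: %s (%d bytes)\n", path, len);
 *
 * The buffer name and the printf are hypothetical; only the calling
 * convention comes from the functions above.
 */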
2392
2393 int
2394 vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
2395 {
2396 return ubc_cs_getcdhash(vp, offset, cdhash);
2397 }
2398
2399
2400 static char *extension_table=NULL;
2401 static int nexts;
2402 static int max_ext_width;
2403
2404 static int
2405 extension_cmp(const void *a, const void *b)
2406 {
2407 return (strlen((const char *)a) - strlen((const char *)b));
2408 }
2409
2410
2411 //
2412 // This is the api LaunchServices uses to inform the kernel
2413 // the list of package extensions to ignore.
2414 //
2415 // Internally we keep the list sorted by the length of
2416 // the extension (from longest to shortest). We sort the
2417 // list of extensions so that we can speed up our searches
2418 // when comparing file names -- we only compare extensions
2419 // that could possibly fit into the file name, not all of
2420 // them (i.e. a short 8 character name can't have an 8
2421 // character extension).
2422 //
2423 extern lck_mtx_t *pkg_extensions_lck;
2424
2425 __private_extern__ int
2426 set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
2427 {
2428 char *new_exts, *old_exts;
2429 int error;
2430
2431 if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
2432 return EINVAL;
2433 }
2434
2435
2436 // allocate one byte extra so we can guarantee null termination
2437 MALLOC(new_exts, char *, (nentries * maxwidth) + 1, M_TEMP, M_WAITOK);
2438 if (new_exts == NULL) {
2439 return ENOMEM;
2440 }
2441
2442 error = copyin(data, new_exts, nentries * maxwidth);
2443 if (error) {
2444 FREE(new_exts, M_TEMP);
2445 return error;
2446 }
2447
2448 new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block
2449
2450 qsort(new_exts, nentries, maxwidth, extension_cmp);
2451
2452 lck_mtx_lock(pkg_extensions_lck);
2453
2454 old_exts = extension_table;
2455 extension_table = new_exts;
2456 nexts = nentries;
2457 max_ext_width = maxwidth;
2458
2459 lck_mtx_unlock(pkg_extensions_lck);
2460
2461 if (old_exts) {
2462 FREE(old_exts, M_TEMP);
2463 }
2464
2465 return 0;
2466 }
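/*
 * Layout sketch (illustrative values, not from the original source):
 * with nentries == 3 and maxwidth == 8, the copied-in block is a flat
 * array of fixed-width rows plus the one guard byte appended above:
 *
 *	"app\0\0\0\0\0"  "bundle\0\0"  "kext\0\0\0\0"  "\0"
 *	  row 0            row 1         row 2         guard
 *
 * The extension strings are hypothetical; is_package_name() below walks
 * the rows as extension_table + i * max_ext_width.
 */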
2467
2468
2469 __private_extern__ int
2470 is_package_name(const char *name, int len)
2471 {
2472 int i, extlen;
2473 const char *ptr, *name_ext;
2474
2475 if (len <= 3) {
2476 return 0;
2477 }
2478
2479 name_ext = NULL;
2480 for(ptr=name; *ptr != '\0'; ptr++) {
2481 if (*ptr == '.') {
2482 name_ext = ptr;
2483 }
2484 }
2485
2486 // if there is no "." extension, it can't match
2487 if (name_ext == NULL) {
2488 return 0;
2489 }
2490
2491 // advance over the "."
2492 name_ext++;
2493
2494 lck_mtx_lock(pkg_extensions_lck);
2495
2496 // now iterate over all the extensions to see if any match
2497 ptr = &extension_table[0];
2498 for(i=0; i < nexts; i++, ptr+=max_ext_width) {
2499 extlen = strlen(ptr);
2500 if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
2501 // aha, a match!
2502 lck_mtx_unlock(pkg_extensions_lck);
2503 return 1;
2504 }
2505 }
2506
2507 lck_mtx_unlock(pkg_extensions_lck);
2508
2509 // if we get here, no extension matched
2510 return 0;
2511 }
2512
2513 int
2514 vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
2515 {
2516 char *ptr, *end;
2517 int comp=0;
2518
2519 *component = -1;
2520 if (*path != '/') {
2521 return EINVAL;
2522 }
2523
2524 end = path + 1;
2525 while(end < path + pathlen && *end != '\0') {
2526 while(end < path + pathlen && *end == '/' && *end != '\0') {
2527 end++;
2528 }
2529
2530 ptr = end;
2531
2532 while(end < path + pathlen && *end != '/' && *end != '\0') {
2533 end++;
2534 }
2535
2536 if (end > path + pathlen) {
2537 // hmm, string wasn't null terminated
2538 return EINVAL;
2539 }
2540
2541 *end = '\0';
2542 if (is_package_name(ptr, end - ptr)) {
2543 *component = comp;
2544 break;
2545 }
2546
2547 end++;
2548 comp++;
2549 }
2550
2551 return 0;
2552 }
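/*
 * Worked example (illustrative): assuming "app" has been registered as
 * a package extension, a caller with a writable path buffer such as
 *
 *	char p[] = "/Users/me/Foo.app/Contents";
 *	int  comp;
 *
 *	vn_path_package_check(vp, p, sizeof(p), &comp);
 *
 * gets comp == 2, since components are numbered from 0 after the
 * leading '/' ("Users" is 0, "me" is 1, "Foo.app" is 2).  The buffer
 * must be writable because the loop above NUL-terminates each
 * component in place.  The path and names are hypothetical.
 */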
2553
2554 /*
2555 * Determine if a name is inappropriate for a searchfs query.
2556 * This list consists of /System currently.
2557 */
2558
2559 int vn_searchfs_inappropriate_name(const char *name, int len) {
2560 const char *bad_names[] = { "System" };
2561 int bad_len[] = { 6 };
2562 int i;
2563
2564 for(i=0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) {
2565 if (len == bad_len[i] && strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) {
2566 return 1;
2567 }
2568 }
2569
2570 // if we get here, no name matched
2571 return 0;
2572 }
2573
2574 /*
2575 * Top level filesystem related information gathering.
2576 */
2577 extern unsigned int vfs_nummntops;
2578
2579 int
2580 vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
2581 user_addr_t newp, size_t newlen, proc_t p)
2582 {
2583 struct vfstable *vfsp;
2584 int *username;
2585 u_int usernamelen;
2586 int error;
2587 struct vfsconf vfsc;
2588
2589 /* All non-VFS_GENERIC names, and within VFS_GENERIC the
2590 * VFS_MAXTYPENUM, VFS_CONF and VFS_SET_PACKAGE_EXTS selectors,
2591 * require root privileges to be modified.
2592 * For the rest, userland_sysctl(CTLFLAG_ANYBODY) covers it.
2593 */
2594 if ((newp != USER_ADDR_NULL) && ((name[0] != VFS_GENERIC) ||
2595 ((name[1] == VFS_MAXTYPENUM) ||
2596 (name[1] == VFS_CONF) ||
2597 (name[1] == VFS_SET_PACKAGE_EXTS)))
2598 && (error = suser(kauth_cred_get(), &p->p_acflag))) {
2599 return(error);
2600 }
2601 /*
2602 * The VFS_NUMMNTOPS shouldn't be at name[0] since
2603 * it is a VFS generic variable. So now we must check
2604 * namelen so we don't end up covering any UFS
2605 * variables (since UFS vfc_typenum is 1).
2606 *
2607 * It should have been:
2608 * name[0]: VFS_GENERIC
2609 * name[1]: VFS_NUMMNTOPS
2610 */
2611 if (namelen == 1 && name[0] == VFS_NUMMNTOPS) {
2612 return (sysctl_rdint(oldp, oldlenp, newp, vfs_nummntops));
2613 }
2614
2615 /* all sysctl names at this level are at least name and field */
2616 if (namelen < 2)
2617 return (EISDIR); /* overloaded */
2618 if (name[0] != VFS_GENERIC) {
2619
2620 mount_list_lock();
2621 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2622 if (vfsp->vfc_typenum == name[0]) {
2623 vfsp->vfc_refcount++;
2624 break;
2625 }
2626 mount_list_unlock();
2627
2628 if (vfsp == NULL)
2629 return (ENOTSUP);
2630
2631 /* XXX current context proxy for proc p? */
2632 error = ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
2633 oldp, oldlenp, newp, newlen,
2634 vfs_context_current()));
2635
2636 mount_list_lock();
2637 vfsp->vfc_refcount--;
2638 mount_list_unlock();
2639 return error;
2640 }
2641 switch (name[1]) {
2642 case VFS_MAXTYPENUM:
2643 return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
2644 case VFS_CONF:
2645 if (namelen < 3)
2646 return (ENOTDIR); /* overloaded */
2647
2648 mount_list_lock();
2649 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2650 if (vfsp->vfc_typenum == name[2])
2651 break;
2652
2653 if (vfsp == NULL) {
2654 mount_list_unlock();
2655 return (ENOTSUP);
2656 }
2657
2658 vfsc.vfc_reserved1 = 0;
2659 bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
2660 vfsc.vfc_typenum = vfsp->vfc_typenum;
2661 vfsc.vfc_refcount = vfsp->vfc_refcount;
2662 vfsc.vfc_flags = vfsp->vfc_flags;
2663 vfsc.vfc_reserved2 = 0;
2664 vfsc.vfc_reserved3 = 0;
2665
2666 mount_list_unlock();
2667 return (sysctl_rdstruct(oldp, oldlenp, newp, &vfsc,
2668 sizeof(struct vfsconf)));
2669
2670 case VFS_SET_PACKAGE_EXTS:
2671 return set_package_extensions_table((user_addr_t)((unsigned)name[1]), name[2], name[3]);
2672 }
2673 /*
2674 * We need to get back into the general MIB, so we need to re-prepend
2675 * CTL_VFS to our name and try userland_sysctl().
2676 */
2677 usernamelen = namelen + 1;
2678 MALLOC(username, int *, usernamelen * sizeof(*username),
2679 M_TEMP, M_WAITOK);
2680 bcopy(name, username + 1, namelen * sizeof(*name));
2681 username[0] = CTL_VFS;
2682 error = userland_sysctl(p, username, usernamelen, oldp,
2683 oldlenp, newp, newlen, oldlenp);
2684 FREE(username, M_TEMP);
2685 return (error);
2686 }
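/*
 * Usage sketch (illustrative, userland side): the VFS_CONF case above
 * is what a query like the following would reach, assuming the usual
 * CTL_VFS MIB prefix:
 *
 *	struct vfsconf vfc;
 *	size_t len = sizeof(vfc);
 *	int mib[4] = { CTL_VFS, VFS_GENERIC, VFS_CONF, typenum };
 *
 *	if (sysctl(mib, 4, &vfc, &len, NULL, 0) == 0)
 *		printf("%s: refcount %d\n", vfc.vfc_name, vfc.vfc_refcount);
 *
 * 'typenum' stands in for a filesystem type number (vfc_typenum); the
 * variable names are hypothetical.
 */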
2687
2688 /*
2689 * Dump vnode list (via sysctl) - defunct
2690 * use "pstat" instead
2691 */
2692 /* ARGSUSED */
2693 int
2694 sysctl_vnode
2695 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
2696 {
2697 return(EINVAL);
2698 }
2699
2700 SYSCTL_PROC(_kern, KERN_VNODE, vnode,
2701 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MASKED,
2702 0, 0, sysctl_vnode, "S,", "");
2703
2704
2705 /*
2706 * Check to see if a filesystem is mounted on a block device.
2707 */
2708 int
2709 vfs_mountedon(struct vnode *vp)
2710 {
2711 struct vnode *vq;
2712 int error = 0;
2713
2714 SPECHASH_LOCK();
2715 if (vp->v_specflags & SI_MOUNTEDON) {
2716 error = EBUSY;
2717 goto out;
2718 }
2719 if (vp->v_specflags & SI_ALIASED) {
2720 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2721 if (vq->v_rdev != vp->v_rdev ||
2722 vq->v_type != vp->v_type)
2723 continue;
2724 if (vq->v_specflags & SI_MOUNTEDON) {
2725 error = EBUSY;
2726 break;
2727 }
2728 }
2729 }
2730 out:
2731 SPECHASH_UNLOCK();
2732 return (error);
2733 }
2734
2735 /*
2736 * Unmount all filesystems. The list is traversed in reverse order
2737 * of mounting to avoid dependencies.
2738 */
2739 __private_extern__ void
2740 vfs_unmountall(void)
2741 {
2742 struct mount *mp;
2743 int error;
2744
2745 /*
2746 * Since this only runs when rebooting, it is not interlocked.
2747 */
2748 mount_list_lock();
2749 while(!TAILQ_EMPTY(&mountlist)) {
2750 mp = TAILQ_LAST(&mountlist, mntlist);
2751 mount_list_unlock();
2752 error = dounmount(mp, MNT_FORCE, 0, vfs_context_current());
2753 if ((error != 0) && (error != EBUSY)) {
2754 printf("unmount of %s failed (", mp->mnt_vfsstat.f_mntonname);
2755 printf("%d)\n", error);
2756 mount_list_lock();
2757 TAILQ_REMOVE(&mountlist, mp, mnt_list);
2758 continue;
2759 } else if (error == EBUSY) {
2760 /* If EBUSY is returned, the unmount was already in progress */
2761 printf("unmount of %p failed (", mp);
2762 printf("BUSY)\n");
2763 }
2764 mount_list_lock();
2765 }
2766 mount_list_unlock();
2767 }
2768
2769
2770 /*
2771 * This routine is called from vnode_pager_deallocate out of the VM.
2772 * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
2773 * on a vnode that has a UBCINFO.
2774 */
2775 __private_extern__ void
2776 vnode_pager_vrele(vnode_t vp)
2777 {
2778 struct ubc_info *uip;
2779
2780 vnode_lock_spin(vp);
2781
2782 vp->v_lflag &= ~VNAMED_UBC;
2783
2784 uip = vp->v_ubcinfo;
2785 vp->v_ubcinfo = UBC_INFO_NULL;
2786
2787 vnode_unlock(vp);
2788
2789 ubc_info_deallocate(uip);
2790 }
2791
2792
2793 #include <sys/disk.h>
2794
2795 errno_t
2796 vfs_init_io_attributes(vnode_t devvp, mount_t mp)
2797 {
2798 int error;
2799 off_t readblockcnt = 0;
2800 off_t writeblockcnt = 0;
2801 off_t readmaxcnt = 0;
2802 off_t writemaxcnt = 0;
2803 off_t readsegcnt = 0;
2804 off_t writesegcnt = 0;
2805 off_t readsegsize = 0;
2806 off_t writesegsize = 0;
2807 off_t alignment = 0;
2808 off_t ioqueue_depth = 0;
2809 u_int32_t blksize;
2810 u_int64_t temp;
2811 u_int32_t features;
2812 vfs_context_t ctx = vfs_context_current();
2813
2814 int isvirtual = 0;
2815 /*
2816 * determine if this mount point exists on the same device as the root
2817 * partition... if so, then it comes under the hard throttle control
2818 */
2819 int thisunit = -1;
2820 static int rootunit = -1;
2821
2822 if (rootunit == -1) {
2823 if (VNOP_IOCTL(rootvp, DKIOCGETBSDUNIT, (caddr_t)&rootunit, 0, ctx))
2824 rootunit = -1;
2825 else if (rootvp == devvp)
2826 mp->mnt_kern_flag |= MNTK_ROOTDEV;
2827 }
2828 if (devvp != rootvp && rootunit != -1) {
2829 if (VNOP_IOCTL(devvp, DKIOCGETBSDUNIT, (caddr_t)&thisunit, 0, ctx) == 0) {
2830 if (thisunit == rootunit)
2831 mp->mnt_kern_flag |= MNTK_ROOTDEV;
2832 }
2833 }
2834 /*
2835 * force the spec device to re-cache
2836 * the underlying block size in case
2837 * the filesystem overrode the initial value
2838 */
2839 set_fsblocksize(devvp);
2840
2841
2842 if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE,
2843 (caddr_t)&blksize, 0, ctx)))
2844 return (error);
2845
2846 mp->mnt_devblocksize = blksize;
2847
2848 /*
2849 * set the maximum possible I/O size
2850 * this may get clipped to a smaller value
2851 * based on which constraints are being advertised
2852 * and if those advertised constraints result in a smaller
2853 * limit for a given I/O
2854 */
2855 mp->mnt_maxreadcnt = MAX_UPL_SIZE * PAGE_SIZE;
2856 mp->mnt_maxwritecnt = MAX_UPL_SIZE * PAGE_SIZE;
2857
2858 if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
2859 if (isvirtual)
2860 mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
2861 }
2862
2863 if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
2864 (caddr_t)&features, 0, ctx)))
2865 return (error);
2866
2867 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD,
2868 (caddr_t)&readblockcnt, 0, ctx)))
2869 return (error);
2870
2871 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE,
2872 (caddr_t)&writeblockcnt, 0, ctx)))
2873 return (error);
2874
2875 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD,
2876 (caddr_t)&readmaxcnt, 0, ctx)))
2877 return (error);
2878
2879 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE,
2880 (caddr_t)&writemaxcnt, 0, ctx)))
2881 return (error);
2882
2883 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD,
2884 (caddr_t)&readsegcnt, 0, ctx)))
2885 return (error);
2886
2887 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE,
2888 (caddr_t)&writesegcnt, 0, ctx)))
2889 return (error);
2890
2891 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD,
2892 (caddr_t)&readsegsize, 0, ctx)))
2893 return (error);
2894
2895 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE,
2896 (caddr_t)&writesegsize, 0, ctx)))
2897 return (error);
2898
2899 if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT,
2900 (caddr_t)&alignment, 0, ctx)))
2901 return (error);
2902
2903 if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
2904 (caddr_t)&ioqueue_depth, 0, ctx)))
2905 return (error);
2906
2907 if (readmaxcnt)
2908 mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt;
2909
2910 if (readblockcnt) {
2911 temp = readblockcnt * blksize;
2912 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
2913
2914 if (temp < mp->mnt_maxreadcnt)
2915 mp->mnt_maxreadcnt = (u_int32_t)temp;
2916 }
2917
2918 if (writemaxcnt)
2919 mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : writemaxcnt;
2920
2921 if (writeblockcnt) {
2922 temp = writeblockcnt * blksize;
2923 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
2924
2925 if (temp < mp->mnt_maxwritecnt)
2926 mp->mnt_maxwritecnt = (u_int32_t)temp;
2927 }
2928
2929 if (readsegcnt) {
2930 temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
2931 } else {
2932 temp = mp->mnt_maxreadcnt / PAGE_SIZE;
2933
2934 if (temp > UINT16_MAX)
2935 temp = UINT16_MAX;
2936 }
2937 mp->mnt_segreadcnt = (u_int16_t)temp;
2938
2939 if (writesegcnt) {
2940 temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
2941 } else {
2942 temp = mp->mnt_maxwritecnt / PAGE_SIZE;
2943
2944 if (temp > UINT16_MAX)
2945 temp = UINT16_MAX;
2946 }
2947 mp->mnt_segwritecnt = (u_int16_t)temp;
2948
2949 if (readsegsize)
2950 temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
2951 else
2952 temp = mp->mnt_maxreadcnt;
2953 mp->mnt_maxsegreadsize = (u_int32_t)temp;
2954
2955 if (writesegsize)
2956 temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize;
2957 else
2958 temp = mp->mnt_maxwritecnt;
2959 mp->mnt_maxsegwritesize = (u_int32_t)temp;
2960
2961 if (alignment)
2962 temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1;
2963 else
2964 temp = 0;
2965 mp->mnt_alignmentmask = temp;
2966
2967
2968 if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH)
2969 temp = ioqueue_depth;
2970 else
2971 temp = MNT_DEFAULT_IOQUEUE_DEPTH;
2972
2973 mp->mnt_ioqueue_depth = temp;
2974 mp->mnt_ioscale = (mp->mnt_ioqueue_depth + (MNT_DEFAULT_IOQUEUE_DEPTH - 1)) / MNT_DEFAULT_IOQUEUE_DEPTH;
2975
2976 if (mp->mnt_ioscale > 1)
2977 printf("ioqueue_depth = %d, ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
2978
2979 if (features & DK_FEATURE_FORCE_UNIT_ACCESS)
2980 mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
2981
2982 return (error);
2983 }
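/*
 * Worked example (illustrative numbers): with blksize == 512 and a
 * device that reports readblockcnt == 2048, the derived byte limit is
 * 2048 * 512 == 1 MB, which only replaces the MAX_UPL_SIZE * PAGE_SIZE
 * default when it is smaller.  The mnt_ioscale expression is a ceiling
 * division, so a reported queue depth of 3 * MNT_DEFAULT_IOQUEUE_DEPTH + 1
 * yields an ioscale of 4.
 */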
2984
2985 static struct klist fs_klist;
2986 lck_grp_t *fs_klist_lck_grp;
2987 lck_mtx_t *fs_klist_lock;
2988
2989 void
2990 vfs_event_init(void)
2991 {
2992
2993 klist_init(&fs_klist);
2994 fs_klist_lck_grp = lck_grp_alloc_init("fs_klist", NULL);
2995 fs_klist_lock = lck_mtx_alloc_init(fs_klist_lck_grp, NULL);
2996 }
2997
2998 void
2999 vfs_event_signal(__unused fsid_t *fsid, u_int32_t event, __unused intptr_t data)
3000 {
3001 lck_mtx_lock(fs_klist_lock);
3002 KNOTE(&fs_klist, event);
3003 lck_mtx_unlock(fs_klist_lock);
3004 }
3005
3006 /*
3007 * return the number of mounted filesystems.
3008 */
3009 static int
3010 sysctl_vfs_getvfscnt(void)
3011 {
3012 return(mount_getvfscnt());
3013 }
3014
3015
3016 static int
3017 mount_getvfscnt(void)
3018 {
3019 int ret;
3020
3021 mount_list_lock();
3022 ret = nummounts;
3023 mount_list_unlock();
3024 return (ret);
3025
3026 }
3027
3028
3029
3030 static int
3031 mount_fillfsids(fsid_t *fsidlst, int count)
3032 {
3033 struct mount *mp;
3034 int actual=0;
3035
3036 actual = 0;
3037 mount_list_lock();
3038 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3039 if (actual < count) {
3040 fsidlst[actual] = mp->mnt_vfsstat.f_fsid;
3041 actual++;
3042 }
3043 }
3044 mount_list_unlock();
3045 return (actual);
3046
3047 }
3048
3049 /*
3050 * Fill in the array of fsid_t's up to a max of 'count'; the actual
3051 * number filled in will be set in '*actual'. If there are more fsid_t's
3052 * than room in fsidlst then ENOMEM will be returned and '*actual' will
3053 * have the actual count.
3054 * Having *actual filled out even in the error case is depended upon.
3055 */
3056 static int
3057 sysctl_vfs_getvfslist(fsid_t *fsidlst, int count, int *actual)
3058 {
3059 struct mount *mp;
3060
3061 *actual = 0;
3062 mount_list_lock();
3063 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3064 (*actual)++;
3065 if (*actual <= count)
3066 fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid;
3067 }
3068 mount_list_unlock();
3069 return (*actual <= count ? 0 : ENOMEM);
3070 }
3071
3072 static int
3073 sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1,
3074 __unused int arg2, struct sysctl_req *req)
3075 {
3076 int actual, error;
3077 size_t space;
3078 fsid_t *fsidlst;
3079
3080 /* This is a readonly node. */
3081 if (req->newptr != USER_ADDR_NULL)
3082 return (EPERM);
3083
3084 /* they are querying us so just return the space required. */
3085 if (req->oldptr == USER_ADDR_NULL) {
3086 req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
3087 return 0;
3088 }
3089 again:
3090 /*
3091 * Retrieve an accurate count of the amount of space required to copy
3092 * out all the fsids in the system.
3093 */
3094 space = req->oldlen;
3095 req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
3096
3097 /* they didn't give us enough space. */
3098 if (space < req->oldlen)
3099 return (ENOMEM);
3100
3101 MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK);
3102 if (fsidlst == NULL) {
3103 return (ENOMEM);
3104 }
3105
3106 error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
3107 &actual);
3108 /*
3109 * If we get back ENOMEM, then another mount has been added while we
3110 * slept in malloc above. If this is the case then try again.
3111 */
3112 if (error == ENOMEM) {
3113 FREE(fsidlst, M_TEMP);
3114 req->oldlen = space;
3115 goto again;
3116 }
3117 if (error == 0) {
3118 error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t));
3119 }
3120 FREE(fsidlst, M_TEMP);
3121 return (error);
3122 }
3123
3124 /*
3125 * Do a sysctl by fsid.
3126 */
3127 static int
3128 sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
3129 struct sysctl_req *req)
3130 {
3131 union union_vfsidctl vc;
3132 struct mount *mp;
3133 struct vfsstatfs *sp;
3134 int *name, flags, namelen;
3135 int error=0, gotref=0;
3136 vfs_context_t ctx = vfs_context_current();
3137 proc_t p = req->p; /* XXX req->p != current_proc()? */
3138 boolean_t is_64_bit;
3139
3140 name = arg1;
3141 namelen = arg2;
3142 is_64_bit = proc_is64bit(p);
3143
3144 error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
3145 if (error)
3146 goto out;
3147 if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
3148 error = EINVAL;
3149 goto out;
3150 }
3151 mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
3152 if (mp == NULL) {
3153 error = ENOENT;
3154 goto out;
3155 }
3156 gotref = 1;
3157 /* reset so that the fs specific code can fetch it. */
3158 req->newidx = 0;
3159 /*
3160 * Note if this is a VFS_CTL then we pass the actual sysctl req
3161 * in for "oldp" so that the lower layer can DTRT and use the
3162 * SYSCTL_IN/OUT routines.
3163 */
3164 if (mp->mnt_op->vfs_sysctl != NULL) {
3165 if (is_64_bit) {
3166 if (vfs_64bitready(mp)) {
3167 error = mp->mnt_op->vfs_sysctl(name, namelen,
3168 CAST_USER_ADDR_T(req),
3169 NULL, USER_ADDR_NULL, 0,
3170 ctx);
3171 }
3172 else {
3173 error = ENOTSUP;
3174 }
3175 }
3176 else {
3177 error = mp->mnt_op->vfs_sysctl(name, namelen,
3178 CAST_USER_ADDR_T(req),
3179 NULL, USER_ADDR_NULL, 0,
3180 ctx);
3181 }
3182 if (error != ENOTSUP) {
3183 goto out;
3184 }
3185 }
3186 switch (name[0]) {
3187 case VFS_CTL_UMOUNT:
3188 req->newidx = 0;
3189 if (is_64_bit) {
3190 req->newptr = vc.vc64.vc_ptr;
3191 req->newlen = (size_t)vc.vc64.vc_len;
3192 }
3193 else {
3194 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
3195 req->newlen = vc.vc32.vc_len;
3196 }
3197 error = SYSCTL_IN(req, &flags, sizeof(flags));
3198 if (error)
3199 break;
3200
3201 mount_ref(mp, 0);
3202 mount_iterdrop(mp);
3203 gotref = 0;
3204 /* safedounmount consumes a ref */
3205 error = safedounmount(mp, flags, ctx);
3206 break;
3207 case VFS_CTL_STATFS:
3208 req->newidx = 0;
3209 if (is_64_bit) {
3210 req->newptr = vc.vc64.vc_ptr;
3211 req->newlen = (size_t)vc.vc64.vc_len;
3212 }
3213 else {
3214 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
3215 req->newlen = vc.vc32.vc_len;
3216 }
3217 error = SYSCTL_IN(req, &flags, sizeof(flags));
3218 if (error)
3219 break;
3220 sp = &mp->mnt_vfsstat;
3221 if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
3222 (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT)))
3223 goto out;
3224 if (is_64_bit) {
3225 struct user64_statfs sfs;
3226 bzero(&sfs, sizeof(sfs));
3227 sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3228 sfs.f_type = mp->mnt_vtable->vfc_typenum;
3229 sfs.f_bsize = (user64_long_t)sp->f_bsize;
3230 sfs.f_iosize = (user64_long_t)sp->f_iosize;
3231 sfs.f_blocks = (user64_long_t)sp->f_blocks;
3232 sfs.f_bfree = (user64_long_t)sp->f_bfree;
3233 sfs.f_bavail = (user64_long_t)sp->f_bavail;
3234 sfs.f_files = (user64_long_t)sp->f_files;
3235 sfs.f_ffree = (user64_long_t)sp->f_ffree;
3236 sfs.f_fsid = sp->f_fsid;
3237 sfs.f_owner = sp->f_owner;
3238
3239 strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
3240 strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
3241 strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);
3242
3243 error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
3244 }
3245 else {
3246 struct user32_statfs sfs;
3247 bzero(&sfs, sizeof(sfs));
3248 sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3249 sfs.f_type = mp->mnt_vtable->vfc_typenum;
3250
3251 /*
3252 * It's possible for there to be more than 2^31 blocks in the filesystem, so we
3253 * have to fudge the numbers here in that case. We inflate the blocksize in order
3254 * to reflect the filesystem size as best we can.
3255 */
3256 if (sp->f_blocks > INT_MAX) {
3257 int shift;
3258
3259 /*
3260 * Work out how far we have to shift the block count down to make it fit.
3261 * Note that it's possible to have to shift so far that the resulting
3262 * blocksize would be unreportably large. At that point, we will clip
3263 * any values that don't fit.
3264 *
3265 * For safety's sake, we also ensure that f_iosize is never reported as
3266 * being smaller than f_bsize.
3267 */
3268 for (shift = 0; shift < 32; shift++) {
3269 if ((sp->f_blocks >> shift) <= INT_MAX)
3270 break;
3271 if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX)
3272 break;
3273 }
3274 #define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
3275 sfs.f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
3276 sfs.f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
3277 sfs.f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
3278 #undef __SHIFT_OR_CLIP
3279 sfs.f_bsize = (user32_long_t)(sp->f_bsize << shift);
3280 sfs.f_iosize = lmax(sp->f_iosize, sp->f_bsize);
3281 } else {
3282 sfs.f_bsize = (user32_long_t)sp->f_bsize;
3283 sfs.f_iosize = (user32_long_t)sp->f_iosize;
3284 sfs.f_blocks = (user32_long_t)sp->f_blocks;
3285 sfs.f_bfree = (user32_long_t)sp->f_bfree;
3286 sfs.f_bavail = (user32_long_t)sp->f_bavail;
3287 }
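/*
 * Worked example (illustrative): with sp->f_blocks == 6442450944 and
 * sp->f_bsize == 4096, the loop above stops at shift == 2, so a 32-bit
 * caller sees f_bsize == 16384 and f_blocks == 1610612736; the product
 * (total bytes) is unchanged, only the granularity of the report is
 * coarser.
 */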
3288 sfs.f_files = (user32_long_t)sp->f_files;
3289 sfs.f_ffree = (user32_long_t)sp->f_ffree;
3290 sfs.f_fsid = sp->f_fsid;
3291 sfs.f_owner = sp->f_owner;
3292
3293 strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
3294 strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
3295 strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);
3296
3297 error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
3298 }
3299 break;
3300 default:
3301 error = ENOTSUP;
3302 goto out;
3303 }
3304 out:
3305 if(gotref != 0)
3306 mount_iterdrop(mp);
3307 return (error);
3308 }
3309
3310 static int filt_fsattach(struct knote *kn);
3311 static void filt_fsdetach(struct knote *kn);
3312 static int filt_fsevent(struct knote *kn, long hint);
3313 struct filterops fs_filtops = {
3314 .f_attach = filt_fsattach,
3315 .f_detach = filt_fsdetach,
3316 .f_event = filt_fsevent,
3317 };
3318
3319 static int
3320 filt_fsattach(struct knote *kn)
3321 {
3322
3323 lck_mtx_lock(fs_klist_lock);
3324 kn->kn_flags |= EV_CLEAR;
3325 KNOTE_ATTACH(&fs_klist, kn);
3326 lck_mtx_unlock(fs_klist_lock);
3327 return (0);
3328 }
3329
3330 static void
3331 filt_fsdetach(struct knote *kn)
3332 {
3333 lck_mtx_lock(fs_klist_lock);
3334 KNOTE_DETACH(&fs_klist, kn);
3335 lck_mtx_unlock(fs_klist_lock);
3336 }
3337
3338 static int
3339 filt_fsevent(struct knote *kn, long hint)
3340 {
3341 /*
3342 * Backwards compatibility:
3343 * Other filters would do nothing if kn->kn_sfflags == 0
3344 */
3345
3346 if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
3347 kn->kn_fflags |= hint;
3348 }
3349
3350 return (kn->kn_fflags != 0);
3351 }
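/*
 * Usage sketch (illustrative, userland side): these filterops back the
 * EVFILT_FS kevent filter, which a process would arm roughly as
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 0, EVFILT_FS, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * after which events posted through vfs_event_signal() are delivered
 * on kq.  Passing fflags == 0 means "any event", per filt_fsevent()
 * above.  Variable names are hypothetical.
 */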
3352
3353 static int
3354 sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp,
3355 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3356 {
3357 int out, error;
3358 pid_t pid;
3359 proc_t p;
3360
3361 /* We need a pid. */
3362 if (req->newptr == USER_ADDR_NULL)
3363 return (EINVAL);
3364
3365 error = SYSCTL_IN(req, &pid, sizeof(pid));
3366 if (error)
3367 return (error);
3368
3369 p = proc_find(pid < 0 ? -pid : pid);
3370 if (p == NULL)
3371 return (ESRCH);
3372
3373 /*
3374 * Fetching the value is ok, but we only fetch if the old
3375 * pointer is given.
3376 */
3377 if (req->oldptr != USER_ADDR_NULL) {
3378 out = !((p->p_flag & P_NOREMOTEHANG) == 0);
3379 proc_rele(p);
3380 error = SYSCTL_OUT(req, &out, sizeof(out));
3381 return (error);
3382 }
3383
3384 /* cansignal offers us enough security. */
3385 if (p != req->p && proc_suser(req->p) != 0) {
3386 proc_rele(p);
3387 return (EPERM);
3388 }
3389
3390 if (pid < 0)
3391 OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
3392 else
3393 OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
3394 proc_rele(p);
3395
3396 return (0);
3397 }
3398
3399 /* the vfs.generic. branch. */
3400 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RW|CTLFLAG_LOCKED, NULL, "vfs generic hinge");
3401 /* retrieve a list of mounted filesystem fsid_t */
3402 SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist, CTLFLAG_RD,
3403 NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
3404 /* perform operations on filesystem via fsid_t */
3405 SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW|CTLFLAG_LOCKED,
3406 sysctl_vfs_ctlbyfsid, "ctlbyfsid");
3407 SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW|CTLFLAG_ANYBODY,
3408 NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
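/*
 * Usage sketch (illustrative, userland side): the vfsidlist node above
 * follows the usual two-call sysctl pattern -- size the buffer first,
 * then fetch the fsid_t array:
 *
 *	size_t len = 0;
 *	fsid_t *ids;
 *
 *	if (sysctlbyname("vfs.generic.vfsidlist", NULL, &len, NULL, 0) == 0 &&
 *	    (ids = malloc(len)) != NULL &&
 *	    sysctlbyname("vfs.generic.vfsidlist", ids, &len, NULL, 0) == 0)
 *		printf("%lu mounted filesystems\n", len / sizeof(fsid_t));
 *
 * The OID string comes from the SYSCTL_NODE/SYSCTL_PROC declarations
 * above; variable names are hypothetical.
 */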
3409
3410
3411 long num_reusedvnodes = 0;
3412
3413 static int
3414 new_vnode(vnode_t *vpp)
3415 {
3416 vnode_t vp;
3417 int retries = 0; /* retry in case of tablefull */
3418 int force_alloc = 0, walk_count = 0;
3419 unsigned int vpid;
3420 struct timespec ts;
3421 struct timeval current_tv;
3422 #ifndef __LP64__
3423 struct unsafe_fsnode *l_unsafefs = 0;
3424 #endif /* __LP64__ */
3425 proc_t curproc = current_proc();
3426
3427 retry:
3428 microuptime(&current_tv);
3429
3430 vp = NULLVP;
3431
3432 vnode_list_lock();
3433
3434 if ( !TAILQ_EMPTY(&vnode_dead_list)) {
3435 /*
3436 * Can always reuse a dead one
3437 */
3438 vp = TAILQ_FIRST(&vnode_dead_list);
3439 goto steal_this_vp;
3440 }
3441 /*
3442 * no dead vnodes available... if we're under
3443 * the limit, we'll create a new vnode
3444 */
3445 if (numvnodes < desiredvnodes || force_alloc) {
3446 numvnodes++;
3447 vnode_list_unlock();
3448
3449 MALLOC_ZONE(vp, struct vnode *, sizeof(*vp), M_VNODE, M_WAITOK);
3450 bzero((char *)vp, sizeof(*vp));
3451 VLISTNONE(vp); /* avoid double queue removal */
3452 lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr);
3453
3454 klist_init(&vp->v_knotes);
3455 nanouptime(&ts);
3456 vp->v_id = ts.tv_nsec;
3457 vp->v_flag = VSTANDARD;
3458
3459 #if CONFIG_MACF
3460 if (mac_vnode_label_init_needed(vp))
3461 mac_vnode_label_init(vp);
3462 #endif /* MAC */
3463
3464 vp->v_iocount = 1;
3465 goto done;
3466 }
3467
3468 #define MAX_WALK_COUNT 1000
3469
3470 if ( !TAILQ_EMPTY(&vnode_rage_list) &&
3471 (ragevnodes >= rage_limit ||
3472 (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
3473
3474 TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
3475 if ( !(vp->v_listflag & VLIST_RAGE))
3476 panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
3477
3478 // if we're a dependency-capable process, skip vnodes that can
3479 // cause recycling deadlocks. (i.e. this process is diskimages
3480 // helper and the vnode is in a disk image).
3481 //
3482 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
3483 break;
3484 }
3485
3486 // don't iterate more than MAX_WALK_COUNT vnodes to
3487 // avoid keeping the vnode list lock held for too long.
3488 if (walk_count++ > MAX_WALK_COUNT) {
3489 vp = NULL;
3490 break;
3491 }
3492 }
3493
3494 }
3495
3496 if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
3497 /*
3498 * Pick the first vp for possible reuse
3499 */
3500 walk_count = 0;
3501 TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
3502 // if we're a dependency-capable process, skip vnodes that can
3503 // cause recycling deadlocks. (i.e. this process is diskimages
3504 // helper and the vnode is in a disk image)
3505 //
3506 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
3507 break;
3508 }
3509
3510 // don't iterate more than MAX_WALK_COUNT vnodes to
3511 // avoid keeping the vnode list lock held for too long.
3512 if (walk_count++ > MAX_WALK_COUNT) {
3513 vp = NULL;
3514 break;
3515 }
3516 }
3517
3518 }
3519
3520 //
3521 // if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
3522 // then we're trying to create a vnode on behalf of a
3523 // process like diskimages-helper that has file systems
3524 // mounted on top of itself (and thus we can't reclaim
3525 // vnodes in the file systems on top of us). if we can't
3526 // find a vnode to reclaim then we'll just have to force
3527 // the allocation.
3528 //
3529 if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
3530 force_alloc = 1;
3531 vnode_list_unlock();
3532 goto retry;
3533 }
3534
3535 if (vp == NULL) {
3536 /*
3537 * we've reached the system imposed maximum number of vnodes
3538 * but there isn't a single one available
3539 * wait a bit and then retry... if we can't get a vnode
3540 * after 100 retries, then log a complaint
3541 */
3542 if (++retries <= 100) {
3543 vnode_list_unlock();
3544 delay_for_interval(1, 1000 * 1000);
3545 goto retry;
3546 }
3547
3548 vnode_list_unlock();
3549 tablefull("vnode");
3550 log(LOG_EMERG, "%d desired, %d numvnodes, "
3551 "%d free, %d dead, %d rage\n",
3552 desiredvnodes, numvnodes, freevnodes, deadvnodes, ragevnodes);
3553 #if CONFIG_EMBEDDED
3554 /*
3555 * Running out of vnodes tends to make a system unusable. On an
3556 * embedded system, it's unlikely that the user can do anything
3557 * about it (or would know what to do, if they could). So panic
3558 * the system so it will automatically restart (and hopefully we
3559 * can get a panic log that tells us why we ran out).
3560 */
3561 panic("vnode table is full\n");
3562 #endif
3563 *vpp = NULL;
3564 return (ENFILE);
3565 }
3566 steal_this_vp:
3567 vpid = vp->v_id;
3568
3569 vnode_list_remove_locked(vp);
3570
3571 vnode_list_unlock();
3572
3573 vnode_lock_spin(vp);
3574
3575 /*
3576 * We could wait for the vnode_lock after removing the vp from the freelist
3577 * and the vid is bumped only at the very end of reclaim. So it is possible
3578 * that we are looking at a vnode that is being terminated. If so skip it.
3579 */
3580 if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
3581 VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
3582 /*
3583 * we lost the race between dropping the list lock
3584 * and picking up the vnode_lock... someone else
3585 * used this vnode and it is now in a new state
3586 * so we need to go back and try again
3587 */
3588 vnode_unlock(vp);
3589 goto retry;
3590 }
3591 if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) {
3592 /*
3593 * we did a vnode_rele_ext that asked for
3594 * us not to reenter the filesystem during
3595 * the release even though VL_NEEDINACTIVE was
3596 * set... we'll do it here by doing a
3597 * vnode_get/vnode_put
3598 *
3599 * pick up an iocount so that we can call
3600 * vnode_put and drive the VNOP_INACTIVE...
3601 * vnode_put will either leave us off
3602 * the freelist if a new ref comes in,
3603 * or put us back on the end of the freelist
3604 * or recycle us if we were marked for termination...
3605 * so we'll just go grab a new candidate
3606 */
3607 vp->v_iocount++;
3608 #ifdef JOE_DEBUG
3609 record_vp(vp, 1);
3610 #endif
3611 vnode_put_locked(vp);
3612 vnode_unlock(vp);
3613 goto retry;
3614 }
3615 OSAddAtomicLong(1, &num_reusedvnodes);
3616
3617 /* Checks for anyone racing us for recycle */
3618 if (vp->v_type != VBAD) {
3619 if (vp->v_lflag & VL_DEAD)
3620 panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
3621 vnode_lock_convert(vp);
3622 (void)vnode_reclaim_internal(vp, 1, 1, 0);
3623
3624 if ((VONLIST(vp)))
3625 panic("new_vnode(%p): vp on list", vp);
3626 if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
3627 (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH)))
3628 panic("new_vnode(%p): free vnode still referenced", vp);
3629 if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0))
3630 panic("new_vnode(%p): vnode seems to be on mount list", vp);
3631 if ( !LIST_EMPTY(&vp->v_nclinks) || !LIST_EMPTY(&vp->v_ncchildren))
3632 panic("new_vnode(%p): vnode still hooked into the name cache", vp);
3633 }
3634
3635 #ifndef __LP64__
3636 if (vp->v_unsafefs) {
3637 l_unsafefs = vp->v_unsafefs;
3638 vp->v_unsafefs = (struct unsafe_fsnode *)NULL;
3639 }
3640 #endif /* __LP64__ */
3641
3642 #if CONFIG_MACF
3643 /*
3644 * We should never see VL_LABELWAIT or VL_LABEL here,
3645 * as those operations hold a reference.
3646 */
3647 assert ((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
3648 assert ((vp->v_lflag & VL_LABEL) != VL_LABEL);
3649 if (vp->v_lflag & VL_LABELED) {
3650 vnode_lock_convert(vp);
3651 mac_vnode_label_recycle(vp);
3652 } else if (mac_vnode_label_init_needed(vp)) {
3653 vnode_lock_convert(vp);
3654 mac_vnode_label_init(vp);
3655 }
3656
3657 #endif /* MAC */
3658
3659 vp->v_iocount = 1;
3660 vp->v_lflag = 0;
3661 vp->v_writecount = 0;
3662 vp->v_references = 0;
3663 vp->v_iterblkflags = 0;
3664 vp->v_flag = VSTANDARD;
3665 /* vbad vnodes can point to dead_mountp */
3666 vp->v_mount = NULL;
3667 vp->v_defer_reclaimlist = (vnode_t)0;
3668
3669 vnode_unlock(vp);
3670
3671 #ifndef __LP64__
3672 if (l_unsafefs) {
3673 lck_mtx_destroy(&l_unsafefs->fsnodelock, vnode_lck_grp);
3674 FREE_ZONE((void *)l_unsafefs, sizeof(struct unsafe_fsnode), M_UNSAFEFS);
3675 }
3676 #endif /* __LP64__ */
3677
3678 done:
3679 *vpp = vp;
3680
3681 return (0);
3682 }
3683
3684 void
3685 vnode_lock(vnode_t vp)
3686 {
3687 lck_mtx_lock(&vp->v_lock);
3688 }
3689
3690 void
3691 vnode_lock_spin(vnode_t vp)
3692 {
3693 lck_mtx_lock_spin(&vp->v_lock);
3694 }
3695
3696 void
3697 vnode_unlock(vnode_t vp)
3698 {
3699 lck_mtx_unlock(&vp->v_lock);
3700 }
3701
3702
3703
3704 int
3705 vnode_get(struct vnode *vp)
3706 {
3707 int retval;
3708
3709 vnode_lock_spin(vp);
3710 retval = vnode_get_locked(vp);
3711 vnode_unlock(vp);
3712
3713 return(retval);
3714 }
3715
3716 int
3717 vnode_get_locked(struct vnode *vp)
3718 {
3719 #if DIAGNOSTIC
3720 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
3721 #endif
3722 if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
3723 return(ENOENT);
3724 }
3725 vp->v_iocount++;
3726 #ifdef JOE_DEBUG
3727 record_vp(vp, 1);
3728 #endif
3729 return (0);
3730 }
3731
3732 int
3733 vnode_getwithvid(vnode_t vp, uint32_t vid)
3734 {
3735 return(vget_internal(vp, vid, ( VNODE_NODEAD| VNODE_WITHID)));
3736 }
3737
3738 int
3739 vnode_getwithref(vnode_t vp)
3740 {
3741 return(vget_internal(vp, 0, 0));
3742 }
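/*
 * Usage sketch (illustrative): the vid-checked get/put pair is the
 * standard way to revalidate a vnode found without an iocount held,
 * mirroring the pattern used by check_mountedon() and vcount() above:
 *
 *	uint32_t vid = vp->v_id;	// captured while vp is known valid
 *	// drop whatever lock was protecting vp here
 *	if (vnode_getwithvid(vp, vid) == 0) {
 *		// same identity, and we now hold an iocount
 *		do_something(vp);	// hypothetical work
 *		vnode_put(vp);
 *	}
 */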
3743
3744
3745 __private_extern__ int
3746 vnode_getalways(vnode_t vp)
3747 {
3748 return(vget_internal(vp, 0, VNODE_ALWAYS));
3749 }
3750
3751 int
3752 vnode_put(vnode_t vp)
3753 {
3754 int retval;
3755
3756 vnode_lock_spin(vp);
3757 retval = vnode_put_locked(vp);
3758 vnode_unlock(vp);
3759
3760 return(retval);
3761 }
3762
3763 int
3764 vnode_put_locked(vnode_t vp)
3765 {
3766 vfs_context_t ctx = vfs_context_current(); /* hoist outside loop */
3767
3768 #if DIAGNOSTIC
3769 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
3770 #endif
3771 retry:
3772 if (vp->v_iocount < 1)
3773 panic("vnode_put(%p): iocount < 1", vp);
3774
3775 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
3776 vnode_dropiocount(vp);
3777 return(0);
3778 }
3779 if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE) {
3780
3781 vp->v_lflag &= ~VL_NEEDINACTIVE;
3782 vnode_unlock(vp);
3783
3784 VNOP_INACTIVE(vp, ctx);
3785
3786 vnode_lock_spin(vp);
3787 /*
3788 * because we had to drop the vnode lock before calling
3789 * VNOP_INACTIVE, the state of this vnode may have changed...
3790 * we may pick up both VL_MARKTERM and either
3791 * an iocount or a usecount while in the VNOP_INACTIVE call
3792 * we don't want to call vnode_reclaim_internal on a vnode
3793 * that has active references on it... so loop back around
3794 * and reevaluate the state
3795 */
3796 goto retry;
3797 }
3798 vp->v_lflag &= ~VL_NEEDINACTIVE;
3799
3800 if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
3801 vnode_lock_convert(vp);
3802 vnode_reclaim_internal(vp, 1, 1, 0);
3803 }
3804 vnode_dropiocount(vp);
3805 vnode_list_add(vp);
3806
3807 return(0);
3808 }
3809
3810 /* is vnode_t in use by others? */
3811 int
3812 vnode_isinuse(vnode_t vp, int refcnt)
3813 {
3814 return(vnode_isinuse_locked(vp, refcnt, 0));
3815 }
3816
3817
3818 static int
3819 vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
3820 {
3821 int retval = 0;
3822
3823 if (!locked)
3824 vnode_lock_spin(vp);
3825 if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
3826 retval = 1;
3827 goto out;
3828 }
3829 if (vp->v_type == VREG) {
3830 retval = ubc_isinuse_locked(vp, refcnt, 1);
3831 }
3832
3833 out:
3834 if (!locked)
3835 vnode_unlock(vp);
3836 return(retval);
3837 }
3838
3839
3840 /* resume vnode_t */
3841 errno_t
3842 vnode_resume(vnode_t vp)
3843 {
3844 if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
3845
3846 vnode_lock_spin(vp);
3847 vp->v_lflag &= ~VL_SUSPENDED;
3848 vp->v_owner = NULL;
3849 vnode_unlock(vp);
3850
3851 wakeup(&vp->v_iocount);
3852 }
3853 return(0);
3854 }
3855
3856 /* suspend vnode_t
3857 * Please do not use on more than one vnode at a time as it may
3858 * cause deadlocks.
3859 * xxx should we explicitly prevent this from happening?
3860 */
3861
3862 errno_t
3863 vnode_suspend(vnode_t vp)
3864 {
3865 if (vp->v_lflag & VL_SUSPENDED) {
3866 return(EBUSY);
3867 }
3868
3869 vnode_lock_spin(vp);
3870
3871 /*
3872 * xxx is this sufficient to check if a vnode_drain is in
3873 * progress?
3874 */
3875
3876 if (vp->v_owner == NULL) {
3877 vp->v_lflag |= VL_SUSPENDED;
3878 vp->v_owner = current_thread();
3879 }
3880 vnode_unlock(vp);
3881
3882 return(0);
3883 }
3884
3885
3886
3887 static errno_t
3888 vnode_drain(vnode_t vp)
3889 {
3890
3891 if (vp->v_lflag & VL_DRAIN) {
3892 panic("vnode_drain: recursuve drain");
3893 return(ENOENT);
3894 }
3895 vp->v_lflag |= VL_DRAIN;
3896 vp->v_owner = current_thread();
3897
3898 while (vp->v_iocount > 1)
3899 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);
3900 return(0);
3901 }
3902
3903
3904 /*
3905 * if the number of recent references via vnode_getwithvid or vnode_getwithref
3906 * exceeds this threshold, then 'UN-AGE' the vnode by removing it from
3907 * the LRU list if it's currently on it... once the iocount and usecount both drop
3908 * to 0, it will get put back on the end of the list, effectively making it younger
3909 * this allows us to keep actively referenced vnodes in the list without having
3910 * to constantly remove and add to the list each time a vnode w/o a usecount is
3911 * referenced which costs us taking and dropping a global lock twice.
3912 */
3913 #define UNAGE_THRESHHOLD 25
3914
3915 static errno_t
3916 vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
3917 {
3918 int nodead = vflags & VNODE_NODEAD;
3919 int nosusp = vflags & VNODE_NOSUSPEND;
3920 int always = vflags & VNODE_ALWAYS;
3921
3922 for (;;) {
3923 /*
3924 * if it is a dead vnode with deadfs
3925 */
3926 if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
3927 return(ENOENT);
3928 }
3929 /*
3930 * will return VL_DEAD ones
3931 */
3932 if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0 ) {
3933 break;
3934 }
3935 /*
3936 * if suspended vnodes are to be failed
3937 */
3938 if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
3939 return(ENOENT);
3940 }
3941 /*
3942 * if you are the owner of the drain/suspend/termination, you can acquire an iocount
3943 * check for VL_TERMINATE; it does not set owner
3944 */
3945 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
3946 (vp->v_owner == current_thread())) {
3947 break;
3948 }
3949
3950 if (always != 0)
3951 break;
3952 vnode_lock_convert(vp);
3953
3954 if (vp->v_lflag & VL_TERMINATE) {
3955 vp->v_lflag |= VL_TERMWANT;
3956
3957 msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vnode getiocount", NULL);
3958 } else
3959 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
3960 }
3961 if (vid != vp->v_id) {
3962 return(ENOENT);
3963 }
3964 if (++vp->v_references >= UNAGE_THRESHHOLD) {
3965 vp->v_references = 0;
3966 vnode_list_remove(vp);
3967 }
3968 vp->v_iocount++;
3969 #ifdef JOE_DEBUG
3970 record_vp(vp, 1);
3971 #endif
3972 return(0);
3973 }
3974
3975 static void
3976 vnode_dropiocount (vnode_t vp)
3977 {
3978 if (vp->v_iocount < 1)
3979 panic("vnode_dropiocount(%p): v_iocount < 1", vp);
3980
3981 vp->v_iocount--;
3982 #ifdef JOE_DEBUG
3983 record_vp(vp, -1);
3984 #endif
3985 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1))
3986 wakeup(&vp->v_iocount);
3987 }
3988
3989
3990 void
3991 vnode_reclaim(struct vnode * vp)
3992 {
3993 vnode_reclaim_internal(vp, 0, 0, 0);
3994 }
3995
3996 __private_extern__
3997 void
3998 vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
3999 {
4000 int isfifo = 0;
4001
4002 if (!locked)
4003 vnode_lock(vp);
4004
4005 if (vp->v_lflag & VL_TERMINATE) {
4006 panic("vnode reclaim in progress");
4007 }
4008 vp->v_lflag |= VL_TERMINATE;
4009
4010 vn_clearunionwait(vp, 1);
4011
4012 vnode_drain(vp);
4013
4014 isfifo = (vp->v_type == VFIFO);
4015
4016 if (vp->v_type != VBAD)
4017 vgone(vp, flags); /* clean and reclaim the vnode */
4018
4019 /*
4020 * give the vnode a new identity so that vnode_getwithvid will fail
4021 * on any stale cache accesses...
4022 * grab the list_lock so that if we're in "new_vnode"
4023 * behind the list_lock trying to steal this vnode, the v_id is stable...
4024 * once new_vnode drops the list_lock, it will block trying to take
4025 * the vnode lock until we release it... at that point it will evaluate
4026 * whether the v_id has changed
4027 * also need to make sure that the vnode isn't on a list where "new_vnode"
4028 * can find it after the v_id has been bumped until we are completely done
4029 * with the vnode (i.e. putting it back on a list has to be the very last
4030 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
4031 * are holding an io_count on the vnode... they need to drop the io_count
4032 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
4033 * they are completely done with the vnode
4034 */
4035 vnode_list_lock();
4036
4037 vnode_list_remove_locked(vp);
4038 vp->v_id++;
4039
4040 vnode_list_unlock();
4041
4042 if (isfifo) {
4043 struct fifoinfo * fip;
4044
4045 fip = vp->v_fifoinfo;
4046 vp->v_fifoinfo = NULL;
4047 FREE(fip, M_TEMP);
4048 }
4049 vp->v_type = VBAD;
4050
4051 if (vp->v_data)
4052 panic("vnode_reclaim_internal: cleaned vnode isn't");
4053 if (vp->v_numoutput)
4054 panic("vnode_reclaim_internal: clean vnode has pending I/O's");
4055 if (UBCINFOEXISTS(vp))
4056 panic("vnode_reclaim_internal: ubcinfo not cleaned");
4057 if (vp->v_parent)
4058 panic("vnode_reclaim_internal: vparent not removed");
4059 if (vp->v_name)
4060 panic("vnode_reclaim_internal: vname not removed");
4061
4062 vp->v_socket = NULL;
4063
4064 vp->v_lflag &= ~VL_TERMINATE;
4065 vp->v_lflag &= ~VL_DRAIN;
4066 vp->v_owner = NULL;
4067
4068 KNOTE(&vp->v_knotes, NOTE_REVOKE);
4069
4070 /* Make sure that when we reuse the vnode, no knotes left over */
4071 klist_init(&vp->v_knotes);
4072
4073 if (vp->v_lflag & VL_TERMWANT) {
4074 vp->v_lflag &= ~VL_TERMWANT;
4075 wakeup(&vp->v_lflag);
4076 }
4077 if (!reuse) {
4078 /*
4079 * make sure we get on the
4080 * dead list if appropriate
4081 */
4082 vnode_list_add(vp);
4083 }
4084 if (!locked)
4085 vnode_unlock(vp);
4086 }
4087
4088 /* USAGE:
4089 * The following api creates a vnode and associates all the parameters specified in the vnode_fsparam
4090 * structure, and returns a vnode handle with a reference. Device aliasing is handled here, so checkalias
4091 * is obsoleted by this.
4092 * vnode_create(int flavor, size_t size, void * param, vnode_t *vp)
4093 */
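/*
 * Caller sketch (illustrative, field values hypothetical): a filesystem
 * typically fills a vnode_fsparam and calls vnode_create like so:
 *
 *	struct vnode_fsparam vfsp;
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	bzero(&vfsp, sizeof(vfsp));
 *	vfsp.vnfs_mp = mp;
 *	vfsp.vnfs_vtype = VREG;
 *	vfsp.vnfs_dvp = dvp;
 *	vfsp.vnfs_cnp = cnp;
 *	vfsp.vnfs_fsnode = np;
 *	vfsp.vnfs_vops = examplefs_vnodeop_p;
 *	vfsp.vnfs_filesize = filesize;
 *	vfsp.vnfs_flags = VNFS_ADDFSREF;
 *
 *	error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vp);
 *
 * examplefs_vnodeop_p, np and filesize are placeholders; only the
 * flavor/size/param/vpp convention and the vnfs_* fields consumed
 * below are taken from this file.
 */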
4094 int
4095 vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
4096 {
4097 int error;
4098 int insert = 1;
4099 vnode_t vp;
4100 vnode_t nvp;
4101 vnode_t dvp;
4102 struct uthread *ut;
4103 struct componentname *cnp;
4104 struct vnode_fsparam *param = (struct vnode_fsparam *)data;
4105
4106 if (flavor == VNCREATE_FLAVOR && (size == VCREATESIZE) && param) {
4107 if ( (error = new_vnode(&vp)) ) {
4108 return(error);
4109 } else {
4110 dvp = param->vnfs_dvp;
4111 cnp = param->vnfs_cnp;
4112
4113 vp->v_op = param->vnfs_vops;
4114 vp->v_type = param->vnfs_vtype;
4115 vp->v_data = param->vnfs_fsnode;
4116
4117 if (param->vnfs_markroot)
4118 vp->v_flag |= VROOT;
4119 if (param->vnfs_marksystem)
4120 vp->v_flag |= VSYSTEM;
4121 if (vp->v_type == VREG) {
4122 error = ubc_info_init_withsize(vp, param->vnfs_filesize);
4123 if (error) {
4124 #ifdef JOE_DEBUG
4125 record_vp(vp, 1);
4126 #endif
4127 vp->v_mount = NULL;
4128 vp->v_op = dead_vnodeop_p;
4129 vp->v_tag = VT_NON;
4130 vp->v_data = NULL;
4131 vp->v_type = VBAD;
4132 vp->v_lflag |= VL_DEAD;
4133
4134 vnode_put(vp);
4135 return(error);
4136 }
4137 }
4138 #ifdef JOE_DEBUG
4139 record_vp(vp, 1);
4140 #endif
4141 if (vp->v_type == VCHR || vp->v_type == VBLK) {
4142
4143 vp->v_tag = VT_DEVFS; /* callers will reset if needed (bdevvp) */
4144
4145 if ( (nvp = checkalias(vp, param->vnfs_rdev)) ) {
4146 /*
4147 * if checkalias returns a vnode, it will be locked
4148 *
4149 * first get rid of the unneeded vnode we acquired
4150 */
4151 vp->v_data = NULL;
4152 vp->v_op = spec_vnodeop_p;
4153 vp->v_type = VBAD;
4154 vp->v_lflag = VL_DEAD;
4155 vp->v_data = NULL;
4156 vp->v_tag = VT_NON;
4157 vnode_put(vp);
4158
4159 /*
4160 * switch to aliased vnode and finish
4161 * preparing it
4162 */
4163 vp = nvp;
4164
4165 vclean(vp, 0);
4166 vp->v_op = param->vnfs_vops;
4167 vp->v_type = param->vnfs_vtype;
4168 vp->v_data = param->vnfs_fsnode;
4169 vp->v_lflag = 0;
4170 vp->v_mount = NULL;
4171 insmntque(vp, param->vnfs_mp);
4172 insert = 0;
4173 vnode_unlock(vp);
4174 }
4175 }
4176
4177 if (vp->v_type == VFIFO) {
4178 struct fifoinfo *fip;
4179
4180 MALLOC(fip, struct fifoinfo *,
4181 sizeof(*fip), M_TEMP, M_WAITOK);
4182 bzero(fip, sizeof(struct fifoinfo ));
4183 vp->v_fifoinfo = fip;
4184 }
4185 /* The file system must pass the address of the location where
4186 * it stores the vnode pointer. Once we add the vnode to the mount
4187 * list and name cache it becomes discoverable, so the file system node
4188 * must have its connection to the vnode set up by then.
4189 */
4190 *vpp = vp;
4191
4192 /* Add fs named reference. */
4193 if (param->vnfs_flags & VNFS_ADDFSREF) {
4194 vp->v_lflag |= VNAMED_FSHASH;
4195 }
4196 if (param->vnfs_mp) {
4197 if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL)
4198 vp->v_flag |= VLOCKLOCAL;
4199 if (insert) {
4200 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4201 panic("insmntque: vp on the free list\n");
4202 /*
4203 * enter in mount vnode list
4204 */
4205 insmntque(vp, param->vnfs_mp);
4206 }
4207 #ifndef __LP64__
4208 if ((param->vnfs_mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE) == 0) {
4209 MALLOC_ZONE(vp->v_unsafefs, struct unsafe_fsnode *,
4210 sizeof(struct unsafe_fsnode), M_UNSAFEFS, M_WAITOK);
4211 vp->v_unsafefs->fsnode_count = 0;
4212 vp->v_unsafefs->fsnodeowner = (void *)NULL;
4213 lck_mtx_init(&vp->v_unsafefs->fsnodelock, vnode_lck_grp, vnode_lck_attr);
4214 }
4215 #endif /* __LP64__ */
4216 }
4217 if (dvp && vnode_ref(dvp) == 0) {
4218 vp->v_parent = dvp;
4219 }
4220 if (cnp) {
4221 if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
4222 /*
4223 * enter into name cache
4224 * we've got the info to enter it into the name cache now
4225 * cache_enter_create will pick up an extra reference on
4226 * the name entered into the string cache
4227 */
4228 vp->v_name = cache_enter_create(dvp, vp, cnp);
4229 } else
4230 vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
4231
4232 if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED)
4233 vp->v_flag |= VISUNION;
4234 }
4235 if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
4236 /*
4237 * this vnode is being created as cacheable in the name cache
4238 * this allows us to re-enter it in the cache
4239 */
4240 vp->v_flag |= VNCACHEABLE;
4241 }
4242 ut = get_bsdthread_info(current_thread());
4243
4244 if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
4245 (ut->uu_flag & UT_RAGE_VNODES)) {
4246 /*
4247 * process has indicated that it wants any
4248 * vnodes created on its behalf to be rapidly
4249 * aged to reduce the impact on the cached set
4250 * of vnodes
4251 */
4252 vp->v_flag |= VRAGE;
4253 }
4254 return(0);
4255 }
4256 }
4257 return (EINVAL);
4258 }
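/*
 * Illustrative sketch (assumption, not part of the original source): a
 * filesystem's "get vnode" path typically fills in a struct vnode_fsparam
 * and hands it to vnode_create(). The fsnode, size field and vnode
 * operations vector names below (np, n_size, myfs_vnodeop_p) are hypothetical.
 *
 *	struct vnode_fsparam vfsp;
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	bzero(&vfsp, sizeof (vfsp));
 *	vfsp.vnfs_mp = mp;			// mount the vnode belongs to
 *	vfsp.vnfs_vtype = VREG;			// type of object being created
 *	vfsp.vnfs_str = "myfs";
 *	vfsp.vnfs_dvp = dvp;			// parent vnode, if known
 *	vfsp.vnfs_fsnode = np;			// filesystem-private node
 *	vfsp.vnfs_cnp = cnp;			// component name, if known
 *	vfsp.vnfs_vops = myfs_vnodeop_p;	// vnode operations vector
 *	vfsp.vnfs_filesize = np->n_size;
 *	vfsp.vnfs_flags = VNFS_ADDFSREF;	// take a named fs reference
 *
 *	error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vp);
 *	if (error == 0)
 *		np->n_vnode = vp;		// returned with an iocount held
 */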
4259
4260 int
4261 vnode_addfsref(vnode_t vp)
4262 {
4263 vnode_lock_spin(vp);
4264 if (vp->v_lflag & VNAMED_FSHASH)
4265 panic("add_fsref: vp already has named reference");
4266 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4267 panic("addfsref: vp on the free list\n");
4268 vp->v_lflag |= VNAMED_FSHASH;
4269 vnode_unlock(vp);
4270 return(0);
4271
4272 }
4273 int
4274 vnode_removefsref(vnode_t vp)
4275 {
4276 vnode_lock_spin(vp);
4277 if ((vp->v_lflag & VNAMED_FSHASH) == 0)
4278 panic("remove_fsref: no named reference");
4279 vp->v_lflag &= ~VNAMED_FSHASH;
4280 vnode_unlock(vp);
4281 return(0);
4282
4283 }
4284
4285
4286 int
4287 vfs_iterate(__unused int flags, int (*callout)(mount_t, void *), void *arg)
4288 {
4289 mount_t mp;
4290 int ret = 0;
4291 fsid_t * fsid_list;
4292 int count, actualcount, i;
4293 void * allocmem;
4294
4295 count = mount_getvfscnt();
4296 count += 10;
4297
4298 fsid_list = (fsid_t *)kalloc(count * sizeof(fsid_t));
4299 allocmem = (void *)fsid_list;
4300
4301 actualcount = mount_fillfsids(fsid_list, count);
4302
4303 for (i=0; i< actualcount; i++) {
4304
4305 /* obtain the mount point with iteration reference */
4306 mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);
4307
4308 if(mp == (struct mount *)0)
4309 continue;
4310 mount_lock(mp);
4311 if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
4312 mount_unlock(mp);
4313 mount_iterdrop(mp);
4314 continue;
4315
4316 }
4317 mount_unlock(mp);
4318
4319 /* iterate over all the vnodes */
4320 ret = callout(mp, arg);
4321
4322 mount_iterdrop(mp);
4323
4324 switch (ret) {
4325 case VFS_RETURNED:
4326 case VFS_RETURNED_DONE:
4327 if (ret == VFS_RETURNED_DONE) {
4328 ret = 0;
4329 goto out;
4330 }
4331 break;
4332
4333 case VFS_CLAIMED_DONE:
4334 ret = 0;
4335 goto out;
4336 case VFS_CLAIMED:
4337 default:
4338 break;
4339 }
4340 ret = 0;
4341 }
4342
4343 out:
4344 kfree(allocmem, (count * sizeof(fsid_t)));
4345 return (ret);
4346 }
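/*
 * Illustrative sketch (assumption, not part of the original source): a
 * minimal vfs_iterate() callout that visits every mounted filesystem.
 * The callout name and its argument are hypothetical.
 *
 *	static int
 *	count_mounts_callout(mount_t mp, void *arg)
 *	{
 *		int *countp = (int *)arg;
 *
 *		(*countp)++;
 *		return (VFS_RETURNED);		// keep iterating
 *	}
 *
 *	...
 *	int nmounts = 0;
 *	(void) vfs_iterate(0, count_mounts_callout, &nmounts);
 */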
4347
4348 /*
4349 * Update the vfsstatfs structure in the mountpoint.
4350 * MAC: Parameter eventtype added, indicating whether the event that
4351 * triggered this update came from user space, via a system call
4352 * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
4353 */
4354 int
4355 vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
4356 {
4357 struct vfs_attr va;
4358 int error;
4359
4360 /*
4361 * Request the attributes we want to propagate into
4362 * the per-mount vfsstat structure.
4363 */
4364 VFSATTR_INIT(&va);
4365 VFSATTR_WANTED(&va, f_iosize);
4366 VFSATTR_WANTED(&va, f_blocks);
4367 VFSATTR_WANTED(&va, f_bfree);
4368 VFSATTR_WANTED(&va, f_bavail);
4369 VFSATTR_WANTED(&va, f_bused);
4370 VFSATTR_WANTED(&va, f_files);
4371 VFSATTR_WANTED(&va, f_ffree);
4372 VFSATTR_WANTED(&va, f_bsize);
4373 VFSATTR_WANTED(&va, f_fssubtype);
4374 #if CONFIG_MACF
4375 if (eventtype == VFS_USER_EVENT) {
4376 error = mac_mount_check_getattr(ctx, mp, &va);
4377 if (error != 0)
4378 return (error);
4379 }
4380 #endif
4381
4382 if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
4383 KAUTH_DEBUG("STAT - filesystem returned error %d", error);
4384 return(error);
4385 }
4386
4387 /*
4388 * Unpack into the per-mount structure.
4389 *
4390 * We only overwrite these fields, which are likely to change:
4391 * f_blocks
4392 * f_bfree
4393 * f_bavail
4394 * f_bused
4395 * f_files
4396 * f_ffree
4397 *
4398 * And these which are not, but which the FS has no other way
4399 * of providing to us:
4400 * f_bsize
4401 * f_iosize
4402 * f_fssubtype
4403 *
4404 */
4405 if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
4406 /* 4822056 - protect against malformed server mount */
4407 mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
4408 } else {
4409 mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */
4410 }
4411 if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
4412 mp->mnt_vfsstat.f_iosize = va.f_iosize;
4413 } else {
4414 mp->mnt_vfsstat.f_iosize = 1024 * 1024; /* 1MB sensible I/O size */
4415 }
4416 if (VFSATTR_IS_SUPPORTED(&va, f_blocks))
4417 mp->mnt_vfsstat.f_blocks = va.f_blocks;
4418 if (VFSATTR_IS_SUPPORTED(&va, f_bfree))
4419 mp->mnt_vfsstat.f_bfree = va.f_bfree;
4420 if (VFSATTR_IS_SUPPORTED(&va, f_bavail))
4421 mp->mnt_vfsstat.f_bavail = va.f_bavail;
4422 if (VFSATTR_IS_SUPPORTED(&va, f_bused))
4423 mp->mnt_vfsstat.f_bused = va.f_bused;
4424 if (VFSATTR_IS_SUPPORTED(&va, f_files))
4425 mp->mnt_vfsstat.f_files = va.f_files;
4426 if (VFSATTR_IS_SUPPORTED(&va, f_ffree))
4427 mp->mnt_vfsstat.f_ffree = va.f_ffree;
4428
4429 /* this is unlikely to change, but has to be queried for */
4430 if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype))
4431 mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;
4432
4433 return(0);
4434 }
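/*
 * Illustrative sketch (assumption, not part of the original source): callers
 * typically refresh the cached statistics before reporting them, e.g.
 *
 *	struct vfsstatfs *sp;
 *
 *	(void) vfs_update_vfsstat(mp, vfs_context_current(), VFS_USER_EVENT);
 *	sp = vfs_statfs(mp);	// now reflects the values filled in above
 */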
4435
4436 int
4437 mount_list_add(mount_t mp)
4438 {
4439 int res;
4440
4441 mount_list_lock();
4442 if (system_inshutdown != 0) {
4443 res = -1;
4444 } else {
4445 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
4446 nummounts++;
4447 res = 0;
4448 }
4449 mount_list_unlock();
4450
4451 return res;
4452 }
4453
4454 void
4455 mount_list_remove(mount_t mp)
4456 {
4457 mount_list_lock();
4458 TAILQ_REMOVE(&mountlist, mp, mnt_list);
4459 nummounts--;
4460 mp->mnt_list.tqe_next = NULL;
4461 mp->mnt_list.tqe_prev = NULL;
4462 mount_list_unlock();
4463 }
4464
4465 mount_t
4466 mount_lookupby_volfsid(int volfs_id, int withref)
4467 {
4468 mount_t cur_mount = (mount_t)0;
4469 mount_t mp;
4470
4471 mount_list_lock();
4472 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4473 if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
4474 (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
4475 (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
4476 cur_mount = mp;
4477 if (withref) {
4478 if (mount_iterref(cur_mount, 1)) {
4479 cur_mount = (mount_t)0;
4480 mount_list_unlock();
4481 goto out;
4482 }
4483 }
4484 break;
4485 }
4486 }
4487 mount_list_unlock();
4488 if (withref && (cur_mount != (mount_t)0)) {
4489 mp = cur_mount;
4490 if (vfs_busy(mp, LK_NOWAIT) != 0) {
4491 cur_mount = (mount_t)0;
4492 }
4493 mount_iterdrop(mp);
4494 }
4495 out:
4496 return(cur_mount);
4497 }
4498
4499 mount_t
4500 mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
4501 {
4502 mount_t retmp = (mount_t)0;
4503 mount_t mp;
4504
4505 if (!locked)
4506 mount_list_lock();
4507 TAILQ_FOREACH(mp, &mountlist, mnt_list)
4508 if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
4509 mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
4510 retmp = mp;
4511 if (withref) {
4512 if (mount_iterref(retmp, 1))
4513 retmp = (mount_t)0;
4514 }
4515 goto out;
4516 }
4517 out:
4518 if (!locked)
4519 mount_list_unlock();
4520 return (retmp);
4521 }
4522
4523 errno_t
4524 vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
4525 {
4526 struct nameidata nd;
4527 int error;
4528 u_int32_t ndflags = 0;
4529
4530 if (ctx == NULL) { /* XXX technically an error */
4531 ctx = vfs_context_current();
4532 }
4533
4534 if (flags & VNODE_LOOKUP_NOFOLLOW)
4535 ndflags = NOFOLLOW;
4536 else
4537 ndflags = FOLLOW;
4538
4539 if (flags & VNODE_LOOKUP_NOCROSSMOUNT)
4540 ndflags |= NOCROSSMOUNT;
4541 if (flags & VNODE_LOOKUP_DOWHITEOUT)
4542 ndflags |= DOWHITEOUT;
4543
4544 /* XXX AUDITVNPATH1 needed ? */
4545 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
4546
4547 if ((error = namei(&nd)))
4548 return (error);
4549 *vpp = nd.ni_vp;
4550 nameidone(&nd);
4551
4552 return (0);
4553 }
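/*
 * Illustrative sketch (assumption, not part of the original source):
 * resolve a path to a vnode and release the iocount when done.
 *
 *	vnode_t vp = NULLVP;
 *
 *	if (vnode_lookup("/etc", 0, &vp, vfs_context_current()) == 0) {
 *		// ... use vp ...
 *		vnode_put(vp);
 *	}
 */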
4554
4555 errno_t
4556 vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
4557 {
4558 struct nameidata nd;
4559 int error;
4560 u_int32_t ndflags = 0;
4561 int lflags = flags;
4562
4563 if (ctx == NULL) { /* XXX technically an error */
4564 ctx = vfs_context_current();
4565 }
4566
4567 if (fmode & O_NOFOLLOW)
4568 lflags |= VNODE_LOOKUP_NOFOLLOW;
4569
4570 if (lflags & VNODE_LOOKUP_NOFOLLOW)
4571 ndflags = NOFOLLOW;
4572 else
4573 ndflags = FOLLOW;
4574
4575 if (lflags & VNODE_LOOKUP_NOCROSSMOUNT)
4576 ndflags |= NOCROSSMOUNT;
4577 if (lflags & VNODE_LOOKUP_DOWHITEOUT)
4578 ndflags |= DOWHITEOUT;
4579
4580 /* XXX AUDITVNPATH1 needed ? */
4581 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
4582
4583 if ((error = vn_open(&nd, fmode, cmode)))
4584 *vpp = NULL;
4585 else
4586 *vpp = nd.ni_vp;
4587
4588 return (error);
4589 }
4590
4591 errno_t
4592 vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
4593 {
4594 int error;
4595
4596 if (ctx == NULL) {
4597 ctx = vfs_context_current();
4598 }
4599
4600 error = vn_close(vp, flags, ctx);
4601 vnode_put(vp);
4602 return (error);
4603 }
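/*
 * Illustrative sketch (assumption, not part of the original source): typical
 * in-kernel use of vnode_open()/vnode_close(). vnode_close() undoes the open
 * via vn_close() and then drops the iocount with vnode_put(), as above.
 * The path used here is hypothetical.
 *
 *	vnode_t vp = NULLVP;
 *	vfs_context_t ctx = vfs_context_current();
 *	int error;
 *
 *	error = vnode_open("/var/tmp/example", FWRITE | O_CREAT, 0644, 0, &vp, ctx);
 *	if (error == 0) {
 *		// ... transfer data against vp ...
 *		(void) vnode_close(vp, FWRITE, ctx);
 *	}
 */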
4604
4605 /*
4606 * Returns: 0 Success
4607 * vnode_getattr:???
4608 */
4609 errno_t
4610 vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
4611 {
4612 struct vnode_attr va;
4613 int error;
4614
4615 VATTR_INIT(&va);
4616 VATTR_WANTED(&va, va_data_size);
4617 error = vnode_getattr(vp, &va, ctx);
4618 if (!error)
4619 *sizep = va.va_data_size;
4620 return(error);
4621 }
4622
4623 errno_t
4624 vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
4625 {
4626 struct vnode_attr va;
4627
4628 VATTR_INIT(&va);
4629 VATTR_SET(&va, va_data_size, size);
4630 va.va_vaflags = ioflag & 0xffff;
4631 return(vnode_setattr(vp, &va, ctx));
4632 }
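/*
 * Illustrative sketch (assumption, not part of the original source): query
 * the current size and truncate the file, using the two helpers above.
 *
 *	off_t size;
 *
 *	if (vnode_size(vp, &size, ctx) == 0 && size != 0)
 *		(void) vnode_setsize(vp, 0, 0, ctx);
 */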
4633
4634 /*
4635 * Create a filesystem object of arbitrary type with arbitrary attributes in
4636 * the specified directory with the specified name.
4637 *
4638 * Parameters: dvp Pointer to the vnode of the directory
4639 * in which to create the object.
4640 * vpp Pointer to the area into which to
4641 * return the vnode of the created object.
4642 * cnp Component name pointer from the namei
4643 * data structure, containing the name to
4644 * use for the create object.
4645 * vap Pointer to the vnode_attr structure
4646 * describing the object to be created,
4647 * including the type of object.
4648 * flags VN_* flags controlling ACL inheritance
4649 * and whether or not authorization is to
4650 * be required for the operation.
4651 *
4652 * Returns: 0 Success
4653 * !0 errno value
4654 *
4655 * Implicit: *vpp Contains the vnode of the object that
4656 * was created, if successful.
4657 * *cnp May be modified by the underlying VFS.
4658 * *vap May be modified by the underlying VFS;
4659 * it may be modified by either ACL inheritance
4660 * or the underlying VFS, and it may
4661 * be modified even if the operation is
4662 * unsuccessful.
4663 *
4665 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
4666 *
4667 * Modification of '*cnp' and '*vap' by the underlying VFS is
4668 * strongly discouraged.
4669 *
4670 * XXX: This function is a 'vn_*' function; it belongs in vfs_vnops.c
4671 *
4672 * XXX: We should enumerate the possible errno values here, and where
4673 * in the code they originated.
4674 */
4675 errno_t
4676 vn_create(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, struct vnode_attr *vap, int flags, vfs_context_t ctx)
4677 {
4678 kauth_acl_t oacl, nacl;
4679 int initial_acl;
4680 errno_t error;
4681 vnode_t vp = (vnode_t)0;
4682
4683 error = 0;
4684 oacl = nacl = NULL;
4685 initial_acl = 0;
4686
4687 KAUTH_DEBUG("%p CREATE - '%s'", dvp, cnp->cn_nameptr);
4688
4689 /*
4690 * Handle ACL inheritance.
4691 */
4692 if (!(flags & VN_CREATE_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
4693 /* save the original filesec */
4694 if (VATTR_IS_ACTIVE(vap, va_acl)) {
4695 initial_acl = 1;
4696 oacl = vap->va_acl;
4697 }
4698
4699 vap->va_acl = NULL;
4700 if ((error = kauth_acl_inherit(dvp,
4701 oacl,
4702 &nacl,
4703 vap->va_type == VDIR,
4704 ctx)) != 0) {
4705 KAUTH_DEBUG("%p CREATE - error %d processing inheritance", dvp, error);
4706 return(error);
4707 }
4708
4709 /*
4710 * If the generated ACL is NULL, then we can save ourselves some effort
4711 * by clearing the active bit.
4712 */
4713 if (nacl == NULL) {
4714 VATTR_CLEAR_ACTIVE(vap, va_acl);
4715 } else {
4716 VATTR_SET(vap, va_acl, nacl);
4717 }
4718 }
4719
4720 /*
4721 * Check and default new attributes.
4722 * This will set va_uid, va_gid, va_mode and va_create_time at least, if the caller
4723 * hasn't supplied them.
4724 */
4725 if ((error = vnode_authattr_new(dvp, vap, flags & VN_CREATE_NOAUTH, ctx)) != 0) {
4726 KAUTH_DEBUG("%p CREATE - error %d handing/defaulting attributes", dvp, error);
4727 goto out;
4728 }
4729
4730
4731 /*
4732 * Create the requested node.
4733 */
4734 switch(vap->va_type) {
4735 case VREG:
4736 error = VNOP_CREATE(dvp, vpp, cnp, vap, ctx);
4737 break;
4738 case VDIR:
4739 error = VNOP_MKDIR(dvp, vpp, cnp, vap, ctx);
4740 break;
4741 case VSOCK:
4742 case VFIFO:
4743 case VBLK:
4744 case VCHR:
4745 error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
4746 break;
4747 default:
4748 panic("vnode_create: unknown vtype %d", vap->va_type);
4749 }
4750 if (error != 0) {
4751 KAUTH_DEBUG("%p CREATE - error %d returned by filesystem", dvp, error);
4752 goto out;
4753 }
4754
4755 vp = *vpp;
4756 #if CONFIG_MACF
4757 if (!(flags & VN_CREATE_NOLABEL)) {
4758 error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
4759 if (error)
4760 goto error;
4761 }
4762 #endif
4763
4764 /*
4765 * If some of the requested attributes weren't handled by the VNOP,
4766 * use our fallback code.
4767 */
4768 if (!VATTR_ALL_SUPPORTED(vap) && *vpp) {
4769 KAUTH_DEBUG(" CREATE - doing fallback with ACL %p", vap->va_acl);
4770 error = vnode_setattr_fallback(*vpp, vap, ctx);
4771 }
4772 #if CONFIG_MACF
4773 error:
4774 #endif
4775 if ((error != 0 ) && (vp != (vnode_t)0)) {
4776 *vpp = (vnode_t) 0;
4777 vnode_put(vp);
4778 }
4779
4780 out:
4781 /*
4782 * If the caller supplied a filesec in vap, it has been replaced
4783 * now by the post-inheritance copy. We need to put the original back
4784 * and free the inherited product.
4785 */
4786 if (initial_acl) {
4787 VATTR_SET(vap, va_acl, oacl);
4788 } else {
4789 VATTR_CLEAR_ACTIVE(vap, va_acl);
4790 }
4791 if (nacl != NULL)
4792 kauth_acl_free(nacl);
4793
4794 return(error);
4795 }
4796
4797 static kauth_scope_t vnode_scope;
4798 static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
4799 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
4800 static int vnode_authorize_callback_int(__unused kauth_cred_t credential, __unused void *idata, kauth_action_t action,
4801 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
4802
4803 typedef struct _vnode_authorize_context {
4804 vnode_t vp;
4805 struct vnode_attr *vap;
4806 vnode_t dvp;
4807 struct vnode_attr *dvap;
4808 vfs_context_t ctx;
4809 int flags;
4810 int flags_valid;
4811 #define _VAC_IS_OWNER (1<<0)
4812 #define _VAC_IN_GROUP (1<<1)
4813 #define _VAC_IS_DIR_OWNER (1<<2)
4814 #define _VAC_IN_DIR_GROUP (1<<3)
4815 } *vauth_ctx;
4816
4817 void
4818 vnode_authorize_init(void)
4819 {
4820 vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
4821 }
4822
4823 /*
4824 * Authorize an operation on a vnode.
4825 *
4826 * This is KPI, but here because it needs vnode_scope.
4827 *
4828 * Returns: 0 Success
4829 * kauth_authorize_action:EPERM ...
4830 * xlate => EACCES Permission denied
4831 * kauth_authorize_action:0 Success
4832 * kauth_authorize_action: Depends on callback return; this is
4833 * usually only vnode_authorize_callback(),
4834 * but may include other listeners, if any
4835 * exist.
4836 * EROFS
4837 * EACCES
4838 * EPERM
4839 * ???
4840 */
4841 int
4842 vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
4843 {
4844 int error, result;
4845
4846 /*
4847 * We can't authorize against a dead vnode; allow all operations through so that
4848 * the correct error can be returned.
4849 */
4850 if (vp->v_type == VBAD)
4851 return(0);
4852
4853 error = 0;
4854 result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
4855 (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
4856 if (result == EPERM) /* traditional behaviour */
4857 result = EACCES;
4858 /* did the lower layers give a better error return? */
4859 if ((result != 0) && (error != 0))
4860 return(error);
4861 return(result);
4862 }
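/*
 * Illustrative sketch (assumption, not part of the original source): callers
 * typically ask for the combined rights they need before acting on a vnode,
 * e.g. before reading file data:
 *
 *	if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, ctx)) != 0)
 *		return (error);
 */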
4863
4864 /*
4865 * Test for vnode immutability.
4866 *
4867 * The 'append' flag is set when the authorization request is constrained
4868 * to operations which only request the right to append to a file.
4869 *
4870 * The 'ignore' flag is set when an operation modifying the immutability flags
4871 * is being authorized. We check the system securelevel to determine which
4872 * immutability flags we can ignore.
4873 */
4874 static int
4875 vnode_immutable(struct vnode_attr *vap, int append, int ignore)
4876 {
4877 int mask;
4878
4879 /* start with all bits precluding the operation */
4880 mask = IMMUTABLE | APPEND;
4881
4882 /* if appending only, remove the append-only bits */
4883 if (append)
4884 mask &= ~APPEND;
4885
4886 /* ignore only set when authorizing flags changes */
4887 if (ignore) {
4888 if (securelevel <= 0) {
4889 /* in insecure state, flags do not inhibit changes */
4890 mask = 0;
4891 } else {
4892 /* in secure state, user flags don't inhibit */
4893 mask &= ~(UF_IMMUTABLE | UF_APPEND);
4894 }
4895 }
4896 KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
4897 if ((vap->va_flags & mask) != 0)
4898 return(EPERM);
4899 return(0);
4900 }
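/*
 * Worked example (not part of the original source), assuming ignore == 0:
 * for a file with va_flags == UF_APPEND, an append-only request (append == 1)
 * leaves mask == IMMUTABLE, so (va_flags & mask) == 0 and the operation is
 * allowed; a request that rewrites data (append == 0) keeps APPEND in the
 * mask and the function returns EPERM.
 */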
4901
4902 static int
4903 vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
4904 {
4905 int result;
4906
4907 /* default assumption is not-owner */
4908 result = 0;
4909
4910 /*
4911 * If the filesystem has given us a UID, we treat this as authoritative.
4912 */
4913 if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
4914 result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
4915 }
4916 /* we could test the owner UUID here if we had a policy for it */
4917
4918 return(result);
4919 }
4920
4921 static int
4922 vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember)
4923 {
4924 int error;
4925 int result;
4926
4927 error = 0;
4928 result = 0;
4929
4930 /* the caller is expected to have asked the filesystem for a group at some point */
4931 if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
4932 error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
4933 }
4934 /* we could test the group UUID here if we had a policy for it */
4935
4936 if (!error)
4937 *ismember = result;
4938 return(error);
4939 }
4940
4941 static int
4942 vauth_file_owner(vauth_ctx vcp)
4943 {
4944 int result;
4945
4946 if (vcp->flags_valid & _VAC_IS_OWNER) {
4947 result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
4948 } else {
4949 result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);
4950
4951 /* cache our result */
4952 vcp->flags_valid |= _VAC_IS_OWNER;
4953 if (result) {
4954 vcp->flags |= _VAC_IS_OWNER;
4955 } else {
4956 vcp->flags &= ~_VAC_IS_OWNER;
4957 }
4958 }
4959 return(result);
4960 }
4961
4962 static int
4963 vauth_file_ingroup(vauth_ctx vcp, int *ismember)
4964 {
4965 int error;
4966
4967 if (vcp->flags_valid & _VAC_IN_GROUP) {
4968 *ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
4969 error = 0;
4970 } else {
4971 error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember);
4972
4973 if (!error) {
4974 /* cache our result */
4975 vcp->flags_valid |= _VAC_IN_GROUP;
4976 if (*ismember) {
4977 vcp->flags |= _VAC_IN_GROUP;
4978 } else {
4979 vcp->flags &= ~_VAC_IN_GROUP;
4980 }
4981 }
4982
4983 }
4984 return(error);
4985 }
4986
4987 static int
4988 vauth_dir_owner(vauth_ctx vcp)
4989 {
4990 int result;
4991
4992 if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
4993 result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
4994 } else {
4995 result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);
4996
4997 /* cache our result */
4998 vcp->flags_valid |= _VAC_IS_DIR_OWNER;
4999 if (result) {
5000 vcp->flags |= _VAC_IS_DIR_OWNER;
5001 } else {
5002 vcp->flags &= ~_VAC_IS_DIR_OWNER;
5003 }
5004 }
5005 return(result);
5006 }
5007
5008 static int
5009 vauth_dir_ingroup(vauth_ctx vcp, int *ismember)
5010 {
5011 int error;
5012
5013 if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
5014 *ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
5015 error = 0;
5016 } else {
5017 error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember);
5018
5019 if (!error) {
5020 /* cache our result */
5021 vcp->flags_valid |= _VAC_IN_DIR_GROUP;
5022 if (*ismember) {
5023 vcp->flags |= _VAC_IN_DIR_GROUP;
5024 } else {
5025 vcp->flags &= ~_VAC_IN_DIR_GROUP;
5026 }
5027 }
5028 }
5029 return(error);
5030 }
5031
5032 /*
5033 * Test the posix permissions in (vap) to determine whether (credential)
5034 * may perform (action)
5035 */
5036 static int
5037 vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
5038 {
5039 struct vnode_attr *vap;
5040 int needed, error, owner_ok, group_ok, world_ok, ismember;
5041 #ifdef KAUTH_DEBUG_ENABLE
5042 const char *where = "uninitialized";
5043 # define _SETWHERE(c) where = c;
5044 #else
5045 # define _SETWHERE(c)
5046 #endif
5047
5048 /* checking file or directory? */
5049 if (on_dir) {
5050 vap = vcp->dvap;
5051 } else {
5052 vap = vcp->vap;
5053 }
5054
5055 error = 0;
5056
5057 /*
5058 * We want to do as little work here as possible. So first we check
5059 * which sets of permissions grant us the access we need, and avoid checking
5060 * whether specific permissions grant access when more generic ones would.
5061 */
5062
5063 /* owner permissions */
5064 needed = 0;
5065 if (action & VREAD)
5066 needed |= S_IRUSR;
5067 if (action & VWRITE)
5068 needed |= S_IWUSR;
5069 if (action & VEXEC)
5070 needed |= S_IXUSR;
5071 owner_ok = (needed & vap->va_mode) == needed;
5072
5073 /* group permissions */
5074 needed = 0;
5075 if (action & VREAD)
5076 needed |= S_IRGRP;
5077 if (action & VWRITE)
5078 needed |= S_IWGRP;
5079 if (action & VEXEC)
5080 needed |= S_IXGRP;
5081 group_ok = (needed & vap->va_mode) == needed;
5082
5083 /* world permissions */
5084 needed = 0;
5085 if (action & VREAD)
5086 needed |= S_IROTH;
5087 if (action & VWRITE)
5088 needed |= S_IWOTH;
5089 if (action & VEXEC)
5090 needed |= S_IXOTH;
5091 world_ok = (needed & vap->va_mode) == needed;
5092
5093 /* If granted/denied by all three, we're done */
5094 if (owner_ok && group_ok && world_ok) {
5095 _SETWHERE("all");
5096 goto out;
5097 }
5098 if (!owner_ok && !group_ok && !world_ok) {
5099 _SETWHERE("all");
5100 error = EACCES;
5101 goto out;
5102 }
5103
5104 /* Check ownership (relatively cheap) */
5105 if ((on_dir && vauth_dir_owner(vcp)) ||
5106 (!on_dir && vauth_file_owner(vcp))) {
5107 _SETWHERE("user");
5108 if (!owner_ok)
5109 error = EACCES;
5110 goto out;
5111 }
5112
5113 /* Not owner; if group and world both grant it we're done */
5114 if (group_ok && world_ok) {
5115 _SETWHERE("group/world");
5116 goto out;
5117 }
5118 if (!group_ok && !world_ok) {
5119 _SETWHERE("group/world");
5120 error = EACCES;
5121 goto out;
5122 }
5123
5124 /* Check group membership (most expensive) */
5125 ismember = 0;
5126 if (on_dir) {
5127 error = vauth_dir_ingroup(vcp, &ismember);
5128 } else {
5129 error = vauth_file_ingroup(vcp, &ismember);
5130 }
5131 if (error)
5132 goto out;
5133 if (ismember) {
5134 _SETWHERE("group");
5135 if (!group_ok)
5136 error = EACCES;
5137 goto out;
5138 }
5139
5140 /* Not owner, not in group, use world result */
5141 _SETWHERE("world");
5142 if (!world_ok)
5143 error = EACCES;
5144
5145 /* FALLTHROUGH */
5146
5147 out:
5148 KAUTH_DEBUG("%p %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
5149 vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
5150 (action & VREAD) ? "r" : "-",
5151 (action & VWRITE) ? "w" : "-",
5152 (action & VEXEC) ? "x" : "-",
5153 needed,
5154 (vap->va_mode & S_IRUSR) ? "r" : "-",
5155 (vap->va_mode & S_IWUSR) ? "w" : "-",
5156 (vap->va_mode & S_IXUSR) ? "x" : "-",
5157 (vap->va_mode & S_IRGRP) ? "r" : "-",
5158 (vap->va_mode & S_IWGRP) ? "w" : "-",
5159 (vap->va_mode & S_IXGRP) ? "x" : "-",
5160 (vap->va_mode & S_IROTH) ? "r" : "-",
5161 (vap->va_mode & S_IWOTH) ? "w" : "-",
5162 (vap->va_mode & S_IXOTH) ? "x" : "-",
5163 kauth_cred_getuid(vcp->ctx->vc_ucred),
5164 on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
5165 on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
5166 return(error);
5167 }
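/*
 * Worked example (not part of the original source): with va_mode == 0660 and
 * action == VREAD | VWRITE, owner_ok and group_ok are 1 and world_ok is 0.
 * The owner is granted at the ownership check; a non-owner falls through to
 * the group-membership check and is granted only if kauth reports membership
 * in va_gid, otherwise the world result applies and EACCES is returned.
 */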
5168
5169 /*
5170 * Authorize the deletion of the node vp from the directory dvp.
5171 *
5172 * We assume that:
5173 * - Neither the node nor the directory are immutable.
5174 * - The user is not the superuser.
5175 *
5176 * Deletion is not permitted if the directory is sticky and the caller is
5177 * not owner of the node or directory.
5178 *
5179 * If either the node grants DELETE, or the directory grants DELETE_CHILD,
5180 * the node may be deleted. If neither denies the permission, and the
5181 * caller has Posix write access to the directory, then the node may be
5182 * deleted.
5183 *
5184 * As an optimization, we cache whether or not delete child is permitted
5185 * on directories without the sticky bit set.
5186 */
5187 int
5188 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child);
5189 /*static*/ int
5190 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
5191 {
5192 struct vnode_attr *vap = vcp->vap;
5193 struct vnode_attr *dvap = vcp->dvap;
5194 kauth_cred_t cred = vcp->ctx->vc_ucred;
5195 struct kauth_acl_eval eval;
5196 int error, delete_denied, delete_child_denied, ismember;
5197
5198 /* check the ACL on the directory */
5199 delete_child_denied = 0;
5200 if (!cached_delete_child && VATTR_IS_NOT(dvap, va_acl, NULL)) {
5201 eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
5202 eval.ae_acl = &dvap->va_acl->acl_ace[0];
5203 eval.ae_count = dvap->va_acl->acl_entrycount;
5204 eval.ae_options = 0;
5205 if (vauth_dir_owner(vcp))
5206 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5207 if ((error = vauth_dir_ingroup(vcp, &ismember)) != 0)
5208 return(error);
5209 if (ismember)
5210 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5211 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5212 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5213 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5214 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5215
5216 error = kauth_acl_evaluate(cred, &eval);
5217
5218 if (error != 0) {
5219 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5220 return(error);
5221 }
5222 if (eval.ae_result == KAUTH_RESULT_DENY)
5223 delete_child_denied = 1;
5224 if (eval.ae_result == KAUTH_RESULT_ALLOW) {
5225 KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp);
5226 return(0);
5227 }
5228 }
5229
5230 /* check the ACL on the node */
5231 delete_denied = 0;
5232 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
5233 eval.ae_requested = KAUTH_VNODE_DELETE;
5234 eval.ae_acl = &vap->va_acl->acl_ace[0];
5235 eval.ae_count = vap->va_acl->acl_entrycount;
5236 eval.ae_options = 0;
5237 if (vauth_file_owner(vcp))
5238 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5239 if ((error = vauth_file_ingroup(vcp, &ismember)) != 0)
5240 return(error);
5241 if (ismember)
5242 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5243 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5244 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5245 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5246 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5247
5248 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
5249 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5250 return(error);
5251 }
5252 if (eval.ae_result == KAUTH_RESULT_DENY)
5253 delete_denied = 1;
5254 if (eval.ae_result == KAUTH_RESULT_ALLOW) {
5255 KAUTH_DEBUG("%p ALLOWED - granted by file ACL", vcp->vp);
5256 return(0);
5257 }
5258 }
5259
5260 /* if denied by ACL on directory or node, return denial */
5261 if (delete_denied || delete_child_denied) {
5262 KAUTH_DEBUG("%p ALLOWED - denied by ACL", vcp->vp);
5263 return(EACCES);
5264 }
5265
5266 /*
5267 * enforce sticky bit behaviour; the cached_delete_child property will
5268 * be false and the dvap contents valid for sticky bit directories;
5269 * this makes us check the directory each time, but it's unavoidable,
5270 * as sticky bit is an exception to caching.
5271 */
5272 if (!cached_delete_child && (dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
5273 KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)",
5274 vcp->vp, cred->cr_uid, vap->va_uid, dvap->va_uid);
5275 return(EACCES);
5276 }
5277
5278 /* check the directory */
5279 if (!cached_delete_child && (error = vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */)) != 0) {
5280 KAUTH_DEBUG("%p ALLOWED - granted by posix permisssions", vcp->vp);
5281 return(error);
5282 }
5283
5284 /* not denied, must be OK */
5285 return(0);
5286 }
5287
5288
5289 /*
5290 * Authorize an operation based on the node's attributes.
5291 */
5292 static int
5293 vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
5294 {
5295 struct vnode_attr *vap = vcp->vap;
5296 kauth_cred_t cred = vcp->ctx->vc_ucred;
5297 struct kauth_acl_eval eval;
5298 int error, ismember;
5299 mode_t posix_action;
5300
5301 /*
5302 * If we are the file owner, we automatically have some rights.
5303 *
5304 * Do we need to expand this to support group ownership?
5305 */
5306 if (vauth_file_owner(vcp))
5307 acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);
5308
5309 /*
5310 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
5311 * mask the latter. If TAKE_OWNERSHIP is requested the caller is about to
5312 * change ownership to themselves, and WRITE_SECURITY is implicitly
5313 * granted to the owner. We need to do this because at this point
5314 * WRITE_SECURITY may not be granted as the caller is not currently
5315 * the owner.
5316 */
5317 if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
5318 (acl_rights & KAUTH_VNODE_WRITE_SECURITY))
5319 acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;
5320
5321 if (acl_rights == 0) {
5322 KAUTH_DEBUG("%p ALLOWED - implicit or no rights required", vcp->vp);
5323 return(0);
5324 }
5325
5326 /* if we have an ACL, evaluate it */
5327 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
5328 eval.ae_requested = acl_rights;
5329 eval.ae_acl = &vap->va_acl->acl_ace[0];
5330 eval.ae_count = vap->va_acl->acl_entrycount;
5331 eval.ae_options = 0;
5332 if (vauth_file_owner(vcp))
5333 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5334 if ((error = vauth_file_ingroup(vcp, &ismember)) != 0)
5335 return(error);
5336 if (ismember)
5337 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5338 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5339 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5340 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5341 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5342
5343 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
5344 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5345 return(error);
5346 }
5347
5348 if (eval.ae_result == KAUTH_RESULT_DENY) {
5349 KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp);
5350 return(EACCES); /* deny, deny, counter-allege */
5351 }
5352 if (eval.ae_result == KAUTH_RESULT_ALLOW) {
5353 KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp);
5354 return(0);
5355 }
5356 *found_deny = eval.ae_found_deny;
5357
5358 /* fall through and evaluate residual rights */
5359 } else {
5360 /* no ACL, everything is residual */
5361 eval.ae_residual = acl_rights;
5362 }
5363
5364 /*
5365 * Grant residual rights that have been pre-authorized.
5366 */
5367 eval.ae_residual &= ~preauth_rights;
5368
5369 /*
5370 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
5371 */
5372 if (vauth_file_owner(vcp))
5373 eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;
5374
5375 if (eval.ae_residual == 0) {
5376 KAUTH_DEBUG("%p ALLOWED - rights already authorized", vcp->vp);
5377 return(0);
5378 }
5379
5380 /*
5381 * Bail if we have residual rights that can't be granted by posix permissions,
5382 * or aren't presumed granted at this point.
5383 *
5384 * XXX these can be collapsed for performance
5385 */
5386 if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
5387 KAUTH_DEBUG("%p DENIED - CHANGE_OWNER not permitted", vcp->vp);
5388 return(EACCES);
5389 }
5390 if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
5391 KAUTH_DEBUG("%p DENIED - WRITE_SECURITY not permitted", vcp->vp);
5392 return(EACCES);
5393 }
5394
5395 #if DIAGNOSTIC
5396 if (eval.ae_residual & KAUTH_VNODE_DELETE)
5397 panic("vnode_authorize: can't be checking delete permission here");
5398 #endif
5399
5400 /*
5401 * Compute the fallback posix permissions that will satisfy the remaining
5402 * rights.
5403 */
5404 posix_action = 0;
5405 if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
5406 KAUTH_VNODE_LIST_DIRECTORY |
5407 KAUTH_VNODE_READ_EXTATTRIBUTES))
5408 posix_action |= VREAD;
5409 if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
5410 KAUTH_VNODE_ADD_FILE |
5411 KAUTH_VNODE_ADD_SUBDIRECTORY |
5412 KAUTH_VNODE_DELETE_CHILD |
5413 KAUTH_VNODE_WRITE_ATTRIBUTES |
5414 KAUTH_VNODE_WRITE_EXTATTRIBUTES))
5415 posix_action |= VWRITE;
5416 if (eval.ae_residual & (KAUTH_VNODE_EXECUTE |
5417 KAUTH_VNODE_SEARCH))
5418 posix_action |= VEXEC;
5419
5420 if (posix_action != 0) {
5421 return(vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */));
5422 } else {
5423 KAUTH_DEBUG("%p ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
5424 vcp->vp,
5425 (eval.ae_residual & KAUTH_VNODE_READ_DATA)
5426 ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
5427 (eval.ae_residual & KAUTH_VNODE_WRITE_DATA)
5428 ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
5429 (eval.ae_residual & KAUTH_VNODE_EXECUTE)
5430 ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
5431 (eval.ae_residual & KAUTH_VNODE_DELETE)
5432 ? " DELETE" : "",
5433 (eval.ae_residual & KAUTH_VNODE_APPEND_DATA)
5434 ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
5435 (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD)
5436 ? " DELETE_CHILD" : "",
5437 (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES)
5438 ? " READ_ATTRIBUTES" : "",
5439 (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES)
5440 ? " WRITE_ATTRIBUTES" : "",
5441 (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES)
5442 ? " READ_EXTATTRIBUTES" : "",
5443 (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES)
5444 ? " WRITE_EXTATTRIBUTES" : "",
5445 (eval.ae_residual & KAUTH_VNODE_READ_SECURITY)
5446 ? " READ_SECURITY" : "",
5447 (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY)
5448 ? " WRITE_SECURITY" : "",
5449 (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE)
5450 ? " CHECKIMMUTABLE" : "",
5451 (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER)
5452 ? " CHANGE_OWNER" : "");
5453 }
5454
5455 /*
5456 * Lack of required Posix permissions implies no reason to deny access.
5457 */
5458 return(0);
5459 }
5460
5461 /*
5462 * Check for file immutability.
5463 */
5464 static int
5465 vnode_authorize_checkimmutable(vnode_t vp, struct vnode_attr *vap, int rights, int ignore)
5466 {
5467 mount_t mp;
5468 int error;
5469 int append;
5470
5471 /*
5472 * Perform immutability checks for operations that change data.
5473 *
5474 * Sockets, fifos and devices require special handling.
5475 */
5476 switch(vp->v_type) {
5477 case VSOCK:
5478 case VFIFO:
5479 case VBLK:
5480 case VCHR:
5481 /*
5482 * Writing to these nodes does not change the filesystem data,
5483 * so forget that it's being tried.
5484 */
5485 rights &= ~KAUTH_VNODE_WRITE_DATA;
5486 break;
5487 default:
5488 break;
5489 }
5490
5491 error = 0;
5492 if (rights & KAUTH_VNODE_WRITE_RIGHTS) {
5493
5494 /* check per-filesystem options if possible */
5495 mp = vp->v_mount;
5496 if (mp != NULL) {
5497
5498 /* check for no-EA filesystems */
5499 if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) &&
5500 (vfs_flags(mp) & MNT_NOUSERXATTR)) {
5501 KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vp);
5502 error = EACCES; /* User attributes disabled */
5503 goto out;
5504 }
5505 }
5506
5507 /*
5508 * check for file immutability. first, check if the requested rights are
5509 * allowable for a UF_APPEND file.
5510 */
5511 append = 0;
5512 if (vp->v_type == VDIR) {
5513 if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
5514 append = 1;
5515 } else {
5516 if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
5517 append = 1;
5518 }
5519 if ((error = vnode_immutable(vap, append, ignore)) != 0) {
5520 KAUTH_DEBUG("%p DENIED - file is immutable", vp);
5521 goto out;
5522 }
5523 }
5524 out:
5525 return(error);
5526 }
5527
5528 /*
5529 * Handle authorization actions for filesystems that advertise that the
5530 * server will be enforcing.
5531 *
5532 * Returns: 0 Authorization should be handled locally
5533 * 1 Authorization was handled by the FS
5534 *
5535 * Note: Imputed returns will only occur if the authorization request
5536 * was handled by the FS.
5537 *
5538 * Imputed: *resultp, modified Return code from FS when the request is
5539 * handled by the FS.
5540 * VNOP_ACCESS:???
5541 * VNOP_OPEN:???
5542 */
5543 static int
5544 vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
5545 {
5546 int error;
5547
5548 /*
5549 * If the vp is a device node, socket or FIFO it actually represents a local
5550 * endpoint, so we need to handle it locally.
5551 */
5552 switch(vp->v_type) {
5553 case VBLK:
5554 case VCHR:
5555 case VSOCK:
5556 case VFIFO:
5557 return(0);
5558 default:
5559 break;
5560 }
5561
5562 /*
5563 * In the advisory request case, if the filesystem doesn't think it's reliable
5564 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
5565 */
5566 if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount))
5567 return(0);
5568
5569 /*
5570 * Let the filesystem have a say in the matter. It's OK for it to not implement
5571 * VNOP_ACCESS, as most will authorise inline with the actual request.
5572 */
5573 if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
5574 *resultp = error;
5575 KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
5576 return(1);
5577 }
5578
5579 /*
5580 * Typically opaque filesystems do authorisation in-line, but exec is a special case. In
5581 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
5582 */
5583 if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
5584 /* try a VNOP_OPEN for readonly access */
5585 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
5586 *resultp = error;
5587 KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp);
5588 return(1);
5589 }
5590 VNOP_CLOSE(vp, FREAD, ctx);
5591 }
5592
5593 /*
5594 * We don't have any reason to believe that the request has to be denied at this point,
5595 * so go ahead and allow it.
5596 */
5597 *resultp = 0;
5598 KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp);
5599 return(1);
5600 }
5601
5602
5603
5604
5605 /*
5606 * Returns: KAUTH_RESULT_ALLOW
5607 * KAUTH_RESULT_DENY
5608 *
5609 * Imputed: *arg3, modified Error code in the deny case
5610 * EROFS Read-only file system
5611 * EACCES Permission denied
5612 * EPERM Operation not permitted [no execute]
5613 * vnode_getattr:ENOMEM Not enough space [only if has filesec]
5614 * vnode_getattr:???
5615 * vnode_authorize_opaque:*arg2 ???
5616 * vnode_authorize_checkimmutable:???
5617 * vnode_authorize_delete:???
5618 * vnode_authorize_simple:???
5619 */
5620
5621
5622 static int
5623 vnode_authorize_callback(kauth_cred_t cred, void *idata, kauth_action_t action,
5624 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
5625 {
5626 vfs_context_t ctx;
5627 vnode_t cvp = NULLVP;
5628 vnode_t vp, dvp;
5629 int result = KAUTH_RESULT_DENY;
5630 int parent_iocount = 0;
5631 int parent_action; /* In case we need to use namedstream's data fork for cached rights*/
5632
5633 ctx = (vfs_context_t)arg0;
5634 vp = (vnode_t)arg1;
5635 dvp = (vnode_t)arg2;
5636
5637 /*
5638 * if there are 2 vnodes passed in, we don't know at
5639 * this point which rights to look at based on the
5640 * combined action being passed in... defer until later...
5641 * otherwise check the kauth 'rights' cache hung
5642 * off of the vnode we're interested in... if we've already
5643 * been granted the right we're currently interested in,
5644 * we can just return success... otherwise we'll go through
5645 * the process of authorizing the requested right(s)... if that
5646 * succeeds, we'll add the right(s) to the cache.
5647 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
5648 */
5649 if (dvp && vp)
5650 goto defer;
5651 if (dvp) {
5652 cvp = dvp;
5653 } else {
5654 /*
5655 * For named streams on local-authorization volumes, rights are cached on the parent;
5656 * authorization is determined by looking at the parent's properties anyway, so storing
5657 * on the parent means that we don't recompute for the named stream and that if
5658 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
5659 * stream to flush its cache separately. If we miss in the cache, then we authorize
5660 * as if there were no cached rights (passing the named stream vnode and desired rights to
5661 * vnode_authorize_callback_int()).
5662 *
5663 * On an opaquely authorized volume, we don't know the relationship between the
5664 * data fork's properties and the rights granted on a stream. Thus, named stream vnodes
5665 * on such a volume are authorized directly (rather than using the parent) and have their
5666 * own caches. When a named stream vnode is created, we mark the parent as having a named
5667 * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
5668 * find the stream and flush its cache.
5669 */
5670 if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
5671 cvp = vp->v_parent;
5672 if ((cvp != NULLVP) && (vnode_getwithref(cvp) == 0)) {
5673 parent_iocount = 1;
5674 } else {
5675 cvp = NULL;
5676 goto defer; /* If we can't use the parent, take the slow path */
5677 }
5678
5679 /* Have to translate some actions */
5680 parent_action = action;
5681 if (parent_action & KAUTH_VNODE_READ_DATA) {
5682 parent_action &= ~KAUTH_VNODE_READ_DATA;
5683 parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
5684 }
5685 if (parent_action & KAUTH_VNODE_WRITE_DATA) {
5686 parent_action &= ~KAUTH_VNODE_WRITE_DATA;
5687 parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
5688 }
5689
5690 } else {
5691 cvp = vp;
5692 }
5693 }
5694
5695 if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
5696 result = KAUTH_RESULT_ALLOW;
5697 goto out;
5698 }
5699 defer:
5700 result = vnode_authorize_callback_int(cred, idata, action, arg0, arg1, arg2, arg3);
5701
5702 if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP)
5703 vnode_cache_authorized_action(cvp, ctx, action);
5704
5705 out:
5706 if (parent_iocount) {
5707 vnode_put(cvp);
5708 }
5709
5710 return result;
5711 }
5712
5713
5714 static int
5715 vnode_authorize_callback_int(__unused kauth_cred_t unused_cred, __unused void *idata, kauth_action_t action,
5716 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
5717 {
5718 struct _vnode_authorize_context auth_context;
5719 vauth_ctx vcp;
5720 vfs_context_t ctx;
5721 vnode_t vp, dvp;
5722 kauth_cred_t cred;
5723 kauth_ace_rights_t rights;
5724 struct vnode_attr va, dva;
5725 int result;
5726 int *errorp;
5727 int noimmutable;
5728 boolean_t parent_authorized_for_delete_child = FALSE;
5729 boolean_t found_deny = FALSE;
5730 boolean_t parent_ref= FALSE;
5731
5732 vcp = &auth_context;
5733 ctx = vcp->ctx = (vfs_context_t)arg0;
5734 vp = vcp->vp = (vnode_t)arg1;
5735 dvp = vcp->dvp = (vnode_t)arg2;
5736 errorp = (int *)arg3;
5737 /*
5738 * Note that we authorize against the context, not the passed cred
5739 * (the same thing anyway)
5740 */
5741 cred = ctx->vc_ucred;
5742
5743 VATTR_INIT(&va);
5744 vcp->vap = &va;
5745 VATTR_INIT(&dva);
5746 vcp->dvap = &dva;
5747
5748 vcp->flags = vcp->flags_valid = 0;
5749
5750 #if DIAGNOSTIC
5751 if ((ctx == NULL) || (vp == NULL) || (cred == NULL))
5752 panic("vnode_authorize: bad arguments (context %p vp %p cred %p)", ctx, vp, cred);
5753 #endif
5754
5755 KAUTH_DEBUG("%p AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
5756 vp, vfs_context_proc(ctx)->p_comm,
5757 (action & KAUTH_VNODE_ACCESS) ? "access" : "auth",
5758 (action & KAUTH_VNODE_READ_DATA) ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
5759 (action & KAUTH_VNODE_WRITE_DATA) ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
5760 (action & KAUTH_VNODE_EXECUTE) ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
5761 (action & KAUTH_VNODE_DELETE) ? " DELETE" : "",
5762 (action & KAUTH_VNODE_APPEND_DATA) ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
5763 (action & KAUTH_VNODE_DELETE_CHILD) ? " DELETE_CHILD" : "",
5764 (action & KAUTH_VNODE_READ_ATTRIBUTES) ? " READ_ATTRIBUTES" : "",
5765 (action & KAUTH_VNODE_WRITE_ATTRIBUTES) ? " WRITE_ATTRIBUTES" : "",
5766 (action & KAUTH_VNODE_READ_EXTATTRIBUTES) ? " READ_EXTATTRIBUTES" : "",
5767 (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES) ? " WRITE_EXTATTRIBUTES" : "",
5768 (action & KAUTH_VNODE_READ_SECURITY) ? " READ_SECURITY" : "",
5769 (action & KAUTH_VNODE_WRITE_SECURITY) ? " WRITE_SECURITY" : "",
5770 (action & KAUTH_VNODE_CHANGE_OWNER) ? " CHANGE_OWNER" : "",
5771 (action & KAUTH_VNODE_NOIMMUTABLE) ? " (noimmutable)" : "",
5772 vnode_isdir(vp) ? "directory" : "file",
5773 vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);
5774
5775 /*
5776 * Extract the control bits from the action, everything else is
5777 * requested rights.
5778 */
5779 noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
5780 rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);
5781
5782 if (rights & KAUTH_VNODE_DELETE) {
5783 #if DIAGNOSTIC
5784 if (dvp == NULL)
5785 panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
5786 #endif
5787 /*
5788 * check to see if we've already authorized the parent
5789 * directory for deletion of its children... if so, we
5790 * can skip a whole bunch of work... we will still have to
5791 * authorize that this specific child can be removed
5792 */
5793 if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE)
5794 parent_authorized_for_delete_child = TRUE;
5795 } else {
5796 dvp = NULL;
5797 }
5798
5799 /*
5800 * Check for read-only filesystems.
5801 */
5802 if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
5803 (vp->v_mount->mnt_flag & MNT_RDONLY) &&
5804 ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
5805 (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
5806 (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) {
5807 result = EROFS;
5808 goto out;
5809 }
5810
5811 /*
5812 * Check for noexec filesystems.
5813 */
5814 if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
5815 result = EACCES;
5816 goto out;
5817 }
5818
5819 /*
5820 * Handle cases related to filesystems with non-local enforcement.
5821 * This call can return 0, in which case we will fall through to perform a
5822 * check based on VNOP_GETATTR data. Otherwise it returns 1 and sets
5823 * an appropriate result, at which point we can return immediately.
5824 */
5825 if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx))
5826 goto out;
5827
5828 /*
5829 * Get vnode attributes and extended security information for the vnode
5830 * and directory if required.
5831 */
5832 VATTR_WANTED(&va, va_mode);
5833 VATTR_WANTED(&va, va_uid);
5834 VATTR_WANTED(&va, va_gid);
5835 VATTR_WANTED(&va, va_flags);
5836 VATTR_WANTED(&va, va_acl);
5837 if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
5838 KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result);
5839 goto out;
5840 }
5841 if (dvp && parent_authorized_for_delete_child == FALSE) {
5842 VATTR_WANTED(&dva, va_mode);
5843 VATTR_WANTED(&dva, va_uid);
5844 VATTR_WANTED(&dva, va_gid);
5845 VATTR_WANTED(&dva, va_flags);
5846 VATTR_WANTED(&dva, va_acl);
5847 if ((result = vnode_getattr(dvp, &dva, ctx)) != 0) {
5848 KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp, result);
5849 goto out;
5850 }
5851 }
5852
5853 /*
5854 * If the vnode is an extended attribute data vnode (eg. a resource fork), *_DATA becomes
5855 * *_EXTATTRIBUTES.
5856 */
5857 if (vnode_isnamedstream(vp)) {
5858 if (rights & KAUTH_VNODE_READ_DATA) {
5859 rights &= ~KAUTH_VNODE_READ_DATA;
5860 rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
5861 }
5862 if (rights & KAUTH_VNODE_WRITE_DATA) {
5863 rights &= ~KAUTH_VNODE_WRITE_DATA;
5864 rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
5865 }
5866 }
5867
5868 /*
5869 * Point 'vp' to the resource fork's parent for ACL checking
5870 */
5871 if (vnode_isnamedstream(vp) &&
5872 (vp->v_parent != NULL) &&
5873 (vget_internal(vp->v_parent, 0, VNODE_NODEAD) == 0)) {
5874 parent_ref = TRUE;
5875 vcp->vp = vp = vp->v_parent;
5876 if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
5877 kauth_acl_free(va.va_acl);
5878 VATTR_INIT(&va);
5879 VATTR_WANTED(&va, va_mode);
5880 VATTR_WANTED(&va, va_uid);
5881 VATTR_WANTED(&va, va_gid);
5882 VATTR_WANTED(&va, va_flags);
5883 VATTR_WANTED(&va, va_acl);
5884 if ((result = vnode_getattr(vp, &va, ctx)) != 0)
5885 goto out;
5886 }
5887
5888 /*
5889 * Check for immutability.
5890 *
5891 * In the deletion case, parent directory immutability vetoes specific
5892 * file rights.
5893 */
5894 if ((result = vnode_authorize_checkimmutable(vp, &va, rights, noimmutable)) != 0)
5895 goto out;
5896 if ((rights & KAUTH_VNODE_DELETE) &&
5897 parent_authorized_for_delete_child == FALSE &&
5898 ((result = vnode_authorize_checkimmutable(dvp, &dva, KAUTH_VNODE_DELETE_CHILD, 0)) != 0))
5899 goto out;
5900
5901 /*
5902 * Clear rights that have been authorized by reaching this point, bail if nothing left to
5903 * check.
5904 */
5905 rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE);
5906 if (rights == 0)
5907 goto out;
5908
5909 /*
5910 * If we're not the superuser, authorize based on file properties;
5911 * note that even if parent_authorized_for_delete_child is TRUE, we
5912 * need to check on the node itself.
5913 */
5914 if (!vfs_context_issuser(ctx)) {
5915 /* process delete rights */
5916 if ((rights & KAUTH_VNODE_DELETE) &&
5917 ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0))
5918 goto out;
5919
5920 /* process remaining rights */
5921 if ((rights & ~KAUTH_VNODE_DELETE) &&
5922 (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, &found_deny)) != 0)
5923 goto out;
5924 } else {
5925
5926 /*
5927 * Execute is only granted to root if one of the x bits is set. This check only
5928 * makes sense if the posix mode bits are actually supported.
5929 */
5930 if ((rights & KAUTH_VNODE_EXECUTE) &&
5931 (vp->v_type == VREG) &&
5932 VATTR_IS_SUPPORTED(&va, va_mode) &&
5933 !(va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
5934 result = EPERM;
5935 KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vp, va.va_mode);
5936 goto out;
5937 }
5938
5939 KAUTH_DEBUG("%p ALLOWED - caller is superuser", vp);
5940 }
5941 out:
5942 if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
5943 kauth_acl_free(va.va_acl);
5944 if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL))
5945 kauth_acl_free(dva.va_acl);
5946
5947 if (result) {
5948 if (parent_ref)
5949 vnode_put(vp);
5950 *errorp = result;
5951 KAUTH_DEBUG("%p DENIED - auth denied", vp);
5952 return(KAUTH_RESULT_DENY);
5953 }
5954 if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
5955 /*
5956 * if we were successfully granted the right to search this directory
5957 * and there were NO ACL DENYs for search and the posix permissions also don't
5958 * deny execute, we can synthesize a global right that allows anyone to
5959 * traverse this directory during a pathname lookup without having to
5960 * match the credential associated with this cache of rights.
5961 */
5962 if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
5963 ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
5964 (S_IXUSR | S_IXGRP | S_IXOTH))) {
5965 vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
5966 }
5967 }
5968 if ((rights & KAUTH_VNODE_DELETE) && parent_authorized_for_delete_child == FALSE) {
5969 /*
5970 * the parent was successfully and newly authorized for content deletions;
5971 * add it to the cache, but only if it doesn't have the sticky
5972 * bit set on it. This same check is done earlier, guarding the
5973 * fetching of dva, and if we had jumped to out without having done
5974 * it, we would already have returned because of a non-zero
5975 * 'result' value.
5976 */
5977 if (VATTR_IS_SUPPORTED(&dva, va_mode) &&
5978 !(dva.va_mode & (S_ISVTX))) {
5979 /* OK to cache delete rights */
5980 vnode_cache_authorized_action(dvp, ctx, KAUTH_VNODE_DELETE_CHILD);
5981 }
5982 }
5983 if (parent_ref)
5984 vnode_put(vp);
5985 /*
5986 * Note that this implies that we will allow requests for no rights, as well as
5987 * for rights that we do not recognise. There should be none of these.
5988 */
5989 KAUTH_DEBUG("%p ALLOWED - auth granted", vp);
5990 return(KAUTH_RESULT_ALLOW);
5991 }
5992
5993 /*
5994 * Check that the attribute information in vattr can be legally applied to
5995 * a new file by the context.
5996 */
5997 int
5998 vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
5999 {
6000 int error;
6001 int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode;
6002 kauth_cred_t cred;
6003 guid_t changer;
6004 mount_t dmp;
6005
6006 error = 0;
6007 defaulted_owner = defaulted_group = defaulted_mode = 0;
6008
6009 /*
6010 * Require that the filesystem support extended security in order to apply any of the extended-security attributes.
6011 */
6012 if (!vfs_extendedsecurity(dvp->v_mount) &&
6013 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
6014 error = EINVAL;
6015 goto out;
6016 }
6017
6018 /*
6019 * Default some fields.
6020 */
6021 dmp = dvp->v_mount;
6022
6023 /*
6024 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
6025 * owner takes ownership of all new files.
6026 */
6027 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) {
6028 VATTR_SET(vap, va_uid, dmp->mnt_fsowner);
6029 defaulted_owner = 1;
6030 } else {
6031 if (!VATTR_IS_ACTIVE(vap, va_uid)) {
6032 /* default owner is current user */
6033 VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx)));
6034 defaulted_owner = 1;
6035 }
6036 }
6037
6038 /*
6039 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that
6040 * group is assigned to all new files.
6041 */
6042 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) {
6043 VATTR_SET(vap, va_gid, dmp->mnt_fsgroup);
6044 defaulted_group = 1;
6045 } else {
6046 if (!VATTR_IS_ACTIVE(vap, va_gid)) {
6047 /* default group comes from the parent object, falling back to the caller's primary group */
6048 struct vnode_attr dva;
6049 VATTR_INIT(&dva);
6050 VATTR_WANTED(&dva, va_gid);
6051 if ((error = vnode_getattr(dvp, &dva, ctx)) != 0)
6052 goto out;
6053 if (VATTR_IS_SUPPORTED(&dva, va_gid)) {
6054 VATTR_SET(vap, va_gid, dva.va_gid);
6055 } else {
6056 VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx)));
6057 }
6058 defaulted_group = 1;
6059 }
6060 }
6061
6062 if (!VATTR_IS_ACTIVE(vap, va_flags))
6063 VATTR_SET(vap, va_flags, 0);
6064
6065 /* default mode is everything, masked with current umask */
6066 if (!VATTR_IS_ACTIVE(vap, va_mode)) {
6067 VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd->fd_cmask);
6068 KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o", vap->va_mode, vfs_context_proc(ctx)->p_fd->fd_cmask);
6069 defaulted_mode = 1;
6070 }
6071 /* set timestamps to now */
6072 if (!VATTR_IS_ACTIVE(vap, va_create_time)) {
6073 nanotime(&vap->va_create_time);
6074 VATTR_SET_ACTIVE(vap, va_create_time);
6075 }
6076
6077 /*
6078 * Check for attempts to set nonsensical fields.
6079 */
6080 if (vap->va_active & ~VNODE_ATTR_NEWOBJ) {
6081 error = EINVAL;
6082 KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
6083 vap->va_active & ~VNODE_ATTR_NEWOBJ);
6084 goto out;
6085 }
6086
6087 /*
6088 * Quickly check for the applicability of any enforcement here.
6089 * Tests below maintain the integrity of the local security model.
6090 */
6091 if (vfs_authopaque(dvp->v_mount))
6092 goto out;
6093
6094 /*
6095 * We need to know if the caller is the superuser, or if the work is
6096 * otherwise already authorised.
6097 */
6098 cred = vfs_context_ucred(ctx);
6099 if (noauth) {
6100 /* doing work for the kernel */
6101 has_priv_suser = 1;
6102 } else {
6103 has_priv_suser = vfs_context_issuser(ctx);
6104 }
6105
6106
6107 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6108 if (has_priv_suser) {
6109 if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) {
6110 error = EPERM;
6111 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
6112 goto out;
6113 }
6114 } else {
6115 if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) {
6116 error = EPERM;
6117 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
6118 goto out;
6119 }
6120 }
6121 }
6122
6123 /* if not superuser, validate legality of new-item attributes */
6124 if (!has_priv_suser) {
6125 if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) {
6126 /* setgid? */
6127 if (vap->va_mode & S_ISGID) {
6128 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6129 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
6130 goto out;
6131 }
6132 if (!ismember) {
6133 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", vap->va_gid);
6134 error = EPERM;
6135 goto out;
6136 }
6137 }
6138
6139 /* setuid? */
6140 if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) {
6141 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
6142 error = EPERM;
6143 goto out;
6144 }
6145 }
6146 if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) {
6147 KAUTH_DEBUG(" DENIED - cannot create new item owned by %d", vap->va_uid);
6148 error = EPERM;
6149 goto out;
6150 }
6151 if (!defaulted_group) {
6152 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6153 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
6154 goto out;
6155 }
6156 if (!ismember) {
6157 KAUTH_DEBUG(" DENIED - cannot create new item with group %d - not a member", vap->va_gid);
6158 error = EPERM;
6159 goto out;
6160 }
6161 }
6162
6163 /* initialising owner/group UUID */
6164 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
6165 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
6166 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
6167 /* XXX ENOENT here - no GUID - should perhaps become EPERM */
6168 goto out;
6169 }
6170 if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
6171 KAUTH_DEBUG(" ERROR - cannot create item with supplied owner UUID - not us");
6172 error = EPERM;
6173 goto out;
6174 }
6175 }
6176 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
6177 if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
6178 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
6179 goto out;
6180 }
6181 if (!ismember) {
6182 KAUTH_DEBUG(" ERROR - cannot create item with supplied group UUID - not a member");
6183 error = EPERM;
6184 goto out;
6185 }
6186 }
6187 }
6188 out:
6189 return(error);
6190 }
6191
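/*
 * Typical usage (an illustrative sketch only; error handling and the create
 * operation itself are omitted): a create path holding an iocount on the
 * parent directory 'dvp' might default and validate the new object's
 * attributes, then authorize the directory operation, roughly as follows:
 *
 *	struct vnode_attr va;
 *	int error;
 *
 *	VATTR_INIT(&va);
 *	VATTR_SET(&va, va_mode, 0644);
 *	if ((error = vnode_authattr_new(dvp, &va, 0, ctx)) != 0)
 *		return (error);
 *	if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx)) != 0)
 *		return (error);
 *	... hand 'va' to the filesystem's create operation ...
 */
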
6192 /*
6193 * Check that the attribute information in vap can be legally written by the
6194 * context.
6195 *
6196 * Call this when you're not sure about the vnode_attr: either its contents
6197 * have come from an unknown source, or they are variable.
6198 *
6199 * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
6200 * must be authorized to be permitted to write the vattr.
6201 */
6202 int
6203 vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
6204 {
6205 struct vnode_attr ova;
6206 kauth_action_t required_action;
6207 int error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid;
6208 guid_t changer;
6209 gid_t group;
6210 uid_t owner;
6211 mode_t newmode;
6212 kauth_cred_t cred;
6213 uint32_t fdelta;
6214
6215 VATTR_INIT(&ova);
6216 required_action = 0;
6217 error = 0;
6218
6219 /*
6220 * Quickly check for enforcement applicability.
6221 */
6222 if (vfs_authopaque(vp->v_mount))
6223 goto out;
6224
6225 /*
6226 * Check for attempts to set nonsensical fields.
6227 */
6228 if (vap->va_active & VNODE_ATTR_RDONLY) {
6229 KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
6230 error = EINVAL;
6231 goto out;
6232 }
6233
6234 /*
6235 * We need to know if the caller is the superuser.
6236 */
6237 cred = vfs_context_ucred(ctx);
6238 has_priv_suser = kauth_cred_issuser(cred);
6239
6240 /*
6241 * If any of the following are changing, we need information from the old file:
6242 * va_uid
6243 * va_gid
6244 * va_mode
6245 * va_uuuid
6246 * va_guuid
6247 */
6248 if (VATTR_IS_ACTIVE(vap, va_uid) ||
6249 VATTR_IS_ACTIVE(vap, va_gid) ||
6250 VATTR_IS_ACTIVE(vap, va_mode) ||
6251 VATTR_IS_ACTIVE(vap, va_uuuid) ||
6252 VATTR_IS_ACTIVE(vap, va_guuid)) {
6253 VATTR_WANTED(&ova, va_mode);
6254 VATTR_WANTED(&ova, va_uid);
6255 VATTR_WANTED(&ova, va_gid);
6256 VATTR_WANTED(&ova, va_uuuid);
6257 VATTR_WANTED(&ova, va_guuid);
6258 KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
6259 }
6260
6261 /*
6262 * If timestamps are being changed, we need to know who owns
6263 * the file.
6264 */
6265 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
6266 VATTR_IS_ACTIVE(vap, va_change_time) ||
6267 VATTR_IS_ACTIVE(vap, va_modify_time) ||
6268 VATTR_IS_ACTIVE(vap, va_access_time) ||
6269 VATTR_IS_ACTIVE(vap, va_backup_time)) {
6270
6271 VATTR_WANTED(&ova, va_uid);
6272 #if 0 /* enable this when we support UUIDs as official owners */
6273 VATTR_WANTED(&ova, va_uuuid);
6274 #endif
6275 KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
6276 }
6277
6278 /*
6279 * If flags are being changed, we need the old flags.
6280 */
6281 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6282 KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
6283 VATTR_WANTED(&ova, va_flags);
6284 }
6285
6286 /*
6287 * If the size is being set, make sure it's not a directory.
6288 */
6289 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
6290 /* size is meaningless on a directory, don't permit this */
6291 if (vnode_isdir(vp)) {
6292 KAUTH_DEBUG("ATTR - ERROR: size change requested on a directory");
6293 error = EISDIR;
6294 goto out;
6295 }
6296 }
6297
6298 /*
6299 * Get old data.
6300 */
6301 KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active);
6302 if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
6303 KAUTH_DEBUG(" ERROR - got %d trying to get attributes", error);
6304 goto out;
6305 }
6306
6307 /*
6308 * Size changes require write access to the file data.
6309 */
6310 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
6311 /* any change to the file size requires write access to the file data */
6312 KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
6313 required_action |= KAUTH_VNODE_WRITE_DATA;
6314 }
6315
6316 /*
6317 * Changing timestamps?
6318 *
6319 * Note that we are only called to authorize user-requested time changes;
6320 * side-effect time changes are not authorized. Authorisation is only
6321 * required for existing files.
6322 *
6323 * Non-owners are not permitted to change the time on an existing
6324 * file to anything other than the current time.
6325 */
6326 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
6327 VATTR_IS_ACTIVE(vap, va_change_time) ||
6328 VATTR_IS_ACTIVE(vap, va_modify_time) ||
6329 VATTR_IS_ACTIVE(vap, va_access_time) ||
6330 VATTR_IS_ACTIVE(vap, va_backup_time)) {
6331 /*
6332 * The owner and root may set any timestamps they like,
6333 * provided that the file is not immutable. The owner still needs
6334 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
6335 */
6336 if (has_priv_suser || vauth_node_owner(&ova, cred)) {
6337 KAUTH_DEBUG("ATTR - root or owner changing timestamps");
6338 required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES;
6339 } else {
6340 /* just setting the current time? */
6341 if (vap->va_vaflags & VA_UTIMES_NULL) {
6342 KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
6343 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
6344 } else {
6345 KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
6346 error = EACCES;
6347 goto out;
6348 }
6349 }
6350 }
6351
6352 /*
6353 * Changing file mode?
6354 */
6355 if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) {
6356 KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode);
6357
6358 /*
6359 * Mode changes always have the same basic auth requirements.
6360 */
6361 if (has_priv_suser) {
6362 KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
6363 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
6364 } else {
6365 /* need WRITE_SECURITY */
6366 KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
6367 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6368 }
6369
6370 /*
6371 * Can't set the setgid bit unless you're in the group or are root. We must have
6372 * existing group information if the group isn't being set right now.
6373 */
6374 if (vap->va_mode & S_ISGID) {
6375 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
6376 if (!has_priv_suser) {
6377 if (VATTR_IS_ACTIVE(vap, va_gid)) {
6378 group = vap->va_gid;
6379 } else if (VATTR_IS_SUPPORTED(&ova, va_gid)) {
6380 group = ova.va_gid;
6381 } else {
6382 KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
6383 error = EINVAL;
6384 goto out;
6385 }
6386 /*
6387 * This might be too restrictive; WRITE_SECURITY might be implied by
6388 * membership in this case, rather than being an additional requirement.
6389 */
6390 if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) {
6391 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
6392 goto out;
6393 }
6394 if (!ismember) {
6395 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", group);
6396 error = EPERM;
6397 goto out;
6398 }
6399 }
6400 }
6401
6402 /*
6403 * Can't set the setuid bit unless you're root or the file's owner.
6404 */
6405 if (vap->va_mode & S_ISUID) {
6406 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
6407 if (!has_priv_suser) {
6408 if (VATTR_IS_ACTIVE(vap, va_uid)) {
6409 owner = vap->va_uid;
6410 } else if (VATTR_IS_SUPPORTED(&ova, va_uid)) {
6411 owner = ova.va_uid;
6412 } else {
6413 KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
6414 error = EINVAL;
6415 goto out;
6416 }
6417 if (owner != kauth_cred_getuid(cred)) {
6418 /*
6419 * We could allow this if WRITE_SECURITY is permitted, perhaps.
6420 */
6421 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
6422 error = EPERM;
6423 goto out;
6424 }
6425 }
6426 }
6427 }
6428
6429 /*
6430 * Validate/mask flags changes. This checks that non-superusers change only
6431 * flags in the UF_SETTABLE mask, while the superuser may also change
6432 * flags in the SF_SETTABLE mask.
6433 *
6434 * Since flags changes may be made in conjunction with other changes,
6435 * we will ask the auth code to ignore immutability in the case that
6436 * the SF_* flags are not set and we are only manipulating the file flags.
6437 *
6438 */
6439 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6440 /* compute changing flags bits */
6441 if (VATTR_IS_SUPPORTED(&ova, va_flags)) {
6442 fdelta = vap->va_flags ^ ova.va_flags;
6443 } else {
6444 fdelta = vap->va_flags;
6445 }
6446
6447 if (fdelta != 0) {
6448 KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
6449 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6450
6451 /* check that changing bits are legal */
6452 if (has_priv_suser) {
6453 /*
6454 * The immutability check will prevent us from clearing the SF_*
6455 * flags unless the system securelevel permits it, so just check
6456 * for legal flags here.
6457 */
6458 if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) {
6459 error = EPERM;
6460 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
6461 goto out;
6462 }
6463 } else {
6464 if (fdelta & ~UF_SETTABLE) {
6465 error = EPERM;
6466 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
6467 goto out;
6468 }
6469 }
6470 /*
6471 * If the caller has the ability to manipulate file flags,
6472 * security is not reduced by ignoring them for this operation.
6473 *
6474 * A more complete test here would consider the 'after' states of the flags
6475 * to determine whether it would permit the operation, but this becomes
6476 * very complex.
6477 *
6478 * Ignoring immutability is conditional on securelevel; this does not bypass
6479 * the SF_* flags if securelevel > 0.
6480 */
6481 required_action |= KAUTH_VNODE_NOIMMUTABLE;
6482 }
6483 }
6484
6485 /*
6486 * Validate ownership information.
6487 */
6488 chowner = 0;
6489 chgroup = 0;
6490 clear_suid = 0;
6491 clear_sgid = 0;
6492
6493 /*
6494 * uid changing
6495 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
6496 * support UIDs in general, and will ignore the one we try to set.
6497 * We might want to clear the uid out of vap completely here.
6498 */
6499 if (VATTR_IS_ACTIVE(vap, va_uid)) {
6500 if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) {
6501 if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) {
6502 KAUTH_DEBUG(" DENIED - non-superuser cannot change ownership to a third party");
6503 error = EPERM;
6504 goto out;
6505 }
6506 chowner = 1;
6507 }
6508 clear_suid = 1;
6509 }
6510
6511 /*
6512 * gid changing
6513 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
6514 * support GIDs in general, and will ignore the one we try to set.
6515 * We might want to clear the gid out of vap completely here.
6516 */
6517 if (VATTR_IS_ACTIVE(vap, va_gid)) {
6518 if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) {
6519 if (!has_priv_suser) {
6520 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6521 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
6522 goto out;
6523 }
6524 if (!ismember) {
6525 KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group",
6526 ova.va_gid, vap->va_gid);
6527 error = EPERM;
6528 goto out;
6529 }
6530 }
6531 chgroup = 1;
6532 }
6533 clear_sgid = 1;
6534 }
6535
6536 /*
6537 * Owner UUID being set or changed.
6538 */
6539 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
6540 /* if the owner UUID is not actually changing ... */
6541 if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
6542 if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid))
6543 goto no_uuuid_change;
6544
6545 /*
6546 * If the current owner UUID is a null GUID, check
6547 * it against the UUID corresponding to the owner UID.
6548 */
6549 if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
6550 VATTR_IS_SUPPORTED(&ova, va_uid)) {
6551 guid_t uid_guid;
6552
6553 if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
6554 kauth_guid_equal(&vap->va_uuuid, &uid_guid))
6555 goto no_uuuid_change;
6556 }
6557 }
6558
6559 /*
6560 * The owner UUID cannot be set by a non-superuser to anything other than
6561 * their own or a null GUID (to "unset" the owner UUID).
6562 * Note that file systems must be prepared to handle the
6563 * null UUID case in a manner appropriate for that file
6564 * system.
6565 */
6566 if (!has_priv_suser) {
6567 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
6568 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
6569 /* XXX ENOENT here - no UUID - should perhaps become EPERM */
6570 goto out;
6571 }
6572 if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
6573 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
6574 KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us / null");
6575 error = EPERM;
6576 goto out;
6577 }
6578 }
6579 chowner = 1;
6580 clear_suid = 1;
6581 }
6582 no_uuuid_change:
6583 /*
6584 * Group UUID being set or changed.
6585 */
6586 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
6587 /* if the group UUID is not actually changing ... */
6588 if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
6589 if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid))
6590 goto no_guuid_change;
6591
6592 /*
6593 * If the current group UUID is a null UUID, check
6594 * it against the UUID corresponding to the group GID.
6595 */
6596 if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
6597 VATTR_IS_SUPPORTED(&ova, va_gid)) {
6598 guid_t gid_guid;
6599
6600 if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
6601 kauth_guid_equal(&vap->va_guuid, &gid_guid))
6602 goto no_guuid_change;
6603 }
6604 }
6605
6606 /*
6607 * The group UUID cannot be set by a non-superuser to anything other than
6608 * one of which they are a member or a null GUID (to "unset"
6609 * the group UUID).
6610 * Note that file systems must be prepared to handle the
6611 * null UUID case in a manner appropriate for that file
6612 * system.
6613 */
6614 if (!has_priv_suser) {
6615 if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid))
6616 ismember = 1;
6617 else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
6618 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
6619 goto out;
6620 }
6621 if (!ismember) {
6622 KAUTH_DEBUG(" ERROR - cannot set supplied group UUID - not a member / null");
6623 error = EPERM;
6624 goto out;
6625 }
6626 }
6627 chgroup = 1;
6628 }
6629 no_guuid_change:
6630
6631 /*
6632 * Compute authorisation for group/ownership changes.
6633 */
6634 if (chowner || chgroup || clear_suid || clear_sgid) {
6635 if (has_priv_suser) {
6636 KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
6637 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
6638 } else {
6639 if (chowner) {
6640 KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
6641 required_action |= KAUTH_VNODE_TAKE_OWNERSHIP;
6642 }
6643 if (chgroup && !chowner) {
6644 KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
6645 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6646 }
6647
6648 /* clear set-uid and set-gid bits as required by Posix */
6649 if (VATTR_IS_ACTIVE(vap, va_mode)) {
6650 newmode = vap->va_mode;
6651 } else if (VATTR_IS_SUPPORTED(&ova, va_mode)) {
6652 newmode = ova.va_mode;
6653 } else {
6654 KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
6655 newmode = 0;
6656 }
6657 if (newmode & (S_ISUID | S_ISGID)) {
6658 VATTR_SET(vap, va_mode, newmode & ~(S_ISUID | S_ISGID));
6659 KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o", newmode, vap->va_mode);
6660 }
6661 }
6662 }
6663
6664 /*
6665 * Authorise changes in the ACL.
6666 */
6667 if (VATTR_IS_ACTIVE(vap, va_acl)) {
6668
6669 /* no existing ACL */
6670 if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) {
6671
6672 /* adding an ACL */
6673 if (vap->va_acl != NULL) {
6674 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6675 KAUTH_DEBUG("CHMOD - adding ACL");
6676 }
6677
6678 /* removing an existing ACL */
6679 } else if (vap->va_acl == NULL) {
6680 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6681 KAUTH_DEBUG("CHMOD - removing ACL");
6682
6683 /* updating an existing ACL */
6684 } else {
6685 if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) {
6686 /* entry count changed, must be different */
6687 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6688 KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
6689 } else if (vap->va_acl->acl_entrycount > 0) {
6690 /* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
6691 if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0],
6692 sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount)) {
6693 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6694 KAUTH_DEBUG("CHMOD - changing ACL entries");
6695 }
6696 }
6697 }
6698 }
6699
6700 /*
6701 * Other attributes that require authorisation.
6702 */
6703 if (VATTR_IS_ACTIVE(vap, va_encoding))
6704 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
6705
6706 out:
6707 if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL))
6708 kauth_acl_free(ova.va_acl);
6709 if (error == 0)
6710 *actionp = required_action;
6711 return(error);
6712 }
6713
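/*
 * Typical usage (an illustrative sketch only; locking and surrounding error
 * handling are omitted): a setattr path holding an iocount on 'vp' would
 * first convert the requested changes into the KAUTH actions that must be
 * authorized, then apply the attributes:
 *
 *	kauth_action_t action;
 *	int error;
 *
 *	if ((error = vnode_authattr(vp, vap, &action, ctx)) != 0)
 *		return (error);
 *	if (action != 0 &&
 *	    (error = vnode_authorize(vp, NULL, action, ctx)) != 0)
 *		return (error);
 *	error = vnode_setattr(vp, vap, ctx);
 */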
6714
6715 void
6716 vfs_setlocklocal(mount_t mp)
6717 {
6718 vnode_t vp;
6719
6720 mount_lock(mp);
6721 mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
6722
6723 /*
6724 * We do not expect anyone to be using any vnodes at the
6725 * time this routine is called, so there is no need for vnode locking.
6726 */
6727 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
6728 vp->v_flag |= VLOCKLOCAL;
6729 }
6730 TAILQ_FOREACH(vp, &mp->mnt_workerqueue, v_mntvnodes) {
6731 vp->v_flag |= VLOCKLOCAL;
6732 }
6733 TAILQ_FOREACH(vp, &mp->mnt_newvnodes, v_mntvnodes) {
6734 vp->v_flag |= VLOCKLOCAL;
6735 }
6736 mount_unlock(mp);
6737 }
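
/*
 * Usage note (illustrative): a filesystem that wants advisory byte-range
 * locking handled by the common in-kernel lock manager, rather than by its
 * own VNOP_ADVLOCK implementation, would typically call vfs_setlocklocal()
 * from its mount handler, before any vnodes have been created for the mount.
 */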
6738
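/*
 * VISUNION wait protocol, used by union mounts: vn_setunionwait() marks the
 * vnode busy, vn_checkunionwait() blocks callers until the flag clears, and
 * vn_clearunionwait() clears the flag and wakes any waiters.  The flag word
 * itself (&vp->v_flag) serves as the sleep/wakeup channel.
 */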
6739 void
6740 vn_setunionwait(vnode_t vp)
6741 {
6742 vnode_lock_spin(vp);
6743 vp->v_flag |= VISUNION;
6744 vnode_unlock(vp);
6745 }
6746
6747
6748 void
6749 vn_checkunionwait(vnode_t vp)
6750 {
6751 vnode_lock_spin(vp);
6752 while ((vp->v_flag & VISUNION) == VISUNION)
6753 msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
6754 vnode_unlock(vp);
6755 }
6756
6757 void
6758 vn_clearunionwait(vnode_t vp, int locked)
6759 {
6760 if (!locked)
6761 vnode_lock_spin(vp);
6762 if((vp->v_flag & VISUNION) == VISUNION) {
6763 vp->v_flag &= ~VISUNION;
6764 wakeup((caddr_t)&vp->v_flag);
6765 }
6766 if (!locked)
6767 vnode_unlock(vp);
6768 }
6769
6770 /*
6771 * XXX - get "don't trigger mounts" flag for thread; used by autofs.
6772 */
6773 extern int thread_notrigger(void);
6774
6775 int
6776 thread_notrigger(void)
6777 {
6778 struct uthread *uth = (struct uthread *)get_bsdthread_info(current_thread());
6779 return (uth->uu_notrigger);
6780 }
6781
6782 /*
6783 * Removes orphaned apple double files during a rmdir
6784 * Works by:
6785 * 1. vnode_suspend().
6786 * 2. Call VNOP_READDIR() until the end of the directory is reached.
6787 * 3. Check that the directory entries returned are regular files with names starting with "._". If not, return ENOTEMPTY.
6788 * 4. Continue (2) and (3) until the end of the directory is reached.
6789 * 5. If all the entries in the directory were "._" files, delete all of them.
6790 * 6. vnode_resume()
6791 * 7. If deletion of all the files succeeded, call VNOP_RMDIR() again.
6792 */
6793
6794 errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * restart_flag)
6795 {
6796
6797 #define UIO_BUFF_SIZE 2048
6798 uio_t auio = NULL;
6799 int eofflag, siz = UIO_BUFF_SIZE, nentries = 0;
6800 int open_flag = 0, full_erase_flag = 0;
6801 char uio_buf[ UIO_SIZEOF(1) ];
6802 char *rbuf = NULL, *cpos, *cend;
6803 struct nameidata nd_temp;
6804 struct dirent *dp;
6805 errno_t error;
6806
6807 error = vnode_suspend(vp);
6808
6809 /*
6810 * restart_flag is set so that the calling rmdir sleeps and restarts the operation
6811 */
6812 if (error == EBUSY)
6813 *restart_flag = 1;
6814 if (error != 0)
6815 goto outsc;
6816
6817 /*
6818 * set up UIO
6819 */
6820 MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK);
6821 if (rbuf)
6822 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
6823 &uio_buf[0], sizeof(uio_buf));
6824 if (!rbuf || !auio) {
6825 error = ENOMEM;
6826 goto outsc;
6827 }
6828
6829 uio_setoffset(auio,0);
6830
6831 eofflag = 0;
6832
6833 if ((error = VNOP_OPEN(vp, FREAD, ctx)))
6834 goto outsc;
6835 else
6836 open_flag = 1;
6837
6838 /*
6839 * First pass checks whether all files are AppleDouble ("._") files.
6840 */
6841
6842 do {
6843 siz = UIO_BUFF_SIZE;
6844 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
6845 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
6846
6847 if((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx)))
6848 goto outsc;
6849
6850 if (uio_resid(auio) != 0)
6851 siz -= uio_resid(auio);
6852
6853 /*
6854 * Iterate through directory
6855 */
6856 cpos = rbuf;
6857 cend = rbuf + siz;
6858 dp = (struct dirent*) cpos;
6859
6860 if (cpos == cend)
6861 eofflag = 1;
6862
6863 while ((cpos < cend)) {
6864 /*
6865 * Check for . and .. as well as directories
6866 */
6867 if (dp->d_ino != 0 &&
6868 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
6869 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) {
6870 /*
6871 * Check for irregular files and ._ files
6872 * If there is a ._._ file abort the op
6873 */
6874 if ( dp->d_namlen < 2 ||
6875 strncmp(dp->d_name,"._",2) ||
6876 (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._",2))) {
6877 error = ENOTEMPTY;
6878 goto outsc;
6879 }
6880 }
6881 cpos += dp->d_reclen;
6882 dp = (struct dirent*)cpos;
6883 }
6884
6885 /*
6886 * workaround for HFS/NFS setting eofflag before end of file
6887 */
6888 if (vp->v_tag == VT_HFS && nentries > 2)
6889 eofflag=0;
6890
6891 if (vp->v_tag == VT_NFS) {
6892 if (eofflag && !full_erase_flag) {
6893 full_erase_flag = 1;
6894 eofflag = 0;
6895 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
6896 }
6897 else if (!eofflag && full_erase_flag)
6898 full_erase_flag = 0;
6899 }
6900
6901 } while (!eofflag);
6902 /*
6903 * If we've made it here, all the files in the dir are "._" files.
6904 * We can delete them even though the node is suspended
6905 * because we are the owner of the file.
6906 */
6907
6908 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
6909 eofflag = 0;
6910 full_erase_flag = 0;
6911
6912 do {
6913 siz = UIO_BUFF_SIZE;
6914 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
6915 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
6916
6917 error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);
6918
6919 if (error != 0)
6920 goto outsc;
6921
6922 if (uio_resid(auio) != 0)
6923 siz -= uio_resid(auio);
6924
6925 /*
6926 * Iterate through directory
6927 */
6928 cpos = rbuf;
6929 cend = rbuf + siz;
6930 dp = (struct dirent*) cpos;
6931
6932 if (cpos == cend)
6933 eofflag = 1;
6934
6935 while ((cpos < cend)) {
6936 /*
6937 * Check for . and .. as well as directories
6938 */
6939 if (dp->d_ino != 0 &&
6940 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
6941 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
6942 ) {
6943
6944 NDINIT(&nd_temp, DELETE, USEDVP, UIO_SYSSPACE, CAST_USER_ADDR_T(dp->d_name), ctx);
6945 nd_temp.ni_dvp = vp;
6946 error = unlink1(ctx, &nd_temp, 0);
6947 if (error && error != ENOENT) {
6948 goto outsc;
6949 }
6950 }
6951 cpos += dp->d_reclen;
6952 dp = (struct dirent*)cpos;
6953 }
6954
6955 /*
6956 * workaround for HFS/NFS setting eofflag before end of file
6957 */
6958 if (vp->v_tag == VT_HFS && nentries > 2)
6959 eofflag=0;
6960
6961 if (vp->v_tag == VT_NFS) {
6962 if (eofflag && !full_erase_flag) {
6963 full_erase_flag = 1;
6964 eofflag = 0;
6965 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
6966 }
6967 else if (!eofflag && full_erase_flag)
6968 full_erase_flag = 0;
6969 }
6970
6971 } while (!eofflag);
6972
6973
6974 error = 0;
6975
6976 outsc:
6977 if (open_flag)
6978 VNOP_CLOSE(vp, FREAD, ctx);
6979
6980 uio_free(auio);
6981 FREE(rbuf, M_TEMP);
6982
6983 vnode_resume(vp);
6984
6985
6986 return(error);
6987
6988 }
6989
6990
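/*
 * Post a kevent (the NOTE_* hint in 'kevent_num') to any knotes attached to
 * the vnode.  The vnode lock is taken only when the knote list is non-empty,
 * which keeps the common case of no watchers cheap.
 */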
6991 void
6992 lock_vnode_and_post(vnode_t vp, int kevent_num)
6993 {
6994 /* Only take the lock if there's something there! */
6995 if (vp->v_knotes.slh_first != NULL) {
6996 vnode_lock(vp);
6997 KNOTE(&vp->v_knotes, kevent_num);
6998 vnode_unlock(vp);
6999 }
7000 }
7001
7002 #ifdef JOE_DEBUG
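/*
 * Debug-only bookkeeping: adjust the per-thread iocount tally by 'count' and
 * remember up to 32 distinct non-VSYSTEM vnodes referenced by this thread,
 * so that a leaked iocount can be traced back to the vnodes involved.
 */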
7003 static void record_vp(vnode_t vp, int count) {
7004 struct uthread *ut;
7005 int i;
7006
7007 if ((vp->v_flag & VSYSTEM))
7008 return;
7009
7010 ut = get_bsdthread_info(current_thread());
7011 ut->uu_iocount += count;
7012
7013 if (ut->uu_vpindex < 32) {
7014 for (i = 0; i < ut->uu_vpindex; i++) {
7015 if (ut->uu_vps[i] == vp)
7016 return;
7017 }
7018 ut->uu_vps[ut->uu_vpindex] = vp;
7019 ut->uu_vpindex++;
7020 }
7021 }
7022 #endif