bsd/vfs/vfs_subr.c
1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount_internal.h>
85 #include <sys/time.h>
86 #include <sys/lock.h>
87 #include <sys/vnode.h>
88 #include <sys/vnode_internal.h>
89 #include <sys/stat.h>
90 #include <sys/namei.h>
91 #include <sys/ucred.h>
92 #include <sys/buf_internal.h>
93 #include <sys/errno.h>
94 #include <sys/malloc.h>
95 #include <sys/uio_internal.h>
96 #include <sys/uio.h>
97 #include <sys/domain.h>
98 #include <sys/mbuf.h>
99 #include <sys/syslog.h>
100 #include <sys/ubc_internal.h>
101 #include <sys/vm.h>
102 #include <sys/sysctl.h>
103 #include <sys/filedesc.h>
104 #include <sys/event.h>
105 #include <sys/kdebug.h>
106 #include <sys/kauth.h>
107 #include <sys/user.h>
108 #include <sys/kern_memorystatus.h>
109 #include <miscfs/fifofs/fifo.h>
110
111 #include <string.h>
112 #include <machine/spl.h>
113
114
115 #include <kern/assert.h>
116
117 #include <miscfs/specfs/specdev.h>
118
119 #include <mach/mach_types.h>
120 #include <mach/memory_object_types.h>
121
122 #include <kern/kalloc.h> /* kalloc()/kfree() */
123 #include <kern/clock.h> /* delay_for_interval() */
124 #include <libkern/OSAtomic.h> /* OSAddAtomic() */
125
126
127 #include <vm/vm_protos.h> /* vnode_pager_vrele() */
128
129 #if CONFIG_MACF
130 #include <security/mac_framework.h>
131 #endif
132
133 extern lck_grp_t *vnode_lck_grp;
134 extern lck_attr_t *vnode_lck_attr;
135
136
137 extern lck_mtx_t * mnt_list_mtx_lock;
138
139 enum vtype iftovt_tab[16] = {
140 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
141 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
142 };
143 int vttoif_tab[9] = {
144 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
145 S_IFSOCK, S_IFIFO, S_IFMT,
146 };
147
148 /* XXX next prototype should be from <nfs/nfs.h> */
149 extern int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int);
150
151 /* XXX next prototype should be from <libsa/stdlib.h> but conflicts with libkern */
152 __private_extern__ void qsort(
153 void * array,
154 size_t nmembers,
155 size_t member_size,
156 int (*)(const void *, const void *));
157
158 extern kern_return_t adjust_vm_object_cache(vm_size_t oval, vm_size_t nval);
159 __private_extern__ void vntblinit(void);
160 __private_extern__ kern_return_t reset_vmobjectcache(unsigned int val1,
161 unsigned int val2);
162 __private_extern__ int unlink1(vfs_context_t, struct nameidata *, int);
163
164 extern int system_inshutdown;
165
166 static void vnode_list_add(vnode_t);
167 static void vnode_list_remove(vnode_t);
168 static void vnode_list_remove_locked(vnode_t);
169
170 static errno_t vnode_drain(vnode_t);
171 static void vgone(vnode_t, int flags);
172 static void vclean(vnode_t vp, int flag);
173 static void vnode_reclaim_internal(vnode_t, int, int, int);
174
175 static void vnode_dropiocount (vnode_t);
176 static errno_t vnode_getiocount(vnode_t vp, unsigned int vid, int vflags);
177
178 static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
179 static int vnode_reload(vnode_t);
180 static int vnode_isinuse_locked(vnode_t, int, int);
181
182 static void insmntque(vnode_t vp, mount_t mp);
183 static int mount_getvfscnt(void);
184 static int mount_fillfsids(fsid_t *, int );
185 static void vnode_iterate_setup(mount_t);
186 int vnode_umount_preflight(mount_t, vnode_t, int);
187 static int vnode_iterate_prepare(mount_t);
188 static int vnode_iterate_reloadq(mount_t);
189 static void vnode_iterate_clear(mount_t);
190 static mount_t vfs_getvfs_locked(fsid_t *);
191
192 errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);
193
194 #ifdef JOE_DEBUG
195 static void record_vp(vnode_t vp, int count);
196 #endif
197
198 TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
199 TAILQ_HEAD(deadlst, vnode) vnode_dead_list; /* vnode dead list */
200
201 TAILQ_HEAD(ragelst, vnode) vnode_rage_list; /* vnode rapid age list */
202 struct timeval rage_tv;
203 int rage_limit = 0;
204 int ragevnodes = 0;
205
206 #define RAGE_LIMIT_MIN 100
207 #define RAGE_TIME_LIMIT 5
208
209 struct mntlist mountlist; /* mounted filesystem list */
210 static int nummounts = 0;
211
212 #if DIAGNOSTIC
213 #define VLISTCHECK(fun, vp, list) \
214 if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
215 panic("%s: %s vnode not on %slist", (fun), (list), (list));
216 #else
217 #define VLISTCHECK(fun, vp, list)
218 #endif /* DIAGNOSTIC */
219
220 #define VLISTNONE(vp) \
221 do { \
222 (vp)->v_freelist.tqe_next = (struct vnode *)0; \
223 (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb; \
224 } while(0)
225
226 #define VONLIST(vp) \
227 ((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
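
/*
 * The 0xdeadb poison stored in tqe_prev marks a vnode that is on no
 * free/dead/rage list at all; VONLIST() keys off that value, so the
 * VREM* macros below reset it with VLISTNONE() after TAILQ_REMOVE().
 */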
228
229 /* remove a vnode from free vnode list */
230 #define VREMFREE(fun, vp) \
231 do { \
232 VLISTCHECK((fun), (vp), "free"); \
233 TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist); \
234 VLISTNONE((vp)); \
235 freevnodes--; \
236 } while(0)
237
238
239
240 /* remove a vnode from dead vnode list */
241 #define VREMDEAD(fun, vp) \
242 do { \
243 VLISTCHECK((fun), (vp), "dead"); \
244 TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist); \
245 VLISTNONE((vp)); \
246 vp->v_listflag &= ~VLIST_DEAD; \
247 deadvnodes--; \
248 } while(0)
249
250
251 /* remove a vnode from rage vnode list */
252 #define VREMRAGE(fun, vp) \
253 do { \
254 if ( !(vp->v_listflag & VLIST_RAGE)) \
255 panic("VREMRAGE: vp not on rage list"); \
256 VLISTCHECK((fun), (vp), "rage"); \
257 TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist); \
258 VLISTNONE((vp)); \
259 vp->v_listflag &= ~VLIST_RAGE; \
260 ragevnodes--; \
261 } while(0)
262
263
264 /*
265 * vnodetarget hasn't been used in a long time, but
266 * it was exported for some reason... I'm leaving it in
267 * place for now... it should be deprecated out of the
268 * exports and removed eventually.
269 */
270 u_int32_t vnodetarget; /* target for vnreclaim() */
271 #define VNODE_FREE_TARGET 20 /* Default value for vnodetarget */
272
273 /*
274 * We need quite a few vnodes on the free list to sustain the
275 * rapid stat() the compilation process does, and still benefit from the name
276 * cache. Having too few vnodes on the free list causes serious disk
277 * thrashing as we cycle through them.
278 */
279 #define VNODE_FREE_MIN CONFIG_VNODE_FREE_MIN /* freelist should have at least this many */
280
281 /*
282 * Initialize the vnode management data structures.
283 */
284 __private_extern__ void
285 vntblinit(void)
286 {
287 TAILQ_INIT(&vnode_free_list);
288 TAILQ_INIT(&vnode_rage_list);
289 TAILQ_INIT(&vnode_dead_list);
290 TAILQ_INIT(&mountlist);
291
292 if (!vnodetarget)
293 vnodetarget = VNODE_FREE_TARGET;
294
295 microuptime(&rage_tv);
296 rage_limit = desiredvnodes / 100;
297
298 if (rage_limit < RAGE_LIMIT_MIN)
299 rage_limit = RAGE_LIMIT_MIN;
300
301 /*
302 * Scale the vm_object_cache to accommodate the vnodes
303 * we want to cache
304 */
305 (void) adjust_vm_object_cache(0, desiredvnodes - VNODE_FREE_MIN);
306 }
307
308 /* Reset the VM Object Cache with the values passed in */
309 __private_extern__ kern_return_t
310 reset_vmobjectcache(unsigned int val1, unsigned int val2)
311 {
312 vm_size_t oval = val1 - VNODE_FREE_MIN;
313 vm_size_t nval;
314
315 if (val1 == val2) {
316 return KERN_SUCCESS;
317 }
318
319 if(val2 < VNODE_FREE_MIN)
320 nval = 0;
321 else
322 nval = val2 - VNODE_FREE_MIN;
323
324 return(adjust_vm_object_cache(oval, nval));
325 }
326
327
328 /* the timeout is in units of 10 msecs */
329 int
330 vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg) {
331 int error = 0;
332 struct timespec ts;
333
334 KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);
335
336 if (vp->v_numoutput > output_target) {
337
338 slpflag |= PDROP;
339
340 vnode_lock_spin(vp);
341
342 while ((vp->v_numoutput > output_target) && error == 0) {
343 if (output_target)
344 vp->v_flag |= VTHROTTLED;
345 else
346 vp->v_flag |= VBWAIT;
347
348 ts.tv_sec = (slptimeout/100);
349 ts.tv_nsec = (slptimeout % 1000) * 10 * NSEC_PER_USEC * 1000 ;
350 error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);
351
352 vnode_lock_spin(vp);
353 }
354 vnode_unlock(vp);
355 }
356 KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);
357
358 return error;
359 }
360
361
362 void
363 vnode_startwrite(vnode_t vp) {
364
365 OSAddAtomic(1, &vp->v_numoutput);
366 }
367
368
369 void
370 vnode_writedone(vnode_t vp)
371 {
372 if (vp) {
373 OSAddAtomic(-1, &vp->v_numoutput);
374
375 if (vp->v_numoutput <= 1) {
376 int need_wakeup = 0;
377
378 vnode_lock_spin(vp);
379
380 if (vp->v_numoutput < 0)
381 panic("vnode_writedone: numoutput < 0");
382
383 if ((vp->v_flag & VTHROTTLED) && (vp->v_numoutput <= 1)) {
384 vp->v_flag &= ~VTHROTTLED;
385 need_wakeup = 1;
386 }
387 if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
388 vp->v_flag &= ~VBWAIT;
389 need_wakeup = 1;
390 }
391 vnode_unlock(vp);
392
393 if (need_wakeup)
394 wakeup((caddr_t)&vp->v_numoutput);
395 }
396 }
397 }
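
/*
 * Usage sketch (illustrative only): vnode_startwrite()/vnode_writedone()
 * bracket each asynchronous write against a vnode, and
 * vnode_waitforwrites() lets a throttled producer sleep until the
 * in-flight count drains.  The issuing helper below is hypothetical:
 *
 *	vnode_startwrite(vp);			// account for one pending write
 *	if (issue_async_write(vp, bp))		// hypothetical I/O submission
 *		vnode_writedone(vp);		// undo the count if nothing was issued
 *	...
 *	// sleep until at most 16 writes remain; timeout in 10 msec units
 *	vnode_waitforwrites(vp, 16, 0, 100, "example_throttle");
 */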
398
399
400
401 int
402 vnode_hasdirtyblks(vnode_t vp)
403 {
404 struct cl_writebehind *wbp;
405
406 /*
407 * Not taking the buf_mtxp as there is little
408 * point doing it. Even if the lock is taken the
409 * state can change right after that. If their
410 * needs to be a synchronization, it must be driven
411 * by the caller
412 */
413 if (vp->v_dirtyblkhd.lh_first)
414 return (1);
415
416 if (!UBCINFOEXISTS(vp))
417 return (0);
418
419 wbp = vp->v_ubcinfo->cl_wbehind;
420
421 if (wbp && (wbp->cl_number || wbp->cl_scmap))
422 return (1);
423
424 return (0);
425 }
426
427 int
428 vnode_hascleanblks(vnode_t vp)
429 {
430 /*
431 * Not taking the buf_mtxp as there is little
432 * point doing it. Even if the lock is taken the
433 * state can change right after that. If there
434 * needs to be synchronization, it must be driven
435 * by the caller
436 */
437 if (vp->v_cleanblkhd.lh_first)
438 return (1);
439 return (0);
440 }
441
442 void
443 vnode_iterate_setup(mount_t mp)
444 {
445 while (mp->mnt_lflag & MNT_LITER) {
446 mp->mnt_lflag |= MNT_LITERWAIT;
447 msleep((caddr_t)mp, &mp->mnt_mlock, PVFS, "vnode_iterate_setup", NULL);
448 }
449
450 mp->mnt_lflag |= MNT_LITER;
451
452 }
453
454 int
455 vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
456 {
457 vnode_t vp;
458
459 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
460 /* disable preflight only for udf, a hack to be removed after 4073176 is fixed */
461 if (vp->v_tag == VT_UDF)
462 return 0;
463 if (vp->v_type == VDIR)
464 continue;
465 if (vp == skipvp)
466 continue;
467 if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
468 (vp->v_flag & VNOFLUSH)))
469 continue;
470 if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP))
471 continue;
472 if ((flags & WRITECLOSE) &&
473 (vp->v_writecount == 0 || vp->v_type != VREG))
474 continue;
475 /* Look for busy vnode */
476 if (((vp->v_usecount != 0) &&
477 ((vp->v_usecount - vp->v_kusecount) != 0)))
478 return(1);
479 }
480
481 return(0);
482 }
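
/*
 * Note: the busy check above only counts usecount references beyond the
 * kernel-internal kusecount ones, i.e. references an unmount could not
 * silently discard.
 */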
483
484 /*
485 * This routine prepares for iteration by moving all the vnodes to the worker queue.
486 * Called with the mount lock held.
487 */
488 int
489 vnode_iterate_prepare(mount_t mp)
490 {
491 vnode_t vp;
492
493 if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
494 /* nothing to do */
495 return (0);
496 }
497
498 vp = TAILQ_FIRST(&mp->mnt_vnodelist);
499 vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
500 mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
501 mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;
502
503 TAILQ_INIT(&mp->mnt_vnodelist);
504 if (mp->mnt_newvnodes.tqh_first != NULL)
505 panic("vnode_iterate_prepare: newvnode when entering vnode");
506 TAILQ_INIT(&mp->mnt_newvnodes);
507
508 return (1);
509 }
510
511
512 /* called with mount lock held */
513 int
514 vnode_iterate_reloadq(mount_t mp)
515 {
516 int moved = 0;
517
518 /* add the remaining entries in workerq to the end of mount vnode list */
519 if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
520 struct vnode * mvp;
521 mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);
522
523 /* Join the workerqueue entries to the mount vnode list */
524 if (mvp)
525 mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
526 else
527 mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
528 mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
529 mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
530 TAILQ_INIT(&mp->mnt_workerqueue);
531 }
532
533 /* add the newvnodes to the head of mount vnode list */
534 if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
535 struct vnode * nlvp;
536 nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);
537
538 mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
539 nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
540 if(mp->mnt_vnodelist.tqh_first)
541 mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
542 else
543 mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
544 mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
545 TAILQ_INIT(&mp->mnt_newvnodes);
546 moved = 1;
547 }
548
549 return(moved);
550 }
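
/*
 * The pointer surgery above is effectively a hand-rolled TAILQ_CONCAT:
 * leftover workerqueue entries are appended to the tail of the mount
 * vnode list, and vnodes created during the iteration (parked on
 * mnt_newvnodes by insmntque) are spliced back onto its head, so a
 * restarted scan neither loses nor revisits a vnode.
 */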
551
552
553 void
554 vnode_iterate_clear(mount_t mp)
555 {
556 mp->mnt_lflag &= ~MNT_LITER;
557 if (mp->mnt_lflag & MNT_LITERWAIT) {
558 mp->mnt_lflag &= ~MNT_LITERWAIT;
559 wakeup(mp);
560 }
561 }
562
563
564 int
565 vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
566 void *arg)
567 {
568 struct vnode *vp;
569 int vid, retval;
570 int ret = 0;
571
572 mount_lock(mp);
573
574 vnode_iterate_setup(mp);
575
576 /* if it returns 0 then there is nothing to do */
577 retval = vnode_iterate_prepare(mp);
578
579 if (retval == 0) {
580 vnode_iterate_clear(mp);
581 mount_unlock(mp);
582 return(ret);
583 }
584
585 /* iterate over all the vnodes */
586 while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
587 vp = TAILQ_FIRST(&mp->mnt_workerqueue);
588 TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
589 TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
590 vid = vp->v_id;
591 if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
592 continue;
593 }
594 mount_unlock(mp);
595
596 if ( vget_internal(vp, vid, (flags | VNODE_NODEAD| VNODE_WITHID | VNODE_NOSUSPEND))) {
597 mount_lock(mp);
598 continue;
599 }
600 if (flags & VNODE_RELOAD) {
601 /*
602 * we're reloading the filesystem
603 * cast out any inactive vnodes...
604 */
605 if (vnode_reload(vp)) {
606 /* vnode will be recycled on the refcount drop */
607 vnode_put(vp);
608 mount_lock(mp);
609 continue;
610 }
611 }
612
613 retval = callout(vp, arg);
614
615 switch (retval) {
616 case VNODE_RETURNED:
617 case VNODE_RETURNED_DONE:
618 vnode_put(vp);
619 if (retval == VNODE_RETURNED_DONE) {
620 mount_lock(mp);
621 ret = 0;
622 goto out;
623 }
624 break;
625
626 case VNODE_CLAIMED_DONE:
627 mount_lock(mp);
628 ret = 0;
629 goto out;
630 case VNODE_CLAIMED:
631 default:
632 break;
633 }
634 mount_lock(mp);
635 }
636
637 out:
638 (void)vnode_iterate_reloadq(mp);
639 vnode_iterate_clear(mp);
640 mount_unlock(mp);
641 return (ret);
642 }
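
/*
 * Usage sketch (illustrative only): a minimal vnode_iterate() callout.
 * The iterator has already taken an iocount on the vnode, so the callout
 * can use it directly; returning VNODE_RETURNED hands that iocount back
 * to the iterator, and VNODE_RETURNED_DONE additionally ends the walk.
 * The helper name is hypothetical:
 */
#if 0
static int
example_fsync_callout(struct vnode *vp, void *arg)
{
	vfs_context_t ctx = arg;

	if (vnode_hasdirtyblks(vp))
		(void) VNOP_FSYNC(vp, MNT_NOWAIT, ctx);

	return (VNODE_RETURNED);
}
/* invoked as: vnode_iterate(mp, 0, example_fsync_callout, ctx); */
#endif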
643
644 void
645 mount_lock_renames(mount_t mp)
646 {
647 lck_mtx_lock(&mp->mnt_renamelock);
648 }
649
650 void
651 mount_unlock_renames(mount_t mp)
652 {
653 lck_mtx_unlock(&mp->mnt_renamelock);
654 }
655
656 void
657 mount_lock(mount_t mp)
658 {
659 lck_mtx_lock(&mp->mnt_mlock);
660 }
661
662 void
663 mount_lock_spin(mount_t mp)
664 {
665 lck_mtx_lock_spin(&mp->mnt_mlock);
666 }
667
668 void
669 mount_unlock(mount_t mp)
670 {
671 lck_mtx_unlock(&mp->mnt_mlock);
672 }
673
674
675 void
676 mount_ref(mount_t mp, int locked)
677 {
678 if ( !locked)
679 mount_lock_spin(mp);
680
681 mp->mnt_count++;
682
683 if ( !locked)
684 mount_unlock(mp);
685 }
686
687
688 void
689 mount_drop(mount_t mp, int locked)
690 {
691 if ( !locked)
692 mount_lock_spin(mp);
693
694 mp->mnt_count--;
695
696 if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN))
697 wakeup(&mp->mnt_lflag);
698
699 if ( !locked)
700 mount_unlock(mp);
701 }
702
703
704 int
705 mount_iterref(mount_t mp, int locked)
706 {
707 int retval = 0;
708
709 if (!locked)
710 mount_list_lock();
711 if (mp->mnt_iterref < 0) {
712 retval = 1;
713 } else {
714 mp->mnt_iterref++;
715 }
716 if (!locked)
717 mount_list_unlock();
718 return(retval);
719 }
720
721 int
722 mount_isdrained(mount_t mp, int locked)
723 {
724 int retval;
725
726 if (!locked)
727 mount_list_lock();
728 if (mp->mnt_iterref < 0)
729 retval = 1;
730 else
731 retval = 0;
732 if (!locked)
733 mount_list_unlock();
734 return(retval);
735 }
736
737 void
738 mount_iterdrop(mount_t mp)
739 {
740 mount_list_lock();
741 mp->mnt_iterref--;
742 wakeup(&mp->mnt_iterref);
743 mount_list_unlock();
744 }
745
746 void
747 mount_iterdrain(mount_t mp)
748 {
749 mount_list_lock();
750 while (mp->mnt_iterref)
751 msleep((caddr_t)&mp->mnt_iterref, mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL);
752 /* mount iterations drained */
753 mp->mnt_iterref = -1;
754 mount_list_unlock();
755 }
756 void
757 mount_iterreset(mount_t mp)
758 {
759 mount_list_lock();
760 if (mp->mnt_iterref == -1)
761 mp->mnt_iterref = 0;
762 mount_list_unlock();
763 }
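
/*
 * mnt_iterref, always manipulated under the mount list lock, acts as a
 * small state machine: a positive value counts active iterators taken by
 * mount_iterref(), zero means idle, and -1 (set by mount_iterdrain() once
 * the count drains) makes further mount_iterref() calls fail until
 * mount_iterreset() re-arms the field.
 */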
764
765 /* always called with mount lock held */
766 int
767 mount_refdrain(mount_t mp)
768 {
769 if (mp->mnt_lflag & MNT_LDRAIN)
770 panic("already in drain");
771 mp->mnt_lflag |= MNT_LDRAIN;
772
773 while (mp->mnt_count)
774 msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL);
775
776 if (mp->mnt_vnodelist.tqh_first != NULL)
777 panic("mount_refdrain: dangling vnode");
778
779 mp->mnt_lflag &= ~MNT_LDRAIN;
780
781 return(0);
782 }
783
784
785 /*
786 * Mark a mount point as busy. Used to synchronize access and to delay
787 * unmounting.
788 */
789 int
790 vfs_busy(mount_t mp, int flags)
791 {
792
793 restart:
794 if (mp->mnt_lflag & MNT_LDEAD)
795 return(ENOENT);
796
797 if (mp->mnt_lflag & MNT_LUNMOUNT) {
798 if (flags & LK_NOWAIT)
799 return (ENOENT);
800
801 mount_lock(mp);
802
803 if (mp->mnt_lflag & MNT_LDEAD) {
804 mount_unlock(mp);
805 return(ENOENT);
806 }
807 if (mp->mnt_lflag & MNT_LUNMOUNT) {
808 mp->mnt_lflag |= MNT_LWAIT;
809 /*
810 * Since all busy locks are shared except the exclusive
811 * lock granted when unmounting, the only place that a
812 * wakeup needs to be done is at the release of the
813 * exclusive lock at the end of dounmount.
814 */
815 msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
816 return (ENOENT);
817 }
818 mount_unlock(mp);
819 }
820
821 lck_rw_lock_shared(&mp->mnt_rwlock);
822
823 /*
824 * until we are granted the rwlock, it's possible for the mount point to
825 * change state, so reevaluate before granting the vfs_busy
826 */
827 if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
828 lck_rw_done(&mp->mnt_rwlock);
829 goto restart;
830 }
831 return (0);
832 }
833
834 /*
835 * Free a busy filesystem.
836 */
837
838 void
839 vfs_unbusy(mount_t mp)
840 {
841 lck_rw_done(&mp->mnt_rwlock);
842 }
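
/*
 * Usage sketch (illustrative only): vfs_busy()/vfs_unbusy() bracket work
 * that must not race an unmount; do_mount_work() is hypothetical:
 *
 *	if (vfs_busy(mp, LK_NOWAIT) != 0)
 *		return (ENOENT);	// mount is dead or being unmounted
 *	error = do_mount_work(mp);
 *	vfs_unbusy(mp);
 */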
843
844
845
846 static void
847 vfs_rootmountfailed(mount_t mp) {
848
849 mount_list_lock();
850 mp->mnt_vtable->vfc_refcount--;
851 mount_list_unlock();
852
853 vfs_unbusy(mp);
854
855 mount_lock_destroy(mp);
856
857 #if CONFIG_MACF
858 mac_mount_label_destroy(mp);
859 #endif
860
861 FREE_ZONE(mp, sizeof(struct mount), M_MOUNT);
862 }
863
864 /*
865 * Lookup a filesystem type, and if found allocate and initialize
866 * a mount structure for it.
867 *
868 * Devname is usually updated by mount(8) after booting.
869 */
870 static mount_t
871 vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname)
872 {
873 mount_t mp;
874
875 mp = _MALLOC_ZONE(sizeof(struct mount), M_MOUNT, M_WAITOK);
876 bzero((char *)mp, sizeof(struct mount));
877
878 /* Initialize the default IO constraints */
879 mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
880 mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
881 mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
882 mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
883 mp->mnt_devblocksize = DEV_BSIZE;
884 mp->mnt_alignmentmask = PAGE_MASK;
885 mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
886 mp->mnt_ioscale = 1;
887 mp->mnt_ioflags = 0;
888 mp->mnt_realrootvp = NULLVP;
889 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
890
891 mount_lock_init(mp);
892 (void)vfs_busy(mp, LK_NOWAIT);
893
894 TAILQ_INIT(&mp->mnt_vnodelist);
895 TAILQ_INIT(&mp->mnt_workerqueue);
896 TAILQ_INIT(&mp->mnt_newvnodes);
897
898 mp->mnt_vtable = vfsp;
899 mp->mnt_op = vfsp->vfc_vfsops;
900 mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS;
901 mp->mnt_vnodecovered = NULLVP;
902 //mp->mnt_stat.f_type = vfsp->vfc_typenum;
903 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
904
905 mount_list_lock();
906 vfsp->vfc_refcount++;
907 mount_list_unlock();
908
909 strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
910 mp->mnt_vfsstat.f_mntonname[0] = '/';
911 /* XXX const poisoning layering violation */
912 (void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);
913
914 #if CONFIG_MACF
915 mac_mount_label_init(mp);
916 mac_mount_label_associate(vfs_context_kernel(), mp);
917 #endif
918 return (mp);
919 }
920
921 errno_t
922 vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp)
923 {
924 struct vfstable *vfsp;
925
926 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
927 if (!strncmp(vfsp->vfc_name, fstypename,
928 sizeof(vfsp->vfc_name)))
929 break;
930 if (vfsp == NULL)
931 return (ENODEV);
932
933 *mpp = vfs_rootmountalloc_internal(vfsp, devname);
934
935 if (*mpp)
936 return (0);
937
938 return (ENOMEM);
939 }
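
/*
 * Usage sketch (illustrative only): look up a filesystem type by name and
 * obtain a busied, half-initialized mount for it ("hfs" is just an example):
 *
 *	mount_t mp;
 *
 *	if (vfs_rootmountalloc("hfs", "root_device", &mp) == 0) {
 *		... mount it, or vfs_unbusy(mp) and tear it down ...
 *	}
 */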
940
941
942 /*
943 * Find an appropriate filesystem to use for the root. If a filesystem
944 * has not been preselected, walk through the list of known filesystems
945 * trying those that have mountroot routines, and try them until one
946 * works or we have tried them all.
947 */
948 extern int (*mountroot)(void);
949
950 int
951 vfs_mountroot(void)
952 {
953 #if CONFIG_MACF
954 struct vnode *vp;
955 #endif
956 struct vfstable *vfsp;
957 vfs_context_t ctx = vfs_context_kernel();
958 struct vfs_attr vfsattr;
959 int error;
960 mount_t mp;
961 vnode_t bdevvp_rootvp;
962
963 if (mountroot != NULL) {
964 /*
965 * used for netboot which follows a different set of rules
966 */
967 error = (*mountroot)();
968 return (error);
969 }
970 if ((error = bdevvp(rootdev, &rootvp))) {
971 printf("vfs_mountroot: can't setup bdevvp\n");
972 return (error);
973 }
974 /*
975 * 4951998 - code we call in vfc_mountroot may replace rootvp
976 * so keep a local copy for some housekeeping.
977 */
978 bdevvp_rootvp = rootvp;
979
980 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
981 if (vfsp->vfc_mountroot == NULL)
982 continue;
983
984 mp = vfs_rootmountalloc_internal(vfsp, "root_device");
985 mp->mnt_devvp = rootvp;
986
987 if ((error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx)) == 0) {
988 if ( bdevvp_rootvp != rootvp ) {
989 /*
990 * rootvp changed...
991 * bump the iocount and fix up mnt_devvp for the
992 * new rootvp (it will already have a usecount taken)...
993 * drop the iocount and the usecount on the original
994 * since we are no longer going to use it...
995 */
996 vnode_getwithref(rootvp);
997 mp->mnt_devvp = rootvp;
998
999 vnode_rele(bdevvp_rootvp);
1000 vnode_put(bdevvp_rootvp);
1001 }
1002 mp->mnt_devvp->v_specflags |= SI_MOUNTEDON;
1003
1004 vfs_unbusy(mp);
1005
1006 mount_list_add(mp);
1007
1008 /*
1009 * cache the IO attributes for the underlying physical media...
1010 * an error return indicates the underlying driver doesn't
1011 * support all the queries necessary... however, reasonable
1012 * defaults will have been set, so no reason to bail or care
1013 */
1014 vfs_init_io_attributes(rootvp, mp);
1015
1016 /*
1017 * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
1018 */
1019 if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
1020 mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
1021 }
1022 if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) {
1023 mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
1024 }
1025
1026 /*
1027 * Probe root file system for additional features.
1028 */
1029 (void)VFS_START(mp, 0, ctx);
1030
1031 VFSATTR_INIT(&vfsattr);
1032 VFSATTR_WANTED(&vfsattr, f_capabilities);
1033 if (vfs_getattr(mp, &vfsattr, ctx) == 0 &&
1034 VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
1035 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
1036 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
1037 mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
1038 }
1039 #if NAMEDSTREAMS
1040 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) &&
1041 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) {
1042 mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
1043 }
1044 #endif
1045 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) &&
1046 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) {
1047 mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
1048 }
1049 }
1050
1051 /*
1052 * get rid of iocount reference returned
1053 * by bdevvp (or picked up by us on the substituted
1054 * rootvp)... it (or we) will have also taken
1055 * a usecount reference which we want to keep
1056 */
1057 vnode_put(rootvp);
1058
1059 #if CONFIG_MACF
1060 if ((vfs_flags(mp) & MNT_MULTILABEL) == 0)
1061 return (0);
1062
1063 error = VFS_ROOT(mp, &vp, ctx);
1064 if (error) {
1065 printf("%s() VFS_ROOT() returned %d\n",
1066 __func__, error);
1067 dounmount(mp, MNT_FORCE, 0, ctx);
1068 goto fail;
1069 }
1070 error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
1071 /*
1072 * get rid of reference provided by VFS_ROOT
1073 */
1074 vnode_put(vp);
1075
1076 if (error) {
1077 printf("%s() vnode_label() returned %d\n",
1078 __func__, error);
1079 dounmount(mp, MNT_FORCE, 0, ctx);
1080 goto fail;
1081 }
1082 #endif
1083 return (0);
1084 }
1085 #if CONFIG_MACF
1086 fail:
1087 #endif
1088 vfs_rootmountfailed(mp);
1089
1090 if (error != EINVAL)
1091 printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
1092 }
1093 return (ENODEV);
1094 }
1095
1096 /*
1097 * Lookup a mount point by filesystem identifier.
1098 */
1099
1100 struct mount *
1101 vfs_getvfs(fsid_t *fsid)
1102 {
1103 return (mount_list_lookupby_fsid(fsid, 0, 0));
1104 }
1105
1106 static struct mount *
1107 vfs_getvfs_locked(fsid_t *fsid)
1108 {
1109 return(mount_list_lookupby_fsid(fsid, 1, 0));
1110 }
1111
1112 struct mount *
1113 vfs_getvfs_by_mntonname(char *path)
1114 {
1115 mount_t retmp = (mount_t)0;
1116 mount_t mp;
1117
1118 mount_list_lock();
1119 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1120 if (!strncmp(mp->mnt_vfsstat.f_mntonname, path,
1121 sizeof(mp->mnt_vfsstat.f_mntonname))) {
1122 retmp = mp;
1123 goto out;
1124 }
1125 }
1126 out:
1127 mount_list_unlock();
1128 return (retmp);
1129 }
1130
1131 /* generation number for creation of new fsids */
1132 u_short mntid_gen = 0;
1133 /*
1134 * Get a new unique fsid
1135 */
1136 void
1137 vfs_getnewfsid(struct mount *mp)
1138 {
1139
1140 fsid_t tfsid;
1141 int mtype;
1142 mount_t nmp;
1143
1144 mount_list_lock();
1145
1146 /* generate a new fsid */
1147 mtype = mp->mnt_vtable->vfc_typenum;
1148 if (++mntid_gen == 0)
1149 mntid_gen++;
1150 tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
1151 tfsid.val[1] = mtype;
1152
1153 TAILQ_FOREACH(nmp, &mountlist, mnt_list) {
1154 while (vfs_getvfs_locked(&tfsid)) {
1155 if (++mntid_gen == 0)
1156 mntid_gen++;
1157 tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
1158 }
1159 }
1160 mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0];
1161 mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1];
1162 mount_list_unlock();
1163 }
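
/*
 * The fsid built above packs the filesystem type into both halves:
 * val[0] is makedev(nblkdev + vfc_typenum, generation) and val[1] is the
 * vfc_typenum itself, with the generation bumped until the pair is unique
 * among the currently mounted filesystems.
 */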
1164
1165 /*
1166 * Routines having to do with the management of the vnode table.
1167 */
1168 extern int (**dead_vnodeop_p)(void *);
1169 long numvnodes, freevnodes, deadvnodes;
1170
1171
1172 /*
1173 * Move a vnode from one mount queue to another.
1174 */
1175 static void
1176 insmntque(vnode_t vp, mount_t mp)
1177 {
1178 mount_t lmp;
1179 /*
1180 * Delete from old mount point vnode list, if on one.
1181 */
1182 if ( (lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
1183 if ((vp->v_lflag & VNAMED_MOUNT) == 0)
1184 panic("insmntque: vp not in mount vnode list");
1185 vp->v_lflag &= ~VNAMED_MOUNT;
1186
1187 mount_lock_spin(lmp);
1188
1189 mount_drop(lmp, 1);
1190
1191 if (vp->v_mntvnodes.tqe_next == NULL) {
1192 if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp)
1193 TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
1194 else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp)
1195 TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
1196 else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp)
1197 TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
1198 } else {
1199 vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
1200 *vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
1201 }
1202 vp->v_mntvnodes.tqe_next = NULL;
1203 vp->v_mntvnodes.tqe_prev = NULL;
1204 mount_unlock(lmp);
1205 return;
1206 }
1207
1208 /*
1209 * Insert into list of vnodes for the new mount point, if available.
1210 */
1211 if ((vp->v_mount = mp) != NULL) {
1212 mount_lock_spin(mp);
1213 if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0))
1214 panic("vp already in mount list");
1215 if (mp->mnt_lflag & MNT_LITER)
1216 TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
1217 else
1218 TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
1219 if (vp->v_lflag & VNAMED_MOUNT)
1220 panic("insmntque: vp already in mount vnode list");
1221 vp->v_lflag |= VNAMED_MOUNT;
1222 mount_ref(mp, 1);
1223 mount_unlock(mp);
1224 }
1225 }
1226
1227
1228 /*
1229 * Create a vnode for a block device.
1230 * Used for root filesystem, argdev, and swap areas.
1231 * Also used for memory file system special devices.
1232 */
1233 int
1234 bdevvp(dev_t dev, vnode_t *vpp)
1235 {
1236 vnode_t nvp;
1237 int error;
1238 struct vnode_fsparam vfsp;
1239 struct vfs_context context;
1240
1241 if (dev == NODEV) {
1242 *vpp = NULLVP;
1243 return (ENODEV);
1244 }
1245
1246 context.vc_thread = current_thread();
1247 context.vc_ucred = FSCRED;
1248
1249 vfsp.vnfs_mp = (struct mount *)0;
1250 vfsp.vnfs_vtype = VBLK;
1251 vfsp.vnfs_str = "bdevvp";
1252 vfsp.vnfs_dvp = NULL;
1253 vfsp.vnfs_fsnode = NULL;
1254 vfsp.vnfs_cnp = NULL;
1255 vfsp.vnfs_vops = spec_vnodeop_p;
1256 vfsp.vnfs_rdev = dev;
1257 vfsp.vnfs_filesize = 0;
1258
1259 vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;
1260
1261 vfsp.vnfs_marksystem = 0;
1262 vfsp.vnfs_markroot = 0;
1263
1264 if ( (error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp)) ) {
1265 *vpp = NULLVP;
1266 return (error);
1267 }
1268 vnode_lock_spin(nvp);
1269 nvp->v_flag |= VBDEVVP;
1270 nvp->v_tag = VT_NON; /* set this to VT_NON so during aliasing it can be replaced */
1271 vnode_unlock(nvp);
1272 if ( (error = vnode_ref(nvp)) ) {
1273 panic("bdevvp failed: vnode_ref");
1274 return (error);
1275 }
1276 if ( (error = VNOP_FSYNC(nvp, MNT_WAIT, &context)) ) {
1277 panic("bdevvp failed: fsync");
1278 return (error);
1279 }
1280 if ( (error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0)) ) {
1281 panic("bdevvp failed: invalidateblks");
1282 return (error);
1283 }
1284
1285 #if CONFIG_MACF
1286 /*
1287 * XXXMAC: We can't put a MAC check here, the system will
1288 * panic without this vnode.
1289 */
1290 #endif /* MAC */
1291
1292 if ( (error = VNOP_OPEN(nvp, FREAD, &context)) ) {
1293 panic("bdevvp failed: open");
1294 return (error);
1295 }
1296 *vpp = nvp;
1297
1298 return (0);
1299 }
1300
1301 /*
1302 * Check to see if the new vnode represents a special device
1303 * for which we already have a vnode (either because of
1304 * bdevvp() or because of a different vnode representing
1305 * the same block device). If such an alias exists, deallocate
1306 * the existing contents and return the aliased vnode. The
1307 * caller is responsible for filling it with its new contents.
1308 */
1309 static vnode_t
1310 checkalias(struct vnode *nvp, dev_t nvp_rdev)
1311 {
1312 struct vnode *vp;
1313 struct vnode **vpp;
1314 struct specinfo *sin = NULL;
1315 int vid = 0;
1316
1317 vpp = &speclisth[SPECHASH(nvp_rdev)];
1318 loop:
1319 SPECHASH_LOCK();
1320
1321 for (vp = *vpp; vp; vp = vp->v_specnext) {
1322 if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
1323 vid = vp->v_id;
1324 break;
1325 }
1326 }
1327 SPECHASH_UNLOCK();
1328
1329 if (vp) {
1330 found_alias:
1331 if (vnode_getwithvid(vp,vid)) {
1332 goto loop;
1333 }
1334 /*
1335 * Termination state is checked in vnode_getwithvid
1336 */
1337 vnode_lock(vp);
1338
1339 /*
1340 * Alias, but not in use, so flush it out.
1341 */
1342 if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
1343 vnode_reclaim_internal(vp, 1, 1, 0);
1344 vnode_put_locked(vp);
1345 vnode_unlock(vp);
1346 goto loop;
1347 }
1348
1349 }
1350 if (vp == NULL || vp->v_tag != VT_NON) {
1351 if (sin == NULL) {
1352 MALLOC_ZONE(sin, struct specinfo *, sizeof(struct specinfo),
1353 M_SPECINFO, M_WAITOK);
1354 }
1355
1356 nvp->v_specinfo = sin;
1357 bzero(nvp->v_specinfo, sizeof(struct specinfo));
1358 nvp->v_rdev = nvp_rdev;
1359 nvp->v_specflags = 0;
1360 nvp->v_speclastr = -1;
1361
1362 SPECHASH_LOCK();
1363
1364 /* We dropped the lock, someone could have added */
1365 if (vp == NULLVP) {
1366 for (vp = *vpp; vp; vp = vp->v_specnext) {
1367 if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
1368 vid = vp->v_id;
1369 SPECHASH_UNLOCK();
1370 goto found_alias;
1371 }
1372 }
1373 }
1374
1375 nvp->v_hashchain = vpp;
1376 nvp->v_specnext = *vpp;
1377 *vpp = nvp;
1378
1379 if (vp != NULLVP) {
1380 nvp->v_specflags |= SI_ALIASED;
1381 vp->v_specflags |= SI_ALIASED;
1382 SPECHASH_UNLOCK();
1383 vnode_put_locked(vp);
1384 vnode_unlock(vp);
1385 } else {
1386 SPECHASH_UNLOCK();
1387 }
1388
1389 return (NULLVP);
1390 }
1391
1392 if (sin) {
1393 FREE_ZONE(sin, sizeof(struct specinfo), M_SPECINFO);
1394 }
1395
1396 if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0)
1397 return(vp);
1398
1399 panic("checkalias with VT_NON vp that shouldn't: %p", vp);
1400
1401 return (vp);
1402 }
1403
1404
1405 /*
1406 * Get a reference on a particular vnode and lock it if requested.
1407 * If the vnode was on the inactive list, remove it from the list.
1408 * If the vnode was on the free list, remove it from the list and
1409 * move it to inactive list as needed.
1410 * The vnode lock bit is set if the vnode is being eliminated in
1411 * vgone. The process is awakened when the transition is completed,
1412 * and an error returned to indicate that the vnode is no longer
1413 * usable (possibly having been changed to a new file system type).
1414 */
1415 int
1416 vget_internal(vnode_t vp, int vid, int vflags)
1417 {
1418 int error = 0;
1419 int vpid;
1420
1421 vnode_lock_spin(vp);
1422
1423 if (vflags & VNODE_WITHID)
1424 vpid = vid;
1425 else
1426 vpid = vp->v_id; // save off the original v_id
1427
1428 if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0))
1429 /*
1430 * vnode to be returned only if it has writers opened
1431 */
1432 error = EINVAL;
1433 else
1434 error = vnode_getiocount(vp, vpid, vflags);
1435
1436 vnode_unlock(vp);
1437
1438 return (error);
1439 }
1440
1441 /*
1442 * Returns: 0 Success
1443 * ENOENT No such file or directory [terminating]
1444 */
1445 int
1446 vnode_ref(vnode_t vp)
1447 {
1448
1449 return (vnode_ref_ext(vp, 0));
1450 }
1451
1452 /*
1453 * Returns: 0 Success
1454 * ENOENT No such file or directory [terminating]
1455 */
1456 int
1457 vnode_ref_ext(vnode_t vp, int fmode)
1458 {
1459 int error = 0;
1460
1461 vnode_lock_spin(vp);
1462
1463 /*
1464 * once all the current call sites have been fixed to ensure they have
1465 * taken an iocount, we can toughen this assert up and insist that the
1466 * iocount is non-zero... a non-zero usecount doesn't ensure correctness
1467 */
1468 if (vp->v_iocount <= 0 && vp->v_usecount <= 0)
1469 panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);
1470
1471 /*
1472 * if you are the owner of drain/termination, can acquire usecount
1473 */
1474 if ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) {
1475 if (vp->v_owner != current_thread()) {
1476 error = ENOENT;
1477 goto out;
1478 }
1479 }
1480 vp->v_usecount++;
1481
1482 if (fmode & FWRITE) {
1483 if (++vp->v_writecount <= 0)
1484 panic("vnode_ref_ext: v_writecount");
1485 }
1486 if (fmode & O_EVTONLY) {
1487 if (++vp->v_kusecount <= 0)
1488 panic("vnode_ref_ext: v_kusecount");
1489 }
1490 if (vp->v_flag & VRAGE) {
1491 struct uthread *ut;
1492
1493 ut = get_bsdthread_info(current_thread());
1494
1495 if ( !(current_proc()->p_lflag & P_LRAGE_VNODES) &&
1496 !(ut->uu_flag & UT_RAGE_VNODES)) {
1497 /*
1498 * a 'normal' process accessed this vnode
1499 * so make sure it's no longer marked
1500 * for rapid aging... also, make sure
1501 * it gets removed from the rage list...
1502 * when v_usecount drops back to 0, it
1503 * will be put back on the real free list
1504 */
1505 vp->v_flag &= ~VRAGE;
1506 vp->v_references = 0;
1507 vnode_list_remove(vp);
1508 }
1509 }
1510 out:
1511 vnode_unlock(vp);
1512
1513 return (error);
1514 }
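
/*
 * Usage sketch (illustrative only): a usecount taken here is a long-term
 * reference and must be balanced with vnode_rele(); short-lived users rely
 * on an iocount instead.  A typical open-style sequence:
 *
 *	if (vnode_getwithvid(vp, vid) == 0) {	// iocount, identity-checked
 *		if (vnode_ref(vp) == 0) {	// long-term usecount
 *			... stash vp for later use ...
 *		}
 *		vnode_put(vp);			// drop the iocount
 *	}
 *	...
 *	vnode_rele(vp);				// when the long-term use ends
 */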
1515
1516
1517 /*
1518 * put the vnode on appropriate free list.
1519 * called with vnode LOCKED
1520 */
1521 static void
1522 vnode_list_add(vnode_t vp)
1523 {
1524 #if DIAGNOSTIC
1525 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1526 #endif
1527 /*
1528 * if it is already on a list or non zero references return
1529 */
1530 if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE))
1531 return;
1532
1533 vnode_list_lock();
1534
1535 if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
1536 /*
1537 * add the new guy to the appropriate end of the RAGE list
1538 */
1539 if ((vp->v_flag & VAGE))
1540 TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
1541 else
1542 TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);
1543
1544 vp->v_listflag |= VLIST_RAGE;
1545 ragevnodes++;
1546
1547 /*
1548 * reset the timestamp for the last inserted vp on the RAGE
1549 * queue to let new_vnode know that it's not ok to start stealing
1550 * from this list... as long as we're actively adding to this list
1551 * we'll push out the vnodes we want to donate to the real free list
1552 * once we stop pushing, we'll let some time elapse before we start
1553 * stealing them in the new_vnode routine
1554 */
1555 microuptime(&rage_tv);
1556 } else {
1557 /*
1558 * if VL_DEAD, insert it at head of the dead list
1559 * else insert at tail of LRU list or at head if VAGE is set
1560 */
1561 if ( (vp->v_lflag & VL_DEAD)) {
1562 TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
1563 vp->v_listflag |= VLIST_DEAD;
1564 deadvnodes++;
1565 } else if ((vp->v_flag & VAGE)) {
1566 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1567 vp->v_flag &= ~VAGE;
1568 freevnodes++;
1569 } else {
1570 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1571 freevnodes++;
1572 }
1573 }
1574 vnode_list_unlock();
1575 }
1576
1577
1578 /*
1579 * remove the vnode from appropriate free list.
1580 * called with vnode LOCKED and
1581 * the list lock held
1582 */
1583 static void
1584 vnode_list_remove_locked(vnode_t vp)
1585 {
1586 if (VONLIST(vp)) {
1587 /*
1588 * the v_listflag field is
1589 * protected by the vnode_list_lock
1590 */
1591 if (vp->v_listflag & VLIST_RAGE)
1592 VREMRAGE("vnode_list_remove", vp);
1593 else if (vp->v_listflag & VLIST_DEAD)
1594 VREMDEAD("vnode_list_remove", vp);
1595 else
1596 VREMFREE("vnode_list_remove", vp);
1597 }
1598 }
1599
1600
1601 /*
1602 * remove the vnode from appropriate free list.
1603 * called with vnode LOCKED
1604 */
1605 static void
1606 vnode_list_remove(vnode_t vp)
1607 {
1608 #if DIAGNOSTIC
1609 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1610 #endif
1611 /*
1612 * we want to avoid taking the list lock
1613 * in the case where we're not on the free
1614 * list... this will be true for most
1615 * directories and any currently in use files
1616 *
1617 * we're guaranteed that we can't go from
1618 * the not-on-list state to the on-list
1619 * state since we hold the vnode lock...
1620 * all calls to vnode_list_add are done
1621 * under the vnode lock... so we can
1622 * check for that condition (the prevalent one)
1623 * without taking the list lock
1624 */
1625 if (VONLIST(vp)) {
1626 vnode_list_lock();
1627 /*
1628 * however, we're not guaranteed that
1629 * we won't go from the on-list state
1630 * to the not-on-list state until we
1631 * hold the vnode_list_lock... this
1632 * is due to "new_vnode" removing vnodes
1633 * from the free list under the list_lock
1634 * w/o the vnode lock... so we need to
1635 * check again whether we're currently
1636 * on the free list
1637 */
1638 vnode_list_remove_locked(vp);
1639
1640 vnode_list_unlock();
1641 }
1642 }
1643
1644
1645 void
1646 vnode_rele(vnode_t vp)
1647 {
1648 vnode_rele_internal(vp, 0, 0, 0);
1649 }
1650
1651
1652 void
1653 vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
1654 {
1655 vnode_rele_internal(vp, fmode, dont_reenter, 0);
1656 }
1657
1658
1659 void
1660 vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
1661 {
1662 if ( !locked)
1663 vnode_lock_spin(vp);
1664 #if DIAGNOSTIC
1665 else
1666 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1667 #endif
1668 if (--vp->v_usecount < 0)
1669 panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
1670
1671 if (fmode & FWRITE) {
1672 if (--vp->v_writecount < 0)
1673 panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
1674 }
1675 if (fmode & O_EVTONLY) {
1676 if (--vp->v_kusecount < 0)
1677 panic("vnode_rele_ext: vp %p kusecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
1678 }
1679 if (vp->v_kusecount > vp->v_usecount)
1680 panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). v_tag = %d, v_type = %d, v_flag = %x.",vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
1681
1682 if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
1683 /*
1684 * vnode is still busy... if we're the last
1685 * usecount, mark for a future call to VNOP_INACTIVE
1686 * when the iocount finally drops to 0
1687 */
1688 if (vp->v_usecount == 0) {
1689 vp->v_lflag |= VL_NEEDINACTIVE;
1690 vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
1691 }
1692 if ( !locked)
1693 vnode_unlock(vp);
1694 return;
1695 }
1696 vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
1697
1698 if ( (vp->v_lflag & (VL_TERMINATE | VL_DEAD)) || dont_reenter) {
1699 /*
1700 * vnode is being cleaned, or
1701 * we've requested that we don't reenter
1702 * the filesystem on this release... in
1703 * this case, we'll mark the vnode aged
1704 * if it's been marked for termination
1705 */
1706 if (dont_reenter) {
1707 if ( !(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM)) )
1708 vp->v_lflag |= VL_NEEDINACTIVE;
1709 vp->v_flag |= VAGE;
1710 }
1711 vnode_list_add(vp);
1712 if ( !locked)
1713 vnode_unlock(vp);
1714 return;
1715 }
1716 /*
1717 * at this point both the iocount and usecount
1718 * are zero
1719 * pick up an iocount so that we can call
1720 * VNOP_INACTIVE with the vnode lock unheld
1721 */
1722 vp->v_iocount++;
1723 #ifdef JOE_DEBUG
1724 record_vp(vp, 1);
1725 #endif
1726 vp->v_lflag &= ~VL_NEEDINACTIVE;
1727 vnode_unlock(vp);
1728
1729 VNOP_INACTIVE(vp, vfs_context_current());
1730
1731 vnode_lock_spin(vp);
1732 /*
1733 * because we dropped the vnode lock to call VNOP_INACTIVE
1734 * the state of the vnode may have changed... we may have
1735 * picked up an iocount, usecount or the MARKTERM may have
1736 * been set... we need to reevaluate the reference counts
1737 * to determine if we can call vnode_reclaim_internal at
1738 * this point... if the reference counts are up, we'll pick
1739 * up the MARKTERM state when they get subsequently dropped
1740 */
1741 if ( (vp->v_iocount == 1) && (vp->v_usecount == 0) &&
1742 ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
1743 struct uthread *ut;
1744
1745 ut = get_bsdthread_info(current_thread());
1746
1747 if (ut->uu_defer_reclaims) {
1748 vp->v_defer_reclaimlist = ut->uu_vreclaims;
1749 ut->uu_vreclaims = vp;
1750 goto defer_reclaim;
1751 }
1752 vnode_lock_convert(vp);
1753 vnode_reclaim_internal(vp, 1, 1, 0);
1754 }
1755 vnode_dropiocount(vp);
1756 vnode_list_add(vp);
1757 defer_reclaim:
1758 if ( !locked)
1759 vnode_unlock(vp);
1760 return;
1761 }
1762
1763 /*
1764 * Remove any vnodes in the vnode table belonging to mount point mp.
1765 *
1766 * If MNT_NOFORCE is specified, there should not be any active ones,
1767 * return error if any are found (nb: this is a user error, not a
1768 * system error). If MNT_FORCE is specified, detach any active vnodes
1769 * that are found.
1770 */
1771 #if DIAGNOSTIC
1772 int busyprt = 0; /* print out busy vnodes */
1773 #if 0
1774 struct ctldebug debug1 = { "busyprt", &busyprt };
1775 #endif /* 0 */
1776 #endif
1777
1778 int
1779 vflush(struct mount *mp, struct vnode *skipvp, int flags)
1780 {
1781 struct vnode *vp;
1782 int busy = 0;
1783 int reclaimed = 0;
1784 int retval;
1785 unsigned int vid;
1786
1787 mount_lock(mp);
1788 vnode_iterate_setup(mp);
1789 /*
1790 * On regular unmounts (not forced) do a
1791 * quick check for vnodes to be in use. This
1792 * preserves the caching of vnodes. The automounter
1793 * tries unmounting every so often to see whether
1794 * it is still busy or not.
1795 */
1796 if (((flags & FORCECLOSE)==0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) {
1797 if (vnode_umount_preflight(mp, skipvp, flags)) {
1798 vnode_iterate_clear(mp);
1799 mount_unlock(mp);
1800 return(EBUSY);
1801 }
1802 }
1803 loop:
1804 /* if it returns 0 then there is nothing to do */
1805 retval = vnode_iterate_prepare(mp);
1806
1807 if (retval == 0) {
1808 vnode_iterate_clear(mp);
1809 mount_unlock(mp);
1810 return(retval);
1811 }
1812
1813 /* iterate over all the vnodes */
1814 while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
1815
1816 vp = TAILQ_FIRST(&mp->mnt_workerqueue);
1817 TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
1818 TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
1819
1820 if ( (vp->v_mount != mp) || (vp == skipvp)) {
1821 continue;
1822 }
1823 vid = vp->v_id;
1824 mount_unlock(mp);
1825
1826 vnode_lock_spin(vp);
1827
1828 if ((vp->v_id != vid) || ((vp->v_lflag & (VL_DEAD | VL_TERMINATE)))) {
1829 vnode_unlock(vp);
1830 mount_lock(mp);
1831 continue;
1832 }
1833
1834 /*
1835 * If requested, skip over vnodes marked VSYSTEM.
1836 * Skip over all vnodes marked VNOFLUSH.
1837 */
1838 if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
1839 (vp->v_flag & VNOFLUSH))) {
1840 vnode_unlock(vp);
1841 mount_lock(mp);
1842 continue;
1843 }
1844 /*
1845 * If requested, skip over vnodes marked VSWAP.
1846 */
1847 if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
1848 vnode_unlock(vp);
1849 mount_lock(mp);
1850 continue;
1851 }
1852 /*
1853 * If requested, skip over vnodes marked VROOT.
1854 */
1855 if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
1856 vnode_unlock(vp);
1857 mount_lock(mp);
1858 continue;
1859 }
1860 /*
1861 * If WRITECLOSE is set, only flush out regular file
1862 * vnodes open for writing.
1863 */
1864 if ((flags & WRITECLOSE) &&
1865 (vp->v_writecount == 0 || vp->v_type != VREG)) {
1866 vnode_unlock(vp);
1867 mount_lock(mp);
1868 continue;
1869 }
1870 /*
1871 * If the real usecount is 0, all we need to do is clear
1872 * out the vnode data structures and we are done.
1873 */
1874 if (((vp->v_usecount == 0) ||
1875 ((vp->v_usecount - vp->v_kusecount) == 0))) {
1876
1877 vnode_lock_convert(vp);
1878 vp->v_iocount++; /* so that drain waits for other iocounts */
1879 #ifdef JOE_DEBUG
1880 record_vp(vp, 1);
1881 #endif
1882 vnode_reclaim_internal(vp, 1, 1, 0);
1883 vnode_dropiocount(vp);
1884 vnode_list_add(vp);
1885 vnode_unlock(vp);
1886
1887 reclaimed++;
1888 mount_lock(mp);
1889 continue;
1890 }
1891 /*
1892 * If FORCECLOSE is set, forcibly close the vnode.
1893 * For block or character devices, revert to an
1894 * anonymous device. For all other files, just kill them.
1895 */
1896 if (flags & FORCECLOSE) {
1897 vnode_lock_convert(vp);
1898
1899 if (vp->v_type != VBLK && vp->v_type != VCHR) {
1900 vp->v_iocount++; /* so that drain waits for other iocounts */
1901 #ifdef JOE_DEBUG
1902 record_vp(vp, 1);
1903 #endif
1904 vnode_reclaim_internal(vp, 1, 1, 0);
1905 vnode_dropiocount(vp);
1906 vnode_list_add(vp);
1907 vnode_unlock(vp);
1908 } else {
1909 vclean(vp, 0);
1910 vp->v_lflag &= ~VL_DEAD;
1911 vp->v_op = spec_vnodeop_p;
1912 vp->v_flag |= VDEVFLUSH;
1913 vnode_unlock(vp);
1914 }
1915 mount_lock(mp);
1916 continue;
1917 }
1918 #if DIAGNOSTIC
1919 if (busyprt)
1920 vprint("vflush: busy vnode", vp);
1921 #endif
1922 vnode_unlock(vp);
1923 mount_lock(mp);
1924 busy++;
1925 }
1926
1927 /* At this point the worker queue is completed */
1928 if (busy && ((flags & FORCECLOSE)==0) && reclaimed) {
1929 busy = 0;
1930 reclaimed = 0;
1931 (void)vnode_iterate_reloadq(mp);
1932 /* returned with mount lock held */
1933 goto loop;
1934 }
1935
1936 /* if new vnodes were created in between retry the reclaim */
1937 if ( vnode_iterate_reloadq(mp) != 0) {
1938 if (!(busy && ((flags & FORCECLOSE)==0)))
1939 goto loop;
1940 }
1941 vnode_iterate_clear(mp);
1942 mount_unlock(mp);
1943
1944 if (busy && ((flags & FORCECLOSE)==0))
1945 return (EBUSY);
1946 return (0);
1947 }
1948
1949 long num_recycledvnodes = 0;
1950 /*
1951 * Disassociate the underlying file system from a vnode.
1952 * The vnode lock is held on entry.
1953 */
1954 static void
1955 vclean(vnode_t vp, int flags)
1956 {
1957 vfs_context_t ctx = vfs_context_current();
1958 int active;
1959 int need_inactive;
1960 int already_terminating;
1961 int clflags = 0;
1962 #if NAMEDSTREAMS
1963 int is_namedstream;
1964 #endif
1965
1966 /*
1967 * Check to see if the vnode is in use.
1968 * If so we have to reference it before we clean it out
1969 * so that its count cannot fall to zero and generate a
1970 * race against ourselves to recycle it.
1971 */
1972 active = vp->v_usecount;
1973
1974 /*
1975 * just in case we missed sending a needed
1976 * VNOP_INACTIVE, we'll do it now
1977 */
1978 need_inactive = (vp->v_lflag & VL_NEEDINACTIVE);
1979
1980 vp->v_lflag &= ~VL_NEEDINACTIVE;
1981
1982 /*
1983 * Prevent the vnode from being recycled or
1984 * brought into use while we clean it out.
1985 */
1986 already_terminating = (vp->v_lflag & VL_TERMINATE);
1987
1988 vp->v_lflag |= VL_TERMINATE;
1989
1990 /*
1991 * remove the vnode from any mount list
1992 * it might be on...
1993 */
1994 insmntque(vp, (struct mount *)0);
1995
1996 #if NAMEDSTREAMS
1997 is_namedstream = vnode_isnamedstream(vp);
1998 #endif
1999
2000 vnode_unlock(vp);
2001
2002 OSAddAtomicLong(1, &num_recycledvnodes);
2003
2004 if (flags & DOCLOSE)
2005 clflags |= IO_NDELAY;
2006 if (flags & REVOKEALL)
2007 clflags |= IO_REVOKE;
2008
2009 if (active && (flags & DOCLOSE))
2010 VNOP_CLOSE(vp, clflags, ctx);
2011
2012 /*
2013 * Clean out any buffers associated with the vnode.
2014 */
2015 if (flags & DOCLOSE) {
2016 #if NFSCLIENT
2017 if (vp->v_tag == VT_NFS)
2018 nfs_vinvalbuf(vp, V_SAVE, ctx, 0);
2019 else
2020 #endif
2021 {
2022 VNOP_FSYNC(vp, MNT_WAIT, ctx);
2023 buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
2024 }
2025 if (UBCINFOEXISTS(vp))
2026 /*
2027 * Clean the pages in VM.
2028 */
2029 (void)ubc_sync_range(vp, (off_t)0, ubc_getsize(vp), UBC_PUSHALL);
2030 }
2031 if (active || need_inactive)
2032 VNOP_INACTIVE(vp, ctx);
2033
2034 #if NAMEDSTREAMS
2035 if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
2036 vnode_t pvp = vp->v_parent;
2037
2038 /* Delete the shadow stream file before we reclaim its vnode */
2039 if (vnode_isshadow(vp)) {
2040 vnode_relenamedstream(pvp, vp, ctx);
2041 }
2042
2043 /*
2044 * No more streams associated with the parent. We
2045 * have a ref on it, so its identity is stable.
2046 * If the parent is on an opaque volume, then we need to know
2047 * whether it has associated named streams.
2048 */
2049 if (vfs_authopaque(pvp->v_mount)) {
2050 vnode_lock_spin(pvp);
2051 pvp->v_lflag &= ~VL_HASSTREAMS;
2052 vnode_unlock(pvp);
2053 }
2054 }
2055 #endif
2056
2057 /*
2058 * Destroy ubc named reference
2059 * cluster_release is done on this path
2060 * along with dropping the reference on the ucred
2061 */
2062 ubc_destroy_named(vp);
2063
2064 /*
2065 * Reclaim the vnode.
2066 */
2067 if (VNOP_RECLAIM(vp, ctx))
2068 panic("vclean: cannot reclaim");
2069
2070 // make sure the name & parent ptrs get cleaned out!
2071 vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE);
2072
2073 vnode_lock(vp);
2074
2075 vp->v_mount = dead_mountp;
2076 vp->v_op = dead_vnodeop_p;
2077 vp->v_tag = VT_NON;
2078 vp->v_data = NULL;
2079
2080 vp->v_lflag |= VL_DEAD;
2081
2082 if (already_terminating == 0) {
2083 vp->v_lflag &= ~VL_TERMINATE;
2084 /*
2085 * Done with purge, notify sleepers of the grim news.
2086 */
2087 if (vp->v_lflag & VL_TERMWANT) {
2088 vp->v_lflag &= ~VL_TERMWANT;
2089 wakeup(&vp->v_lflag);
2090 }
2091 }
2092 }
2093
2094 /*
2095 * Eliminate all activity associated with the requested vnode
2096 * and with all vnodes aliased to the requested vnode.
2097 */
2098 int
2099 #if DIAGNOSTIC
2100 vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
2101 #else
2102 vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
2103 #endif
2104 {
2105 struct vnode *vq;
2106 int vid;
2107
2108 #if DIAGNOSTIC
2109 if ((flags & REVOKEALL) == 0)
2110 panic("vnop_revoke");
2111 #endif
2112
2113 if (vnode_isaliased(vp)) {
2114 /*
2115 * If a vgone (or vclean) is already in progress,
2116 * return an immediate error
2117 */
2118 if (vp->v_lflag & VL_TERMINATE)
2119 return(ENOENT);
2120
2121 /*
2122 * Ensure that vp will not be vgone'd while we
2123 * are eliminating its aliases.
2124 */
2125 SPECHASH_LOCK();
2126 while ((vp->v_specflags & SI_ALIASED)) {
2127 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2128 if (vq->v_rdev != vp->v_rdev ||
2129 vq->v_type != vp->v_type || vp == vq)
2130 continue;
2131 vid = vq->v_id;
2132 SPECHASH_UNLOCK();
2133 if (vnode_getwithvid(vq,vid)){
2134 SPECHASH_LOCK();
2135 break;
2136 }
2137 vnode_reclaim_internal(vq, 0, 1, 0);
2138 vnode_put(vq);
2139 SPECHASH_LOCK();
2140 break;
2141 }
2142 }
2143 SPECHASH_UNLOCK();
2144 }
2145 vnode_reclaim_internal(vp, 0, 0, REVOKEALL);
2146
2147 return (0);
2148 }
2149
2150 /*
2151 * Recycle an unused vnode to the front of the free list.
2152 * If the vnode is still in use, it is marked (VL_MARKTERM) for deferred termination instead.
2153 */
2154 int
2155 vnode_recycle(struct vnode *vp)
2156 {
2157 vnode_lock_spin(vp);
2158
2159 if (vp->v_iocount || vp->v_usecount) {
2160 vp->v_lflag |= VL_MARKTERM;
2161 vnode_unlock(vp);
2162 return(0);
2163 }
2164 vnode_lock_convert(vp);
2165 vnode_reclaim_internal(vp, 1, 0, 0);
2166
2167 vnode_unlock(vp);
2168
2169 return (1);
2170 }
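/*
 * [Editor's note] A minimal usage sketch for vnode_recycle(), assuming the
 * caller holds an iocount or some other guarantee that 'vp' is valid.
 * A return of 0 means the vnode was busy and only marked for termination;
 * 1 means it was reclaimed here and now.
 */
static void
example_try_recycle(vnode_t vp)		/* hypothetical helper */
{
	if (vnode_recycle(vp) == 0) {
		/* busy: reclaim is deferred until iocount/usecount drain */
	} else {
		/* reclaimed immediately; vp now refers to a dead vnode */
	}
}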
2171
2172 static int
2173 vnode_reload(vnode_t vp)
2174 {
2175 vnode_lock_spin(vp);
2176
2177 if ((vp->v_iocount > 1) || vp->v_usecount) {
2178 vnode_unlock(vp);
2179 return(0);
2180 }
2181 if (vp->v_iocount <= 0)
2182 panic("vnode_reload with no iocount %d", vp->v_iocount);
2183
2184 /* mark for release when iocount is dropped */
2185 vp->v_lflag |= VL_MARKTERM;
2186 vnode_unlock(vp);
2187
2188 return (1);
2189 }
2190
2191
2192 static void
2193 vgone(vnode_t vp, int flags)
2194 {
2195 struct vnode *vq;
2196 struct vnode *vx;
2197
2198 /*
2199 * Clean out the filesystem specific data.
2200 * vclean also takes care of removing the
2201 * vnode from any mount list it might be on
2202 */
2203 vclean(vp, flags | DOCLOSE);
2204
2205 /*
2206 * If special device, remove it from special device alias list
2207 * if it is on one.
2208 */
2209 if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
2210 SPECHASH_LOCK();
2211 if (*vp->v_hashchain == vp) {
2212 *vp->v_hashchain = vp->v_specnext;
2213 } else {
2214 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2215 if (vq->v_specnext != vp)
2216 continue;
2217 vq->v_specnext = vp->v_specnext;
2218 break;
2219 }
2220 if (vq == NULL)
2221 panic("missing bdev");
2222 }
2223 if (vp->v_specflags & SI_ALIASED) {
2224 vx = NULL;
2225 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2226 if (vq->v_rdev != vp->v_rdev ||
2227 vq->v_type != vp->v_type)
2228 continue;
2229 if (vx)
2230 break;
2231 vx = vq;
2232 }
2233 if (vx == NULL)
2234 panic("missing alias");
2235 if (vq == NULL)
2236 vx->v_specflags &= ~SI_ALIASED;
2237 vp->v_specflags &= ~SI_ALIASED;
2238 }
2239 SPECHASH_UNLOCK();
2240 {
2241 struct specinfo *tmp = vp->v_specinfo;
2242 vp->v_specinfo = NULL;
2243 FREE_ZONE((void *)tmp, sizeof(struct specinfo), M_SPECINFO);
2244 }
2245 }
2246 }
2247
2248 /*
2249 * Lookup a vnode by device number and check whether a filesystem is mounted on it.
2250 */
2251 int
2252 check_mountedon(dev_t dev, enum vtype type, int *errorp)
2253 {
2254 vnode_t vp;
2255 int rc = 0;
2256 int vid;
2257
2258 loop:
2259 SPECHASH_LOCK();
2260 for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
2261 if (dev != vp->v_rdev || type != vp->v_type)
2262 continue;
2263 vid = vp->v_id;
2264 SPECHASH_UNLOCK();
2265 if (vnode_getwithvid(vp,vid))
2266 goto loop;
2267 vnode_lock_spin(vp);
2268 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
2269 vnode_unlock(vp);
2270 if ((*errorp = vfs_mountedon(vp)) != 0)
2271 rc = 1;
2272 } else
2273 vnode_unlock(vp);
2274 vnode_put(vp);
2275 return(rc);
2276 }
2277 SPECHASH_UNLOCK();
2278 return (0);
2279 }
2280
2281 /*
2282 * Calculate the total number of references to a special device.
2283 */
2284 int
2285 vcount(vnode_t vp)
2286 {
2287 vnode_t vq, vnext;
2288 int count;
2289 int vid;
2290
2291 loop:
2292 if (!vnode_isaliased(vp))
2293 return (vp->v_usecount - vp->v_kusecount);
2294 count = 0;
2295
2296 SPECHASH_LOCK();
2297 /*
2298 * Grab first vnode and its vid.
2299 */
2300 vq = *vp->v_hashchain;
2301 vid = vq ? vq->v_id : 0;
2302
2303 SPECHASH_UNLOCK();
2304
2305 while (vq) {
2306 /*
2307 * Attempt to get the vnode outside the SPECHASH lock.
2308 */
2309 if (vnode_getwithvid(vq, vid)) {
2310 goto loop;
2311 }
2312 vnode_lock(vq);
2313
2314 if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) {
2315 if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) {
2316 /*
2317 * Alias, but not in use, so flush it out.
2318 */
2319 vnode_reclaim_internal(vq, 1, 1, 0);
2320 vnode_put_locked(vq);
2321 vnode_unlock(vq);
2322 goto loop;
2323 }
2324 count += (vq->v_usecount - vq->v_kusecount);
2325 }
2326 vnode_unlock(vq);
2327
2328 SPECHASH_LOCK();
2329 /*
2330 * must do this with the reference still held on 'vq'
2331 * so that it can't be destroyed while we're poking
2332 * through v_specnext
2333 */
2334 vnext = vq->v_specnext;
2335 vid = vnext ? vnext->v_id : 0;
2336
2337 SPECHASH_UNLOCK();
2338
2339 vnode_put(vq);
2340
2341 vq = vnext;
2342 }
2343
2344 return (count);
2345 }
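/*
 * [Editor's note] Illustrative sketch: the classic use of vcount() is in a
 * special-device close path, to detect the last close across all alias
 * vnodes of the same dev_t. 'devvp' and the quiesce step are assumptions.
 */
static void
example_last_close(vnode_t devvp)	/* hypothetical */
{
	if (vcount(devvp) > 0) {
		/* other opens (possibly via alias vnodes) remain */
		return;
	}
	/* last close across all aliases: safe to quiesce the device */
}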
2346
2347 int prtactive = 0; /* 1 => print out reclaim of active vnodes */
2348
2349 /*
2350 * Print out a description of a vnode.
2351 */
2352 static const char *typename[] =
2353 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
2354
2355 void
2356 vprint(const char *label, struct vnode *vp)
2357 {
2358 char sbuf[64];
2359
2360 if (label != NULL)
2361 printf("%s: ", label);
2362 printf("type %s, usecount %d, writecount %d",
2363 typename[vp->v_type], vp->v_usecount, vp->v_writecount);
2364 sbuf[0] = '\0';
2365 if (vp->v_flag & VROOT)
2366 strlcat(sbuf, "|VROOT", sizeof(sbuf));
2367 if (vp->v_flag & VTEXT)
2368 strlcat(sbuf, "|VTEXT", sizeof(sbuf));
2369 if (vp->v_flag & VSYSTEM)
2370 strlcat(sbuf, "|VSYSTEM", sizeof(sbuf));
2371 if (vp->v_flag & VNOFLUSH)
2372 strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf));
2373 if (vp->v_flag & VBWAIT)
2374 strlcat(sbuf, "|VBWAIT", sizeof(sbuf));
2375 if (vnode_isaliased(vp))
2376 strlcat(sbuf, "|VALIASED", sizeof(sbuf));
2377 if (sbuf[0] != '\0')
2378 printf(" flags (%s)", &sbuf[1]);
2379 }
2380
2381
2382 int
2383 vn_getpath(struct vnode *vp, char *pathbuf, int *len)
2384 {
2385 return build_path(vp, pathbuf, *len, len, BUILDPATH_NO_FS_ENTER, vfs_context_current());
2386 }
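/*
 * [Editor's note] Minimal sketch of vn_getpath() usage, assuming the caller
 * holds a valid iocount on 'vp'. On success 'len' is updated to the number
 * of bytes used in the buffer.
 */
static void
example_log_path(vnode_t vp)		/* hypothetical */
{
	char path[MAXPATHLEN];
	int  len = MAXPATHLEN;

	if (vn_getpath(vp, path, &len) == 0)
		printf("vnode path: %s\n", path);
}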
2387
2388 int
2389 vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
2390 {
2391 return build_path(vp, pathbuf, *len, len, 0, vfs_context_current());
2392 }
2393
2394 int
2395 vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
2396 {
2397 return ubc_cs_getcdhash(vp, offset, cdhash);
2398 }
2399
2400
2401 static char *extension_table=NULL;
2402 static int nexts;
2403 static int max_ext_width;
2404
2405 static int
2406 extension_cmp(const void *a, const void *b)
2407 {
2408 return (strlen((const char *)a) - strlen((const char *)b));
2409 }
2410
2411
2412 //
2413 // This is the API LaunchServices uses to inform the kernel
2414 // of the list of package extensions to ignore.
2415 //
2416 // Internally we keep the list sorted by the length of the
2417 // extension (shortest to longest, per extension_cmp). We sort the
2418 // list of extensions so that we can speed up our searches
2419 // when comparing file names -- we only compare extensions
2420 // that could possibly fit into the file name, not all of
2421 // them (i.e. a short 8 character name can't have an 8
2422 // character extension).
2423 //
2424 extern lck_mtx_t *pkg_extensions_lck;
2425
2426 __private_extern__ int
2427 set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
2428 {
2429 char *new_exts, *old_exts;
2430 int error;
2431
2432 if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
2433 return EINVAL;
2434 }
2435
2436
2437 // allocate one byte extra so we can guarantee null termination
2438 MALLOC(new_exts, char *, (nentries * maxwidth) + 1, M_TEMP, M_WAITOK);
2439 if (new_exts == NULL) {
2440 return ENOMEM;
2441 }
2442
2443 error = copyin(data, new_exts, nentries * maxwidth);
2444 if (error) {
2445 FREE(new_exts, M_TEMP);
2446 return error;
2447 }
2448
2449 new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block
2450
2451 qsort(new_exts, nentries, maxwidth, extension_cmp);
2452
2453 lck_mtx_lock(pkg_extensions_lck);
2454
2455 old_exts = extension_table;
2456 extension_table = new_exts;
2457 nexts = nentries;
2458 max_ext_width = maxwidth;
2459
2460 lck_mtx_unlock(pkg_extensions_lck);
2461
2462 if (old_exts) {
2463 FREE(old_exts, M_TEMP);
2464 }
2465
2466 return 0;
2467 }
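/*
 * [Editor's note] Layout sketch for the table built above, assuming
 * nentries = 3 and maxwidth = 10. Each extension occupies a fixed
 * maxwidth-byte slot, so row i begins at extension_table + i * max_ext_width:
 *
 *     offset   0            10           20            30
 *              "app\0......" "pkg\0......" "framework\0" '\0'
 *
 * The single extra byte allocated above guarantees the block as a whole is
 * NUL terminated even when the last row is exactly maxwidth bytes long.
 */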
2468
2469
2470 __private_extern__ int
2471 is_package_name(const char *name, int len)
2472 {
2473 int i, extlen;
2474 const char *ptr, *name_ext;
2475
2476 if (len <= 3) {
2477 return 0;
2478 }
2479
2480 name_ext = NULL;
2481 for(ptr=name; *ptr != '\0'; ptr++) {
2482 if (*ptr == '.') {
2483 name_ext = ptr;
2484 }
2485 }
2486
2487 // if there is no "." extension, it can't match
2488 if (name_ext == NULL) {
2489 return 0;
2490 }
2491
2492 // advance over the "."
2493 name_ext++;
2494
2495 lck_mtx_lock(pkg_extensions_lck);
2496
2497 // now iterate over all the extensions to see if any match
2498 ptr = &extension_table[0];
2499 for(i=0; i < nexts; i++, ptr+=max_ext_width) {
2500 extlen = strlen(ptr);
2501 if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
2502 // aha, a match!
2503 lck_mtx_unlock(pkg_extensions_lck);
2504 return 1;
2505 }
2506 }
2507
2508 lck_mtx_unlock(pkg_extensions_lck);
2509
2510 // if we get here, no extension matched
2511 return 0;
2512 }
2513
2514 int
2515 vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
2516 {
2517 char *ptr, *end;
2518 int comp=0;
2519
2520 *component = -1;
2521 if (*path != '/') {
2522 return EINVAL;
2523 }
2524
2525 end = path + 1;
2526 while(end < path + pathlen && *end != '\0') {
2527 while(end < path + pathlen && *end == '/' && *end != '\0') {
2528 end++;
2529 }
2530
2531 ptr = end;
2532
2533 while(end < path + pathlen && *end != '/' && *end != '\0') {
2534 end++;
2535 }
2536
2537 if (end > path + pathlen) {
2538 // hmm, string wasn't null terminated
2539 return EINVAL;
2540 }
2541
2542 *end = '\0';
2543 if (is_package_name(ptr, end - ptr)) {
2544 *component = comp;
2545 break;
2546 }
2547
2548 end++;
2549 comp++;
2550 }
2551
2552 return 0;
2553 }
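/*
 * [Editor's note] Worked example, assuming "app" is in the extension table:
 * for path = "/Applications/Mail.app/Contents/Info.plist", the components
 * scanned are "Applications" (comp 0) and "Mail.app" (comp 1), so *component
 * is set to 1. Note that the walk writes '\0' over the separator after each
 * component and does not restore it, so callers should pass a scratch copy
 * of the path.
 */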
2554
2555 /*
2556 * Determine if a name is inappropriate for a searchfs query.
2557 * This list consists of /System currently.
2558 */
2559
2560 int vn_searchfs_inappropriate_name(const char *name, int len) {
2561 const char *bad_names[] = { "System" };
2562 int bad_len[] = { 6 };
2563 int i;
2564
2565 for(i=0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) {
2566 if (len == bad_len[i] && strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) {
2567 return 1;
2568 }
2569 }
2570
2571 // if we get here, no name matched
2572 return 0;
2573 }
2574
2575 /*
2576 * Top level filesystem related information gathering.
2577 */
2578 extern unsigned int vfs_nummntops;
2579
2580 int
2581 vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
2582 user_addr_t newp, size_t newlen, proc_t p)
2583 {
2584 struct vfstable *vfsp;
2585 int *username;
2586 u_int usernamelen;
2587 int error;
2588 struct vfsconf vfsc;
2589
2590 /* All non-VFS_GENERIC names, and within VFS_GENERIC the
2591 * VFS_MAXTYPENUM, VFS_CONF and VFS_SET_PACKAGE_EXTS selectors,
2592 * require root privilege for modification.
2593 * The rest are covered by userland_sysctl (CTLFLAG_ANYBODY).
2594 */
2595 if ((newp != USER_ADDR_NULL) && ((name[0] != VFS_GENERIC) ||
2596 ((name[1] == VFS_MAXTYPENUM) ||
2597 (name[1] == VFS_CONF) ||
2598 (name[1] == VFS_SET_PACKAGE_EXTS)))
2599 && (error = suser(kauth_cred_get(), &p->p_acflag))) {
2600 return(error);
2601 }
2602 /*
2603 * The VFS_NUMMNTOPS shouldn't be at name[0] since it
2604 * is a VFS generic variable. So now we must check
2605 * namelen so we don't end up covering any UFS
2606 * variables (since UFS vfc_typenum is 1).
2607 *
2608 * It should have been:
2609 * name[0]: VFS_GENERIC
2610 * name[1]: VFS_NUMMNTOPS
2611 */
2612 if (namelen == 1 && name[0] == VFS_NUMMNTOPS) {
2613 return (sysctl_rdint(oldp, oldlenp, newp, vfs_nummntops));
2614 }
2615
2616 /* all sysctl names at this level are at least name and field */
2617 if (namelen < 2)
2618 return (EISDIR); /* overloaded */
2619 if (name[0] != VFS_GENERIC) {
2620
2621 mount_list_lock();
2622 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2623 if (vfsp->vfc_typenum == name[0]) {
2624 vfsp->vfc_refcount++;
2625 break;
2626 }
2627 mount_list_unlock();
2628
2629 if (vfsp == NULL)
2630 return (ENOTSUP);
2631
2632 /* XXX current context proxy for proc p? */
2633 error = ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
2634 oldp, oldlenp, newp, newlen,
2635 vfs_context_current()));
2636
2637 mount_list_lock();
2638 vfsp->vfc_refcount--;
2639 mount_list_unlock();
2640 return error;
2641 }
2642 switch (name[1]) {
2643 case VFS_MAXTYPENUM:
2644 return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
2645 case VFS_CONF:
2646 if (namelen < 3)
2647 return (ENOTDIR); /* overloaded */
2648
2649 mount_list_lock();
2650 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2651 if (vfsp->vfc_typenum == name[2])
2652 break;
2653
2654 if (vfsp == NULL) {
2655 mount_list_unlock();
2656 return (ENOTSUP);
2657 }
2658
2659 vfsc.vfc_reserved1 = 0;
2660 bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
2661 vfsc.vfc_typenum = vfsp->vfc_typenum;
2662 vfsc.vfc_refcount = vfsp->vfc_refcount;
2663 vfsc.vfc_flags = vfsp->vfc_flags;
2664 vfsc.vfc_reserved2 = 0;
2665 vfsc.vfc_reserved3 = 0;
2666
2667 mount_list_unlock();
2668 return (sysctl_rdstruct(oldp, oldlenp, newp, &vfsc,
2669 sizeof(struct vfsconf)));
2670
2671 case VFS_SET_PACKAGE_EXTS:
2672 return set_package_extensions_table((user_addr_t)((unsigned)name[1]), name[2], name[3]);
2673 }
2674 /*
2675 * We need to get back into the general MIB, so we need to re-prepend
2676 * CTL_VFS to our name and try userland_sysctl().
2677 */
2678 usernamelen = namelen + 1;
2679 MALLOC(username, int *, usernamelen * sizeof(*username),
2680 M_TEMP, M_WAITOK);
2681 bcopy(name, username + 1, namelen * sizeof(*name));
2682 username[0] = CTL_VFS;
2683 error = userland_sysctl(p, username, usernamelen, oldp,
2684 oldlenp, newp, newlen, oldlenp);
2685 FREE(username, M_TEMP);
2686 return (error);
2687 }
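/*
 * [Editor's note] A userland sketch of driving the VFS_CONF branch above.
 * The mib layout mirrors the name[] indices handled in vfs_sysctl(): the
 * sysctl machinery strips CTL_VFS, so the kernel sees VFS_GENERIC at
 * name[0], VFS_CONF at name[1] and the filesystem type number at name[2].
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/mount.h>
 *
 *	struct vfsconf vfc;
 *	size_t len = sizeof(vfc);
 *	int mib[4] = { CTL_VFS, VFS_GENERIC, VFS_CONF, typenum };
 *
 *	if (sysctl(mib, 4, &vfc, &len, NULL, 0) == 0)
 *		printf("%s: typenum %d\n", vfc.vfc_name, vfc.vfc_typenum);
 */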
2688
2689 /*
2690 * Dump vnode list (via sysctl) - defunct
2691 * use "pstat" instead
2692 */
2693 /* ARGSUSED */
2694 int
2695 sysctl_vnode
2696 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
2697 {
2698 return(EINVAL);
2699 }
2700
2701 SYSCTL_PROC(_kern, KERN_VNODE, vnode,
2702 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MASKED,
2703 0, 0, sysctl_vnode, "S,", "");
2704
2705
2706 /*
2707 * Check to see if a filesystem is mounted on a block device.
2708 */
2709 int
2710 vfs_mountedon(struct vnode *vp)
2711 {
2712 struct vnode *vq;
2713 int error = 0;
2714
2715 SPECHASH_LOCK();
2716 if (vp->v_specflags & SI_MOUNTEDON) {
2717 error = EBUSY;
2718 goto out;
2719 }
2720 if (vp->v_specflags & SI_ALIASED) {
2721 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2722 if (vq->v_rdev != vp->v_rdev ||
2723 vq->v_type != vp->v_type)
2724 continue;
2725 if (vq->v_specflags & SI_MOUNTEDON) {
2726 error = EBUSY;
2727 break;
2728 }
2729 }
2730 }
2731 out:
2732 SPECHASH_UNLOCK();
2733 return (error);
2734 }
2735
2736 /*
2737 * Unmount all filesystems. The list is traversed in reverse order
2738 * of mounting to avoid dependencies.
2739 */
2740 __private_extern__ void
2741 vfs_unmountall(void)
2742 {
2743 struct mount *mp;
2744 int error;
2745
2746 /*
2747 * Since this only runs when rebooting, it is not interlocked.
2748 */
2749 mount_list_lock();
2750 while(!TAILQ_EMPTY(&mountlist)) {
2751 mp = TAILQ_LAST(&mountlist, mntlist);
2752 mount_list_unlock();
2753 error = dounmount(mp, MNT_FORCE, 0, vfs_context_current());
2754 if ((error != 0) && (error != EBUSY)) {
2755 printf("unmount of %s failed (", mp->mnt_vfsstat.f_mntonname);
2756 printf("%d)\n", error);
2757 mount_list_lock();
2758 TAILQ_REMOVE(&mountlist, mp, mnt_list);
2759 continue;
2760 } else if (error == EBUSY) {
2761 /* If EBUSY is returned, the unmount was already in progress */
2762 printf("unmount of %p failed (", mp);
2763 printf("BUSY)\n");
2764 }
2765 mount_list_lock();
2766 }
2767 mount_list_unlock();
2768 }
2769
2770
2771 /*
2772 * This routine is called from vnode_pager_deallocate out of the VM
2773 * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
2774 * on a vnode that has a UBCINFO
2775 */
2776 __private_extern__ void
2777 vnode_pager_vrele(vnode_t vp)
2778 {
2779 struct ubc_info *uip;
2780
2781 vnode_lock_spin(vp);
2782
2783 vp->v_lflag &= ~VNAMED_UBC;
2784
2785 uip = vp->v_ubcinfo;
2786 vp->v_ubcinfo = UBC_INFO_NULL;
2787
2788 vnode_unlock(vp);
2789
2790 ubc_info_deallocate(uip);
2791 }
2792
2793
2794 #include <sys/disk.h>
2795
2796 errno_t
2797 vfs_init_io_attributes(vnode_t devvp, mount_t mp)
2798 {
2799 int error;
2800 off_t readblockcnt = 0;
2801 off_t writeblockcnt = 0;
2802 off_t readmaxcnt = 0;
2803 off_t writemaxcnt = 0;
2804 off_t readsegcnt = 0;
2805 off_t writesegcnt = 0;
2806 off_t readsegsize = 0;
2807 off_t writesegsize = 0;
2808 off_t alignment = 0;
2809 off_t ioqueue_depth = 0;
2810 u_int32_t blksize;
2811 u_int64_t temp;
2812 u_int32_t features;
2813 vfs_context_t ctx = vfs_context_current();
2814
2815 int isvirtual = 0;
2816 /*
2817 * determine if this mount point exists on the same device as the root
2818 * partition... if so, then it comes under the hard throttle control
2819 */
2820 int thisunit = -1;
2821 static int rootunit = -1;
2822
2823 if (rootunit == -1) {
2824 if (VNOP_IOCTL(rootvp, DKIOCGETBSDUNIT, (caddr_t)&rootunit, 0, ctx))
2825 rootunit = -1;
2826 else if (rootvp == devvp)
2827 mp->mnt_kern_flag |= MNTK_ROOTDEV;
2828 }
2829 if (devvp != rootvp && rootunit != -1) {
2830 if (VNOP_IOCTL(devvp, DKIOCGETBSDUNIT, (caddr_t)&thisunit, 0, ctx) == 0) {
2831 if (thisunit == rootunit)
2832 mp->mnt_kern_flag |= MNTK_ROOTDEV;
2833 }
2834 }
2835 /*
2836 * force the spec device to re-cache
2837 * the underlying block size in case
2838 * the filesystem overrode the initial value
2839 */
2840 set_fsblocksize(devvp);
2841
2842
2843 if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE,
2844 (caddr_t)&blksize, 0, ctx)))
2845 return (error);
2846
2847 mp->mnt_devblocksize = blksize;
2848
2849 /*
2850 * set the maximum possible I/O size
2851 * this may get clipped to a smaller value
2852 * based on which constraints are being advertised
2853 * and if those advertised constraints result in a smaller
2854 * limit for a given I/O
2855 */
2856 mp->mnt_maxreadcnt = MAX_UPL_SIZE * PAGE_SIZE;
2857 mp->mnt_maxwritecnt = MAX_UPL_SIZE * PAGE_SIZE;
2858
2859 if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
2860 if (isvirtual)
2861 mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
2862 }
2863
2864 if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
2865 (caddr_t)&features, 0, ctx)))
2866 return (error);
2867
2868 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD,
2869 (caddr_t)&readblockcnt, 0, ctx)))
2870 return (error);
2871
2872 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE,
2873 (caddr_t)&writeblockcnt, 0, ctx)))
2874 return (error);
2875
2876 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD,
2877 (caddr_t)&readmaxcnt, 0, ctx)))
2878 return (error);
2879
2880 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE,
2881 (caddr_t)&writemaxcnt, 0, ctx)))
2882 return (error);
2883
2884 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD,
2885 (caddr_t)&readsegcnt, 0, ctx)))
2886 return (error);
2887
2888 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE,
2889 (caddr_t)&writesegcnt, 0, ctx)))
2890 return (error);
2891
2892 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD,
2893 (caddr_t)&readsegsize, 0, ctx)))
2894 return (error);
2895
2896 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE,
2897 (caddr_t)&writesegsize, 0, ctx)))
2898 return (error);
2899
2900 if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT,
2901 (caddr_t)&alignment, 0, ctx)))
2902 return (error);
2903
2904 if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
2905 (caddr_t)&ioqueue_depth, 0, ctx)))
2906 return (error);
2907
2908 if (readmaxcnt)
2909 mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt;
2910
2911 if (readblockcnt) {
2912 temp = readblockcnt * blksize;
2913 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
2914
2915 if (temp < mp->mnt_maxreadcnt)
2916 mp->mnt_maxreadcnt = (u_int32_t)temp;
2917 }
2918
2919 if (writemaxcnt)
2920 mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : writemaxcnt;
2921
2922 if (writeblockcnt) {
2923 temp = writeblockcnt * blksize;
2924 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
2925
2926 if (temp < mp->mnt_maxwritecnt)
2927 mp->mnt_maxwritecnt = (u_int32_t)temp;
2928 }
2929
2930 if (readsegcnt) {
2931 temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
2932 } else {
2933 temp = mp->mnt_maxreadcnt / PAGE_SIZE;
2934
2935 if (temp > UINT16_MAX)
2936 temp = UINT16_MAX;
2937 }
2938 mp->mnt_segreadcnt = (u_int16_t)temp;
2939
2940 if (writesegcnt) {
2941 temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
2942 } else {
2943 temp = mp->mnt_maxwritecnt / PAGE_SIZE;
2944
2945 if (temp > UINT16_MAX)
2946 temp = UINT16_MAX;
2947 }
2948 mp->mnt_segwritecnt = (u_int16_t)temp;
2949
2950 if (readsegsize)
2951 temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
2952 else
2953 temp = mp->mnt_maxreadcnt;
2954 mp->mnt_maxsegreadsize = (u_int32_t)temp;
2955
2956 if (writesegsize)
2957 temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize;
2958 else
2959 temp = mp->mnt_maxwritecnt;
2960 mp->mnt_maxsegwritesize = (u_int32_t)temp;
2961
2962 if (alignment)
2963 temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1;
2964 else
2965 temp = 0;
2966 mp->mnt_alignmentmask = temp;
2967
2968
2969 if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH)
2970 temp = ioqueue_depth;
2971 else
2972 temp = MNT_DEFAULT_IOQUEUE_DEPTH;
2973
2974 mp->mnt_ioqueue_depth = temp;
2975 mp->mnt_ioscale = (mp->mnt_ioqueue_depth + (MNT_DEFAULT_IOQUEUE_DEPTH - 1)) / MNT_DEFAULT_IOQUEUE_DEPTH;
2976
2977 if (mp->mnt_ioscale > 1)
2978 printf("ioqueue_depth = %d, ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
2979
2980 if (features & DK_FEATURE_FORCE_UNIT_ACCESS)
2981 mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
2982
2983 return (error);
2984 }
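/*
 * [Editor's note] Worked example of the clipping above, with assumed device
 * answers: blksize = 512 and DKIOCGETMAXBLOCKCOUNTREAD = 2048 give
 * temp = 2048 * 512 = 1 MiB; if that is smaller than the initial
 * MAX_UPL_SIZE * PAGE_SIZE ceiling, mnt_maxreadcnt drops to 1 MiB. If the
 * device had also reported DKIOCGETMAXBYTECOUNTREAD = 512 KiB, that
 * byte-count limit would have been applied first and would remain the
 * tighter bound.
 */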
2985
2986 static struct klist fs_klist;
2987 lck_grp_t *fs_klist_lck_grp;
2988 lck_mtx_t *fs_klist_lock;
2989
2990 void
2991 vfs_event_init(void)
2992 {
2993
2994 klist_init(&fs_klist);
2995 fs_klist_lck_grp = lck_grp_alloc_init("fs_klist", NULL);
2996 fs_klist_lock = lck_mtx_alloc_init(fs_klist_lck_grp, NULL);
2997 }
2998
2999 void
3000 vfs_event_signal(__unused fsid_t *fsid, u_int32_t event, __unused intptr_t data)
3001 {
3002 lck_mtx_lock(fs_klist_lock);
3003 KNOTE(&fs_klist, event);
3004 lck_mtx_unlock(fs_klist_lock);
3005 }
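/*
 * [Editor's note] Userland sketch of consuming these events via kqueue's
 * EVFILT_FS filter (the fs_klist that vfs_event_signal() knotes into):
 *
 *	#include <sys/event.h>
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *	EV_SET(&kev, 0, EVFILT_FS, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);		// register
 *
 *	struct kevent ev;
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1) {
 *		// ev.fflags carries VQ_* bits such as VQ_MOUNT / VQ_UNMOUNT
 *	}
 */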
3006
3007 /*
3008 * return the number of mounted filesystems.
3009 */
3010 static int
3011 sysctl_vfs_getvfscnt(void)
3012 {
3013 return(mount_getvfscnt());
3014 }
3015
3016
3017 static int
3018 mount_getvfscnt(void)
3019 {
3020 int ret;
3021
3022 mount_list_lock();
3023 ret = nummounts;
3024 mount_list_unlock();
3025 return (ret);
3026
3027 }
3028
3029
3030
3031 static int
3032 mount_fillfsids(fsid_t *fsidlst, int count)
3033 {
3034 struct mount *mp;
3035 int actual = 0;
3036
3038 mount_list_lock();
3039 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3040 if (actual < count) { /* '<' avoids writing one past the end of fsidlst */
3041 fsidlst[actual] = mp->mnt_vfsstat.f_fsid;
3042 actual++;
3043 }
3044 }
3045 mount_list_unlock();
3046 return (actual);
3047
3048 }
3049
3050 /*
3051 * Fill in the array of fsid_t's up to a max of 'count'; the actual
3052 * number filled in is returned in '*actual'. If there are more fsid_t's
3053 * than will fit in fsidlst then ENOMEM is returned and '*actual' holds
3054 * the full count.
3055 * Callers depend on '*actual' being filled out even in the error case.
3056 */
3057 static int
3058 sysctl_vfs_getvfslist(fsid_t *fsidlst, int count, int *actual)
3059 {
3060 struct mount *mp;
3061
3062 *actual = 0;
3063 mount_list_lock();
3064 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3065 (*actual)++;
3066 if (*actual <= count)
3067 fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid;
3068 }
3069 mount_list_unlock();
3070 return (*actual <= count ? 0 : ENOMEM);
3071 }
3072
3073 static int
3074 sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1,
3075 __unused int arg2, struct sysctl_req *req)
3076 {
3077 int actual, error;
3078 size_t space;
3079 fsid_t *fsidlst;
3080
3081 /* This is a readonly node. */
3082 if (req->newptr != USER_ADDR_NULL)
3083 return (EPERM);
3084
3085 /* they are querying us so just return the space required. */
3086 if (req->oldptr == USER_ADDR_NULL) {
3087 req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
3088 return 0;
3089 }
3090 again:
3091 /*
3092 * Retrieve an accurate count of the amount of space required to copy
3093 * out all the fsids in the system.
3094 */
3095 space = req->oldlen;
3096 req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
3097
3098 /* they didn't give us enough space. */
3099 if (space < req->oldlen)
3100 return (ENOMEM);
3101
3102 MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK);
3103 if (fsidlst == NULL) {
3104 return (ENOMEM);
3105 }
3106
3107 error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
3108 &actual);
3109 /*
3110 * If we get back ENOMEM, then another mount has been added while we
3111 * slept in malloc above. If this is the case then try again.
3112 */
3113 if (error == ENOMEM) {
3114 FREE(fsidlst, M_TEMP);
3115 req->oldlen = space;
3116 goto again;
3117 }
3118 if (error == 0) {
3119 error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t));
3120 }
3121 FREE(fsidlst, M_TEMP);
3122 return (error);
3123 }
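/*
 * [Editor's note] Userland sketch of reading vfs.generic.vfsidlist, following
 * the probe-then-fetch protocol the handler above expects; the retry loop
 * mirrors the kernel's ENOMEM behavior when a new mount races the fetch.
 * Error handling and freeing are elided.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdlib.h>
 *	#include <errno.h>
 *
 *	fsid_t *list = NULL;
 *	size_t len = 0;
 *	for (;;) {
 *		if (sysctlbyname("vfs.generic.vfsidlist", NULL, &len, NULL, 0) == -1)
 *			break;			// probe: len = count * sizeof(fsid_t)
 *		list = realloc(list, len);
 *		if (sysctlbyname("vfs.generic.vfsidlist", list, &len, NULL, 0) == 0)
 *			break;			// len now holds the bytes returned
 *		if (errno != ENOMEM)
 *			break;			// ENOMEM: a mount appeared; retry
 *	}
 */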
3124
3125 /*
3126 * Do a sysctl by fsid.
3127 */
3128 static int
3129 sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
3130 struct sysctl_req *req)
3131 {
3132 union union_vfsidctl vc;
3133 struct mount *mp;
3134 struct vfsstatfs *sp;
3135 int *name, flags, namelen;
3136 int error=0, gotref=0;
3137 vfs_context_t ctx = vfs_context_current();
3138 proc_t p = req->p; /* XXX req->p != current_proc()? */
3139 boolean_t is_64_bit;
3140
3141 name = arg1;
3142 namelen = arg2;
3143 is_64_bit = proc_is64bit(p);
3144
3145 error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
3146 if (error)
3147 goto out;
3148 if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
3149 error = EINVAL;
3150 goto out;
3151 }
3152 mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
3153 if (mp == NULL) {
3154 error = ENOENT;
3155 goto out;
3156 }
3157 gotref = 1;
3158 /* reset so that the fs specific code can fetch it. */
3159 req->newidx = 0;
3160 /*
3161 * Note if this is a VFS_CTL then we pass the actual sysctl req
3162 * in for "oldp" so that the lower layer can DTRT and use the
3163 * SYSCTL_IN/OUT routines.
3164 */
3165 if (mp->mnt_op->vfs_sysctl != NULL) {
3166 if (is_64_bit) {
3167 if (vfs_64bitready(mp)) {
3168 error = mp->mnt_op->vfs_sysctl(name, namelen,
3169 CAST_USER_ADDR_T(req),
3170 NULL, USER_ADDR_NULL, 0,
3171 ctx);
3172 }
3173 else {
3174 error = ENOTSUP;
3175 }
3176 }
3177 else {
3178 error = mp->mnt_op->vfs_sysctl(name, namelen,
3179 CAST_USER_ADDR_T(req),
3180 NULL, USER_ADDR_NULL, 0,
3181 ctx);
3182 }
3183 if (error != ENOTSUP) {
3184 goto out;
3185 }
3186 }
3187 switch (name[0]) {
3188 case VFS_CTL_UMOUNT:
3189 req->newidx = 0;
3190 if (is_64_bit) {
3191 req->newptr = vc.vc64.vc_ptr;
3192 req->newlen = (size_t)vc.vc64.vc_len;
3193 }
3194 else {
3195 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
3196 req->newlen = vc.vc32.vc_len;
3197 }
3198 error = SYSCTL_IN(req, &flags, sizeof(flags));
3199 if (error)
3200 break;
3201
3202 mount_ref(mp, 0);
3203 mount_iterdrop(mp);
3204 gotref = 0;
3205 /* safedounmount consumes a ref */
3206 error = safedounmount(mp, flags, ctx);
3207 break;
3208 case VFS_CTL_STATFS:
3209 req->newidx = 0;
3210 if (is_64_bit) {
3211 req->newptr = vc.vc64.vc_ptr;
3212 req->newlen = (size_t)vc.vc64.vc_len;
3213 }
3214 else {
3215 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
3216 req->newlen = vc.vc32.vc_len;
3217 }
3218 error = SYSCTL_IN(req, &flags, sizeof(flags));
3219 if (error)
3220 break;
3221 sp = &mp->mnt_vfsstat;
3222 if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
3223 (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT)))
3224 goto out;
3225 if (is_64_bit) {
3226 struct user64_statfs sfs;
3227 bzero(&sfs, sizeof(sfs));
3228 sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3229 sfs.f_type = mp->mnt_vtable->vfc_typenum;
3230 sfs.f_bsize = (user64_long_t)sp->f_bsize;
3231 sfs.f_iosize = (user64_long_t)sp->f_iosize;
3232 sfs.f_blocks = (user64_long_t)sp->f_blocks;
3233 sfs.f_bfree = (user64_long_t)sp->f_bfree;
3234 sfs.f_bavail = (user64_long_t)sp->f_bavail;
3235 sfs.f_files = (user64_long_t)sp->f_files;
3236 sfs.f_ffree = (user64_long_t)sp->f_ffree;
3237 sfs.f_fsid = sp->f_fsid;
3238 sfs.f_owner = sp->f_owner;
3239
3240 strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
3241 strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
3242 strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);
3243
3244 error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
3245 }
3246 else {
3247 struct user32_statfs sfs;
3248 bzero(&sfs, sizeof(sfs));
3249 sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3250 sfs.f_type = mp->mnt_vtable->vfc_typenum;
3251
3252 /*
3253 * It's possible for there to be more than 2^31 blocks in the filesystem, so we
3254 * have to fudge the numbers here in that case. We inflate the blocksize in order
3255 * to reflect the filesystem size as best we can.
3256 */
3257 if (sp->f_blocks > INT_MAX) {
3258 int shift;
3259
3260 /*
3261 * Work out how far we have to shift the block count down to make it fit.
3262 * Note that it's possible to have to shift so far that the resulting
3263 * blocksize would be unreportably large. At that point, we will clip
3264 * any values that don't fit.
3265 *
3266 * For safety's sake, we also ensure that f_iosize is never reported as
3267 * being smaller than f_bsize.
3268 */
3269 for (shift = 0; shift < 32; shift++) {
3270 if ((sp->f_blocks >> shift) <= INT_MAX)
3271 break;
3272 if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX)
3273 break;
3274 }
3275 #define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
3276 sfs.f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
3277 sfs.f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
3278 sfs.f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
3279 #undef __SHIFT_OR_CLIP
3280 sfs.f_bsize = (user32_long_t)(sp->f_bsize << shift);
3281 sfs.f_iosize = lmax(sp->f_iosize, sp->f_bsize);
3282 } else {
3283 sfs.f_bsize = (user32_long_t)sp->f_bsize;
3284 sfs.f_iosize = (user32_long_t)sp->f_iosize;
3285 sfs.f_blocks = (user32_long_t)sp->f_blocks;
3286 sfs.f_bfree = (user32_long_t)sp->f_bfree;
3287 sfs.f_bavail = (user32_long_t)sp->f_bavail;
3288 }
3289 sfs.f_files = (user32_long_t)sp->f_files;
3290 sfs.f_ffree = (user32_long_t)sp->f_ffree;
3291 sfs.f_fsid = sp->f_fsid;
3292 sfs.f_owner = sp->f_owner;
3293
3294 strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
3295 strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
3296 strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);
3297
3298 error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
3299 }
3300 break;
3301 default:
3302 error = ENOTSUP;
3303 goto out;
3304 }
3305 out:
3306 if(gotref != 0)
3307 mount_iterdrop(mp);
3308 return (error);
3309 }
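/*
 * [Editor's note] Worked example of the 32-bit statfs fudging above: assume
 * f_blocks = 2^33 and f_bsize = 4096 (a 32 TiB volume). The loop settles on
 * shift = 3, the first value where (2^33 >> 3) = 2^30 fits in INT_MAX, so a
 * 32-bit caller sees f_blocks = 2^30 and f_bsize = 4096 << 3 = 32768. The
 * product, and therefore the reported volume size, is preserved exactly:
 * 2^30 * 32768 = 2^45 bytes = 32 TiB.
 */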
3310
3311 static int filt_fsattach(struct knote *kn);
3312 static void filt_fsdetach(struct knote *kn);
3313 static int filt_fsevent(struct knote *kn, long hint);
3314 struct filterops fs_filtops = {
3315 .f_attach = filt_fsattach,
3316 .f_detach = filt_fsdetach,
3317 .f_event = filt_fsevent,
3318 };
3319
3320 static int
3321 filt_fsattach(struct knote *kn)
3322 {
3323
3324 lck_mtx_lock(fs_klist_lock);
3325 kn->kn_flags |= EV_CLEAR;
3326 KNOTE_ATTACH(&fs_klist, kn);
3327 lck_mtx_unlock(fs_klist_lock);
3328 return (0);
3329 }
3330
3331 static void
3332 filt_fsdetach(struct knote *kn)
3333 {
3334 lck_mtx_lock(fs_klist_lock);
3335 KNOTE_DETACH(&fs_klist, kn);
3336 lck_mtx_unlock(fs_klist_lock);
3337 }
3338
3339 static int
3340 filt_fsevent(struct knote *kn, long hint)
3341 {
3342 /*
3343 * Backwards compatibility:
3344 * Other filters would do nothing if kn->kn_sfflags == 0
3345 */
3346
3347 if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
3348 kn->kn_fflags |= hint;
3349 }
3350
3351 return (kn->kn_fflags != 0);
3352 }
3353
3354 static int
3355 sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp,
3356 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3357 {
3358 int out, error;
3359 pid_t pid;
3360 proc_t p;
3361
3362 /* We need a pid. */
3363 if (req->newptr == USER_ADDR_NULL)
3364 return (EINVAL);
3365
3366 error = SYSCTL_IN(req, &pid, sizeof(pid));
3367 if (error)
3368 return (error);
3369
3370 p = proc_find(pid < 0 ? -pid : pid);
3371 if (p == NULL)
3372 return (ESRCH);
3373
3374 /*
3375 * Fetching the value is ok, but we only fetch if the old
3376 * pointer is given.
3377 */
3378 if (req->oldptr != USER_ADDR_NULL) {
3379 out = !((p->p_flag & P_NOREMOTEHANG) == 0);
3380 proc_rele(p);
3381 error = SYSCTL_OUT(req, &out, sizeof(out));
3382 return (error);
3383 }
3384
3385 /* cansignal offers us enough security. */
3386 if (p != req->p && proc_suser(req->p) != 0) {
3387 proc_rele(p);
3388 return (EPERM);
3389 }
3390
3391 if (pid < 0)
3392 OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
3393 else
3394 OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
3395 proc_rele(p);
3396
3397 return (0);
3398 }
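/*
 * [Editor's note] Userland sketch for vfs.generic.noremotehang: writing a
 * positive pid sets P_NOREMOTEHANG for that process and a negative pid
 * clears it; supplying an old buffer as well reads the current state
 * instead of modifying it.
 *
 *	#include <sys/sysctl.h>
 *	#include <unistd.h>
 *
 *	pid_t pid = getpid();
 *	sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &pid, sizeof(pid));
 *	pid = -pid;		// clear the flag again
 *	sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &pid, sizeof(pid));
 */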
3399
3400 /* the vfs.generic. branch. */
3401 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RW|CTLFLAG_LOCKED, NULL, "vfs generic hinge");
3402 /* retrieve a list of mounted filesystem fsid_t's */
3403 SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist, CTLFLAG_RD,
3404 NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
3405 /* perform operations on filesystem via fsid_t */
3406 SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW|CTLFLAG_LOCKED,
3407 sysctl_vfs_ctlbyfsid, "ctlbyfsid");
3408 SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW|CTLFLAG_ANYBODY,
3409 NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
3410
3411
3412 long num_reusedvnodes = 0;
3413
3414 static int
3415 new_vnode(vnode_t *vpp)
3416 {
3417 vnode_t vp;
3418 int retries = 0; /* retry in case the table is full */
3419 int force_alloc = 0, walk_count = 0;
3420 unsigned int vpid;
3421 struct timespec ts;
3422 struct timeval current_tv;
3423 #ifndef __LP64__
3424 struct unsafe_fsnode *l_unsafefs = 0;
3425 #endif /* __LP64__ */
3426 proc_t curproc = current_proc();
3427
3428 retry:
3429 microuptime(&current_tv);
3430
3431 vp = NULLVP;
3432
3433 vnode_list_lock();
3434
3435 if ( !TAILQ_EMPTY(&vnode_dead_list)) {
3436 /*
3437 * Can always reuse a dead one
3438 */
3439 vp = TAILQ_FIRST(&vnode_dead_list);
3440 goto steal_this_vp;
3441 }
3442 /*
3443 * no dead vnodes available... if we're under
3444 * the limit, we'll create a new vnode
3445 */
3446 if (numvnodes < desiredvnodes || force_alloc) {
3447 numvnodes++;
3448 vnode_list_unlock();
3449
3450 MALLOC_ZONE(vp, struct vnode *, sizeof(*vp), M_VNODE, M_WAITOK);
3451 bzero((char *)vp, sizeof(*vp));
3452 VLISTNONE(vp); /* avoid double queue removal */
3453 lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr);
3454
3455 klist_init(&vp->v_knotes);
3456 nanouptime(&ts);
3457 vp->v_id = ts.tv_nsec;
3458 vp->v_flag = VSTANDARD;
3459
3460 #if CONFIG_MACF
3461 if (mac_vnode_label_init_needed(vp))
3462 mac_vnode_label_init(vp);
3463 #endif /* MAC */
3464
3465 vp->v_iocount = 1;
3466 goto done;
3467 }
3468
3469 #define MAX_WALK_COUNT 1000
3470
3471 if ( !TAILQ_EMPTY(&vnode_rage_list) &&
3472 (ragevnodes >= rage_limit ||
3473 (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
3474
3475 TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
3476 if ( !(vp->v_listflag & VLIST_RAGE))
3477 panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
3478
3479 // if we're a dependency-capable process, skip vnodes that can
3480 // cause recycling deadlocks. (i.e. this process is diskimages
3481 // helper and the vnode is in a disk image).
3482 //
3483 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
3484 break;
3485 }
3486
3487 // don't iterate more than MAX_WALK_COUNT vnodes to
3488 // avoid keeping the vnode list lock held for too long.
3489 if (walk_count++ > MAX_WALK_COUNT) {
3490 vp = NULL;
3491 break;
3492 }
3493 }
3494
3495 }
3496
3497 if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
3498 /*
3499 * Pick the first vp for possible reuse
3500 */
3501 walk_count = 0;
3502 TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
3503 // if we're a dependency-capable process, skip vnodes that can
3504 // cause recycling deadlocks. (i.e. this process is diskimages
3505 // helper and the vnode is in a disk image)
3506 //
3507 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
3508 break;
3509 }
3510
3511 // don't iterate more than MAX_WALK_COUNT vnodes to
3512 // avoid keeping the vnode list lock held for too long.
3513 if (walk_count++ > MAX_WALK_COUNT) {
3514 vp = NULL;
3515 break;
3516 }
3517 }
3518
3519 }
3520
3521 //
3522 // if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
3523 // then we're trying to create a vnode on behalf of a
3524 // process like diskimages-helper that has file systems
3525 // mounted on top of itself (and thus we can't reclaim
3526 // vnodes in the file systems on top of us). if we can't
3527 // find a vnode to reclaim then we'll just have to force
3528 // the allocation.
3529 //
3530 if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
3531 force_alloc = 1;
3532 vnode_list_unlock();
3533 goto retry;
3534 }
3535
3536 if (vp == NULL) {
3537 /*
3538 * we've reached the system imposed maximum number of vnodes
3539 * but there isn't a single one available
3540 * wait a bit and then retry... if we can't get a vnode
3541 * after 100 retries, then log a complaint
3542 */
3543 if (++retries <= 100) {
3544 vnode_list_unlock();
3545 delay_for_interval(1, 1000 * 1000);
3546 goto retry;
3547 }
3548
3549 vnode_list_unlock();
3550 tablefull("vnode");
3551 log(LOG_EMERG, "%d desired, %d numvnodes, "
3552 "%d free, %d dead, %d rage\n",
3553 desiredvnodes, numvnodes, freevnodes, deadvnodes, ragevnodes);
3554 #if CONFIG_EMBEDDED
3555 /*
3556 * Running out of vnodes tends to make a system unusable. Start killing
3557 * processes that jetsam knows are killable.
3558 */
3559 if (jetsam_kill_top_proc() < 0) {
3560 /*
3561 * If jetsam can't find any more processes to kill and there
3562 * still aren't any free vnodes, panic. Hopefully we'll get a
3563 * panic log to tell us why we ran out.
3564 */
3565 panic("vnode table is full\n");
3566 }
3567
3568 delay_for_interval(1, 1000 * 1000);
3569 goto retry;
3570 #endif
3571
3572 *vpp = NULL;
3573 return (ENFILE);
3574 }
3575 steal_this_vp:
3576 vpid = vp->v_id;
3577
3578 vnode_list_remove_locked(vp);
3579
3580 vnode_list_unlock();
3581
3582 vnode_lock_spin(vp);
3583
3584 /*
3585 * We may block on the vnode_lock after removing the vp from the freelist,
3586 * and the vid is bumped only at the very end of reclaim. So it is possible
3587 * that we are looking at a vnode that is being terminated. If so, skip it.
3588 */
3589 if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
3590 VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
3591 /*
3592 * we lost the race between dropping the list lock
3593 * and picking up the vnode_lock... someone else
3594 * used this vnode and it is now in a new state
3595 * so we need to go back and try again
3596 */
3597 vnode_unlock(vp);
3598 goto retry;
3599 }
3600 if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) {
3601 /*
3602 * we did a vnode_rele_ext that asked for
3603 * us not to reenter the filesystem during
3604 * the release even though VL_NEEDINACTIVE was
3605 * set... we'll do it here by doing a
3606 * vnode_get/vnode_put
3607 *
3608 * pick up an iocount so that we can call
3609 * vnode_put and drive the VNOP_INACTIVE...
3610 * vnode_put will either leave us off
3611 * the freelist if a new ref comes in,
3612 * or put us back on the end of the freelist
3613 * or recycle us if we were marked for termination...
3614 * so we'll just go grab a new candidate
3615 */
3616 vp->v_iocount++;
3617 #ifdef JOE_DEBUG
3618 record_vp(vp, 1);
3619 #endif
3620 vnode_put_locked(vp);
3621 vnode_unlock(vp);
3622 goto retry;
3623 }
3624 OSAddAtomicLong(1, &num_reusedvnodes);
3625
3626 /* Checks for anyone racing us for recycle */
3627 if (vp->v_type != VBAD) {
3628 if (vp->v_lflag & VL_DEAD)
3629 panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
3630 vnode_lock_convert(vp);
3631 (void)vnode_reclaim_internal(vp, 1, 1, 0);
3632
3633 if ((VONLIST(vp)))
3634 panic("new_vnode(%p): vp on list", vp);
3635 if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
3636 (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH)))
3637 panic("new_vnode(%p): free vnode still referenced", vp);
3638 if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0))
3639 panic("new_vnode(%p): vnode seems to be on mount list", vp);
3640 if ( !LIST_EMPTY(&vp->v_nclinks) || !LIST_EMPTY(&vp->v_ncchildren))
3641 panic("new_vnode(%p): vnode still hooked into the name cache", vp);
3642 }
3643
3644 #ifndef __LP64__
3645 if (vp->v_unsafefs) {
3646 l_unsafefs = vp->v_unsafefs;
3647 vp->v_unsafefs = (struct unsafe_fsnode *)NULL;
3648 }
3649 #endif /* __LP64__ */
3650
3651 #if CONFIG_MACF
3652 /*
3653 * We should never see VL_LABELWAIT or VL_LABEL here.
3654 * as those operations hold a reference.
3655 */
3656 assert ((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
3657 assert ((vp->v_lflag & VL_LABEL) != VL_LABEL);
3658 if (vp->v_lflag & VL_LABELED) {
3659 vnode_lock_convert(vp);
3660 mac_vnode_label_recycle(vp);
3661 } else if (mac_vnode_label_init_needed(vp)) {
3662 vnode_lock_convert(vp);
3663 mac_vnode_label_init(vp);
3664 }
3665
3666 #endif /* MAC */
3667
3668 vp->v_iocount = 1;
3669 vp->v_lflag = 0;
3670 vp->v_writecount = 0;
3671 vp->v_references = 0;
3672 vp->v_iterblkflags = 0;
3673 vp->v_flag = VSTANDARD;
3674 /* vbad vnodes can point to dead_mountp */
3675 vp->v_mount = NULL;
3676 vp->v_defer_reclaimlist = (vnode_t)0;
3677
3678 vnode_unlock(vp);
3679
3680 #ifndef __LP64__
3681 if (l_unsafefs) {
3682 lck_mtx_destroy(&l_unsafefs->fsnodelock, vnode_lck_grp);
3683 FREE_ZONE((void *)l_unsafefs, sizeof(struct unsafe_fsnode), M_UNSAFEFS);
3684 }
3685 #endif /* __LP64__ */
3686
3687 done:
3688 *vpp = vp;
3689
3690 return (0);
3691 }
3692
3693 void
3694 vnode_lock(vnode_t vp)
3695 {
3696 lck_mtx_lock(&vp->v_lock);
3697 }
3698
3699 void
3700 vnode_lock_spin(vnode_t vp)
3701 {
3702 lck_mtx_lock_spin(&vp->v_lock);
3703 }
3704
3705 void
3706 vnode_unlock(vnode_t vp)
3707 {
3708 lck_mtx_unlock(&vp->v_lock);
3709 }
3710
3711
3712
3713 int
3714 vnode_get(struct vnode *vp)
3715 {
3716 int retval;
3717
3718 vnode_lock_spin(vp);
3719 retval = vnode_get_locked(vp);
3720 vnode_unlock(vp);
3721
3722 return(retval);
3723 }
3724
3725 int
3726 vnode_get_locked(struct vnode *vp)
3727 {
3728 #if DIAGNOSTIC
3729 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
3730 #endif
3731 if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
3732 return(ENOENT);
3733 }
3734 vp->v_iocount++;
3735 #ifdef JOE_DEBUG
3736 record_vp(vp, 1);
3737 #endif
3738 return (0);
3739 }
3740
3741 int
3742 vnode_getwithvid(vnode_t vp, uint32_t vid)
3743 {
3744 return(vget_internal(vp, vid, ( VNODE_NODEAD| VNODE_WITHID)));
3745 }
3746
3747 int
3748 vnode_getwithref(vnode_t vp)
3749 {
3750 return(vget_internal(vp, 0, 0));
3751 }
3752
3753
3754 __private_extern__ int
3755 vnode_getalways(vnode_t vp)
3756 {
3757 return(vget_internal(vp, 0, VNODE_ALWAYS));
3758 }
3759
3760 int
3761 vnode_put(vnode_t vp)
3762 {
3763 int retval;
3764
3765 vnode_lock_spin(vp);
3766 retval = vnode_put_locked(vp);
3767 vnode_unlock(vp);
3768
3769 return(retval);
3770 }
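/*
 * [Editor's note] Sketch of the canonical iocount hold pattern these
 * routines implement; vnode_vid() and the VNOP_FSYNC call are illustrative.
 */
static int
example_fsync_with_iocount(vnode_t vp, vfs_context_t ctx)	/* hypothetical */
{
	uint32_t vid = vnode_vid(vp);	/* capture identity while vp is known valid */
	int error;

	if ((error = vnode_getwithvid(vp, vid)) != 0)
		return (error);		/* vp was recycled out from under us */

	/* the iocount keeps vp from being reclaimed during the VNOP */
	error = VNOP_FSYNC(vp, MNT_WAIT, ctx);

	vnode_put(vp);			/* drop iocount; may drive VNOP_INACTIVE */
	return (error);
}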
3771
3772 int
3773 vnode_put_locked(vnode_t vp)
3774 {
3775 vfs_context_t ctx = vfs_context_current(); /* hoist outside loop */
3776
3777 #if DIAGNOSTIC
3778 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
3779 #endif
3780 retry:
3781 if (vp->v_iocount < 1)
3782 panic("vnode_put(%p): iocount < 1", vp);
3783
3784 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
3785 vnode_dropiocount(vp);
3786 return(0);
3787 }
3788 if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE) {
3789
3790 vp->v_lflag &= ~VL_NEEDINACTIVE;
3791 vnode_unlock(vp);
3792
3793 VNOP_INACTIVE(vp, ctx);
3794
3795 vnode_lock_spin(vp);
3796 /*
3797 * because we had to drop the vnode lock before calling
3798 * VNOP_INACTIVE, the state of this vnode may have changed...
3799 * we may pick up both VL_MARKTERM and either
3800 * an iocount or a usecount while in the VNOP_INACTIVE call
3801 * we don't want to call vnode_reclaim_internal on a vnode
3802 * that has active references on it... so loop back around
3803 * and reevaluate the state
3804 */
3805 goto retry;
3806 }
3807 vp->v_lflag &= ~VL_NEEDINACTIVE;
3808
3809 if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
3810 vnode_lock_convert(vp);
3811 vnode_reclaim_internal(vp, 1, 1, 0);
3812 }
3813 vnode_dropiocount(vp);
3814 vnode_list_add(vp);
3815
3816 return(0);
3817 }
3818
3819 /* is vnode_t in use by others? */
3820 int
3821 vnode_isinuse(vnode_t vp, int refcnt)
3822 {
3823 return(vnode_isinuse_locked(vp, refcnt, 0));
3824 }
3825
3826
3827 static int
3828 vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
3829 {
3830 int retval = 0;
3831
3832 if (!locked)
3833 vnode_lock_spin(vp);
3834 if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
3835 retval = 1;
3836 goto out;
3837 }
3838 if (vp->v_type == VREG) {
3839 retval = ubc_isinuse_locked(vp, refcnt, 1);
3840 }
3841
3842 out:
3843 if (!locked)
3844 vnode_unlock(vp);
3845 return(retval);
3846 }
3847
3848
3849 /* resume vnode_t */
3850 errno_t
3851 vnode_resume(vnode_t vp)
3852 {
3853 if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
3854
3855 vnode_lock_spin(vp);
3856 vp->v_lflag &= ~VL_SUSPENDED;
3857 vp->v_owner = NULL;
3858 vnode_unlock(vp);
3859
3860 wakeup(&vp->v_iocount);
3861 }
3862 return(0);
3863 }
3864
3865 /* suspend vnode_t
3866 * Please do not use on more than one vnode at a time as it may
3867 * cause deadlocks.
3868 * xxx should we explicitly prevent this from happening?
3869 */
3870
3871 errno_t
3872 vnode_suspend(vnode_t vp)
3873 {
3874 if (vp->v_lflag & VL_SUSPENDED) {
3875 return(EBUSY);
3876 }
3877
3878 vnode_lock_spin(vp);
3879
3880 /*
3881 * xxx is this sufficient to check if a vnode_drain is in
3882 * progress?
3883 */
3884
3885 if (vp->v_owner == NULL) {
3886 vp->v_lflag |= VL_SUSPENDED;
3887 vp->v_owner = current_thread();
3888 }
3889 vnode_unlock(vp);
3890
3891 return(0);
3892 }
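/*
 * [Editor's note] Minimal suspend/resume pairing sketch. Per the warning
 * above, suspend only one vnode at a time; vnode_resume() is a no-op unless
 * the caller is the suspending thread.
 */
static void
example_suspend_section(vnode_t vp)	/* hypothetical */
{
	if (vnode_suspend(vp) != 0)
		return;			/* already suspended by another thread */

	/* new iocount requesters now block in vnode_getiocount() */
	/* ... perform work that must exclude new users ... */

	vnode_resume(vp);
}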
3893
3894
3895
3896 static errno_t
3897 vnode_drain(vnode_t vp)
3898 {
3899
3900 if (vp->v_lflag & VL_DRAIN) {
3901 panic("vnode_drain: recursive drain");
3902 return(ENOENT);
3903 }
3904 vp->v_lflag |= VL_DRAIN;
3905 vp->v_owner = current_thread();
3906
3907 while (vp->v_iocount > 1)
3908 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);
3909 return(0);
3910 }
3911
3912
3913 /*
3914 * if the number of recent references via vnode_getwithvid or vnode_getwithref
3915 * exceeds this threshold, then 'UN-AGE' the vnode by removing it from
3916 * the LRU list if it's currently on it... once the iocount and usecount both drop
3917 * to 0, it will get put back on the end of the list, effectively making it younger
3918 * this allows us to keep actively referenced vnodes in the list without having
3919 * to constantly remove and add to the list each time a vnode w/o a usecount is
3920 * referenced which costs us taking and dropping a global lock twice.
3921 */
3922 #define UNAGE_THRESHHOLD 25
3923
3924 static errno_t
3925 vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
3926 {
3927 int nodead = vflags & VNODE_NODEAD;
3928 int nosusp = vflags & VNODE_NOSUSPEND;
3929 int always = vflags & VNODE_ALWAYS;
3930
3931 for (;;) {
3932 /*
3933 * if it is a dead vnode with deadfs
3934 */
3935 if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
3936 return(ENOENT);
3937 }
3938 /*
3939 * will return VL_DEAD ones
3940 */
3941 if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0 ) {
3942 break;
3943 }
3944 /*
3945 * if suspended vnodes are to be failed
3946 */
3947 if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
3948 return(ENOENT);
3949 }
3950 /*
3951 * if you are the owner of drain/suspend/termination , can acquire iocount
3952 * check for VL_TERMINATE; it does not set owner
3953 */
3954 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
3955 (vp->v_owner == current_thread())) {
3956 break;
3957 }
3958
3959 if (always != 0)
3960 break;
3961 vnode_lock_convert(vp);
3962
3963 if (vp->v_lflag & VL_TERMINATE) {
3964 vp->v_lflag |= VL_TERMWANT;
3965
3966 msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vnode getiocount", NULL);
3967 } else
3968 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
3969 }
3970 if (vid != vp->v_id) {
3971 return(ENOENT);
3972 }
3973 if (++vp->v_references >= UNAGE_THRESHHOLD) {
3974 vp->v_references = 0;
3975 vnode_list_remove(vp);
3976 }
3977 vp->v_iocount++;
3978 #ifdef JOE_DEBUG
3979 record_vp(vp, 1);
3980 #endif
3981 return(0);
3982 }
3983
3984 static void
3985 vnode_dropiocount (vnode_t vp)
3986 {
3987 if (vp->v_iocount < 1)
3988 panic("vnode_dropiocount(%p): v_iocount < 1", vp);
3989
3990 vp->v_iocount--;
3991 #ifdef JOE_DEBUG
3992 record_vp(vp, -1);
3993 #endif
3994 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1))
3995 wakeup(&vp->v_iocount);
3996 }
3997
3998
3999 void
4000 vnode_reclaim(struct vnode * vp)
4001 {
4002 vnode_reclaim_internal(vp, 0, 0, 0);
4003 }
4004
4005 __private_extern__
4006 void
4007 vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
4008 {
4009 int isfifo = 0;
4010
4011 if (!locked)
4012 vnode_lock(vp);
4013
4014 if (vp->v_lflag & VL_TERMINATE) {
4015 panic("vnode reclaim in progress");
4016 }
4017 vp->v_lflag |= VL_TERMINATE;
4018
4019 vn_clearunionwait(vp, 1);
4020
4021 vnode_drain(vp);
4022
4023 isfifo = (vp->v_type == VFIFO);
4024
4025 if (vp->v_type != VBAD)
4026 vgone(vp, flags); /* clean and reclaim the vnode */
4027
4028 /*
4029 * give the vnode a new identity so that vnode_getwithvid will fail
4030 * on any stale cache accesses...
4031 * grab the list_lock so that if we're in "new_vnode"
4032 * behind the list_lock trying to steal this vnode, the v_id is stable...
4033 * once new_vnode drops the list_lock, it will block trying to take
4034 * the vnode lock until we release it... at that point it will evaluate
4035 * whether the v_id has changed
4036 * also need to make sure that the vnode isn't on a list where "new_vnode"
4037 * can find it after the v_id has been bumped until we are completely done
4038 * with the vnode (i.e. putting it back on a list has to be the very last
4039 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
4040 * are holding an io_count on the vnode... they need to drop the io_count
4041 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
4042 * they are completely done with the vnode
4043 */
4044 vnode_list_lock();
4045
4046 vnode_list_remove_locked(vp);
4047 vp->v_id++;
4048
4049 vnode_list_unlock();
4050
4051 if (isfifo) {
4052 struct fifoinfo * fip;
4053
4054 fip = vp->v_fifoinfo;
4055 vp->v_fifoinfo = NULL;
4056 FREE(fip, M_TEMP);
4057 }
4058 vp->v_type = VBAD;
4059
4060 if (vp->v_data)
4061 panic("vnode_reclaim_internal: cleaned vnode isn't");
4062 if (vp->v_numoutput)
4063 panic("vnode_reclaim_internal: clean vnode has pending I/O's");
4064 if (UBCINFOEXISTS(vp))
4065 panic("vnode_reclaim_internal: ubcinfo not cleaned");
4066 if (vp->v_parent)
4067 panic("vnode_reclaim_internal: vparent not removed");
4068 if (vp->v_name)
4069 panic("vnode_reclaim_internal: vname not removed");
4070
4071 vp->v_socket = NULL;
4072
4073 vp->v_lflag &= ~VL_TERMINATE;
4074 vp->v_lflag &= ~VL_DRAIN;
4075 vp->v_owner = NULL;
4076
4077 KNOTE(&vp->v_knotes, NOTE_REVOKE);
4078
4079 /* Make sure that when we reuse the vnode, no knotes left over */
4080 klist_init(&vp->v_knotes);
4081
4082 if (vp->v_lflag & VL_TERMWANT) {
4083 vp->v_lflag &= ~VL_TERMWANT;
4084 wakeup(&vp->v_lflag);
4085 }
4086 if (!reuse) {
4087 /*
4088 * make sure we get on the
4089 * dead list if appropriate
4090 */
4091 vnode_list_add(vp);
4092 }
4093 if (!locked)
4094 vnode_unlock(vp);
4095 }
4096
4097 /* USAGE:
4098 * The following API creates a vnode and associates all the parameters specified in the vnode_fsparam
4099 * structure, returning a vnode handle with a reference. Device aliasing is handled here, so checkalias
4100 * is obsoleted by this.
4101 * vnode_create(int flavor, size_t size, void * param, vnode_t *vp)
4102 */
4103 int
4104 vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
4105 {
4106 int error;
4107 int insert = 1;
4108 vnode_t vp;
4109 vnode_t nvp;
4110 vnode_t dvp;
4111 struct uthread *ut;
4112 struct componentname *cnp;
4113 struct vnode_fsparam *param = (struct vnode_fsparam *)data;
4114
4115 if (flavor == VNCREATE_FLAVOR && (size == VCREATESIZE) && param) {
4116 if ( (error = new_vnode(&vp)) ) {
4117 return(error);
4118 } else {
4119 dvp = param->vnfs_dvp;
4120 cnp = param->vnfs_cnp;
4121
4122 vp->v_op = param->vnfs_vops;
4123 vp->v_type = param->vnfs_vtype;
4124 vp->v_data = param->vnfs_fsnode;
4125
4126 if (param->vnfs_markroot)
4127 vp->v_flag |= VROOT;
4128 if (param->vnfs_marksystem)
4129 vp->v_flag |= VSYSTEM;
4130 if (vp->v_type == VREG) {
4131 error = ubc_info_init_withsize(vp, param->vnfs_filesize);
4132 if (error) {
4133 #ifdef JOE_DEBUG
4134 record_vp(vp, 1);
4135 #endif
4136 vp->v_mount = NULL;
4137 vp->v_op = dead_vnodeop_p;
4138 vp->v_tag = VT_NON;
4139 vp->v_data = NULL;
4140 vp->v_type = VBAD;
4141 vp->v_lflag |= VL_DEAD;
4142
4143 vnode_put(vp);
4144 return(error);
4145 }
4146 }
4147 #ifdef JOE_DEBUG
4148 record_vp(vp, 1);
4149 #endif
4150 if (vp->v_type == VCHR || vp->v_type == VBLK) {
4151
4152 vp->v_tag = VT_DEVFS; /* callers will reset if needed (bdevvp) */
4153
4154 if ( (nvp = checkalias(vp, param->vnfs_rdev)) ) {
4155 /*
4156 * if checkalias returns a vnode, it will be locked
4157 *
4158 * first get rid of the unneeded vnode we acquired
4159 */
4160 vp->v_data = NULL;
4161 vp->v_op = spec_vnodeop_p;
4162 vp->v_type = VBAD;
4163 vp->v_lflag = VL_DEAD;
4164 vp->v_data = NULL;
4165 vp->v_tag = VT_NON;
4166 vnode_put(vp);
4167
4168 /*
4169 * switch to aliased vnode and finish
4170 * preparing it
4171 */
4172 vp = nvp;
4173
4174 vclean(vp, 0);
4175 vp->v_op = param->vnfs_vops;
4176 vp->v_type = param->vnfs_vtype;
4177 vp->v_data = param->vnfs_fsnode;
4178 vp->v_lflag = 0;
4179 vp->v_mount = NULL;
4180 insmntque(vp, param->vnfs_mp);
4181 insert = 0;
4182 vnode_unlock(vp);
4183 }
4184 }
4185
4186 if (vp->v_type == VFIFO) {
4187 struct fifoinfo *fip;
4188
4189 MALLOC(fip, struct fifoinfo *,
4190 sizeof(*fip), M_TEMP, M_WAITOK);
4191 bzero(fip, sizeof(struct fifoinfo));
4192 vp->v_fifoinfo = fip;
4193 }
4194 /* The file system must pass the address of the location where
4195 * it stores the vnode pointer. Once we add the vnode to the mount
4196 * list and name cache it becomes discoverable, so the file system
4197 * node must have its connection to the vnode set up by then.
4198 */
4199 *vpp = vp;
4200
4201 /* Add fs named reference. */
4202 if (param->vnfs_flags & VNFS_ADDFSREF) {
4203 vp->v_lflag |= VNAMED_FSHASH;
4204 }
4205 if (param->vnfs_mp) {
4206 if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL)
4207 vp->v_flag |= VLOCKLOCAL;
4208 if (insert) {
4209 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4210 panic("insmntque: vp on the free list\n");
4211 /*
4212 * enter in mount vnode list
4213 */
4214 insmntque(vp, param->vnfs_mp);
4215 }
4216 #ifndef __LP64__
4217 if ((param->vnfs_mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE) == 0) {
4218 MALLOC_ZONE(vp->v_unsafefs, struct unsafe_fsnode *,
4219 sizeof(struct unsafe_fsnode), M_UNSAFEFS, M_WAITOK);
4220 vp->v_unsafefs->fsnode_count = 0;
4221 vp->v_unsafefs->fsnodeowner = (void *)NULL;
4222 lck_mtx_init(&vp->v_unsafefs->fsnodelock, vnode_lck_grp, vnode_lck_attr);
4223 }
4224 #endif /* __LP64__ */
4225 }
4226 if (dvp && vnode_ref(dvp) == 0) {
4227 vp->v_parent = dvp;
4228 }
4229 if (cnp) {
4230 if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
4231 /*
4232 * enter into name cache
4233 * we've got the info to enter it into the name cache now
4234 * cache_enter_create will pick up an extra reference on
4235 * the name entered into the string cache
4236 */
4237 vp->v_name = cache_enter_create(dvp, vp, cnp);
4238 } else
4239 vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
4240
4241 if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED)
4242 vp->v_flag |= VISUNION;
4243 }
4244 if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
4245 /*
4246 * this vnode is being created as cacheable in the name cache
4247 * this allows us to re-enter it in the cache
4248 */
4249 vp->v_flag |= VNCACHEABLE;
4250 }
4251 ut = get_bsdthread_info(current_thread());
4252
4253 if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
4254 (ut->uu_flag & UT_RAGE_VNODES)) {
4255 /*
4256 * process has indicated that it wants any
4257 * vnodes created on its behalf to be rapidly
4258 * aged to reduce the impact on the cached set
4259 * of vnodes
4260 */
4261 vp->v_flag |= VRAGE;
4262 }
4263 return(0);
4264 }
4265 }
4266 return (EINVAL);
4267 }
4268
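/*
 * Illustrative sketch (not part of this file): how a filesystem's
 * "get vnode" path might fill in a vnode_fsparam and call
 * vnode_create().  The names mp, dvp, np, cnp and myfs_vnodeop_p
 * are hypothetical caller state.
 *
 *	struct vnode_fsparam vfsp;
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	bzero(&vfsp, sizeof(vfsp));
 *	vfsp.vnfs_mp = mp;			// mount the vnode belongs to
 *	vfsp.vnfs_vtype = VREG;			// regular file
 *	vfsp.vnfs_str = "myfs";
 *	vfsp.vnfs_dvp = dvp;			// parent directory, if known
 *	vfsp.vnfs_fsnode = np;			// per-FS node, becomes vp->v_data
 *	vfsp.vnfs_vops = myfs_vnodeop_p;	// VNOP dispatch table
 *	vfsp.vnfs_filesize = np->n_size;	// used to size the ubc info
 *	vfsp.vnfs_cnp = cnp;			// name, for the name cache
 *	vfsp.vnfs_flags = VNFS_ADDFSREF;	// take an fs named reference
 *
 *	error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vp);
 *	// on success, vp is returned with an iocount held
 */
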
4269 int
4270 vnode_addfsref(vnode_t vp)
4271 {
4272 vnode_lock_spin(vp);
4273 if (vp->v_lflag & VNAMED_FSHASH)
4274 panic("add_fsref: vp already has named reference");
4275 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4276 panic("addfsref: vp on the free list\n");
4277 vp->v_lflag |= VNAMED_FSHASH;
4278 vnode_unlock(vp);
4279 return(0);
4280
4281 }
4282 int
4283 vnode_removefsref(vnode_t vp)
4284 {
4285 vnode_lock_spin(vp);
4286 if ((vp->v_lflag & VNAMED_FSHASH) == 0)
4287 panic("remove_fsref: no named reference");
4288 vp->v_lflag &= ~VNAMED_FSHASH;
4289 vnode_unlock(vp);
4290 return(0);
4291
4292 }
4293
4294
4295 int
4296 vfs_iterate(__unused int flags, int (*callout)(mount_t, void *), void *arg)
4297 {
4298 mount_t mp;
4299 int ret = 0;
4300 fsid_t * fsid_list;
4301 int count, actualcount, i;
4302 void * allocmem;
4303
4304 count = mount_getvfscnt();
4305 count += 10;
4306
4307 fsid_list = (fsid_t *)kalloc(count * sizeof(fsid_t));
4308 allocmem = (void *)fsid_list;
4309
4310 actualcount = mount_fillfsids(fsid_list, count);
4311
4312 for (i=0; i< actualcount; i++) {
4313
4314 /* obtain the mount point with iteration reference */
4315 mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);
4316
4317 if(mp == (struct mount *)0)
4318 continue;
4319 mount_lock(mp);
4320 if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
4321 mount_unlock(mp);
4322 mount_iterdrop(mp);
4323 continue;
4324
4325 }
4326 mount_unlock(mp);
4327
4328 /* iterate over all the vnodes */
4329 ret = callout(mp, arg);
4330
4331 mount_iterdrop(mp);
4332
4333 switch (ret) {
4334 case VFS_RETURNED:
4335 case VFS_RETURNED_DONE:
4336 if (ret == VFS_RETURNED_DONE) {
4337 ret = 0;
4338 goto out;
4339 }
4340 break;
4341
4342 case VFS_CLAIMED_DONE:
4343 ret = 0;
4344 goto out;
4345 case VFS_CLAIMED:
4346 default:
4347 break;
4348 }
4349 ret = 0;
4350 }
4351
4352 out:
4353 kfree(allocmem, (count * sizeof(fsid_t)));
4354 return (ret);
4355 }
4356
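/*
 * Illustrative sketch of a minimal vfs_iterate() callback.  Returning
 * VFS_RETURNED continues the iteration; VFS_RETURNED_DONE or
 * VFS_CLAIMED_DONE stops it, as handled by the switch above.
 *
 *	static int
 *	count_mounts_callback(mount_t mp, void *arg)
 *	{
 *		int *countp = (int *)arg;
 *
 *		(*countp)++;
 *		return (VFS_RETURNED);
 *	}
 *
 *	int nmounts = 0;
 *	vfs_iterate(0, count_mounts_callback, &nmounts);
 */
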
4357 /*
4358 * Update the vfsstatfs structure in the mountpoint.
4359 * MAC: Parameter eventtype added, indicating whether the event that
4360 * triggered this update came from user space, via a system call
4361 * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
4362 */
4363 int
4364 vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
4365 {
4366 struct vfs_attr va;
4367 int error;
4368
4369 /*
4370 * Request the attributes we want to propagate into
4371 * the per-mount vfsstat structure.
4372 */
4373 VFSATTR_INIT(&va);
4374 VFSATTR_WANTED(&va, f_iosize);
4375 VFSATTR_WANTED(&va, f_blocks);
4376 VFSATTR_WANTED(&va, f_bfree);
4377 VFSATTR_WANTED(&va, f_bavail);
4378 VFSATTR_WANTED(&va, f_bused);
4379 VFSATTR_WANTED(&va, f_files);
4380 VFSATTR_WANTED(&va, f_ffree);
4381 VFSATTR_WANTED(&va, f_bsize);
4382 VFSATTR_WANTED(&va, f_fssubtype);
4383 #if CONFIG_MACF
4384 if (eventtype == VFS_USER_EVENT) {
4385 error = mac_mount_check_getattr(ctx, mp, &va);
4386 if (error != 0)
4387 return (error);
4388 }
4389 #endif
4390
4391 if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
4392 KAUTH_DEBUG("STAT - filesystem returned error %d", error);
4393 return(error);
4394 }
4395
4396 /*
4397 * Unpack into the per-mount structure.
4398 *
4399 * We only overwrite these fields, which are likely to change:
4400 * f_blocks
4401 * f_bfree
4402 * f_bavail
4403 * f_bused
4404 * f_files
4405 * f_ffree
4406 *
4407 * And these which are not, but which the FS has no other way
4408 * of providing to us:
4409 * f_bsize
4410 * f_iosize
4411 * f_fssubtype
4412 *
4413 */
4414 if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
4415 /* 4822056 - protect against malformed server mount */
4416 mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
4417 } else {
4418 mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */
4419 }
4420 if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
4421 mp->mnt_vfsstat.f_iosize = va.f_iosize;
4422 } else {
4423 mp->mnt_vfsstat.f_iosize = 1024 * 1024; /* 1MB sensible I/O size */
4424 }
4425 if (VFSATTR_IS_SUPPORTED(&va, f_blocks))
4426 mp->mnt_vfsstat.f_blocks = va.f_blocks;
4427 if (VFSATTR_IS_SUPPORTED(&va, f_bfree))
4428 mp->mnt_vfsstat.f_bfree = va.f_bfree;
4429 if (VFSATTR_IS_SUPPORTED(&va, f_bavail))
4430 mp->mnt_vfsstat.f_bavail = va.f_bavail;
4431 if (VFSATTR_IS_SUPPORTED(&va, f_bused))
4432 mp->mnt_vfsstat.f_bused = va.f_bused;
4433 if (VFSATTR_IS_SUPPORTED(&va, f_files))
4434 mp->mnt_vfsstat.f_files = va.f_files;
4435 if (VFSATTR_IS_SUPPORTED(&va, f_ffree))
4436 mp->mnt_vfsstat.f_ffree = va.f_ffree;
4437
4438 /* this is unlikely to change, but has to be queried for */
4439 if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype))
4440 mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;
4441
4442 return(0);
4443 }
4444
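/*
 * Illustrative sketch: the VFSATTR_IS_SUPPORTED() checks above depend on
 * the filesystem's getattr marking each attribute it actually fills in,
 * typically via VFSATTR_RETURN().  A hypothetical myfs_vfs_getattr:
 *
 *	static int
 *	myfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx)
 *	{
 *		struct myfsmount *mmp = vfs_fsprivate(mp);
 *
 *		VFSATTR_RETURN(fsap, f_bsize, mmp->block_size);
 *		VFSATTR_RETURN(fsap, f_blocks, mmp->total_blocks);
 *		VFSATTR_RETURN(fsap, f_bfree, mmp->free_blocks);
 *		// attributes not returned remain unsupported and keep
 *		// their previous values in mnt_vfsstat
 *		return (0);
 *	}
 */
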
4445 int
4446 mount_list_add(mount_t mp)
4447 {
4448 int res;
4449
4450 mount_list_lock();
4451 if (system_inshutdown != 0) {
4452 res = -1;
4453 } else {
4454 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
4455 nummounts++;
4456 res = 0;
4457 }
4458 mount_list_unlock();
4459
4460 return res;
4461 }
4462
4463 void
4464 mount_list_remove(mount_t mp)
4465 {
4466 mount_list_lock();
4467 TAILQ_REMOVE(&mountlist, mp, mnt_list);
4468 nummounts--;
4469 mp->mnt_list.tqe_next = NULL;
4470 mp->mnt_list.tqe_prev = NULL;
4471 mount_list_unlock();
4472 }
4473
4474 mount_t
4475 mount_lookupby_volfsid(int volfs_id, int withref)
4476 {
4477 mount_t cur_mount = (mount_t)0;
4478 mount_t mp;
4479
4480 mount_list_lock();
4481 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4482 if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
4483 (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
4484 (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
4485 cur_mount = mp;
4486 if (withref) {
4487 if (mount_iterref(cur_mount, 1)) {
4488 cur_mount = (mount_t)0;
4489 mount_list_unlock();
4490 goto out;
4491 }
4492 }
4493 break;
4494 }
4495 }
4496 mount_list_unlock();
4497 if (withref && (cur_mount != (mount_t)0)) {
4498 mp = cur_mount;
4499 if (vfs_busy(mp, LK_NOWAIT) != 0) {
4500 cur_mount = (mount_t)0;
4501 }
4502 mount_iterdrop(mp);
4503 }
4504 out:
4505 return(cur_mount);
4506 }
4507
4508 mount_t
4509 mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
4510 {
4511 mount_t retmp = (mount_t)0;
4512 mount_t mp;
4513
4514 if (!locked)
4515 mount_list_lock();
4516 TAILQ_FOREACH(mp, &mountlist, mnt_list)
4517 if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
4518 mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
4519 retmp = mp;
4520 if (withref) {
4521 if (mount_iterref(retmp, 1))
4522 retmp = (mount_t)0;
4523 }
4524 goto out;
4525 }
4526 out:
4527 if (!locked)
4528 mount_list_unlock();
4529 return (retmp);
4530 }
4531
4532 errno_t
4533 vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
4534 {
4535 struct nameidata nd;
4536 int error;
4537 u_int32_t ndflags = 0;
4538
4539 if (ctx == NULL) { /* XXX technically an error */
4540 ctx = vfs_context_current();
4541 }
4542
4543 if (flags & VNODE_LOOKUP_NOFOLLOW)
4544 ndflags = NOFOLLOW;
4545 else
4546 ndflags = FOLLOW;
4547
4548 if (flags & VNODE_LOOKUP_NOCROSSMOUNT)
4549 ndflags |= NOCROSSMOUNT;
4550 if (flags & VNODE_LOOKUP_DOWHITEOUT)
4551 ndflags |= DOWHITEOUT;
4552
4553 /* XXX AUDITVNPATH1 needed ? */
4554 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
4555
4556 if ((error = namei(&nd)))
4557 return (error);
4558 *vpp = nd.ni_vp;
4559 nameidone(&nd);
4560
4561 return (0);
4562 }
4563
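/*
 * Illustrative sketch: vnode_lookup() returns the vnode with an iocount
 * held, which the caller must drop with vnode_put() when done.
 *
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	error = vnode_lookup("/etc/hosts", VNODE_LOOKUP_NOFOLLOW, &vp,
 *	    vfs_context_current());
 *	if (error == 0) {
 *		// ... inspect vp ...
 *		vnode_put(vp);
 *	}
 */
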
4564 errno_t
4565 vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
4566 {
4567 struct nameidata nd;
4568 int error;
4569 u_int32_t ndflags = 0;
4570 int lflags = flags;
4571
4572 if (ctx == NULL) { /* XXX technically an error */
4573 ctx = vfs_context_current();
4574 }
4575
4576 if (fmode & O_NOFOLLOW)
4577 lflags |= VNODE_LOOKUP_NOFOLLOW;
4578
4579 if (lflags & VNODE_LOOKUP_NOFOLLOW)
4580 ndflags = NOFOLLOW;
4581 else
4582 ndflags = FOLLOW;
4583
4584 if (lflags & VNODE_LOOKUP_NOCROSSMOUNT)
4585 ndflags |= NOCROSSMOUNT;
4586 if (lflags & VNODE_LOOKUP_DOWHITEOUT)
4587 ndflags |= DOWHITEOUT;
4588
4589 /* XXX AUDITVNPATH1 needed ? */
4590 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
4591
4592 if ((error = vn_open(&nd, fmode, cmode)))
4593 *vpp = NULL;
4594 else
4595 *vpp = nd.ni_vp;
4596
4597 return (error);
4598 }
4599
4600 errno_t
4601 vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
4602 {
4603 int error;
4604
4605 if (ctx == NULL) {
4606 ctx = vfs_context_current();
4607 }
4608
4609 error = vn_close(vp, flags, ctx);
4610 vnode_put(vp);
4611 return (error);
4612 }
4613
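/*
 * Illustrative sketch of the vnode_open()/vnode_close() pairing; the
 * path and mode here are hypothetical.  vnode_open() returns the vnode
 * opened and with an iocount held; vnode_close() both closes it and
 * drops the iocount (note the vnode_put() above).
 *
 *	vnode_t vp = NULLVP;
 *	vfs_context_t ctx = vfs_context_current();
 *	int error;
 *
 *	error = vnode_open("/var/log/mylog", O_CREAT | FWRITE, 0644, 0, &vp, ctx);
 *	if (error == 0) {
 *		// ... vn_rdwr() against vp ...
 *		error = vnode_close(vp, FWRITE, ctx);
 *	}
 */
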
4614 /*
4615 * Returns: 0 Success
4616 * vnode_getattr:???
4617 */
4618 errno_t
4619 vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
4620 {
4621 struct vnode_attr va;
4622 int error;
4623
4624 VATTR_INIT(&va);
4625 VATTR_WANTED(&va, va_data_size);
4626 error = vnode_getattr(vp, &va, ctx);
4627 if (!error)
4628 *sizep = va.va_data_size;
4629 return(error);
4630 }
4631
4632 errno_t
4633 vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
4634 {
4635 struct vnode_attr va;
4636
4637 VATTR_INIT(&va);
4638 VATTR_SET(&va, va_data_size, size);
4639 va.va_vaflags = ioflag & 0xffff;
4640 return(vnode_setattr(vp, &va, ctx));
4641 }
4642
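/*
 * Illustrative sketch: the two wrappers above are getattr/setattr
 * conveniences, e.g. truncating a file to zero length only when it
 * currently has data:
 *
 *	off_t size;
 *	int error;
 *
 *	error = vnode_size(vp, &size, ctx);
 *	if (error == 0 && size != 0)
 *		error = vnode_setsize(vp, 0, IO_NOZEROFILL, ctx);
 */
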
4643 /*
4644 * Create a filesystem object of arbitrary type with arbitrary attributes in
4645 * the specified directory with the specified name.
4646 *
4647 * Parameters: dvp Pointer to the vnode of the directory
4648 * in which to create the object.
4649 * vpp Pointer to the area into which to
4650 * return the vnode of the created object.
4651 * cnp Component name pointer from the namei
4652 * data structure, containing the name to
4653 * use for the create object.
4654 * vap Pointer to the vnode_attr structure
4655 * describing the object to be created,
4656 * including the type of object.
4657 * flags VN_* flags controlling ACL inheritance
4658 * and whether or not authorization is to
4659 * be required for the operation.
4660 *
4661 * Returns: 0 Success
4662 * !0 errno value
4663 *
4664 * Implicit: *vpp Contains the vnode of the object that
4665 * was created, if successful.
4666 * *cnp May be modified by the underlying VFS.
4667 * *vap May be modified by the underlying VFS;
4668 * it may be modified by either ACL
4669 * inheritance or attribute defaulting, and
4670 * it may be modified, even if the operation
4671 * is unsuccessful.
4672 *
4673 *
4674 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
4675 *
4676 * Modification of '*cnp' and '*vap' by the underlying VFS is
4677 * strongly discouraged.
4678 *
4679 * XXX: This function is a 'vn_*' function; it belongs in vfs_vnops.c
4680 *
4681 * XXX: We should enumerate the possible errno values here, and where
4682 * in the code they originated.
4683 */
4684 errno_t
4685 vn_create(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, struct vnode_attr *vap, int flags, vfs_context_t ctx)
4686 {
4687 kauth_acl_t oacl, nacl;
4688 int initial_acl;
4689 errno_t error;
4690 vnode_t vp = (vnode_t)0;
4691
4692 error = 0;
4693 oacl = nacl = NULL;
4694 initial_acl = 0;
4695
4696 KAUTH_DEBUG("%p CREATE - '%s'", dvp, cnp->cn_nameptr);
4697
4698 /*
4699 * Handle ACL inheritance.
4700 */
4701 if (!(flags & VN_CREATE_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
4702 /* save the original filesec */
4703 if (VATTR_IS_ACTIVE(vap, va_acl)) {
4704 initial_acl = 1;
4705 oacl = vap->va_acl;
4706 }
4707
4708 vap->va_acl = NULL;
4709 if ((error = kauth_acl_inherit(dvp,
4710 oacl,
4711 &nacl,
4712 vap->va_type == VDIR,
4713 ctx)) != 0) {
4714 KAUTH_DEBUG("%p CREATE - error %d processing inheritance", dvp, error);
4715 return(error);
4716 }
4717
4718 /*
4719 * If the generated ACL is NULL, then we can save ourselves some effort
4720 * by clearing the active bit.
4721 */
4722 if (nacl == NULL) {
4723 VATTR_CLEAR_ACTIVE(vap, va_acl);
4724 } else {
4725 VATTR_SET(vap, va_acl, nacl);
4726 }
4727 }
4728
4729 /*
4730 * Check and default new attributes.
4731 * This will set va_uid, va_gid, va_mode and va_create_time at least, if the caller
4732 * hasn't supplied them.
4733 */
4734 if ((error = vnode_authattr_new(dvp, vap, flags & VN_CREATE_NOAUTH, ctx)) != 0) {
4735 KAUTH_DEBUG("%p CREATE - error %d handing/defaulting attributes", dvp, error);
4736 goto out;
4737 }
4738
4739
4740 /*
4741 * Create the requested node.
4742 */
4743 switch(vap->va_type) {
4744 case VREG:
4745 error = VNOP_CREATE(dvp, vpp, cnp, vap, ctx);
4746 break;
4747 case VDIR:
4748 error = VNOP_MKDIR(dvp, vpp, cnp, vap, ctx);
4749 break;
4750 case VSOCK:
4751 case VFIFO:
4752 case VBLK:
4753 case VCHR:
4754 error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
4755 break;
4756 default:
4757 panic("vnode_create: unknown vtype %d", vap->va_type);
4758 }
4759 if (error != 0) {
4760 KAUTH_DEBUG("%p CREATE - error %d returned by filesystem", dvp, error);
4761 goto out;
4762 }
4763
4764 vp = *vpp;
4765 #if CONFIG_MACF
4766 if (!(flags & VN_CREATE_NOLABEL)) {
4767 error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
4768 if (error)
4769 goto error;
4770 }
4771 #endif
4772
4773 /*
4774 * If some of the requested attributes weren't handled by the VNOP,
4775 * use our fallback code.
4776 */
4777 if (!VATTR_ALL_SUPPORTED(vap) && *vpp) {
4778 KAUTH_DEBUG(" CREATE - doing fallback with ACL %p", vap->va_acl);
4779 error = vnode_setattr_fallback(*vpp, vap, ctx);
4780 }
4781 #if CONFIG_MACF
4782 error:
4783 #endif
4784 if ((error != 0 ) && (vp != (vnode_t)0)) {
4785 *vpp = (vnode_t) 0;
4786 vnode_put(vp);
4787 }
4788
4789 out:
4790 /*
4791 * If the caller supplied a filesec in vap, it has been replaced
4792 * now by the post-inheritance copy. We need to put the original back
4793 * and free the inherited product.
4794 */
4795 if (initial_acl) {
4796 VATTR_SET(vap, va_acl, oacl);
4797 } else {
4798 VATTR_CLEAR_ACTIVE(vap, va_acl);
4799 }
4800 if (nacl != NULL)
4801 kauth_acl_free(nacl);
4802
4803 return(error);
4804 }
4805
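/*
 * Illustrative sketch: a typical in-kernel caller of vn_create() builds
 * a vnode_attr and reuses the componentname from a preceding namei();
 * dvp, nd and ctx are assumed caller state.
 *
 *	struct vnode_attr va;
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	VATTR_INIT(&va);
 *	VATTR_SET(&va, va_type, VREG);
 *	VATTR_SET(&va, va_mode, S_IRUSR | S_IWUSR);
 *
 *	error = vn_create(dvp, &vp, &nd.ni_cnd, &va, 0, ctx);
 */
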
4806 static kauth_scope_t vnode_scope;
4807 static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
4808 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
4809 static int vnode_authorize_callback_int(__unused kauth_cred_t credential, __unused void *idata, kauth_action_t action,
4810 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
4811
4812 typedef struct _vnode_authorize_context {
4813 vnode_t vp;
4814 struct vnode_attr *vap;
4815 vnode_t dvp;
4816 struct vnode_attr *dvap;
4817 vfs_context_t ctx;
4818 int flags;
4819 int flags_valid;
4820 #define _VAC_IS_OWNER (1<<0)
4821 #define _VAC_IN_GROUP (1<<1)
4822 #define _VAC_IS_DIR_OWNER (1<<2)
4823 #define _VAC_IN_DIR_GROUP (1<<3)
4824 } *vauth_ctx;
4825
4826 void
4827 vnode_authorize_init(void)
4828 {
4829 vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
4830 }
4831
4832 /*
4833 * Authorize an operation on a vnode.
4834 *
4835 * This is KPI, but here because it needs vnode_scope.
4836 *
4837 * Returns: 0 Success
4838 * kauth_authorize_action:EPERM ...
4839 * xlate => EACCES Permission denied
4840 * kauth_authorize_action:0 Success
4841 * kauth_authorize_action: Depends on callback return; this is
4842 * usually only vnode_authorize_callback(),
4843 * but may include other listeners, if any
4844 * exist.
4845 * EROFS
4846 * EACCES
4847 * EPERM
4848 * ???
4849 */
4850 int
4851 vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
4852 {
4853 int error, result;
4854
4855 /*
4856 * We can't authorize against a dead vnode; allow all operations through so that
4857 * the correct error can be returned.
4858 */
4859 if (vp->v_type == VBAD)
4860 return(0);
4861
4862 error = 0;
4863 result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
4864 (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
4865 if (result == EPERM) /* traditional behaviour */
4866 result = EACCES;
4867 /* did the lower layers give a better error return? */
4868 if ((result != 0) && (error != 0))
4869 return(error);
4870 return(result);
4871 }
4872
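/*
 * Illustrative sketch: checking whether the caller may write a file
 * before performing the operation.  The deny case is mapped to EACCES
 * above unless a listener supplied a more specific error.
 *
 *	int error;
 *
 *	error = vnode_authorize(vp, NULL, KAUTH_VNODE_WRITE_DATA,
 *	    vfs_context_current());
 *	if (error)
 *		return (error);	// EACCES, EROFS, EPERM, ...
 */
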
4873 /*
4874 * Test for vnode immutability.
4875 *
4876 * The 'append' flag is set when the authorization request is constrained
4877 * to operations which only request the right to append to a file.
4878 *
4879 * The 'ignore' flag is set when an operation modifying the immutability flags
4880 * is being authorized. We check the system securelevel to determine which
4881 * immutability flags we can ignore.
4882 */
4883 static int
4884 vnode_immutable(struct vnode_attr *vap, int append, int ignore)
4885 {
4886 int mask;
4887
4888 /* start with all bits precluding the operation */
4889 mask = IMMUTABLE | APPEND;
4890
4891 /* if appending only, remove the append-only bits */
4892 if (append)
4893 mask &= ~APPEND;
4894
4895 /* ignore only set when authorizing flags changes */
4896 if (ignore) {
4897 if (securelevel <= 0) {
4898 /* in insecure state, flags do not inhibit changes */
4899 mask = 0;
4900 } else {
4901 /* in secure state, user flags don't inhibit */
4902 mask &= ~(UF_IMMUTABLE | UF_APPEND);
4903 }
4904 }
4905 KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
4906 if ((vap->va_flags & mask) != 0)
4907 return(EPERM);
4908 return(0);
4909 }
4910
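/*
 * Worked example of the masking above: for a file with UF_APPEND set,
 * an append-only request (append = 1, ignore = 0) computes
 * mask = (IMMUTABLE | APPEND) & ~APPEND = IMMUTABLE, so
 * (va_flags & mask) == 0 and the operation is permitted; any
 * non-append write keeps the APPEND bits in the mask and returns EPERM.
 */
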
4911 static int
4912 vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
4913 {
4914 int result;
4915
4916 /* default assumption is not-owner */
4917 result = 0;
4918
4919 /*
4920 * If the filesystem has given us a UID, we treat this as authoritative.
4921 */
4922 if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
4923 result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
4924 }
4925 /* we could test the owner UUID here if we had a policy for it */
4926
4927 return(result);
4928 }
4929
4930 static int
4931 vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember)
4932 {
4933 int error;
4934 int result;
4935
4936 error = 0;
4937 result = 0;
4938
4939 /* the caller is expected to have asked the filesystem for a group at some point */
4940 if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
4941 error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
4942 }
4943 /* we could test the group UUID here if we had a policy for it */
4944
4945 if (!error)
4946 *ismember = result;
4947 return(error);
4948 }
4949
4950 static int
4951 vauth_file_owner(vauth_ctx vcp)
4952 {
4953 int result;
4954
4955 if (vcp->flags_valid & _VAC_IS_OWNER) {
4956 result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
4957 } else {
4958 result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);
4959
4960 /* cache our result */
4961 vcp->flags_valid |= _VAC_IS_OWNER;
4962 if (result) {
4963 vcp->flags |= _VAC_IS_OWNER;
4964 } else {
4965 vcp->flags &= ~_VAC_IS_OWNER;
4966 }
4967 }
4968 return(result);
4969 }
4970
4971 static int
4972 vauth_file_ingroup(vauth_ctx vcp, int *ismember)
4973 {
4974 int error;
4975
4976 if (vcp->flags_valid & _VAC_IN_GROUP) {
4977 *ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
4978 error = 0;
4979 } else {
4980 error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember);
4981
4982 if (!error) {
4983 /* cache our result */
4984 vcp->flags_valid |= _VAC_IN_GROUP;
4985 if (*ismember) {
4986 vcp->flags |= _VAC_IN_GROUP;
4987 } else {
4988 vcp->flags &= ~_VAC_IN_GROUP;
4989 }
4990 }
4991
4992 }
4993 return(error);
4994 }
4995
4996 static int
4997 vauth_dir_owner(vauth_ctx vcp)
4998 {
4999 int result;
5000
5001 if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
5002 result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
5003 } else {
5004 result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);
5005
5006 /* cache our result */
5007 vcp->flags_valid |= _VAC_IS_DIR_OWNER;
5008 if (result) {
5009 vcp->flags |= _VAC_IS_DIR_OWNER;
5010 } else {
5011 vcp->flags &= ~_VAC_IS_DIR_OWNER;
5012 }
5013 }
5014 return(result);
5015 }
5016
5017 static int
5018 vauth_dir_ingroup(vauth_ctx vcp, int *ismember)
5019 {
5020 int error;
5021
5022 if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
5023 *ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
5024 error = 0;
5025 } else {
5026 error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember);
5027
5028 if (!error) {
5029 /* cache our result */
5030 vcp->flags_valid |= _VAC_IN_DIR_GROUP;
5031 if (*ismember) {
5032 vcp->flags |= _VAC_IN_DIR_GROUP;
5033 } else {
5034 vcp->flags &= ~_VAC_IN_DIR_GROUP;
5035 }
5036 }
5037 }
5038 return(error);
5039 }
5040
5041 /*
5042 * Test the posix permissions in (vap) to determine whether (credential)
5043 * may perform (action)
5044 */
5045 static int
5046 vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
5047 {
5048 struct vnode_attr *vap;
5049 int needed, error, owner_ok, group_ok, world_ok, ismember;
5050 #ifdef KAUTH_DEBUG_ENABLE
5051 const char *where = "uninitialized";
5052 # define _SETWHERE(c) where = c;
5053 #else
5054 # define _SETWHERE(c)
5055 #endif
5056
5057 /* checking file or directory? */
5058 if (on_dir) {
5059 vap = vcp->dvap;
5060 } else {
5061 vap = vcp->vap;
5062 }
5063
5064 error = 0;
5065
5066 /*
5067 * We want to do as little work here as possible. So first we check
5068 * which sets of permissions grant us the access we need, and avoid checking
5069 * whether specific permissions grant access when more generic ones would.
5070 */
5071
5072 /* owner permissions */
5073 needed = 0;
5074 if (action & VREAD)
5075 needed |= S_IRUSR;
5076 if (action & VWRITE)
5077 needed |= S_IWUSR;
5078 if (action & VEXEC)
5079 needed |= S_IXUSR;
5080 owner_ok = (needed & vap->va_mode) == needed;
5081
5082 /* group permissions */
5083 needed = 0;
5084 if (action & VREAD)
5085 needed |= S_IRGRP;
5086 if (action & VWRITE)
5087 needed |= S_IWGRP;
5088 if (action & VEXEC)
5089 needed |= S_IXGRP;
5090 group_ok = (needed & vap->va_mode) == needed;
5091
5092 /* world permissions */
5093 needed = 0;
5094 if (action & VREAD)
5095 needed |= S_IROTH;
5096 if (action & VWRITE)
5097 needed |= S_IWOTH;
5098 if (action & VEXEC)
5099 needed |= S_IXOTH;
5100 world_ok = (needed & vap->va_mode) == needed;
5101
5102 /* If granted/denied by all three, we're done */
5103 if (owner_ok && group_ok && world_ok) {
5104 _SETWHERE("all");
5105 goto out;
5106 }
5107 if (!owner_ok && !group_ok && !world_ok) {
5108 _SETWHERE("all");
5109 error = EACCES;
5110 goto out;
5111 }
5112
5113 /* Check ownership (relatively cheap) */
5114 if ((on_dir && vauth_dir_owner(vcp)) ||
5115 (!on_dir && vauth_file_owner(vcp))) {
5116 _SETWHERE("user");
5117 if (!owner_ok)
5118 error = EACCES;
5119 goto out;
5120 }
5121
5122 /* Not owner; if group and world both grant it we're done */
5123 if (group_ok && world_ok) {
5124 _SETWHERE("group/world");
5125 goto out;
5126 }
5127 if (!group_ok && !world_ok) {
5128 _SETWHERE("group/world");
5129 error = EACCES;
5130 goto out;
5131 }
5132
5133 /* Check group membership (most expensive) */
5134 ismember = 0;
5135 if (on_dir) {
5136 error = vauth_dir_ingroup(vcp, &ismember);
5137 } else {
5138 error = vauth_file_ingroup(vcp, &ismember);
5139 }
5140 if (error)
5141 goto out;
5142 if (ismember) {
5143 _SETWHERE("group");
5144 if (!group_ok)
5145 error = EACCES;
5146 goto out;
5147 }
5148
5149 /* Not owner, not in group, use world result */
5150 _SETWHERE("world");
5151 if (!world_ok)
5152 error = EACCES;
5153
5154 /* FALLTHROUGH */
5155
5156 out:
5157 KAUTH_DEBUG("%p %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
5158 vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
5159 (action & VREAD) ? "r" : "-",
5160 (action & VWRITE) ? "w" : "-",
5161 (action & VEXEC) ? "x" : "-",
5162 needed,
5163 (vap->va_mode & S_IRUSR) ? "r" : "-",
5164 (vap->va_mode & S_IWUSR) ? "w" : "-",
5165 (vap->va_mode & S_IXUSR) ? "x" : "-",
5166 (vap->va_mode & S_IRGRP) ? "r" : "-",
5167 (vap->va_mode & S_IWGRP) ? "w" : "-",
5168 (vap->va_mode & S_IXGRP) ? "x" : "-",
5169 (vap->va_mode & S_IROTH) ? "r" : "-",
5170 (vap->va_mode & S_IWOTH) ? "w" : "-",
5171 (vap->va_mode & S_IXOTH) ? "x" : "-",
5172 kauth_cred_getuid(vcp->ctx->vc_ucred),
5173 on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
5174 on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
5175 return(error);
5176 }
5177
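/*
 * Worked example of the short-circuiting above: with va_mode = 0754
 * (rwxr-xr--) and action = VREAD|VEXEC, owner_ok and group_ok are true
 * but world_ok is false, so neither the "all grant" nor the "all deny"
 * fast path applies; ownership is tested next (cheap), and the
 * expensive group-membership lookup only happens for a caller who is
 * not the owner.
 */
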
5178 /*
5179 * Authorize the deletion of the node vp from the directory dvp.
5180 *
5181 * We assume that:
5182 * - Neither the node nor the directory are immutable.
5183 * - The user is not the superuser.
5184 *
5185 * Deletion is not permitted if the directory is sticky and the caller is
5186 * not owner of the node or directory.
5187 *
5188 * If either the node grants DELETE, or the directory grants DELETE_CHILD,
5189 * the node may be deleted. If neither denies the permission, and the
5190 * caller has Posix write access to the directory, then the node may be
5191 * deleted.
5192 *
5193 * As an optimization, we cache whether or not delete child is permitted
5194 * on directories without the sticky bit set.
5195 */
5196 int
5197 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child);
5198 /*static*/ int
5199 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
5200 {
5201 struct vnode_attr *vap = vcp->vap;
5202 struct vnode_attr *dvap = vcp->dvap;
5203 kauth_cred_t cred = vcp->ctx->vc_ucred;
5204 struct kauth_acl_eval eval;
5205 int error, delete_denied, delete_child_denied, ismember;
5206
5207 /* check the ACL on the directory */
5208 delete_child_denied = 0;
5209 if (!cached_delete_child && VATTR_IS_NOT(dvap, va_acl, NULL)) {
5210 eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
5211 eval.ae_acl = &dvap->va_acl->acl_ace[0];
5212 eval.ae_count = dvap->va_acl->acl_entrycount;
5213 eval.ae_options = 0;
5214 if (vauth_dir_owner(vcp))
5215 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5216 if ((error = vauth_dir_ingroup(vcp, &ismember)) != 0)
5217 return(error);
5218 if (ismember)
5219 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5220 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5221 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5222 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5223 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5224
5225 error = kauth_acl_evaluate(cred, &eval);
5226
5227 if (error != 0) {
5228 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5229 return(error);
5230 }
5231 if (eval.ae_result == KAUTH_RESULT_DENY)
5232 delete_child_denied = 1;
5233 if (eval.ae_result == KAUTH_RESULT_ALLOW) {
5234 KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp);
5235 return(0);
5236 }
5237 }
5238
5239 /* check the ACL on the node */
5240 delete_denied = 0;
5241 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
5242 eval.ae_requested = KAUTH_VNODE_DELETE;
5243 eval.ae_acl = &vap->va_acl->acl_ace[0];
5244 eval.ae_count = vap->va_acl->acl_entrycount;
5245 eval.ae_options = 0;
5246 if (vauth_file_owner(vcp))
5247 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5248 if ((error = vauth_file_ingroup(vcp, &ismember)) != 0)
5249 return(error);
5250 if (ismember)
5251 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5252 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5253 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5254 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5255 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5256
5257 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
5258 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5259 return(error);
5260 }
5261 if (eval.ae_result == KAUTH_RESULT_DENY)
5262 delete_denied = 1;
5263 if (eval.ae_result == KAUTH_RESULT_ALLOW) {
5264 KAUTH_DEBUG("%p ALLOWED - granted by file ACL", vcp->vp);
5265 return(0);
5266 }
5267 }
5268
5269 /* if denied by ACL on directory or node, return denial */
5270 if (delete_denied || delete_child_denied) {
5271 KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp);
5272 return(EACCES);
5273 }
5274
5275 /*
5276 * enforce sticky bit behaviour; the cached_delete_child property will
5277 * be false and the dvap contents valid for sticky bit directories;
5278 * this makes us check the directory each time, but it's unavoidable,
5279 * as sticky bit is an exception to caching.
5280 */
5281 if (!cached_delete_child && (dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
5282 KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)",
5283 vcp->vp, cred->cr_uid, vap->va_uid, dvap->va_uid);
5284 return(EACCES);
5285 }
5286
5287 /* check the directory */
5288 if (!cached_delete_child && (error = vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */)) != 0) {
5289 KAUTH_DEBUG("%p DENIED - denied by posix permissions", vcp->vp);
5290 return(error);
5291 }
5292
5293 /* not denied, must be OK */
5294 return(0);
5295 }
5296
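/*
 * Illustrative scenario for the rules above: deleting someone else's
 * file from a mode 1777 directory (e.g. /tmp).  With no ACLs present,
 * neither DELETE nor DELETE_CHILD is granted or denied; the directory
 * is sticky and the caller owns neither the file nor the directory, so
 * the sticky bit check returns EACCES even though the caller has posix
 * write access to the directory.
 */
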
5297
5298 /*
5299 * Authorize an operation based on the node's attributes.
5300 */
5301 static int
5302 vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
5303 {
5304 struct vnode_attr *vap = vcp->vap;
5305 kauth_cred_t cred = vcp->ctx->vc_ucred;
5306 struct kauth_acl_eval eval;
5307 int error, ismember;
5308 mode_t posix_action;
5309
5310 /*
5311 * If we are the file owner, we automatically have some rights.
5312 *
5313 * Do we need to expand this to support group ownership?
5314 */
5315 if (vauth_file_owner(vcp))
5316 acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);
5317
5318 /*
5319 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
5320 * mask the latter. If TAKE_OWNERSHIP is requested the caller is about to
5321 * change ownership to themselves, and WRITE_SECURITY is implicitly
5322 * granted to the owner. We need to do this because at this point
5323 * WRITE_SECURITY may not be granted as the caller is not currently
5324 * the owner.
5325 */
5326 if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
5327 (acl_rights & KAUTH_VNODE_WRITE_SECURITY))
5328 acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;
5329
5330 if (acl_rights == 0) {
5331 KAUTH_DEBUG("%p ALLOWED - implicit or no rights required", vcp->vp);
5332 return(0);
5333 }
5334
5335 /* if we have an ACL, evaluate it */
5336 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
5337 eval.ae_requested = acl_rights;
5338 eval.ae_acl = &vap->va_acl->acl_ace[0];
5339 eval.ae_count = vap->va_acl->acl_entrycount;
5340 eval.ae_options = 0;
5341 if (vauth_file_owner(vcp))
5342 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5343 if ((error = vauth_file_ingroup(vcp, &ismember)) != 0)
5344 return(error);
5345 if (ismember)
5346 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5347 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5348 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5349 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5350 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5351
5352 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
5353 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5354 return(error);
5355 }
5356
5357 if (eval.ae_result == KAUTH_RESULT_DENY) {
5358 KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp);
5359 return(EACCES); /* deny, deny, counter-allege */
5360 }
5361 if (eval.ae_result == KAUTH_RESULT_ALLOW) {
5362 KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp);
5363 return(0);
5364 }
5365 *found_deny = eval.ae_found_deny;
5366
5367 /* fall through and evaluate residual rights */
5368 } else {
5369 /* no ACL, everything is residual */
5370 eval.ae_residual = acl_rights;
5371 }
5372
5373 /*
5374 * Grant residual rights that have been pre-authorized.
5375 */
5376 eval.ae_residual &= ~preauth_rights;
5377
5378 /*
5379 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
5380 */
5381 if (vauth_file_owner(vcp))
5382 eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;
5383
5384 if (eval.ae_residual == 0) {
5385 KAUTH_DEBUG("%p ALLOWED - rights already authorized", vcp->vp);
5386 return(0);
5387 }
5388
5389 /*
5390 * Bail if we have residual rights that can't be granted by posix permissions,
5391 * or aren't presumed granted at this point.
5392 *
5393 * XXX these can be collapsed for performance
5394 */
5395 if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
5396 KAUTH_DEBUG("%p DENIED - CHANGE_OWNER not permitted", vcp->vp);
5397 return(EACCES);
5398 }
5399 if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
5400 KAUTH_DEBUG("%p DENIED - WRITE_SECURITY not permitted", vcp->vp);
5401 return(EACCES);
5402 }
5403
5404 #if DIAGNOSTIC
5405 if (eval.ae_residual & KAUTH_VNODE_DELETE)
5406 panic("vnode_authorize: can't be checking delete permission here");
5407 #endif
5408
5409 /*
5410 * Compute the fallback posix permissions that will satisfy the remaining
5411 * rights.
5412 */
5413 posix_action = 0;
5414 if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
5415 KAUTH_VNODE_LIST_DIRECTORY |
5416 KAUTH_VNODE_READ_EXTATTRIBUTES))
5417 posix_action |= VREAD;
5418 if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
5419 KAUTH_VNODE_ADD_FILE |
5420 KAUTH_VNODE_ADD_SUBDIRECTORY |
5421 KAUTH_VNODE_DELETE_CHILD |
5422 KAUTH_VNODE_WRITE_ATTRIBUTES |
5423 KAUTH_VNODE_WRITE_EXTATTRIBUTES))
5424 posix_action |= VWRITE;
5425 if (eval.ae_residual & (KAUTH_VNODE_EXECUTE |
5426 KAUTH_VNODE_SEARCH))
5427 posix_action |= VEXEC;
5428
5429 if (posix_action != 0) {
5430 return(vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */));
5431 } else {
5432 KAUTH_DEBUG("%p ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
5433 vcp->vp,
5434 (eval.ae_residual & KAUTH_VNODE_READ_DATA)
5435 ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
5436 (eval.ae_residual & KAUTH_VNODE_WRITE_DATA)
5437 ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
5438 (eval.ae_residual & KAUTH_VNODE_EXECUTE)
5439 ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
5440 (eval.ae_residual & KAUTH_VNODE_DELETE)
5441 ? " DELETE" : "",
5442 (eval.ae_residual & KAUTH_VNODE_APPEND_DATA)
5443 ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
5444 (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD)
5445 ? " DELETE_CHILD" : "",
5446 (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES)
5447 ? " READ_ATTRIBUTES" : "",
5448 (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES)
5449 ? " WRITE_ATTRIBUTES" : "",
5450 (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES)
5451 ? " READ_EXTATTRIBUTES" : "",
5452 (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES)
5453 ? " WRITE_EXTATTRIBUTES" : "",
5454 (eval.ae_residual & KAUTH_VNODE_READ_SECURITY)
5455 ? " READ_SECURITY" : "",
5456 (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY)
5457 ? " WRITE_SECURITY" : "",
5458 (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE)
5459 ? " CHECKIMMUTABLE" : "",
5460 (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER)
5461 ? " CHANGE_OWNER" : "");
5462 }
5463
5464 /*
5465 * Lack of required Posix permissions implies no reason to deny access.
5466 */
5467 return(0);
5468 }
5469
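/*
 * Worked example of the residual-rights mapping above: with no ACL and
 * acl_rights = KAUTH_VNODE_READ_DATA | KAUTH_VNODE_WRITE_DATA requested
 * by a non-owner, everything is residual and nothing is pre-authorized,
 * so the rights map to posix_action = VREAD | VWRITE, which is settled
 * by vnode_authorize_posix() against the mode bits.
 */
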
5470 /*
5471 * Check for file immutability.
5472 */
5473 static int
5474 vnode_authorize_checkimmutable(vnode_t vp, struct vnode_attr *vap, int rights, int ignore)
5475 {
5476 mount_t mp;
5477 int error;
5478 int append;
5479
5480 /*
5481 * Perform immutability checks for operations that change data.
5482 *
5483 * Sockets, fifos and devices require special handling.
5484 */
5485 switch(vp->v_type) {
5486 case VSOCK:
5487 case VFIFO:
5488 case VBLK:
5489 case VCHR:
5490 /*
5491 * Writing to these nodes does not change the filesystem data,
5492 * so forget that it's being tried.
5493 */
5494 rights &= ~KAUTH_VNODE_WRITE_DATA;
5495 break;
5496 default:
5497 break;
5498 }
5499
5500 error = 0;
5501 if (rights & KAUTH_VNODE_WRITE_RIGHTS) {
5502
5503 /* check per-filesystem options if possible */
5504 mp = vp->v_mount;
5505 if (mp != NULL) {
5506
5507 /* check for no-EA filesystems */
5508 if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) &&
5509 (vfs_flags(mp) & MNT_NOUSERXATTR)) {
5510 KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vp);
5511 error = EACCES; /* User attributes disabled */
5512 goto out;
5513 }
5514 }
5515
5516 /*
5517 * check for file immutability. first, check if the requested rights are
5518 * allowable for a UF_APPEND file.
5519 */
5520 append = 0;
5521 if (vp->v_type == VDIR) {
5522 if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
5523 append = 1;
5524 } else {
5525 if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
5526 append = 1;
5527 }
5528 if ((error = vnode_immutable(vap, append, ignore)) != 0) {
5529 KAUTH_DEBUG("%p DENIED - file is immutable", vp);
5530 goto out;
5531 }
5532 }
5533 out:
5534 return(error);
5535 }
5536
5537 /*
5538 * Handle authorization actions for filesystems that advertise that the
5539 * server will be enforcing.
5540 *
5541 * Returns: 0 Authorization should be handled locally
5542 * 1 Authorization was handled by the FS
5543 *
5544 * Note: Imputed returns will only occur if the authorization request
5545 * was handled by the FS.
5546 *
5547 * Imputed: *resultp, modified Return code from FS when the request is
5548 * handled by the FS.
5549 * VNOP_ACCESS:???
5550 * VNOP_OPEN:???
5551 */
5552 static int
5553 vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
5554 {
5555 int error;
5556
5557 /*
5558 * If the vp is a device node, socket or FIFO it actually represents a local
5559 * endpoint, so we need to handle it locally.
5560 */
5561 switch(vp->v_type) {
5562 case VBLK:
5563 case VCHR:
5564 case VSOCK:
5565 case VFIFO:
5566 return(0);
5567 default:
5568 break;
5569 }
5570
5571 /*
5572 * In the advisory request case, if the filesystem doesn't think it's reliable
5573 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
5574 */
5575 if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount))
5576 return(0);
5577
5578 /*
5579 * Let the filesystem have a say in the matter. It's OK for it to not implement
5580 * VNOP_ACCESS, as most will authorise inline with the actual request.
5581 */
5582 if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
5583 *resultp = error;
5584 KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
5585 return(1);
5586 }
5587
5588 /*
5589 * Typically opaque filesystems do authorisation in-line, but exec is a special case. In
5590 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
5591 */
5592 if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
5593 /* try a VNOP_OPEN for readonly access */
5594 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
5595 *resultp = error;
5596 KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp);
5597 return(1);
5598 }
5599 VNOP_CLOSE(vp, FREAD, ctx);
5600 }
5601
5602 /*
5603 * We don't have any reason to believe that the request has to be denied at this point,
5604 * so go ahead and allow it.
5605 */
5606 *resultp = 0;
5607 KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp);
5608 return(1);
5609 }
5610
5611
5612
5613
5614 /*
5615 * Returns: KAUTH_RESULT_ALLOW
5616 * KAUTH_RESULT_DENY
5617 *
5618 * Imputed: *arg3, modified Error code in the deny case
5619 * EROFS Read-only file system
5620 * EACCES Permission denied
5621 * EPERM Operation not permitted [no execute]
5622 * vnode_getattr:ENOMEM Not enough space [only if has filesec]
5623 * vnode_getattr:???
5624 * vnode_authorize_opaque:*arg2 ???
5625 * vnode_authorize_checkimmutable:???
5626 * vnode_authorize_delete:???
5627 * vnode_authorize_simple:???
5628 */
5629
5630
5631 static int
5632 vnode_authorize_callback(kauth_cred_t cred, void *idata, kauth_action_t action,
5633 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
5634 {
5635 vfs_context_t ctx;
5636 vnode_t cvp = NULLVP;
5637 vnode_t vp, dvp;
5638 int result = KAUTH_RESULT_DENY;
5639 int parent_iocount = 0;
5640 int parent_action; /* In case we need to use namedstream's data fork for cached rights*/
5641
5642 ctx = (vfs_context_t)arg0;
5643 vp = (vnode_t)arg1;
5644 dvp = (vnode_t)arg2;
5645
5646 /*
5647 * if there are 2 vnodes passed in, we don't know at
5648 * this point which rights to look at based on the
5649 * combined action being passed in... defer until later...
5650 * otherwise check the kauth 'rights' cache hung
5651 * off of the vnode we're interested in... if we've already
5652 * been granted the right we're currently interested in,
5653 * we can just return success... otherwise we'll go through
5654 * the process of authorizing the requested right(s)... if that
5655 * succeeds, we'll add the right(s) to the cache.
5656 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
5657 */
5658 if (dvp && vp)
5659 goto defer;
5660 if (dvp) {
5661 cvp = dvp;
5662 } else {
5663 /*
5664 * For named streams on local-authorization volumes, rights are cached on the parent;
5665 * authorization is determined by looking at the parent's properties anyway, so storing
5666 * on the parent means that we don't recompute for the named stream and that if
5667 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
5668 * stream to flush its cache separately. If we miss in the cache, then we authorize
5669 * as if there were no cached rights (passing the named stream vnode and desired rights to
5670 * vnode_authorize_callback_int()).
5671 *
5672 * On an opaquely authorized volume, we don't know the relationship between the
5673 * data fork's properties and the rights granted on a stream. Thus, named stream vnodes
5674 * on such a volume are authorized directly (rather than using the parent) and have their
5675 * own caches. When a named stream vnode is created, we mark the parent as having a named
5676 * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
5677 * find the stream and flush its cache.
5678 */
5679 if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
5680 cvp = vp->v_parent;
5681 if ((cvp != NULLVP) && (vnode_getwithref(cvp) == 0)) {
5682 parent_iocount = 1;
5683 } else {
5684 cvp = NULL;
5685 goto defer; /* If we can't use the parent, take the slow path */
5686 }
5687
5688 /* Have to translate some actions */
5689 parent_action = action;
5690 if (parent_action & KAUTH_VNODE_READ_DATA) {
5691 parent_action &= ~KAUTH_VNODE_READ_DATA;
5692 parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
5693 }
5694 if (parent_action & KAUTH_VNODE_WRITE_DATA) {
5695 parent_action &= ~KAUTH_VNODE_WRITE_DATA;
5696 parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
5697 }
5698
5699 } else {
5700 cvp = vp;
5701 }
5702 }
5703
5704 if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
5705 result = KAUTH_RESULT_ALLOW;
5706 goto out;
5707 }
5708 defer:
5709 result = vnode_authorize_callback_int(cred, idata, action, arg0, arg1, arg2, arg3);
5710
5711 if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP)
5712 vnode_cache_authorized_action(cvp, ctx, action);
5713
5714 out:
5715 if (parent_iocount) {
5716 vnode_put(cvp);
5717 }
5718
5719 return result;
5720 }
5721
5722
5723 static int
5724 vnode_authorize_callback_int(__unused kauth_cred_t unused_cred, __unused void *idata, kauth_action_t action,
5725 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
5726 {
5727 struct _vnode_authorize_context auth_context;
5728 vauth_ctx vcp;
5729 vfs_context_t ctx;
5730 vnode_t vp, dvp;
5731 kauth_cred_t cred;
5732 kauth_ace_rights_t rights;
5733 struct vnode_attr va, dva;
5734 int result;
5735 int *errorp;
5736 int noimmutable;
5737 boolean_t parent_authorized_for_delete_child = FALSE;
5738 boolean_t found_deny = FALSE;
5739 boolean_t parent_ref= FALSE;
5740
5741 vcp = &auth_context;
5742 ctx = vcp->ctx = (vfs_context_t)arg0;
5743 vp = vcp->vp = (vnode_t)arg1;
5744 dvp = vcp->dvp = (vnode_t)arg2;
5745 errorp = (int *)arg3;
5746 /*
5747 * Note that we authorize against the context, not the passed cred
5748 * (the same thing anyway)
5749 */
5750 cred = ctx->vc_ucred;
5751
5752 VATTR_INIT(&va);
5753 vcp->vap = &va;
5754 VATTR_INIT(&dva);
5755 vcp->dvap = &dva;
5756
5757 vcp->flags = vcp->flags_valid = 0;
5758
5759 #if DIAGNOSTIC
5760 if ((ctx == NULL) || (vp == NULL) || (cred == NULL))
5761 panic("vnode_authorize: bad arguments (context %p vp %p cred %p)", ctx, vp, cred);
5762 #endif
5763
5764 KAUTH_DEBUG("%p AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
5765 vp, vfs_context_proc(ctx)->p_comm,
5766 (action & KAUTH_VNODE_ACCESS) ? "access" : "auth",
5767 (action & KAUTH_VNODE_READ_DATA) ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
5768 (action & KAUTH_VNODE_WRITE_DATA) ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
5769 (action & KAUTH_VNODE_EXECUTE) ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
5770 (action & KAUTH_VNODE_DELETE) ? " DELETE" : "",
5771 (action & KAUTH_VNODE_APPEND_DATA) ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
5772 (action & KAUTH_VNODE_DELETE_CHILD) ? " DELETE_CHILD" : "",
5773 (action & KAUTH_VNODE_READ_ATTRIBUTES) ? " READ_ATTRIBUTES" : "",
5774 (action & KAUTH_VNODE_WRITE_ATTRIBUTES) ? " WRITE_ATTRIBUTES" : "",
5775 (action & KAUTH_VNODE_READ_EXTATTRIBUTES) ? " READ_EXTATTRIBUTES" : "",
5776 (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES) ? " WRITE_EXTATTRIBUTES" : "",
5777 (action & KAUTH_VNODE_READ_SECURITY) ? " READ_SECURITY" : "",
5778 (action & KAUTH_VNODE_WRITE_SECURITY) ? " WRITE_SECURITY" : "",
5779 (action & KAUTH_VNODE_CHANGE_OWNER) ? " CHANGE_OWNER" : "",
5780 (action & KAUTH_VNODE_NOIMMUTABLE) ? " (noimmutable)" : "",
5781 vnode_isdir(vp) ? "directory" : "file",
5782 vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);
5783
5784 /*
5785 * Extract the control bits from the action, everything else is
5786 * requested rights.
5787 */
5788 noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
5789 rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);
5790
5791 if (rights & KAUTH_VNODE_DELETE) {
5792 #if DIAGNOSTIC
5793 if (dvp == NULL)
5794 panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
5795 #endif
5796 /*
5797 * check to see if we've already authorized the parent
5798 * directory for deletion of its children... if so, we
5799 * can skip a whole bunch of work... we will still have to
5800 * authorize that this specific child can be removed
5801 */
5802 if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE)
5803 parent_authorized_for_delete_child = TRUE;
5804 } else {
5805 dvp = NULL;
5806 }
5807
5808 /*
5809 * Check for read-only filesystems.
5810 */
5811 if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
5812 (vp->v_mount->mnt_flag & MNT_RDONLY) &&
5813 ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
5814 (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
5815 (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) {
5816 result = EROFS;
5817 goto out;
5818 }
5819
5820 /*
5821 * Check for noexec filesystems.
5822 */
5823 if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
5824 result = EACCES;
5825 goto out;
5826 }
5827
5828 /*
5829 * Handle cases related to filesystems with non-local enforcement.
5830 * This call can return 0, in which case we will fall through to perform a
5831 * check based on VNOP_GETATTR data. Otherwise it returns 1 and sets
5832 * an appropriate result, at which point we can return immediately.
5833 */
5834 if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx))
5835 goto out;
5836
5837 /*
5838 * Get vnode attributes and extended security information for the vnode
5839 * and directory if required.
5840 */
5841 VATTR_WANTED(&va, va_mode);
5842 VATTR_WANTED(&va, va_uid);
5843 VATTR_WANTED(&va, va_gid);
5844 VATTR_WANTED(&va, va_flags);
5845 VATTR_WANTED(&va, va_acl);
5846 if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
5847 KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result);
5848 goto out;
5849 }
5850 if (dvp && parent_authorized_for_delete_child == FALSE) {
5851 VATTR_WANTED(&dva, va_mode);
5852 VATTR_WANTED(&dva, va_uid);
5853 VATTR_WANTED(&dva, va_gid);
5854 VATTR_WANTED(&dva, va_flags);
5855 VATTR_WANTED(&dva, va_acl);
5856 if ((result = vnode_getattr(dvp, &dva, ctx)) != 0) {
5857 KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp, result);
5858 goto out;
5859 }
5860 }
5861
5862 /*
5863 * If the vnode is an extended attribute data vnode (e.g. a resource fork), *_DATA becomes
5864 * *_EXTATTRIBUTES.
5865 */
5866 if (vnode_isnamedstream(vp)) {
5867 if (rights & KAUTH_VNODE_READ_DATA) {
5868 rights &= ~KAUTH_VNODE_READ_DATA;
5869 rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
5870 }
5871 if (rights & KAUTH_VNODE_WRITE_DATA) {
5872 rights &= ~KAUTH_VNODE_WRITE_DATA;
5873 rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
5874 }
5875 }
5876
5877 /*
5878 * Point 'vp' to the resource fork's parent for ACL checking
5879 */
5880 if (vnode_isnamedstream(vp) &&
5881 (vp->v_parent != NULL) &&
5882 (vget_internal(vp->v_parent, 0, VNODE_NODEAD) == 0)) {
5883 parent_ref = TRUE;
5884 vcp->vp = vp = vp->v_parent;
5885 if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
5886 kauth_acl_free(va.va_acl);
5887 VATTR_INIT(&va);
5888 VATTR_WANTED(&va, va_mode);
5889 VATTR_WANTED(&va, va_uid);
5890 VATTR_WANTED(&va, va_gid);
5891 VATTR_WANTED(&va, va_flags);
5892 VATTR_WANTED(&va, va_acl);
5893 if ((result = vnode_getattr(vp, &va, ctx)) != 0)
5894 goto out;
5895 }
5896
5897 /*
5898 * Check for immutability.
5899 *
5900 * In the deletion case, parent directory immutability vetoes specific
5901 * file rights.
5902 */
5903 if ((result = vnode_authorize_checkimmutable(vp, &va, rights, noimmutable)) != 0)
5904 goto out;
5905 if ((rights & KAUTH_VNODE_DELETE) &&
5906 parent_authorized_for_delete_child == FALSE &&
5907 ((result = vnode_authorize_checkimmutable(dvp, &dva, KAUTH_VNODE_DELETE_CHILD, 0)) != 0))
5908 goto out;
5909
5910 /*
5911 * Clear rights that have been authorized by reaching this point, bail if nothing left to
5912 * check.
5913 */
5914 rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE);
5915 if (rights == 0)
5916 goto out;
5917
5918 /*
5919 * If we're not the superuser, authorize based on file properties;
5920 * note that even if parent_authorized_for_delete_child is TRUE, we
5921 * need to check on the node itself.
5922 */
5923 if (!vfs_context_issuser(ctx)) {
5924 /* process delete rights */
5925 if ((rights & KAUTH_VNODE_DELETE) &&
5926 ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0))
5927 goto out;
5928
5929 /* process remaining rights */
5930 if ((rights & ~KAUTH_VNODE_DELETE) &&
5931 (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, &found_deny)) != 0)
5932 goto out;
5933 } else {
5934
5935 /*
5936 * Execute is only granted to root if one of the x bits is set. This check only
5937 * makes sense if the posix mode bits are actually supported.
5938 */
5939 if ((rights & KAUTH_VNODE_EXECUTE) &&
5940 (vp->v_type == VREG) &&
5941 VATTR_IS_SUPPORTED(&va, va_mode) &&
5942 !(va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
5943 result = EPERM;
5944 KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vp, va.va_mode);
5945 goto out;
5946 }
5947
5948 KAUTH_DEBUG("%p ALLOWED - caller is superuser", vp);
5949 }
5950 out:
5951 if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
5952 kauth_acl_free(va.va_acl);
5953 if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL))
5954 kauth_acl_free(dva.va_acl);
5955
5956 if (result) {
5957 if (parent_ref)
5958 vnode_put(vp);
5959 *errorp = result;
5960 KAUTH_DEBUG("%p DENIED - auth denied", vp);
5961 return(KAUTH_RESULT_DENY);
5962 }
5963 if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
5964 /*
5965 * if we were successfully granted the right to search this directory
5966 * and there were NO ACL DENYs for search and the posix permissions also don't
5967 * deny execute, we can synthesize a global right that allows anyone to
5968 * traverse this directory during a pathname lookup without having to
5969 * match the credential associated with this cache of rights.
5970 */
5971 if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
5972 ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
5973 (S_IXUSR | S_IXGRP | S_IXOTH))) {
5974 vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
5975 }
5976 }
5977 if ((rights & KAUTH_VNODE_DELETE) && parent_authorized_for_delete_child == FALSE) {
5978 /*
5979 * parent was successfully and newly authorized for content deletions;
5980 * add it to the cache, but only if it doesn't have the sticky
5981 * bit set on it. This same check is done earlier guarding
5982 * fetching of dva, and if we jumped to out without having done
5983 * this, we will have returned already because of a non-zero
5984 * 'result' value.
5985 */
5986 if (VATTR_IS_SUPPORTED(&dva, va_mode) &&
5987 !(dva.va_mode & (S_ISVTX))) {
5988 /* OK to cache delete rights */
5989 vnode_cache_authorized_action(dvp, ctx, KAUTH_VNODE_DELETE_CHILD);
5990 }
5991 }
5992 if (parent_ref)
5993 vnode_put(vp);
5994 /*
5995 * Note that this implies that we will allow requests for no rights, as well as
5996 * for rights that we do not recognise. There should be none of these.
5997 */
5998 KAUTH_DEBUG("%p ALLOWED - auth granted", vp);
5999 return(KAUTH_RESULT_ALLOW);
6000 }
6001
6002 /*
6003 * Check that the attribute information in vattr can be legally applied to
6004 * a new file by the context.
6005 */
6006 int
6007 vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
6008 {
6009 int error;
6010 int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode;
6011 kauth_cred_t cred;
6012 guid_t changer;
6013 mount_t dmp;
6014
6015 error = 0;
6016 defaulted_owner = defaulted_group = defaulted_mode = 0;
6017
6018 /*
6019 * Require that the filesystem support extended security in order to apply any such attributes.
6020 */
6021 if (!vfs_extendedsecurity(dvp->v_mount) &&
6022 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
6023 error = EINVAL;
6024 goto out;
6025 }
6026
6027 /*
6028 * Default some fields.
6029 */
6030 dmp = dvp->v_mount;
6031
6032 /*
6033 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
6034 * owner takes ownership of all new files.
6035 */
6036 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) {
6037 VATTR_SET(vap, va_uid, dmp->mnt_fsowner);
6038 defaulted_owner = 1;
6039 } else {
6040 if (!VATTR_IS_ACTIVE(vap, va_uid)) {
6041 /* default owner is current user */
6042 VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx)));
6043 defaulted_owner = 1;
6044 }
6045 }
6046
6047 /*
6048 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that
6049 * group takes ownership of all new files.
6050 */
6051 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) {
6052 VATTR_SET(vap, va_gid, dmp->mnt_fsgroup);
6053 defaulted_group = 1;
6054 } else {
6055 if (!VATTR_IS_ACTIVE(vap, va_gid)) {
6056 /* default group comes from parent object, fallback to current user */
6057 struct vnode_attr dva;
6058 VATTR_INIT(&dva);
6059 VATTR_WANTED(&dva, va_gid);
6060 if ((error = vnode_getattr(dvp, &dva, ctx)) != 0)
6061 goto out;
6062 if (VATTR_IS_SUPPORTED(&dva, va_gid)) {
6063 VATTR_SET(vap, va_gid, dva.va_gid);
6064 } else {
6065 VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx)));
6066 }
6067 defaulted_group = 1;
6068 }
6069 }
6070
6071 if (!VATTR_IS_ACTIVE(vap, va_flags))
6072 VATTR_SET(vap, va_flags, 0);
6073
6074 /* default mode is everything, masked with current umask */
6075 if (!VATTR_IS_ACTIVE(vap, va_mode)) {
6076 VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd->fd_cmask);
6077 KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o", vap->va_mode, vfs_context_proc(ctx)->p_fd->fd_cmask);
6078 defaulted_mode = 1;
6079 }
6080 /* set timestamps to now */
6081 if (!VATTR_IS_ACTIVE(vap, va_create_time)) {
6082 nanotime(&vap->va_create_time);
6083 VATTR_SET_ACTIVE(vap, va_create_time);
6084 }
6085
6086 /*
6087 * Check for attempts to set nonsensical fields.
6088 */
6089 if (vap->va_active & ~VNODE_ATTR_NEWOBJ) {
6090 error = EINVAL;
6091 KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
6092 vap->va_active & ~VNODE_ATTR_NEWOBJ);
6093 goto out;
6094 }
6095
6096 /*
6097 * Quickly check for the applicability of any enforcement here.
6098 * Tests below maintain the integrity of the local security model.
6099 */
6100 if (vfs_authopaque(dvp->v_mount))
6101 goto out;
6102
6103 /*
6104 * We need to know if the caller is the superuser, or if the work is
6105 * otherwise already authorised.
6106 */
6107 cred = vfs_context_ucred(ctx);
6108 if (noauth) {
6109 /* doing work for the kernel */
6110 has_priv_suser = 1;
6111 } else {
6112 has_priv_suser = vfs_context_issuser(ctx);
6113 }
6114
6115
6116 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6117 if (has_priv_suser) {
6118 if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) {
6119 error = EPERM;
6120 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
6121 goto out;
6122 }
6123 } else {
6124 if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) {
6125 error = EPERM;
6126 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
6127 goto out;
6128 }
6129 }
6130 }
6131
6132 /* if not superuser, validate legality of new-item attributes */
6133 if (!has_priv_suser) {
6134 if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) {
6135 /* setgid? */
6136 if (vap->va_mode & S_ISGID) {
6137 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6138 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
6139 goto out;
6140 }
6141 if (!ismember) {
6142 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", vap->va_gid);
6143 error = EPERM;
6144 goto out;
6145 }
6146 }
6147
6148 /* setuid? */
6149 if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) {
6150 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
6151 error = EPERM;
6152 goto out;
6153 }
6154 }
6155 if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) {
6156 KAUTH_DEBUG(" DENIED - cannot create new item owned by %d", vap->va_uid);
6157 error = EPERM;
6158 goto out;
6159 }
6160 if (!defaulted_group) {
6161 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6162 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
6163 goto out;
6164 }
6165 if (!ismember) {
6166 KAUTH_DEBUG(" DENIED - cannot create new item with group %d - not a member", vap->va_gid);
6167 error = EPERM;
6168 goto out;
6169 }
6170 }
6171
6172 /* initialising owner/group UUID */
6173 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
6174 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
6175 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
6176 /* XXX ENOENT here - no GUID - should perhaps become EPERM */
6177 goto out;
6178 }
6179 if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
6180 KAUTH_DEBUG(" ERROR - cannot create item with supplied owner UUID - not us");
6181 error = EPERM;
6182 goto out;
6183 }
6184 }
6185 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
6186 if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
6187 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
6188 goto out;
6189 }
6190 if (!ismember) {
6191 KAUTH_DEBUG(" ERROR - cannot create item with supplied group UUID - not a member");
6192 error = EPERM;
6193 goto out;
6194 }
6195 }
6196 }
6197 out:
6198 return(error);
6199 }
6200
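/*
 * Illustrative sketch, an assumption rather than original code: how a
 * filesystem-independent create path might use vnode_authattr_new() above.
 * The routine defaults owner, group, mode and timestamps in place, so the
 * same vap can be handed to the filesystem afterwards.
 */
#if 0	/* example only, not compiled */
static int
example_create_checks(vnode_t dvp, struct vnode_attr *vap, vfs_context_t ctx)
{
	int error;

	VATTR_INIT(vap);
	VATTR_SET(vap, va_type, VREG);
	VATTR_SET(vap, va_mode, 0644);

	/* default and validate the remaining attributes against the caller */
	if ((error = vnode_authattr_new(dvp, vap, 0 /* noauth */, ctx)) != 0)
		return (error);
	/* vap is now fully populated; pass it on to VNOP_CREATE() */
	return (0);
}
#endif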
6201 /*
6202 * Check that the attribute information in vap can be legally written by the
6203 * context.
6204 *
6205 * Call this when you're not sure about the vnode_attr; either its contents
6206 * have come from an unknown source, or they are variable.
6207 *
6208 * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
6209 * must be authorized to be permitted to write the vattr.
6210 */
6211 int
6212 vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
6213 {
6214 struct vnode_attr ova;
6215 kauth_action_t required_action;
6216 int error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid;
6217 guid_t changer;
6218 gid_t group;
6219 uid_t owner;
6220 mode_t newmode;
6221 kauth_cred_t cred;
6222 uint32_t fdelta;
6223
6224 VATTR_INIT(&ova);
6225 required_action = 0;
6226 error = 0;
6227
6228 /*
6229 * Quickly check for enforcement applicability.
6230 */
6231 if (vfs_authopaque(vp->v_mount))
6232 goto out;
6233
6234 /*
6235 * Check for attempts to set nonsensical fields.
6236 */
6237 if (vap->va_active & VNODE_ATTR_RDONLY) {
6238 KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
6239 error = EINVAL;
6240 goto out;
6241 }
6242
6243 /*
6244 * We need to know if the caller is the superuser.
6245 */
6246 cred = vfs_context_ucred(ctx);
6247 has_priv_suser = kauth_cred_issuser(cred);
6248
6249 /*
6250 * If any of the following are changing, we need information from the old file:
6251 * va_uid
6252 * va_gid
6253 * va_mode
6254 * va_uuuid
6255 * va_guuid
6256 */
6257 if (VATTR_IS_ACTIVE(vap, va_uid) ||
6258 VATTR_IS_ACTIVE(vap, va_gid) ||
6259 VATTR_IS_ACTIVE(vap, va_mode) ||
6260 VATTR_IS_ACTIVE(vap, va_uuuid) ||
6261 VATTR_IS_ACTIVE(vap, va_guuid)) {
6262 VATTR_WANTED(&ova, va_mode);
6263 VATTR_WANTED(&ova, va_uid);
6264 VATTR_WANTED(&ova, va_gid);
6265 VATTR_WANTED(&ova, va_uuuid);
6266 VATTR_WANTED(&ova, va_guuid);
6267 KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
6268 }
6269
6270 /*
6271 * If timestamps are being changed, we need to know who the file is owned
6272 * by.
6273 */
6274 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
6275 VATTR_IS_ACTIVE(vap, va_change_time) ||
6276 VATTR_IS_ACTIVE(vap, va_modify_time) ||
6277 VATTR_IS_ACTIVE(vap, va_access_time) ||
6278 VATTR_IS_ACTIVE(vap, va_backup_time)) {
6279
6280 VATTR_WANTED(&ova, va_uid);
6281 #if 0 /* enable this when we support UUIDs as official owners */
6282 VATTR_WANTED(&ova, va_uuuid);
6283 #endif
6284 KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
6285 }
6286
6287 /*
6288 * If flags are being changed, we need the old flags.
6289 */
6290 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6291 KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
6292 VATTR_WANTED(&ova, va_flags);
6293 }
6294
6295 /*
6296 * If the size is being set, make sure it's not a directory.
6297 */
6298 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
6299 /* size is meaningless on a directory, don't permit this */
6300 if (vnode_isdir(vp)) {
6301 KAUTH_DEBUG("ATTR - ERROR: size change requested on a directory");
6302 error = EISDIR;
6303 goto out;
6304 }
6305 }
6306
6307 /*
6308 * Get old data.
6309 */
6310 KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active);
6311 if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
6312 KAUTH_DEBUG(" ERROR - got %d trying to get attributes", error);
6313 goto out;
6314 }
6315
6316 /*
6317 * Size changes require write access to the file data.
6318 */
6319 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
6320 /* if we can't get the size, or it's different, we need write access */
6321 KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
6322 required_action |= KAUTH_VNODE_WRITE_DATA;
6323 }
6324
6325 /*
6326 * Changing timestamps?
6327 *
6328 * Note that we are only called to authorize user-requested time changes;
6329 * side-effect time changes are not authorized. Authorisation is only
6330 * required for existing files.
6331 *
6332 * Non-owners are not permitted to change the time on an existing
6333 * file to anything other than the current time.
6334 */
6335 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
6336 VATTR_IS_ACTIVE(vap, va_change_time) ||
6337 VATTR_IS_ACTIVE(vap, va_modify_time) ||
6338 VATTR_IS_ACTIVE(vap, va_access_time) ||
6339 VATTR_IS_ACTIVE(vap, va_backup_time)) {
6340 /*
6341 * The owner and root may set any timestamps they like,
6342 * provided that the file is not immutable. The owner still needs
6343 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
6344 */
6345 if (has_priv_suser || vauth_node_owner(&ova, cred)) {
6346 KAUTH_DEBUG("ATTR - root or owner changing timestamps");
6347 required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES;
6348 } else {
6349 /* just setting the current time? */
6350 if (vap->va_vaflags & VA_UTIMES_NULL) {
6351 KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
6352 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
6353 } else {
6354 KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
6355 error = EACCES;
6356 goto out;
6357 }
6358 }
6359 }
6360
6361 /*
6362 * Changing file mode?
6363 */
6364 if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) {
6365 KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode);
6366
6367 /*
6368 * Mode changes always have the same basic auth requirements.
6369 */
6370 if (has_priv_suser) {
6371 KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
6372 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
6373 } else {
6374 /* need WRITE_SECURITY */
6375 KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
6376 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6377 }
6378
6379 /*
6380 * Can't set the setgid bit if you're not in the group and not root. Have to have
6381 * existing group information in the case we're not setting it right now.
6382 */
6383 if (vap->va_mode & S_ISGID) {
6384 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
6385 if (!has_priv_suser) {
6386 if (VATTR_IS_ACTIVE(vap, va_gid)) {
6387 group = vap->va_gid;
6388 } else if (VATTR_IS_SUPPORTED(&ova, va_gid)) {
6389 group = ova.va_gid;
6390 } else {
6391 KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
6392 error = EINVAL;
6393 goto out;
6394 }
6395 /*
6396 * This might be too restrictive; WRITE_SECURITY might be implied by
6397 * membership in this case, rather than being an additional requirement.
6398 */
6399 if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) {
6400 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
6401 goto out;
6402 }
6403 if (!ismember) {
6404 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", group);
6405 error = EPERM;
6406 goto out;
6407 }
6408 }
6409 }
6410
6411 /*
6412 * Can't set the setuid bit unless you're root or the file's owner.
6413 */
6414 if (vap->va_mode & S_ISUID) {
6415 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
6416 if (!has_priv_suser) {
6417 if (VATTR_IS_ACTIVE(vap, va_uid)) {
6418 owner = vap->va_uid;
6419 } else if (VATTR_IS_SUPPORTED(&ova, va_uid)) {
6420 owner = ova.va_uid;
6421 } else {
6422 KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
6423 error = EINVAL;
6424 goto out;
6425 }
6426 if (owner != kauth_cred_getuid(cred)) {
6427 /*
6428 * We could allow this if WRITE_SECURITY is permitted, perhaps.
6429 */
6430 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
6431 error = EPERM;
6432 goto out;
6433 }
6434 }
6435 }
6436 }
6437
6438 /*
6439 * Validate/mask flags changes. This checks that non-superusers set
6440 * only flags in the UF_SETTABLE mask, while the superuser may also
6441 * set flags in the SF_SETTABLE mask.
6442 *
6443 * Since flags changes may be made in conjunction with other changes,
6444 * we will ask the auth code to ignore immutability in the case that
6445 * the SF_* flags are not set and we are only manipulating the file flags.
6446 *
6447 */
6448 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6449 /* compute changing flags bits */
6450 if (VATTR_IS_SUPPORTED(&ova, va_flags)) {
6451 fdelta = vap->va_flags ^ ova.va_flags;
6452 } else {
6453 fdelta = vap->va_flags;
6454 }
6455
6456 if (fdelta != 0) {
6457 KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
6458 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6459
6460 /* check that changing bits are legal */
6461 if (has_priv_suser) {
6462 /*
6463 * The immutability check will prevent us from clearing the SF_*
6464 * flags unless the system securelevel permits it, so just check
6465 * for legal flags here.
6466 */
6467 if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) {
6468 error = EPERM;
6469 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
6470 goto out;
6471 }
6472 } else {
6473 if (fdelta & ~UF_SETTABLE) {
6474 error = EPERM;
6475 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
6476 goto out;
6477 }
6478 }
6479 /*
6480 * If the caller has the ability to manipulate file flags,
6481 * security is not reduced by ignoring them for this operation.
6482 *
6483 * A more complete test here would consider the 'after' states of the flags
6484 * to determine whether it would permit the operation, but this becomes
6485 * very complex.
6486 *
6487 * Ignoring immutability is conditional on securelevel; this does not bypass
6488 * the SF_* flags if securelevel > 0.
6489 */
6490 required_action |= KAUTH_VNODE_NOIMMUTABLE;
6491 }
6492 }
6493
6494 /*
6495 * Validate ownership information.
6496 */
6497 chowner = 0;
6498 chgroup = 0;
6499 clear_suid = 0;
6500 clear_sgid = 0;
6501
6502 /*
6503 * uid changing
6504 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
6505 * support them in general, and will ignore it if/when we try to set it.
6506 * We might want to clear the uid out of vap completely here.
6507 */
6508 if (VATTR_IS_ACTIVE(vap, va_uid)) {
6509 if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) {
6510 if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) {
6511 KAUTH_DEBUG(" DENIED - non-superuser cannot change ownership to a third party");
6512 error = EPERM;
6513 goto out;
6514 }
6515 chowner = 1;
6516 }
6517 clear_suid = 1;
6518 }
6519
6520 /*
6521 * gid changing
6522 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
6523 * support them in general, and will ignore it if/when we try to set it.
6524 * We might want to clear the gid out of vap completely here.
6525 */
6526 if (VATTR_IS_ACTIVE(vap, va_gid)) {
6527 if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) {
6528 if (!has_priv_suser) {
6529 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6530 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
6531 goto out;
6532 }
6533 if (!ismember) {
6534 KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group",
6535 ova.va_gid, vap->va_gid);
6536 error = EPERM;
6537 goto out;
6538 }
6539 }
6540 chgroup = 1;
6541 }
6542 clear_sgid = 1;
6543 }
6544
6545 /*
6546 * Owner UUID being set or changed.
6547 */
6548 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
6549 /* if the owner UUID is not actually changing ... */
6550 if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
6551 if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid))
6552 goto no_uuuid_change;
6553
6554 /*
6555 * If the current owner UUID is a null GUID, check
6556 * it against the UUID corresponding to the owner UID.
6557 */
6558 if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
6559 VATTR_IS_SUPPORTED(&ova, va_uid)) {
6560 guid_t uid_guid;
6561
6562 if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
6563 kauth_guid_equal(&vap->va_uuuid, &uid_guid))
6564 goto no_uuuid_change;
6565 }
6566 }
6567
6568 /*
6569 * The owner UUID cannot be set by a non-superuser to anything other than
6570 * their own or a null GUID (to "unset" the owner UUID).
6571 * Note that file systems must be prepared to handle the
6572 * null UUID case in a manner appropriate for that file
6573 * system.
6574 */
6575 if (!has_priv_suser) {
6576 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
6577 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
6578 /* XXX ENOENT here - no UUID - should perhaps become EPERM */
6579 goto out;
6580 }
6581 if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
6582 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
6583 KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us / null");
6584 error = EPERM;
6585 goto out;
6586 }
6587 }
6588 chowner = 1;
6589 clear_suid = 1;
6590 }
6591 no_uuuid_change:
6592 /*
6593 * Group UUID being set or changed.
6594 */
6595 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
6596 /* if the group UUID is not actually changing ... */
6597 if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
6598 if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid))
6599 goto no_guuid_change;
6600
6601 /*
6602 * If the current group UUID is a null UUID, check
6603 * it against the UUID corresponding to the group GID.
6604 */
6605 if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
6606 VATTR_IS_SUPPORTED(&ova, va_gid)) {
6607 guid_t gid_guid;
6608
6609 if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
6610 kauth_guid_equal(&vap->va_guuid, &gid_guid))
6611 goto no_guuid_change;
6612 }
6613 }
6614
6615 /*
6616 * The group UUID cannot be set by a non-superuser to anything other than
6617 * one of which they are a member or a null GUID (to "unset"
6618 * the group UUID).
6619 * Note that file systems must be prepared to handle the
6620 * null UUID case in a manner appropriate for that file
6621 * system.
6622 */
6623 if (!has_priv_suser) {
6624 if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid))
6625 ismember = 1;
6626 else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
6627 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
6628 goto out;
6629 }
6630 if (!ismember) {
6631 KAUTH_DEBUG(" ERROR - cannot set supplied group UUID - not a member / null");
6632 error = EPERM;
6633 goto out;
6634 }
6635 }
6636 chgroup = 1;
6637 }
6638 no_guuid_change:
6639
6640 /*
6641 * Compute authorisation for group/ownership changes.
6642 */
6643 if (chowner || chgroup || clear_suid || clear_sgid) {
6644 if (has_priv_suser) {
6645 KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
6646 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
6647 } else {
6648 if (chowner) {
6649 KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
6650 required_action |= KAUTH_VNODE_TAKE_OWNERSHIP;
6651 }
6652 if (chgroup && !chowner) {
6653 KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
6654 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6655 }
6656
6657 /* clear set-uid and set-gid bits as required by Posix */
6658 if (VATTR_IS_ACTIVE(vap, va_mode)) {
6659 newmode = vap->va_mode;
6660 } else if (VATTR_IS_SUPPORTED(&ova, va_mode)) {
6661 newmode = ova.va_mode;
6662 } else {
6663 KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
6664 newmode = 0;
6665 }
6666 if (newmode & (S_ISUID | S_ISGID)) {
6667 VATTR_SET(vap, va_mode, newmode & ~(S_ISUID | S_ISGID));
6668 KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o", newmode, vap->va_mode);
6669 }
6670 }
6671 }
6672
6673 /*
6674 * Authorise changes in the ACL.
6675 */
6676 if (VATTR_IS_ACTIVE(vap, va_acl)) {
6677
6678 /* no existing ACL */
6679 if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) {
6680
6681 /* adding an ACL */
6682 if (vap->va_acl != NULL) {
6683 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6684 KAUTH_DEBUG("CHMOD - adding ACL");
6685 }
6686
6687 /* removing an existing ACL */
6688 } else if (vap->va_acl == NULL) {
6689 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6690 KAUTH_DEBUG("CHMOD - removing ACL");
6691
6692 /* updating an existing ACL */
6693 } else {
6694 if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) {
6695 /* entry count changed, must be different */
6696 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6697 KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
6698 } else if (vap->va_acl->acl_entrycount > 0) {
6699 /* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
6700 if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0],
6701 sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount) != 0) {
6702 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6703 KAUTH_DEBUG("CHMOD - changing ACL entries");
6704 }
6705 }
6706 }
6707 }
6708
6709 /*
6710 * Other attributes that require authorisation.
6711 */
6712 if (VATTR_IS_ACTIVE(vap, va_encoding))
6713 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
6714
6715 out:
6716 if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL))
6717 kauth_acl_free(ova.va_acl);
6718 if (error == 0)
6719 *actionp = required_action;
6720 return(error);
6721 }
6722
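/*
 * Illustrative sketch, an assumption for exposition: the typical setattr
 * sequence built on the routine above. vnode_authattr() computes the
 * rights that must be held, vnode_authorize() checks them, and only then
 * is the change pushed to the filesystem.
 */
#if 0	/* example only, not compiled */
static int
example_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_action_t action;
	int error;

	if ((error = vnode_authattr(vp, vap, &action, ctx)) != 0)
		return (error);
	if (action && (error = vnode_authorize(vp, NULL, action, ctx)) != 0)
		return (error);
	return (vnode_setattr(vp, vap, ctx));
}
#endif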
6723
6724 void
6725 vfs_setlocklocal(mount_t mp)
6726 {
6727 vnode_t vp;
6728
6729 mount_lock(mp);
6730 mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
6731
6732 /*
6733 * We do not expect anyone to be using any vnodes at the
6734 * time this routine is called, so there is no need for vnode locking.
6735 */
6736 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
6737 vp->v_flag |= VLOCKLOCAL;
6738 }
6739 TAILQ_FOREACH(vp, &mp->mnt_workerqueue, v_mntvnodes) {
6740 vp->v_flag |= VLOCKLOCAL;
6741 }
6742 TAILQ_FOREACH(vp, &mp->mnt_newvnodes, v_mntvnodes) {
6743 vp->v_flag |= VLOCKLOCAL;
6744 }
6745 mount_unlock(mp);
6746 }
6747
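/*
 * Usage sketch, not original code: a filesystem that wants the VFS to
 * service advisory locks locally calls vfs_setlocklocal() once from its
 * mount path; every current and future vnode on the mount then carries
 * VLOCKLOCAL. The filesystem name below is hypothetical.
 */
#if 0	/* example only, not compiled */
static int
examplefs_mount(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
{
	/* ... filesystem-specific mount setup ... */
	vfs_setlocklocal(mp);	/* advisory locking handled by the VFS layer */
	return (0);
}
#endif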
6748 void
6749 vfs_setunmountpreflight(mount_t mp)
6750 {
6751 mount_lock_spin(mp);
6752 mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
6753 mount_unlock(mp);
6754 }
6755
6756 void
6757 vn_setunionwait(vnode_t vp)
6758 {
6759 vnode_lock_spin(vp);
6760 vp->v_flag |= VISUNION;
6761 vnode_unlock(vp);
6762 }
6763
6764
6765 void
6766 vn_checkunionwait(vnode_t vp)
6767 {
6768 vnode_lock_spin(vp);
6769 while ((vp->v_flag & VISUNION) == VISUNION)
6770 msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
6771 vnode_unlock(vp);
6772 }
6773
6774 void
6775 vn_clearunionwait(vnode_t vp, int locked)
6776 {
6777 if (!locked)
6778 vnode_lock_spin(vp);
6779 if ((vp->v_flag & VISUNION) == VISUNION) {
6780 vp->v_flag &= ~VISUNION;
6781 wakeup((caddr_t)&vp->v_flag);
6782 }
6783 if (!locked)
6784 vnode_unlock(vp);
6785 }
6786
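/*
 * Usage sketch under assumptions: the three VISUNION helpers above pair up
 * as a simple barrier. A union-mount operation brackets its critical
 * section with vn_setunionwait()/vn_clearunionwait(), while concurrent
 * lookups park in vn_checkunionwait() until the flag clears.
 */
#if 0	/* example only, not compiled */
static void
example_union_op(vnode_t vp)
{
	vn_setunionwait(vp);		/* raise VISUNION; lookups will block */
	/* ... manipulate the union mount ... */
	vn_clearunionwait(vp, 0);	/* clear the flag and wake sleepers */
}
#endif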
6787 /*
6788 * XXX - get "don't trigger mounts" flag for thread; used by autofs.
6789 */
6790 extern int thread_notrigger(void);
6791
6792 int
6793 thread_notrigger(void)
6794 {
6795 struct uthread *uth = (struct uthread *)get_bsdthread_info(current_thread());
6796 return (uth->uu_notrigger);
6797 }
6798
6799 /*
6800 * Removes orphaned AppleDouble files during an rmdir.
6801 * Works by:
6802 * 1. vnode_suspend().
6803 * 2. Call VNOP_READDIR() until the end of the directory is reached.
6804 * 3. Check that the directory entries returned are regular files with names starting with "._". If not, return ENOTEMPTY.
6805 * 4. Continue (2) and (3) until the end of the directory is reached.
6806 * 5. If all the entries in the directory were files with "._" names, delete all the files.
6807 * 6. vnode_resume()
6808 * 7. If deletion of all files succeeded, call VNOP_RMDIR() again.
6809 */
6810
6811 errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int *restart_flag)
6812 {
6813
6814 #define UIO_BUFF_SIZE 2048
6815 uio_t auio = NULL;
6816 int eofflag, siz = UIO_BUFF_SIZE, nentries = 0;
6817 int open_flag = 0, full_erase_flag = 0;
6818 char uio_buf[ UIO_SIZEOF(1) ];
6819 char *rbuf = NULL, *cpos, *cend;
6820 struct nameidata nd_temp;
6821 struct dirent *dp;
6822 errno_t error;
6823
6824 error = vnode_suspend(vp);
6825
6826 /*
6827 * restart_flag is set so that the calling rmdir sleeps and restarts the operation
6828 */
6829 if (error == EBUSY)
6830 *restart_flag = 1;
6831 if (error != 0)
6832 goto outsc;
6833
6834 /*
6835 * set up UIO
6836 */
6837 MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK);
6838 if (rbuf)
6839 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
6840 &uio_buf[0], sizeof(uio_buf));
6841 if (!rbuf || !auio) {
6842 error = ENOMEM;
6843 goto outsc;
6844 }
6845
6846 uio_setoffset(auio, 0);
6847
6848 eofflag = 0;
6849
6850 if ((error = VNOP_OPEN(vp, FREAD, ctx)))
6851 goto outsc;
6852 else
6853 open_flag = 1;
6854
6855 /*
6856 * First pass checks whether all files are AppleDouble files.
6857 */
6858
6859 do {
6860 siz = UIO_BUFF_SIZE;
6861 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
6862 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
6863
6864 if ((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx)))
6865 goto outsc;
6866
6867 if (uio_resid(auio) != 0)
6868 siz -= uio_resid(auio);
6869
6870 /*
6871 * Iterate through directory
6872 */
6873 cpos = rbuf;
6874 cend = rbuf + siz;
6875 dp = (struct dirent*) cpos;
6876
6877 if (cpos == cend)
6878 eofflag = 1;
6879
6880 while ((cpos < cend)) {
6881 /*
6882 * Check for . and .. as well as directories
6883 */
6884 if (dp->d_ino != 0 &&
6885 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
6886 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) {
6887 /*
6888 * Check for irregular files and "._" files.
6889 * If there is a "._._" file, abort the operation.
6890 */
6891 if (dp->d_namlen < 2 ||
6892 strncmp(dp->d_name,"._",2) ||
6893 (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._",2))) {
6894 error = ENOTEMPTY;
6895 goto outsc;
6896 }
6897 }
6898 cpos += dp->d_reclen;
6899 dp = (struct dirent*)cpos;
6900 }
6901
6902 /*
6903 * workaround for HFS/NFS setting eofflag before end of file
6904 */
6905 if (vp->v_tag == VT_HFS && nentries > 2)
6906 eofflag = 0;
6907
6908 if (vp->v_tag == VT_NFS) {
6909 if (eofflag && !full_erase_flag) {
6910 full_erase_flag = 1;
6911 eofflag = 0;
6912 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
6913 }
6914 else if (!eofflag && full_erase_flag)
6915 full_erase_flag = 0;
6916 }
6917
6918 } while (!eofflag);
6919 /*
6920 * If we've made it here, all the files in the dir are "._" files.
6921 * We can delete the files even though the node is suspended,
6922 * because we are the owner of the files.
6923 */
6924
6925 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
6926 eofflag = 0;
6927 full_erase_flag = 0;
6928
6929 do {
6930 siz = UIO_BUFF_SIZE;
6931 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
6932 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
6933
6934 error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);
6935
6936 if (error != 0)
6937 goto outsc;
6938
6939 if (uio_resid(auio) != 0)
6940 siz -= uio_resid(auio);
6941
6942 /*
6943 * Iterate through directory
6944 */
6945 cpos = rbuf;
6946 cend = rbuf + siz;
6947 dp = (struct dirent*) cpos;
6948
6949 if (cpos == cend)
6950 eofflag = 1;
6951
6952 while (cpos < cend) {
6953 /*
6954 * Check for . and .. as well as directories
6955 */
6956 if (dp->d_ino != 0 &&
6957 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
6958 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
6959 ) {
6960
6961 NDINIT(&nd_temp, DELETE, USEDVP, UIO_SYSSPACE, CAST_USER_ADDR_T(dp->d_name), ctx);
6962 nd_temp.ni_dvp = vp;
6963 error = unlink1(ctx, &nd_temp, 0);
6964 if (error && error != ENOENT) {
6965 goto outsc;
6966 }
6967 }
6968 cpos += dp->d_reclen;
6969 dp = (struct dirent*)cpos;
6970 }
6971
6972 /*
6973 * workaround for HFS/NFS setting eofflag before end of file
6974 */
6975 if (vp->v_tag == VT_HFS && nentries > 2)
6976 eofflag = 0;
6977
6978 if (vp->v_tag == VT_NFS) {
6979 if (eofflag && !full_erase_flag) {
6980 full_erase_flag = 1;
6981 eofflag = 0;
6982 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
6983 }
6984 else if (!eofflag && full_erase_flag)
6985 full_erase_flag = 0;
6986 }
6987
6988 } while (!eofflag);
6989
6990
6991 error = 0;
6992
6993 outsc:
6994 if (open_flag)
6995 VNOP_CLOSE(vp, FREAD, ctx);
6996
6997 uio_free(auio);
6998 FREE(rbuf, M_TEMP);
6999
7000 vnode_resume(vp);
7001
7002
7003 return(error);
7004
7005 }
7006
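/*
 * Caller pattern, a condensed sketch rather than the actual rmdir code:
 * retry the VNOP_RMDIR() after the orphaned "._" files have been removed,
 * and back off when the vnode could not be suspended (EBUSY sets
 * *restart_flag). The surrounding nameidata/locals are assumed context.
 */
#if 0	/* example only, not compiled */
	int restart_flag = 0;

	error = VNOP_RMDIR(dvp, vp, &nd.ni_cnd, ctx);
	if (error == ENOTEMPTY) {
		error = rmdir_remove_orphaned_appleDouble(vp, ctx, &restart_flag);
		if (error == 0 && !restart_flag)
			error = VNOP_RMDIR(dvp, vp, &nd.ni_cnd, ctx);
		/* if restart_flag is set, sleep briefly and restart the rmdir */
	}
#endif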
7007
7008 void
7009 lock_vnode_and_post(vnode_t vp, int kevent_num)
7010 {
7011 /* Only take the lock if there's something there! */
7012 if (vp->v_knotes.slh_first != NULL) {
7013 vnode_lock(vp);
7014 KNOTE(&vp->v_knotes, kevent_num);
7015 vnode_unlock(vp);
7016 }
7017 }
7018
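/*
 * Usage sketch: posting a vnode kevent after a write. NOTE_WRITE is the
 * standard EVFILT_VNODE event bit from <sys/event.h>.
 */
#if 0	/* example only, not compiled */
	lock_vnode_and_post(vp, NOTE_WRITE);	/* wake kqueue watchers of vp */
#endif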
7019 #ifdef JOE_DEBUG
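/*
 * Debug-only bookkeeping: accumulate this thread's iocount delta and
 * remember up to 32 distinct vnodes it touched, so leaked iocounts can
 * be attributed to the thread that took them.
 */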
7020 static void record_vp(vnode_t vp, int count) {
7021 struct uthread *ut;
7022 int i;
7023
7024 if ((vp->v_flag & VSYSTEM))
7025 return;
7026
7027 ut = get_bsdthread_info(current_thread());
7028 ut->uu_iocount += count;
7029
7030 if (ut->uu_vpindex < 32) {
7031 for (i = 0; i < ut->uu_vpindex; i++) {
7032 if (ut->uu_vps[i] == vp)
7033 return;
7034 }
7035 ut->uu_vps[ut->uu_vpindex] = vp;
7036 ut->uu_vpindex++;
7037 }
7038 }
7039 #endif