/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *        The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the University of
 *        California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *        @(#)vfs_subr.c        8.31 (Berkeley) 5/26/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/time.h>
#include <sys/lock.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf_internal.h>
#include <sys/errno.h>
#include <kern/kalloc.h>
#include <sys/uio_internal.h>
#include <sys/uio.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/ubc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/event.h>
#include <sys/kdebug.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/kern_memorystatus.h>
#include <sys/lockf.h>
#include <sys/reboot.h>
#include <miscfs/fifofs/fifo.h>

#include <nfs/nfs_conf.h>

#include <string.h>
#include <machine/machine_routines.h>

#include <kern/assert.h>
#include <mach/kern_return.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>

#include <miscfs/specfs/specdev.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>

#include <kern/kalloc.h>        /* kalloc()/kfree() */
#include <kern/clock.h>         /* delay_for_interval() */
#include <libkern/OSAtomic.h>   /* OSAddAtomic() */
#include <os/atomic_private.h>
#if defined(XNU_TARGET_OS_OSX)
#include <console/video_console.h>
#endif

#ifdef JOE_DEBUG
#include <libkern/OSDebug.h>
#endif

#include <vm/vm_protos.h>       /* vnode_pager_vrele() */

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

#include <vfs/vfs_disk_conditioner.h>
#include <libkern/section_keywords.h>

extern lck_grp_t *vnode_lck_grp;
extern lck_attr_t *vnode_lck_attr;

#if CONFIG_TRIGGERS
extern lck_grp_t *trigger_vnode_lck_grp;
extern lck_attr_t *trigger_vnode_lck_attr;
#endif

extern lck_mtx_t * mnt_list_mtx_lock;

ZONE_DECLARE(specinfo_zone, "specinfo",
    sizeof(struct specinfo), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);

ZONE_DECLARE(vnode_zone, "vnodes",
    sizeof(struct vnode), ZC_NOENCRYPT | ZC_NOGC | ZC_ZFREE_CLEARMEM);

enum vtype iftovt_tab[16] = {
        VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
        VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
        0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
        S_IFSOCK, S_IFIFO, S_IFMT,
};
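
/*
 * These tables back the IFTOVT()/VTTOIF() conversion macros declared
 * alongside them in the vnode headers: the S_IFMT bits of a mode_t,
 * shifted down 12, index iftovt_tab, and a vtype indexes vttoif_tab to
 * recover the corresponding S_IF* bits.  Worked example: S_IFDIR is
 * 0040000, and 0040000 >> 12 == 4, so iftovt_tab[4] == VDIR.
 */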


/* XXX These should be in a BSD-accessible Mach header, but aren't. */
extern void memory_object_mark_used(
        memory_object_control_t control);

extern void memory_object_mark_unused(
        memory_object_control_t control,
        boolean_t rage);

extern void memory_object_mark_io_tracking(
        memory_object_control_t control);

/* XXX next prototype should be from <nfs/nfs.h> */
extern int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int);

extern int paniclog_append_noflush(const char *format, ...);

/* XXX next prototype should be from <libsa/stdlib.h> but conflicts with libkern */
__private_extern__ void qsort(
        void * array,
        size_t nmembers,
        size_t member_size,
        int (*)(const void *, const void *));

__private_extern__ void vntblinit(void);
__private_extern__ int unlink1(vfs_context_t, vnode_t, user_addr_t,
    enum uio_seg, int);

static void vnode_list_add(vnode_t);
static void vnode_async_list_add(vnode_t);
static void vnode_list_remove(vnode_t);
static void vnode_list_remove_locked(vnode_t);

static void vnode_abort_advlocks(vnode_t);
static errno_t vnode_drain(vnode_t);
static void vgone(vnode_t, int flags);
static void vclean(vnode_t vp, int flag);
static void vnode_reclaim_internal(vnode_t, int, int, int);

static void vnode_dropiocount(vnode_t);

static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
static int vnode_reload(vnode_t);

static int unmount_callback(mount_t, __unused void *);

static void insmntque(vnode_t vp, mount_t mp);
static int mount_getvfscnt(void);
static int mount_fillfsids(fsid_t *, int);
static void vnode_iterate_setup(mount_t);
int vnode_umount_preflight(mount_t, vnode_t, int);
static int vnode_iterate_prepare(mount_t);
static int vnode_iterate_reloadq(mount_t);
static void vnode_iterate_clear(mount_t);
static mount_t vfs_getvfs_locked(fsid_t *);
static int vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp,
    struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx);
static int vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx);

errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);

#ifdef JOE_DEBUG
static void record_vp(vnode_t vp, int count);
#endif

#if CONFIG_JETSAM && (DEVELOPMENT || DEBUG)
extern int bootarg_no_vnode_jetsam;     /* from bsd_init.c; default value is 0 */
#endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */

extern int bootarg_no_vnode_drain;      /* from bsd_init.c; default value is 0 */

boolean_t root_is_CF_drive = FALSE;

#if CONFIG_TRIGGERS
static int vnode_resolver_create(mount_t, vnode_t, struct vnode_trigger_param *, boolean_t external);
static void vnode_resolver_detach(vnode_t);
#endif

TAILQ_HEAD(freelst, vnode) vnode_free_list;     /* vnode free list */
TAILQ_HEAD(deadlst, vnode) vnode_dead_list;     /* vnode dead list */
TAILQ_HEAD(async_work_lst, vnode) vnode_async_work_list;


TAILQ_HEAD(ragelst, vnode) vnode_rage_list;     /* vnode rapid age list */
struct timeval rage_tv;
int rage_limit = 0;
int ragevnodes = 0;
static int vfs_unmountall_started = 0;

#define RAGE_LIMIT_MIN  100
#define RAGE_TIME_LIMIT 5
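
/*
 * Rapid-aging ("rage") vnodes live on vnode_rage_list and are recycled
 * ahead of ordinary free vnodes.  rage_limit is sized in vntblinit()
 * below as desiredvnodes / 100, clamped to at least RAGE_LIMIT_MIN;
 * RAGE_TIME_LIMIT is the age window (in seconds, measured against the
 * rage_tv timestamp) after which rage vnodes are treated like ordinary
 * free vnodes.
 */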

/*
 * ROSV definitions
 * NOTE: These are shadowed from PlatformSupport definitions, but XNU
 * builds standalone.
 */
#define PLATFORM_DATA_VOLUME_MOUNT_POINT "/System/Volumes/Data"

/*
 * These could be in PlatformSupport but aren't yet
 */
#define PLATFORM_PREBOOT_VOLUME_MOUNT_POINT "/System/Volumes/Preboot"
#define PLATFORM_RECOVERY_VOLUME_MOUNT_POINT "/System/Volumes/Recovery"

#if CONFIG_MOUNT_VM
#define PLATFORM_VM_VOLUME_MOUNT_POINT "/System/Volumes/VM"
#endif

struct mntlist mountlist;       /* mounted filesystem list */
static int nummounts = 0;

static int print_busy_vnodes = 0;       /* print out busy vnodes */

#if DIAGNOSTIC
#define VLISTCHECK(fun, vp, list) \
        if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
                panic("%s: %s vnode not on %slist", (fun), (list), (list));
#else
#define VLISTCHECK(fun, vp, list)
#endif /* DIAGNOSTIC */

#define VLISTNONE(vp) \
        do { \
                (vp)->v_freelist.tqe_next = (struct vnode *)0; \
                (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb; \
        } while(0)

#define VONLIST(vp) \
        ((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
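
/*
 * List membership is tracked with a sentinel rather than a flag:
 * VLISTNONE() stores 0xdeadb in v_freelist.tqe_prev to mark a vnode as
 * being on no list, VONLIST() is simply the negation of that test, and
 * VLISTCHECK() (under DIAGNOSTIC) panics if the sentinel is found where
 * list membership was expected.
 */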

/* remove a vnode from free vnode list */
#define VREMFREE(fun, vp) \
        do { \
                VLISTCHECK((fun), (vp), "free"); \
                TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist); \
                VLISTNONE((vp)); \
                freevnodes--; \
        } while(0)


/* remove a vnode from dead vnode list */
#define VREMDEAD(fun, vp) \
        do { \
                VLISTCHECK((fun), (vp), "dead"); \
                TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist); \
                VLISTNONE((vp)); \
                vp->v_listflag &= ~VLIST_DEAD; \
                deadvnodes--; \
        } while(0)


/* remove a vnode from async work vnode list */
#define VREMASYNC_WORK(fun, vp) \
        do { \
                VLISTCHECK((fun), (vp), "async_work"); \
                TAILQ_REMOVE(&vnode_async_work_list, (vp), v_freelist); \
                VLISTNONE((vp)); \
                vp->v_listflag &= ~VLIST_ASYNC_WORK; \
                async_work_vnodes--; \
        } while(0)


/* remove a vnode from rage vnode list */
#define VREMRAGE(fun, vp) \
        do { \
                if (!(vp->v_listflag & VLIST_RAGE)) \
                        panic("VREMRAGE: vp not on rage list"); \
                VLISTCHECK((fun), (vp), "rage"); \
                TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist); \
                VLISTNONE((vp)); \
                vp->v_listflag &= ~VLIST_RAGE; \
                ragevnodes--; \
        } while(0)

static void async_work_continue(void);

/*
 * Initialize the vnode management data structures.
 */
__private_extern__ void
vntblinit(void)
{
        thread_t thread = THREAD_NULL;

        TAILQ_INIT(&vnode_free_list);
        TAILQ_INIT(&vnode_rage_list);
        TAILQ_INIT(&vnode_dead_list);
        TAILQ_INIT(&vnode_async_work_list);
        TAILQ_INIT(&mountlist);

        microuptime(&rage_tv);
        rage_limit = desiredvnodes / 100;

        if (rage_limit < RAGE_LIMIT_MIN) {
                rage_limit = RAGE_LIMIT_MIN;
        }

        /*
         * create worker threads
         */
        kernel_thread_start((thread_continue_t)async_work_continue, NULL, &thread);
        thread_deallocate(thread);
}

/* the slptimeout argument is expressed in units of 10 msec */
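/*
 * Worked example of the unit conversion below: slptimeout == 250 means
 * 2.5 seconds; 250 / 100 = 2 whole seconds go in tv_sec, and the
 * remaining 50 ten-millisecond units (500,000,000 nsec) go in tv_nsec.
 */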
int
vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg)
{
        int error = 0;
        struct timespec ts;

        if (output_target < 0) {
                return EINVAL;
        }

        KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);

        if (vp->v_numoutput > output_target) {
                slpflag |= PDROP;

                vnode_lock_spin(vp);

                while ((vp->v_numoutput > output_target) && error == 0) {
                        if (output_target) {
                                vp->v_flag |= VTHROTTLED;
                        } else {
                                vp->v_flag |= VBWAIT;
                        }

                        ts.tv_sec = (slptimeout / 100);
                        ts.tv_nsec = (slptimeout % 100) * 10 * NSEC_PER_USEC * 1000;
                        error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);

                        vnode_lock_spin(vp);
                }
                vnode_unlock(vp);
        }
        KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);

        return error;
}


void
vnode_startwrite(vnode_t vp)
{
        OSAddAtomic(1, &vp->v_numoutput);
}


void
vnode_writedone(vnode_t vp)
{
        if (vp) {
                int need_wakeup = 0;

                OSAddAtomic(-1, &vp->v_numoutput);

                vnode_lock_spin(vp);

                if (vp->v_numoutput < 0) {
                        panic("vnode_writedone: numoutput < 0");
                }

                if ((vp->v_flag & VTHROTTLED)) {
                        vp->v_flag &= ~VTHROTTLED;
                        need_wakeup = 1;
                }
                if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
                        vp->v_flag &= ~VBWAIT;
                        need_wakeup = 1;
                }
                vnode_unlock(vp);

                if (need_wakeup) {
                        wakeup((caddr_t)&vp->v_numoutput);
                }
        }
}


int
vnode_hasdirtyblks(vnode_t vp)
{
        struct cl_writebehind *wbp;

        /*
         * Not taking the buf_mtxp as there is little
         * point doing it. Even if the lock is taken the
         * state can change right after that. If there
         * needs to be a synchronization, it must be driven
         * by the caller
         */
        if (vp->v_dirtyblkhd.lh_first) {
                return 1;
        }

        if (!UBCINFOEXISTS(vp)) {
                return 0;
        }

        wbp = vp->v_ubcinfo->cl_wbehind;

        if (wbp && (wbp->cl_number || wbp->cl_scmap)) {
                return 1;
        }

        return 0;
}

int
vnode_hascleanblks(vnode_t vp)
{
        /*
         * Not taking the buf_mtxp as there is little
         * point doing it. Even if the lock is taken the
         * state can change right after that. If there
         * needs to be a synchronization, it must be driven
         * by the caller
         */
        if (vp->v_cleanblkhd.lh_first) {
                return 1;
        }
        return 0;
}

void
vnode_iterate_setup(mount_t mp)
{
        mp->mnt_lflag |= MNT_LITER;
}

int
vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
{
        vnode_t vp;
        int ret = 0;

        TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
                if (vp->v_type == VDIR) {
                        continue;
                }
                if (vp == skipvp) {
                        continue;
                }
                if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH))) {
                        continue;
                }
                if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
                        continue;
                }
                if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG)) {
                        continue;
                }

                /* Look for busy vnode */
                if ((vp->v_usecount != 0) && ((vp->v_usecount - vp->v_kusecount) != 0)) {
                        ret = 1;
                        if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
                                vprint("vnode_umount_preflight - busy vnode", vp);
                        } else {
                                return ret;
                        }
                } else if (vp->v_iocount > 0) {
                        /* Busy if iocount is > 0 for more than 3 seconds */
                        tsleep(&vp->v_iocount, PVFS, "vnode_drain_network", 3 * hz);
                        if (vp->v_iocount > 0) {
                                ret = 1;
                                if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
                                        vprint("vnode_umount_preflight - busy vnode", vp);
                                } else {
                                        return ret;
                                }
                        }
                        continue;
                }
        }

        return ret;
}

/*
 * This routine prepares for iteration by moving all the vnodes to the
 * worker queue; called with the mount lock held.
 */
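/*
 * Note that no list walk is needed: the whole of mnt_vnodelist is
 * spliced onto mnt_workerqueue in O(1) by rewiring the TAILQ head and
 * tail pointers directly, after which mnt_vnodelist is reinitialized
 * empty.  Vnodes created while an iteration is in flight are parked on
 * mnt_newvnodes (see insmntque()) and merged back into mnt_vnodelist
 * by vnode_iterate_reloadq().
 */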
int
vnode_iterate_prepare(mount_t mp)
{
        vnode_t vp;

        if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
                /* nothing to do */
                return 0;
        }

        vp = TAILQ_FIRST(&mp->mnt_vnodelist);
        vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
        mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
        mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;

        TAILQ_INIT(&mp->mnt_vnodelist);
        if (mp->mnt_newvnodes.tqh_first != NULL) {
                panic("vnode_iterate_prepare: newvnode when entering vnode");
        }
        TAILQ_INIT(&mp->mnt_newvnodes);

        return 1;
}


/* called with mount lock held */
int
vnode_iterate_reloadq(mount_t mp)
{
        int moved = 0;

        /* add the remaining entries in workerq to the end of mount vnode list */
        if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
                struct vnode * mvp;
                mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);

                /* Joining the workerqueue entries to the mount vnode list */
                if (mvp) {
                        mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
                } else {
                        mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
                }
                mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
                mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
                TAILQ_INIT(&mp->mnt_workerqueue);
        }

        /* add the newvnodes to the head of mount vnode list */
        if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
                struct vnode * nlvp;
                nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);

                mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
                nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
                if (mp->mnt_vnodelist.tqh_first) {
                        mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
                } else {
                        mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
                }
                mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
                TAILQ_INIT(&mp->mnt_newvnodes);
                moved = 1;
        }

        return moved;
}


void
vnode_iterate_clear(mount_t mp)
{
        mp->mnt_lflag &= ~MNT_LITER;
}

#if defined(__x86_64__)

#include <i386/panic_hooks.h>

struct vnode_iterate_panic_hook {
        panic_hook_t hook;
        mount_t mp;
        struct vnode *vp;
};

static void
vnode_iterate_panic_hook(panic_hook_t *hook_)
{
        struct vnode_iterate_panic_hook *hook = (struct vnode_iterate_panic_hook *)hook_;
        panic_phys_range_t range;
        uint64_t phys;

        if (panic_phys_range_before(hook->mp, &phys, &range)) {
                paniclog_append_noflush("mp = %p, phys = %p, prev (%p: %p-%p)\n",
                    hook->mp, phys, range.type, range.phys_start,
                    range.phys_start + range.len);
        } else {
                paniclog_append_noflush("mp = %p, phys = %p, prev (!)\n", hook->mp, phys);
        }

        if (panic_phys_range_before(hook->vp, &phys, &range)) {
                paniclog_append_noflush("vp = %p, phys = %p, prev (%p: %p-%p)\n",
                    hook->vp, phys, range.type, range.phys_start,
                    range.phys_start + range.len);
        } else {
                paniclog_append_noflush("vp = %p, phys = %p, prev (!)\n", hook->vp, phys);
        }
        panic_dump_mem((void *)(((vm_offset_t)hook->mp - 4096) & ~4095), 12288);
}
#endif /* defined(__x86_64__) */

int
vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
    void *arg)
{
        struct vnode *vp;
        int vid, retval;
        int ret = 0;

        /*
         * The mount iterate mutex is held for the duration of the iteration.
         * This can be done by a state flag on the mount structure but we can
         * run into priority inversion issues sometimes.
         * Using a mutex allows us to benefit from the priority donation
         * mechanisms in the kernel for locks. This mutex should never be
         * acquired in spin mode and it should be acquired before attempting to
         * acquire the mount lock.
         */
        mount_iterate_lock(mp);

        mount_lock(mp);

        vnode_iterate_setup(mp);

        /* If it returns 0 then there is nothing to do */
        retval = vnode_iterate_prepare(mp);

        if (retval == 0) {
                vnode_iterate_clear(mp);
                mount_unlock(mp);
                mount_iterate_unlock(mp);
                return ret;
        }

#if defined(__x86_64__)
        struct vnode_iterate_panic_hook hook;
        hook.mp = mp;
        hook.vp = NULL;
        panic_hook(&hook.hook, vnode_iterate_panic_hook);
#endif
        /* iterate over all the vnodes */
        while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
                vp = TAILQ_FIRST(&mp->mnt_workerqueue);
#if defined(__x86_64__)
                hook.vp = vp;
#endif
                TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
                TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
                vid = vp->v_id;
                if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
                        continue;
                }
                mount_unlock(mp);

                if (vget_internal(vp, vid, (flags | VNODE_NODEAD | VNODE_WITHID | VNODE_NOSUSPEND))) {
                        mount_lock(mp);
                        continue;
                }
                if (flags & VNODE_RELOAD) {
                        /*
                         * we're reloading the filesystem
                         * cast out any inactive vnodes...
                         */
                        if (vnode_reload(vp)) {
                                /* vnode will be recycled on the refcount drop */
                                vnode_put(vp);
                                mount_lock(mp);
                                continue;
                        }
                }

                retval = callout(vp, arg);

                switch (retval) {
                case VNODE_RETURNED:
                case VNODE_RETURNED_DONE:
                        vnode_put(vp);
                        if (retval == VNODE_RETURNED_DONE) {
                                mount_lock(mp);
                                ret = 0;
                                goto out;
                        }
                        break;

                case VNODE_CLAIMED_DONE:
                        mount_lock(mp);
                        ret = 0;
                        goto out;
                case VNODE_CLAIMED:
                default:
                        break;
                }
                mount_lock(mp);
        }

out:
#if defined(__x86_64__)
        panic_unhook(&hook.hook);
#endif
        (void)vnode_iterate_reloadq(mp);
        vnode_iterate_clear(mp);
        mount_unlock(mp);
        mount_iterate_unlock(mp);
        return ret;
}

void
mount_lock_renames(mount_t mp)
{
        lck_mtx_lock(&mp->mnt_renamelock);
}

void
mount_unlock_renames(mount_t mp)
{
        lck_mtx_unlock(&mp->mnt_renamelock);
}

void
mount_iterate_lock(mount_t mp)
{
        lck_mtx_lock(&mp->mnt_iter_lock);
}

void
mount_iterate_unlock(mount_t mp)
{
        lck_mtx_unlock(&mp->mnt_iter_lock);
}

void
mount_lock(mount_t mp)
{
        lck_mtx_lock(&mp->mnt_mlock);
}

void
mount_lock_spin(mount_t mp)
{
        lck_mtx_lock_spin(&mp->mnt_mlock);
}

void
mount_unlock(mount_t mp)
{
        lck_mtx_unlock(&mp->mnt_mlock);
}


void
mount_ref(mount_t mp, int locked)
{
        if (!locked) {
                mount_lock_spin(mp);
        }

        mp->mnt_count++;

        if (!locked) {
                mount_unlock(mp);
        }
}


void
mount_drop(mount_t mp, int locked)
{
        if (!locked) {
                mount_lock_spin(mp);
        }

        mp->mnt_count--;

        if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN)) {
                wakeup(&mp->mnt_lflag);
        }

        if (!locked) {
                mount_unlock(mp);
        }
}


int
mount_iterref(mount_t mp, int locked)
{
        int retval = 0;

        if (!locked) {
                mount_list_lock();
        }
        if (mp->mnt_iterref < 0) {
                retval = 1;
        } else {
                mp->mnt_iterref++;
        }
        if (!locked) {
                mount_list_unlock();
        }
        return retval;
}

int
mount_isdrained(mount_t mp, int locked)
{
        int retval;

        if (!locked) {
                mount_list_lock();
        }
        if (mp->mnt_iterref < 0) {
                retval = 1;
        } else {
                retval = 0;
        }
        if (!locked) {
                mount_list_unlock();
        }
        return retval;
}

void
mount_iterdrop(mount_t mp)
{
        mount_list_lock();
        mp->mnt_iterref--;
        wakeup(&mp->mnt_iterref);
        mount_list_unlock();
}

void
mount_iterdrain(mount_t mp)
{
        mount_list_lock();
        while (mp->mnt_iterref) {
                msleep((caddr_t)&mp->mnt_iterref, mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL);
        }
        /* mount iterations drained */
        mp->mnt_iterref = -1;
        mount_list_unlock();
}

void
mount_iterreset(mount_t mp)
{
        mount_list_lock();
        if (mp->mnt_iterref == -1) {
                mp->mnt_iterref = 0;
        }
        mount_list_unlock();
}

/* always called with mount lock held */
int
mount_refdrain(mount_t mp)
{
        if (mp->mnt_lflag & MNT_LDRAIN) {
                panic("already in drain");
        }
        mp->mnt_lflag |= MNT_LDRAIN;

        while (mp->mnt_count) {
                msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL);
        }

        if (mp->mnt_vnodelist.tqh_first != NULL) {
                panic("mount_refdrain: dangling vnode");
        }

        mp->mnt_lflag &= ~MNT_LDRAIN;

        return 0;
}

/* Tags the mount point as not supporting extended readdir for NFS exports */
void
mount_set_noreaddirext(mount_t mp)
{
        mount_lock(mp);
        mp->mnt_kern_flag |= MNTK_DENY_READDIREXT;
        mount_unlock(mp);
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 */
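/*
 * A minimal usage sketch (hypothetical caller):
 *
 *      if (vfs_busy(mp, LK_NOWAIT) == 0) {
 *              ... unmount is held off while the shared rwlock is held ...
 *              vfs_unbusy(mp);
 *      }
 *
 * With LK_NOWAIT the call fails immediately with ENOENT if an unmount
 * is in progress; without it, the caller sleeps until the unmount
 * completes and then still gets ENOENT.
 */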
int
vfs_busy(mount_t mp, int flags)
{
restart:
        if (mp->mnt_lflag & MNT_LDEAD) {
                return ENOENT;
        }

        mount_lock(mp);

        if (mp->mnt_lflag & MNT_LUNMOUNT) {
                if (flags & LK_NOWAIT || mp->mnt_lflag & MNT_LDEAD) {
                        mount_unlock(mp);
                        return ENOENT;
                }

                /*
                 * Since all busy locks are shared except the exclusive
                 * lock granted when unmounting, the only place that a
                 * wakeup needs to be done is at the release of the
                 * exclusive lock at the end of dounmount.
                 */
                mp->mnt_lflag |= MNT_LWAIT;
                msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
                return ENOENT;
        }

        mount_unlock(mp);

        lck_rw_lock_shared(&mp->mnt_rwlock);

        /*
         * Until we are granted the rwlock, it's possible for the mount point to
         * change state, so re-evaluate before granting the vfs_busy.
         */
        if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
                lck_rw_done(&mp->mnt_rwlock);
                goto restart;
        }
        return 0;
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mount_t mp)
{
        lck_rw_done(&mp->mnt_rwlock);
}



static void
vfs_rootmountfailed(mount_t mp)
{
        mount_list_lock();
        mp->mnt_vtable->vfc_refcount--;
        mount_list_unlock();

        vfs_unbusy(mp);

        mount_lock_destroy(mp);

#if CONFIG_MACF
        mac_mount_label_destroy(mp);
#endif

        zfree(mount_zone, mp);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
static mount_t
vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname)
{
        mount_t mp;

        mp = zalloc_flags(mount_zone, Z_WAITOK | Z_ZERO);
        /* Initialize the default IO constraints */
        mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
        mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
        mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
        mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
        mp->mnt_devblocksize = DEV_BSIZE;
        mp->mnt_alignmentmask = PAGE_MASK;
        mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
        mp->mnt_ioscale = 1;
        mp->mnt_ioflags = 0;
        mp->mnt_realrootvp = NULLVP;
        mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
        mp->mnt_throttle_mask = LOWPRI_MAX_NUM_DEV - 1;
        mp->mnt_devbsdunit = 0;

        mount_lock_init(mp);
        (void)vfs_busy(mp, LK_NOWAIT);

        TAILQ_INIT(&mp->mnt_vnodelist);
        TAILQ_INIT(&mp->mnt_workerqueue);
        TAILQ_INIT(&mp->mnt_newvnodes);

        mp->mnt_vtable = vfsp;
        mp->mnt_op = vfsp->vfc_vfsops;
        mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS;
        mp->mnt_vnodecovered = NULLVP;
        //mp->mnt_stat.f_type = vfsp->vfc_typenum;
        mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;

        mount_list_lock();
        vfsp->vfc_refcount++;
        mount_list_unlock();

        strlcpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
        mp->mnt_vfsstat.f_mntonname[0] = '/';
        /* XXX const poisoning layering violation */
        (void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);

#if CONFIG_MACF
        mac_mount_label_init(mp);
        mac_mount_label_associate(vfs_context_kernel(), mp);
#endif
        return mp;
}

errno_t
vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp)
{
        struct vfstable *vfsp;

        for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
                if (!strncmp(vfsp->vfc_name, fstypename,
                    sizeof(vfsp->vfc_name))) {
                        break;
                }
        }
        if (vfsp == NULL) {
                return ENODEV;
        }

        *mpp = vfs_rootmountalloc_internal(vfsp, devname);

        if (*mpp) {
                return 0;
        }

        return ENOMEM;
}

#define DBG_MOUNTROOT (FSDBG_CODE(DBG_MOUNT, 0))

/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
extern int (*mountroot)(void);

int
vfs_mountroot(void)
{
#if CONFIG_MACF
        struct vnode *vp;
#endif
        struct vfstable *vfsp;
        vfs_context_t ctx = vfs_context_kernel();
        struct vfs_attr vfsattr;
        int error;
        mount_t mp;
        vnode_t bdevvp_rootvp;

        KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_START);
        if (mountroot != NULL) {
                /*
                 * used for netboot which follows a different set of rules
                 */
                error = (*mountroot)();

                KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 0);
                return error;
        }
        if ((error = bdevvp(rootdev, &rootvp))) {
                printf("vfs_mountroot: can't setup bdevvp\n");

                KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 1);
                return error;
        }
        /*
         * 4951998 - code we call in vfc_mountroot may replace rootvp
         * so keep a local copy for some housekeeping.
         */
        bdevvp_rootvp = rootvp;

        for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
                if (vfsp->vfc_mountroot == NULL
                    && !ISSET(vfsp->vfc_vfsflags, VFC_VFSCANMOUNTROOT)) {
                        continue;
                }

                mp = vfs_rootmountalloc_internal(vfsp, "root_device");
                mp->mnt_devvp = rootvp;

                if (vfsp->vfc_mountroot) {
                        error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx);
                } else {
                        error = VFS_MOUNT(mp, rootvp, 0, ctx);
                }

                if (!error) {
                        if (bdevvp_rootvp != rootvp) {
                                /*
                                 * rootvp changed...
                                 * bump the iocount and fix up mnt_devvp for the
                                 * new rootvp (it will already have a usecount taken)...
                                 * drop the iocount and the usecount on the original
                                 * since we are no longer going to use it...
                                 */
                                vnode_getwithref(rootvp);
                                mp->mnt_devvp = rootvp;

                                vnode_rele(bdevvp_rootvp);
                                vnode_put(bdevvp_rootvp);
                        }
                        mp->mnt_devvp->v_specflags |= SI_MOUNTEDON;

                        vfs_unbusy(mp);

                        mount_list_add(mp);

                        /*
                         * cache the IO attributes for the underlying physical media...
                         * an error return indicates the underlying driver doesn't
                         * support all the queries necessary... however, reasonable
                         * defaults will have been set, so no reason to bail or care
                         */
                        vfs_init_io_attributes(rootvp, mp);

                        if (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) {
                                root_is_CF_drive = TRUE;
                        }

                        /*
                         * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
                         */
                        if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
                                mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
                        }
                        if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) {
                                mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
                        }

#if defined(XNU_TARGET_OS_OSX)
                        uint32_t speed;

                        if (MNTK_VIRTUALDEV & mp->mnt_kern_flag) {
                                speed = 128;
                        } else if (disk_conditioner_mount_is_ssd(mp)) {
                                speed = 7 * 256;
                        } else {
                                speed = 256;
                        }
                        vc_progress_setdiskspeed(speed);
#endif /* XNU_TARGET_OS_OSX */
                        /*
                         * Probe root file system for additional features.
                         */
                        (void)VFS_START(mp, 0, ctx);

                        VFSATTR_INIT(&vfsattr);
                        VFSATTR_WANTED(&vfsattr, f_capabilities);
                        if (vfs_getattr(mp, &vfsattr, ctx) == 0 &&
                            VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
                                if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
                                    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
                                        mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
                                }
#if NAMEDSTREAMS
                                if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) &&
                                    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) {
                                        mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
                                }
#endif
                                if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) &&
                                    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) {
                                        mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
                                }

                                if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS) &&
                                    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS)) {
                                        mp->mnt_kern_flag |= MNTK_DIR_HARDLINKS;
                                }
                        }

                        /*
                         * get rid of iocount reference returned
                         * by bdevvp (or picked up by us on the substituted
                         * rootvp)... it (or we) will have also taken
                         * a usecount reference which we want to keep
                         */
                        vnode_put(rootvp);

#if CONFIG_MACF
                        if ((vfs_flags(mp) & MNT_MULTILABEL) == 0) {
                                KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 2);
                                return 0;
                        }

                        error = VFS_ROOT(mp, &vp, ctx);
                        if (error) {
                                printf("%s() VFS_ROOT() returned %d\n",
                                    __func__, error);
                                dounmount(mp, MNT_FORCE, 0, ctx);
                                goto fail;
                        }
                        error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
                        /*
                         * get rid of reference provided by VFS_ROOT
                         */
                        vnode_put(vp);

                        if (error) {
                                printf("%s() vnode_label() returned %d\n",
                                    __func__, error);
                                dounmount(mp, MNT_FORCE, 0, ctx);
                                goto fail;
                        }
#endif
                        KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 3);
                        return 0;
                }
#if CONFIG_MACF
fail:
#endif
                vfs_rootmountfailed(mp);

                if (error != EINVAL) {
                        printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
                }
        }
        KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error ? error : ENODEV, 4);
        return ENODEV;
}

static int
cache_purge_callback(mount_t mp, __unused void * arg)
{
        cache_purgevfs(mp);
        return VFS_RETURNED;
}

extern lck_rw_t * rootvnode_rw_lock;
extern void set_rootvnode(vnode_t);


static int
mntonname_fixup_callback(mount_t mp, __unused void *arg)
{
        int error = 0;

        if ((strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/", sizeof("/")) == 0) ||
            (strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/dev", sizeof("/dev")) == 0)) {
                return 0;
        }

        if ((error = vfs_busy(mp, LK_NOWAIT))) {
                printf("vfs_busy failed with %d for %s\n", error, mp->mnt_vfsstat.f_mntonname);
                return -1;
        }

        int pathlen = MAXPATHLEN;
        if ((error = vn_getpath_ext(mp->mnt_vnodecovered, NULL, mp->mnt_vfsstat.f_mntonname, &pathlen, VN_GETPATH_FSENTER))) {
                printf("vn_getpath_ext failed with %d for mnt_vnodecovered of %s\n", error, mp->mnt_vfsstat.f_mntonname);
        }

        vfs_unbusy(mp);

        return error;
}

static int
clear_mntk_backs_root_callback(mount_t mp, __unused void *arg)
{
        lck_rw_lock_exclusive(&mp->mnt_rwlock);
        mp->mnt_kern_flag &= ~MNTK_BACKS_ROOT;
        lck_rw_done(&mp->mnt_rwlock);
        return VFS_RETURNED;
}

static int
verify_incoming_rootfs(vnode_t *incoming_rootvnodep, vfs_context_t ctx,
    vfs_switch_root_flags_t flags)
{
        mount_t mp;
        vnode_t tdp;
        vnode_t incoming_rootvnode_with_iocount = *incoming_rootvnodep;
        vnode_t incoming_rootvnode_with_usecount = NULLVP;
        int error = 0;

        if (vnode_vtype(incoming_rootvnode_with_iocount) != VDIR) {
                printf("Incoming rootfs path not a directory\n");
                error = ENOTDIR;
                goto done;
        }

        /*
         * Before we call VFS_ROOT, we have to let go of the iocount already
         * acquired, but before doing that get a usecount.
         */
        vnode_ref_ext(incoming_rootvnode_with_iocount, 0, VNODE_REF_FORCE);
        incoming_rootvnode_with_usecount = incoming_rootvnode_with_iocount;
        vnode_lock_spin(incoming_rootvnode_with_usecount);
        if ((mp = incoming_rootvnode_with_usecount->v_mount)) {
                mp->mnt_crossref++;
                vnode_unlock(incoming_rootvnode_with_usecount);
        } else {
                vnode_unlock(incoming_rootvnode_with_usecount);
                printf("Incoming rootfs root vnode does not have associated mount\n");
                error = ENOTDIR;
                goto done;
        }

        if (vfs_busy(mp, LK_NOWAIT)) {
                printf("Incoming rootfs root vnode mount is busy\n");
                error = ENOENT;
                goto out;
        }

        vnode_put(incoming_rootvnode_with_iocount);
        incoming_rootvnode_with_iocount = NULLVP;

        error = VFS_ROOT(mp, &tdp, ctx);

        if (error) {
                printf("Could not get rootvnode of incoming rootfs\n");
        } else if (tdp != incoming_rootvnode_with_usecount) {
                vnode_put(tdp);
                tdp = NULLVP;
                printf("Incoming rootfs root vnode mount is not a mountpoint\n");
                error = EINVAL;
                goto out_busy;
        } else {
                incoming_rootvnode_with_iocount = tdp;
                tdp = NULLVP;
        }

        if ((flags & VFSSR_VIRTUALDEV_PROHIBITED) != 0) {
                lck_rw_lock_shared(&mp->mnt_rwlock);
                if (mp->mnt_kern_flag & MNTK_VIRTUALDEV) {
                        error = ENODEV;
                }
                lck_rw_done(&mp->mnt_rwlock);
                if (error) {
                        printf("Incoming rootfs is backed by a virtual device; cannot switch to it\n");
                        goto out_busy;
                }
        }

out_busy:
        vfs_unbusy(mp);

out:
        vnode_lock(incoming_rootvnode_with_usecount);
        mp->mnt_crossref--;
        if (mp->mnt_crossref < 0) {
                panic("mount cross refs -ve");
        }
        vnode_unlock(incoming_rootvnode_with_usecount);

done:
        if (incoming_rootvnode_with_usecount) {
                vnode_rele(incoming_rootvnode_with_usecount);
                incoming_rootvnode_with_usecount = NULLVP;
        }

        if (error && incoming_rootvnode_with_iocount) {
                vnode_put(incoming_rootvnode_with_iocount);
                incoming_rootvnode_with_iocount = NULLVP;
        }

        *incoming_rootvnodep = incoming_rootvnode_with_iocount;
        return error;
}

/*
 * vfs_switch_root()
 *
 * Move the current root volume, and put a different volume at the root.
 *
 * incoming_vol_old_path: This is the path where the incoming root volume
 * is mounted when this function begins.
 * outgoing_vol_new_path: This is the path where the outgoing root volume
 * will be mounted when this function (successfully) ends.
 * Note: Do not use a leading slash.
 *
 * Volumes mounted at several fixed points (including /dev) will be preserved
 * at the same absolute path. That means they will move within the folder
 * hierarchy during the pivot operation. For example, /dev before the pivot
 * will be at /dev after the pivot.
 *
 * If any filesystem has MNTK_BACKS_ROOT set, it will be cleared. If the
 * incoming root volume is actually a disk image backed by some other
 * filesystem, it is the caller's responsibility to re-set MNTK_BACKS_ROOT
 * as appropriate.
 */
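/*
 * A call might look like this (hypothetical paths): pivot to the volume
 * currently mounted at /System/Volumes/Staged, parking the old root at
 * /System/Volumes/Outgoing afterwards:
 *
 *      error = vfs_switch_root("/System/Volumes/Staged",
 *          "System/Volumes/Outgoing", 0);
 *
 * Note the leading slash on the incoming path only, per the rules above.
 */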
int
vfs_switch_root(const char *incoming_vol_old_path,
    const char *outgoing_vol_new_path,
    vfs_switch_root_flags_t flags)
{
        // grumble grumble
#define countof(x) (sizeof(x) / sizeof(x[0]))

        struct preserved_mount {
                vnode_t pm_rootvnode;
                mount_t pm_mount;
                vnode_t pm_new_covered_vp;
                vnode_t pm_old_covered_vp;
                const char *pm_path;
        };

        vfs_context_t ctx = vfs_context_kernel();
        vnode_t incoming_rootvnode = NULLVP;
        vnode_t outgoing_vol_new_covered_vp = NULLVP;
        vnode_t incoming_vol_old_covered_vp = NULLVP;
        mount_t outgoing = NULL;
        mount_t incoming = NULL;

        struct preserved_mount devfs = { NULLVP, NULL, NULLVP, NULLVP, "dev" };
        struct preserved_mount preboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Preboot" };
        struct preserved_mount recovery = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Recovery" };
        struct preserved_mount vm = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/VM" };
        struct preserved_mount update = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Update" };
        struct preserved_mount iscPreboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/iSCPreboot" };
        struct preserved_mount hardware = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Hardware" };
        struct preserved_mount xarts = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/xarts" };
        struct preserved_mount factorylogs = { NULLVP, NULL, NULLVP, NULLVP, "FactoryLogs" };
        struct preserved_mount idiags = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Diags" };

        struct preserved_mount *preserved[10];
        preserved[0] = &devfs;
        preserved[1] = &preboot;
        preserved[2] = &recovery;
        preserved[3] = &vm;
        preserved[4] = &update;
        preserved[5] = &iscPreboot;
        preserved[6] = &hardware;
        preserved[7] = &xarts;
        preserved[8] = &factorylogs;
        preserved[9] = &idiags;

        int error;

        printf("%s : shuffling mount points : %s <-> / <-> %s\n", __FUNCTION__, incoming_vol_old_path, outgoing_vol_new_path);

        if (outgoing_vol_new_path[0] == '/') {
                // I should have written this to be more helpful and just advance the pointer forward past the slash
                printf("Do not use a leading slash in outgoing_vol_new_path\n");
                return EINVAL;
        }

        // Set incoming_rootvnode.
        // Find the vnode representing the mountpoint of the new root
        // filesystem. That will be the new root directory.
        error = vnode_lookup(incoming_vol_old_path, 0, &incoming_rootvnode, ctx);
        if (error) {
                printf("Incoming rootfs root vnode not found\n");
                error = ENOENT;
                goto done;
        }

        /*
         * This function drops the iocount and sets the vnode to NULL on error.
         */
        error = verify_incoming_rootfs(&incoming_rootvnode, ctx, flags);
        if (error) {
                goto done;
        }

        /*
         * Set outgoing_vol_new_covered_vp.
         * Find the vnode representing the future mountpoint of the old
         * root filesystem, inside the directory incoming_rootvnode.
         * Right now it's at "/incoming_vol_old_path/outgoing_vol_new_path";
         * soon it will become "/outgoing_vol_new_path", which will be covered.
         */
        error = vnode_lookupat(outgoing_vol_new_path, 0, &outgoing_vol_new_covered_vp, ctx, incoming_rootvnode);
        if (error) {
                printf("Outgoing rootfs path not found, abandoning / switch, error = %d\n", error);
                error = ENOENT;
                goto done;
        }
        if (vnode_vtype(outgoing_vol_new_covered_vp) != VDIR) {
                printf("Outgoing rootfs path is not a directory, abandoning / switch\n");
                error = ENOTDIR;
                goto done;
        }

        /*
         * Find the preserved mounts - see if they are mounted. Get their root
         * vnode if they are. If they aren't, leave rootvnode NULL which will
         * be the signal to ignore this mount later on.
         *
         * Also get preserved mounts' new_covered_vp.
         * Find the node representing the folder "dev" inside the directory newrootvnode.
         * Right now it's at "/incoming_vol_old_path/dev".
         * Soon it will become /dev, which will be covered by the devfs mountpoint.
         */
        for (size_t i = 0; i < countof(preserved); i++) {
                struct preserved_mount *pmi = preserved[i];

                error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_rootvnode, ctx, rootvnode);
                if (error) {
                        printf("skipping preserved mountpoint because not found or error: %d: %s\n", error, pmi->pm_path);
                        // not fatal. try the next one in the list.
                        continue;
                }
                bool is_mountpoint = false;
                vnode_lock_spin(pmi->pm_rootvnode);
                if ((pmi->pm_rootvnode->v_flag & VROOT) != 0) {
                        is_mountpoint = true;
                }
                vnode_unlock(pmi->pm_rootvnode);
                if (!is_mountpoint) {
                        printf("skipping preserved mountpoint because not a mountpoint: %s\n", pmi->pm_path);
                        vnode_put(pmi->pm_rootvnode);
                        pmi->pm_rootvnode = NULLVP;
                        // not fatal. try the next one in the list.
                        continue;
                }

                error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_new_covered_vp, ctx, incoming_rootvnode);
                if (error) {
                        printf("preserved new mount directory not found or error: %d: %s\n", error, pmi->pm_path);
                        error = ENOENT;
                        goto done;
                }
                if (vnode_vtype(pmi->pm_new_covered_vp) != VDIR) {
                        printf("preserved new mount directory not directory: %s\n", pmi->pm_path);
                        error = ENOTDIR;
                        goto done;
                }

                printf("will preserve mountpoint across pivot: /%s\n", pmi->pm_path);
        }

        /*
         * --
         * At this point, everything has been prepared and all error conditions
         * have been checked. We check everything we can before this point;
         * from now on we start making destructive changes, and we can't stop
         * until we reach the end.
         * ----
         */

        /* this usecount is transferred to the mnt_vnodecovered */
        vnode_ref_ext(outgoing_vol_new_covered_vp, 0, VNODE_REF_FORCE);
        /* this usecount is transferred to set_rootvnode */
        vnode_ref_ext(incoming_rootvnode, 0, VNODE_REF_FORCE);


        for (size_t i = 0; i < countof(preserved); i++) {
                struct preserved_mount *pmi = preserved[i];
                if (pmi->pm_rootvnode == NULLVP) {
                        continue;
                }

                /* this usecount is transferred to the mnt_vnodecovered */
                vnode_ref_ext(pmi->pm_new_covered_vp, 0, VNODE_REF_FORCE);

                /* The new_covered_vp is a mountpoint from now on. */
                vnode_lock_spin(pmi->pm_new_covered_vp);
                pmi->pm_new_covered_vp->v_flag |= VMOUNT;
                vnode_unlock(pmi->pm_new_covered_vp);
        }

        /* The outgoing_vol_new_covered_vp is a mountpoint from now on. */
        vnode_lock_spin(outgoing_vol_new_covered_vp);
        outgoing_vol_new_covered_vp->v_flag |= VMOUNT;
        vnode_unlock(outgoing_vol_new_covered_vp);


        /*
         * Identify the mount_ts of the mounted filesystems that are being
         * manipulated: outgoing rootfs, incoming rootfs, and the preserved
         * mounts.
         */
        outgoing = rootvnode->v_mount;
        incoming = incoming_rootvnode->v_mount;
        for (size_t i = 0; i < countof(preserved); i++) {
                struct preserved_mount *pmi = preserved[i];
                if (pmi->pm_rootvnode == NULLVP) {
                        continue;
                }

                pmi->pm_mount = pmi->pm_rootvnode->v_mount;
        }

        lck_rw_lock_exclusive(rootvnode_rw_lock);

        /* Setup incoming as the new rootfs */
        lck_rw_lock_exclusive(&incoming->mnt_rwlock);
        incoming_vol_old_covered_vp = incoming->mnt_vnodecovered;
        incoming->mnt_vnodecovered = NULLVP;
        strlcpy(incoming->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN);
        incoming->mnt_flag |= MNT_ROOTFS;
        lck_rw_done(&incoming->mnt_rwlock);

        /*
         * The preserved mountpoints will now be moved to
         * incoming_rootvnode/pm_path, and then, since incoming_rootvnode
         * is going to /, the preserved mounts will end up back at /pm_path
         * by the end of the function.
         */
        for (size_t i = 0; i < countof(preserved); i++) {
                struct preserved_mount *pmi = preserved[i];
                if (pmi->pm_rootvnode == NULLVP) {
                        continue;
                }

                lck_rw_lock_exclusive(&pmi->pm_mount->mnt_rwlock);
                pmi->pm_old_covered_vp = pmi->pm_mount->mnt_vnodecovered;
                pmi->pm_mount->mnt_vnodecovered = pmi->pm_new_covered_vp;
                vnode_lock_spin(pmi->pm_new_covered_vp);
                pmi->pm_new_covered_vp->v_mountedhere = pmi->pm_mount;
                vnode_unlock(pmi->pm_new_covered_vp);
                lck_rw_done(&pmi->pm_mount->mnt_rwlock);
        }

        /*
         * The old root volume now covers outgoing_vol_new_covered_vp
         * on the new root volume. Remove the ROOTFS marker.
         * Now it is to be found at outgoing_vol_new_path
         */
        lck_rw_lock_exclusive(&outgoing->mnt_rwlock);
        outgoing->mnt_vnodecovered = outgoing_vol_new_covered_vp;
        strlcpy(outgoing->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN);
        strlcat(outgoing->mnt_vfsstat.f_mntonname, outgoing_vol_new_path, MAXPATHLEN);
        outgoing->mnt_flag &= ~MNT_ROOTFS;
        vnode_lock_spin(outgoing_vol_new_covered_vp);
        outgoing_vol_new_covered_vp->v_mountedhere = outgoing;
        vnode_unlock(outgoing_vol_new_covered_vp);
        lck_rw_done(&outgoing->mnt_rwlock);

        /*
         * Finally, remove the mount_t linkage from the previously covered
         * vnodes on the old root volume. These were incoming_vol_old_path,
         * and each preserved mount's "/pm_path". The filesystems previously
         * mounted there have already been moved away.
         */
        vnode_lock_spin(incoming_vol_old_covered_vp);
        incoming_vol_old_covered_vp->v_flag &= ~VMOUNT;
        incoming_vol_old_covered_vp->v_mountedhere = NULL;
        vnode_unlock(incoming_vol_old_covered_vp);

        for (size_t i = 0; i < countof(preserved); i++) {
                struct preserved_mount *pmi = preserved[i];
                if (pmi->pm_rootvnode == NULLVP) {
                        continue;
                }

                vnode_lock_spin(pmi->pm_old_covered_vp);
                pmi->pm_old_covered_vp->v_flag &= ~VMOUNT;
                pmi->pm_old_covered_vp->v_mountedhere = NULL;
                vnode_unlock(pmi->pm_old_covered_vp);
        }

        /*
         * Clear the name cache since many cached names are now invalid.
         */
        vfs_iterate(0 /* flags */, cache_purge_callback, NULL);

        /*
         * Actually change the rootvnode! And finally drop the lock that
         * prevents concurrent vnode_lookups.
         */
        set_rootvnode(incoming_rootvnode);
        lck_rw_unlock_exclusive(rootvnode_rw_lock);

        if (!(incoming->mnt_kern_flag & MNTK_VIRTUALDEV) &&
            !(outgoing->mnt_kern_flag & MNTK_VIRTUALDEV)) {
                /*
                 * Switch the order of mount structures in the mountlist: the
                 * new root mount moves to the head of the list, followed by
                 * /dev and the other preserved mounts, then all the
                 * preexisting mounts (old rootfs + any others).
                 */
                mount_list_lock();
                for (size_t i = 0; i < countof(preserved); i++) {
                        struct preserved_mount *pmi = preserved[i];
                        if (pmi->pm_rootvnode == NULLVP) {
                                continue;
                        }

                        TAILQ_REMOVE(&mountlist, pmi->pm_mount, mnt_list);
                        TAILQ_INSERT_HEAD(&mountlist, pmi->pm_mount, mnt_list);
                }
                TAILQ_REMOVE(&mountlist, incoming, mnt_list);
                TAILQ_INSERT_HEAD(&mountlist, incoming, mnt_list);
                mount_list_unlock();
        }

        /*
         * Fixups across all volumes
         */
        vfs_iterate(0 /* flags */, mntonname_fixup_callback, NULL);
        vfs_iterate(0 /* flags */, clear_mntk_backs_root_callback, NULL);

        error = 0;

done:
        for (size_t i = 0; i < countof(preserved); i++) {
                struct preserved_mount *pmi = preserved[i];

                if (pmi->pm_rootvnode) {
                        vnode_put(pmi->pm_rootvnode);
                }
                if (pmi->pm_new_covered_vp) {
                        vnode_put(pmi->pm_new_covered_vp);
                }
                if (pmi->pm_old_covered_vp) {
                        vnode_rele(pmi->pm_old_covered_vp);
                }
        }

        if (outgoing_vol_new_covered_vp) {
                vnode_put(outgoing_vol_new_covered_vp);
        }

        if (incoming_vol_old_covered_vp) {
                vnode_rele(incoming_vol_old_covered_vp);
        }

        if (incoming_rootvnode) {
                vnode_put(incoming_rootvnode);
        }

        printf("%s : done shuffling mount points with error: %d\n", __FUNCTION__, error);
        return error;
}

/*
 * Mount the Recovery volume of a container
 */
int
vfs_mount_recovery(void)
{
#if CONFIG_MOUNT_PREBOOTRECOVERY
        int error = 0;

        error = vnode_get(rootvnode);
        if (error) {
                /* root must be mounted first */
                printf("vnode_get(rootvnode) failed with error %d\n", error);
                return error;
        }

        char recoverypath[] = PLATFORM_RECOVERY_VOLUME_MOUNT_POINT; /* !const because of internal casting */

        /* Mount the recovery volume */
        printf("attempting kernel mount for recovery volume...\n");
        error = kernel_mount(rootvnode->v_mount->mnt_vfsstat.f_fstypename, NULLVP, NULLVP,
            recoverypath, (rootvnode->v_mount), 0, 0, (KERNEL_MOUNT_RECOVERYVOL), vfs_context_kernel());

        if (error) {
                printf("Failed to mount recovery volume (%d)\n", error);
        } else {
                printf("mounted recovery volume\n");
        }

        vnode_put(rootvnode);
        return error;
#else
        return 0;
#endif
}

/*
 * Lookup a mount point by filesystem identifier.
 */

struct mount *
vfs_getvfs(fsid_t *fsid)
{
        return mount_list_lookupby_fsid(fsid, 0, 0);
}

static struct mount *
vfs_getvfs_locked(fsid_t *fsid)
{
        return mount_list_lookupby_fsid(fsid, 1, 0);
}

struct mount *
vfs_getvfs_by_mntonname(char *path)
{
        mount_t retmp = (mount_t)0;
        mount_t mp;

        mount_list_lock();
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (!strncmp(mp->mnt_vfsstat.f_mntonname, path,
                    sizeof(mp->mnt_vfsstat.f_mntonname))) {
                        retmp = mp;
                        if (mount_iterref(retmp, 1)) {
                                retmp = NULL;
                        }
                        goto out;
                }
        }
out:
        mount_list_unlock();
        return retmp;
}

/* generation number for creation of new fsids */
u_short mntid_gen = 0;
/*
 * Get a new unique fsid
 */
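/*
 * The fsid produced below is laid out as
 *      val[0] = makedev(nblkdev + vfc_typenum, mntid_gen)
 *      val[1] = vfc_typenum
 * where mntid_gen is a 16-bit generation counter that skips zero and is
 * re-rolled until the candidate fsid is not already in use.
 */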
void
vfs_getnewfsid(struct mount *mp)
{
        fsid_t tfsid;
        int mtype;

        mount_list_lock();

        /* generate a new fsid */
        mtype = mp->mnt_vtable->vfc_typenum;
        if (++mntid_gen == 0) {
                mntid_gen++;
        }
        tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
        tfsid.val[1] = mtype;

        while (vfs_getvfs_locked(&tfsid)) {
                if (++mntid_gen == 0) {
                        mntid_gen++;
                }
                tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
        }

        mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0];
        mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1];
        mount_list_unlock();
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int(**dead_vnodeop_p)(void *);
long numvnodes, freevnodes, deadvnodes, async_work_vnodes;


int async_work_timed_out = 0;
int async_work_handled = 0;
int dead_vnode_wanted = 0;
int dead_vnode_waited = 0;

/*
 * Move a vnode from one mount queue to another.
 */
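/*
 * Note the interplay with vnode_iterate(): while an iteration holds
 * MNT_LITER on the destination mount, new vnodes are inserted on
 * mnt_newvnodes rather than mnt_vnodelist so that the iterator's worker
 * queue is not perturbed; vnode_iterate_reloadq() later folds them back
 * into mnt_vnodelist.
 */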
static void
insmntque(vnode_t vp, mount_t mp)
{
        mount_t lmp;
        /*
         * Delete from old mount point vnode list, if on one.
         */
        if ((lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
                if ((vp->v_lflag & VNAMED_MOUNT) == 0) {
                        panic("insmntque: vp not in mount vnode list");
                }
                vp->v_lflag &= ~VNAMED_MOUNT;

                mount_lock_spin(lmp);

                mount_drop(lmp, 1);

                if (vp->v_mntvnodes.tqe_next == NULL) {
                        if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp) {
                                TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
                        } else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp) {
                                TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
                        } else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp) {
                                TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
                        }
                } else {
                        vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
                        *vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
                }
                vp->v_mntvnodes.tqe_next = NULL;
                vp->v_mntvnodes.tqe_prev = NULL;
                mount_unlock(lmp);
                return;
        }

        /*
         * Insert into list of vnodes for the new mount point, if available.
         */
        if ((vp->v_mount = mp) != NULL) {
                mount_lock_spin(mp);
                if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0)) {
                        panic("vp already in mount list");
                }
                if (mp->mnt_lflag & MNT_LITER) {
                        TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
                } else {
                        TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
                }
                if (vp->v_lflag & VNAMED_MOUNT) {
                        panic("insmntque: vp already in mount vnode list");
                }
                vp->v_lflag |= VNAMED_MOUNT;
                mount_ref(mp, 1);
                mount_unlock(mp);
        }
}


/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
1986 int
1987 bdevvp(dev_t dev, vnode_t *vpp)
1988 {
1989 vnode_t nvp;
1990 int error;
1991 struct vnode_fsparam vfsp;
1992 struct vfs_context context;
1993
1994 if (dev == NODEV) {
1995 *vpp = NULLVP;
1996 return ENODEV;
1997 }
1998
1999 context.vc_thread = current_thread();
2000 context.vc_ucred = FSCRED;
2001
2002 vfsp.vnfs_mp = (struct mount *)0;
2003 vfsp.vnfs_vtype = VBLK;
2004 vfsp.vnfs_str = "bdevvp";
2005 vfsp.vnfs_dvp = NULL;
2006 vfsp.vnfs_fsnode = NULL;
2007 vfsp.vnfs_cnp = NULL;
2008 vfsp.vnfs_vops = spec_vnodeop_p;
2009 vfsp.vnfs_rdev = dev;
2010 vfsp.vnfs_filesize = 0;
2011
2012 vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;
2013
2014 vfsp.vnfs_marksystem = 0;
2015 vfsp.vnfs_markroot = 0;
2016
2017 if ((error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp))) {
2018 *vpp = NULLVP;
2019 return error;
2020 }
2021 vnode_lock_spin(nvp);
2022 nvp->v_flag |= VBDEVVP;
2023 nvp->v_tag = VT_NON; /* set this to VT_NON so during aliasing it can be replaced */
2024 vnode_unlock(nvp);
2025 if ((error = vnode_ref(nvp))) {
2026 panic("bdevvp failed: vnode_ref");
2027 return error;
2028 }
2029 if ((error = VNOP_FSYNC(nvp, MNT_WAIT, &context))) {
2030 panic("bdevvp failed: fsync");
2031 return error;
2032 }
2033 if ((error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0))) {
2034 panic("bdevvp failed: invalidateblks");
2035 return error;
2036 }
2037
2038 #if CONFIG_MACF
2039 /*
2040 * XXXMAC: We can't put a MAC check here, the system will
2041 * panic without this vnode.
2042 */
2043 #endif /* MAC */
2044
2045 if ((error = VNOP_OPEN(nvp, FREAD, &context))) {
2046 panic("bdevvp failed: open");
2047 return error;
2048 }
2049 *vpp = nvp;
2050
2051 return 0;
2052 }
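/*
 * Usage sketch (illustrative): early boot creates the block-device
 * vnode for the root device roughly like this, before the root
 * filesystem is mounted:
 *
 *	vnode_t devvp = NULLVP;
 *	if (bdevvp(rootdev, &devvp)) {    // returns 0 on success
 *		panic("bdevvp failed for root device");
 *	}
 *
 * On success, *vpp holds both an iocount (from vnode_create) and a
 * usecount (from vnode_ref), so it won't be reclaimed underneath
 * the caller.
 */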
2053
2054 /*
2055 * Check to see if the new vnode represents a special device
2056 * for which we already have a vnode (either because of
2057 * bdevvp() or because of a different vnode representing
2058 * the same block device). If such an alias exists, deallocate
2059 * the existing contents and return the aliased vnode. The
2060 * caller is responsible for filling it with its new contents.
2061 */
2062 static vnode_t
2063 checkalias(struct vnode *nvp, dev_t nvp_rdev)
2064 {
2065 struct vnode *vp;
2066 struct vnode **vpp;
2067 struct specinfo *sin = NULL;
2068 int vid = 0;
2069
2070 vpp = &speclisth[SPECHASH(nvp_rdev)];
2071 loop:
2072 SPECHASH_LOCK();
2073
2074 for (vp = *vpp; vp; vp = vp->v_specnext) {
2075 if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
2076 vid = vp->v_id;
2077 break;
2078 }
2079 }
2080 SPECHASH_UNLOCK();
2081
2082 if (vp) {
2083 found_alias:
2084 if (vnode_getwithvid(vp, vid)) {
2085 goto loop;
2086 }
2087 /*
2088 * Termination state is checked in vnode_getwithvid
2089 */
2090 vnode_lock(vp);
2091
2092 /*
2093 * Alias, but not in use, so flush it out.
2094 */
2095 if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
2096 vnode_reclaim_internal(vp, 1, 1, 0);
2097 vnode_put_locked(vp);
2098 vnode_unlock(vp);
2099 goto loop;
2100 }
2101 }
2102 if (vp == NULL || vp->v_tag != VT_NON) {
2103 if (sin == NULL) {
2104 sin = zalloc_flags(specinfo_zone, Z_WAITOK | Z_ZERO);
2105 } else {
2106 bzero(sin, sizeof(struct specinfo));
2107 }
2108
2109 nvp->v_specinfo = sin;
2110 nvp->v_rdev = nvp_rdev;
2111 nvp->v_specflags = 0;
2112 nvp->v_speclastr = -1;
2113 nvp->v_specinfo->si_opencount = 0;
2114 nvp->v_specinfo->si_initted = 0;
2115 nvp->v_specinfo->si_throttleable = 0;
2116
2117 SPECHASH_LOCK();
2118
2119 /* We dropped the lock, someone else could have added an entry for this device */
2120 if (vp == NULLVP) {
2121 for (vp = *vpp; vp; vp = vp->v_specnext) {
2122 if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
2123 vid = vp->v_id;
2124 SPECHASH_UNLOCK();
2125 goto found_alias;
2126 }
2127 }
2128 }
2129
2130 nvp->v_hashchain = vpp;
2131 nvp->v_specnext = *vpp;
2132 *vpp = nvp;
2133
2134 if (vp != NULLVP) {
2135 nvp->v_specflags |= SI_ALIASED;
2136 vp->v_specflags |= SI_ALIASED;
2137 SPECHASH_UNLOCK();
2138 vnode_put_locked(vp);
2139 vnode_unlock(vp);
2140 } else {
2141 SPECHASH_UNLOCK();
2142 }
2143
2144 return NULLVP;
2145 }
2146
2147 if (sin) {
2148 zfree(specinfo_zone, sin);
2149 }
2150
2151 if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0) {
2152 return vp;
2153 }
2154
2155 panic("checkalias with VT_NON vp that shouldn't: %p", vp);
2156
2157 return vp;
2158 }
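/*
 * The loop above uses the vid-revalidation idiom found throughout this
 * file; a generic sketch (illustrative only):
 *
 *	vid = vp->v_id;               // sample the identity under the hash lock
 *	SPECHASH_UNLOCK();
 *	if (vnode_getwithvid(vp, vid)) {
 *		goto loop;            // vp was recycled in the window; rescan
 *	}
 *
 * vnode_getwithvid() fails if the vnode was reclaimed and reused (v_id
 * changed) between dropping the lock and acquiring the iocount, which
 * is why every failure path restarts the scan.
 */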
2159
2160
2161 /*
2162 * Get a reference on a particular vnode and lock it if requested.
2163 * If the vnode was on the inactive list, remove it from the list.
2164 * If the vnode was on the free list, remove it from the list and
2165 * move it to inactive list as needed.
2166 * The vnode lock bit is set if the vnode is being eliminated in
2167 * vgone. The process is awakened when the transition is completed,
2168 * and an error returned to indicate that the vnode is no longer
2169 * usable (possibly having been changed to a new file system type).
2170 */
2171 int
2172 vget_internal(vnode_t vp, int vid, int vflags)
2173 {
2174 int error = 0;
2175
2176 vnode_lock_spin(vp);
2177
2178 if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0)) {
2179 /*
2180 * vnode to be returned only if it has writers opened
2181 */
2182 error = EINVAL;
2183 } else {
2184 error = vnode_getiocount(vp, vid, vflags);
2185 }
2186
2187 vnode_unlock(vp);
2188
2189 return error;
2190 }
2191
2192 /*
2193 * Returns: 0 Success
2194 * ENOENT No such file or directory [terminating]
2195 */
2196 int
2197 vnode_ref(vnode_t vp)
2198 {
2199 return vnode_ref_ext(vp, 0, 0);
2200 }
2201
2202 /*
2203 * Returns: 0 Success
2204 * ENOENT No such file or directory [terminating]
2205 */
2206 int
2207 vnode_ref_ext(vnode_t vp, int fmode, int flags)
2208 {
2209 int error = 0;
2210
2211 vnode_lock_spin(vp);
2212
2213 /*
2214 * once all the current call sites have been fixed to ensure they have
2215 * taken an iocount, we can toughen this assert up and insist that the
2216 * iocount is non-zero... a non-zero usecount doesn't ensure correctness
2217 */
2218 if (vp->v_iocount <= 0 && vp->v_usecount <= 0) {
2219 panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);
2220 }
2221
2222 /*
2223 * if you are the owner of drain/termination, can acquire usecount
2224 */
2225 if ((flags & VNODE_REF_FORCE) == 0) {
2226 if ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) {
2227 if (vp->v_owner != current_thread()) {
2228 error = ENOENT;
2229 goto out;
2230 }
2231 }
2232 }
2233
2234 /* Enable atomic ops on v_usecount without the vnode lock */
2235 os_atomic_inc(&vp->v_usecount, relaxed);
2236
2237 if (fmode & FWRITE) {
2238 if (++vp->v_writecount <= 0) {
2239 panic("vnode_ref_ext: v_writecount");
2240 }
2241 }
2242 if (fmode & O_EVTONLY) {
2243 if (++vp->v_kusecount <= 0) {
2244 panic("vnode_ref_ext: v_kusecount");
2245 }
2246 }
2247 if (vp->v_flag & VRAGE) {
2248 struct uthread *ut;
2249
2250 ut = get_bsdthread_info(current_thread());
2251
2252 if (!(current_proc()->p_lflag & P_LRAGE_VNODES) &&
2253 !(ut->uu_flag & UT_RAGE_VNODES)) {
2254 /*
2255 * a 'normal' process accessed this vnode
2256 * so make sure its no longer marked
2257 * for rapid aging... also, make sure
2258 * it gets removed from the rage list...
2259 * when v_usecount drops back to 0, it
2260 * will be put back on the real free list
2261 */
2262 vp->v_flag &= ~VRAGE;
2263 vp->v_references = 0;
2264 vnode_list_remove(vp);
2265 }
2266 }
2267 if (vp->v_usecount == 1 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
2268 if (vp->v_ubcinfo) {
2269 vnode_lock_convert(vp);
2270 memory_object_mark_used(vp->v_ubcinfo->ui_control);
2271 }
2272 }
2273 out:
2274 vnode_unlock(vp);
2275
2276 return error;
2277 }
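/*
 * Usage sketch (illustrative): a caller is expected to already hold an
 * iocount (e.g. via vnode_get or vnode_getwithvid) when taking a
 * long-term usecount, after which the iocount can be dropped:
 *
 *	if (vnode_get(vp) == 0) {
 *		int err = vnode_ref(vp);   // take the long-term usecount
 *		vnode_put(vp);             // drop the short-term iocount
 *		if (err == 0) {
 *			...                // vp is pinned by the usecount
 *			vnode_rele(vp);    // release when done
 *		}
 *	}
 */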
2278
2279
2280 boolean_t
2281 vnode_on_reliable_media(vnode_t vp)
2282 {
2283 mount_t mp = vp->v_mount;
2284
2285 /*
2286 * A NULL mountpoint would imply it's not attached to any filesystem.
2287 * This can only happen with a vnode created by bdevvp(). We'll consider
2288 * those reliable, as the primary use of this function is to determine
2289 * which vnodes are to be handed off to the async cleaner thread for
2290 * reclaim.
2291 */
2292 if (!mp || (!(mp->mnt_kern_flag & MNTK_VIRTUALDEV) && (mp->mnt_flag & MNT_LOCAL))) {
2293 return TRUE;
2294 }
2295
2296 return FALSE;
2297 }
2298
2299 static void
2300 vnode_async_list_add_locked(vnode_t vp)
2301 {
2302 if (VONLIST(vp) || (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
2303 panic("vnode_async_list_add: %p is in wrong state", vp);
2304 }
2305
2306 TAILQ_INSERT_HEAD(&vnode_async_work_list, vp, v_freelist);
2307 vp->v_listflag |= VLIST_ASYNC_WORK;
2308
2309 async_work_vnodes++;
2310 }
2311
2312 static void
2313 vnode_async_list_add(vnode_t vp)
2314 {
2315 vnode_list_lock();
2316
2317 vnode_async_list_add_locked(vp);
2318
2319 vnode_list_unlock();
2320
2321 wakeup(&vnode_async_work_list);
2322 }
2323
2324
2325 /*
2326 * put the vnode on appropriate free list.
2327 * called with vnode LOCKED
2328 */
2329 static void
2330 vnode_list_add(vnode_t vp)
2331 {
2332 boolean_t need_dead_wakeup = FALSE;
2333
2334 #if DIAGNOSTIC
2335 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
2336 #endif
2337
2338 again:
2339
2340 /*
2341 * if it is already on a list or has non-zero references, return
2342 */
2343 if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE)) {
2344 return;
2345 }
2346
2347 /*
2348 * In vclean, we might have deferred ditching locked buffers
2349 * because something was still referencing them (indicated by
2350 * usecount). We can ditch them now.
2351 */
2352 if (ISSET(vp->v_lflag, VL_DEAD)
2353 && (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))) {
2354 ++vp->v_iocount; // Probably not necessary, but harmless
2355 #ifdef JOE_DEBUG
2356 record_vp(vp, 1);
2357 #endif
2358 vnode_unlock(vp);
2359 buf_invalidateblks(vp, BUF_INVALIDATE_LOCKED, 0, 0);
2360 vnode_lock(vp);
2361 vnode_dropiocount(vp);
2362 goto again;
2363 }
2364
2365 vnode_list_lock();
2366
2367 if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
2368 /*
2369 * add the new guy to the appropriate end of the RAGE list
2370 */
2371 if ((vp->v_flag & VAGE)) {
2372 TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
2373 } else {
2374 TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);
2375 }
2376
2377 vp->v_listflag |= VLIST_RAGE;
2378 ragevnodes++;
2379
2380 /*
2381 * reset the timestamp for the last inserted vp on the RAGE
2382 * queue to let new_vnode know that it's not ok to start stealing
2383 * from this list... as long as we're actively adding to this list
2384 * we'll push out the vnodes we want to donate to the real free list...
2385 * once we stop pushing, we'll let some time elapse before we start
2386 * stealing them in the new_vnode routine
2387 */
2388 microuptime(&rage_tv);
2389 } else {
2390 /*
2391 * if VL_DEAD, insert it at head of the dead list
2392 * else insert at tail of LRU list or at head if VAGE is set
2393 */
2394 if ((vp->v_lflag & VL_DEAD)) {
2395 TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
2396 vp->v_listflag |= VLIST_DEAD;
2397 deadvnodes++;
2398
2399 if (dead_vnode_wanted) {
2400 dead_vnode_wanted--;
2401 need_dead_wakeup = TRUE;
2402 }
2403 } else if ((vp->v_flag & VAGE)) {
2404 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2405 vp->v_flag &= ~VAGE;
2406 freevnodes++;
2407 } else {
2408 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
2409 freevnodes++;
2410 }
2411 }
2412 vnode_list_unlock();
2413
2414 if (need_dead_wakeup == TRUE) {
2415 wakeup_one((caddr_t)&dead_vnode_wanted);
2416 }
2417 }
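/*
 * Placement summary for the lists above:
 *
 *	VRAGE && !VL_DEAD -> rage list (head if VAGE is also set, else tail)
 *	VL_DEAD           -> head of the dead list (waking a waiter if any)
 *	VAGE              -> head of the free list (candidate for early reuse)
 *	otherwise         -> tail of the free list (normal LRU order)
 */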
2418
2419
2420 /*
2421 * remove the vnode from appropriate free list.
2422 * called with vnode LOCKED and
2423 * the list lock held
2424 */
2425 static void
2426 vnode_list_remove_locked(vnode_t vp)
2427 {
2428 if (VONLIST(vp)) {
2429 /*
2430 * the v_listflag field is
2431 * protected by the vnode_list_lock
2432 */
2433 if (vp->v_listflag & VLIST_RAGE) {
2434 VREMRAGE("vnode_list_remove", vp);
2435 } else if (vp->v_listflag & VLIST_DEAD) {
2436 VREMDEAD("vnode_list_remove", vp);
2437 } else if (vp->v_listflag & VLIST_ASYNC_WORK) {
2438 VREMASYNC_WORK("vnode_list_remove", vp);
2439 } else {
2440 VREMFREE("vnode_list_remove", vp);
2441 }
2442 }
2443 }
2444
2445
2446 /*
2447 * remove the vnode from appropriate free list.
2448 * called with vnode LOCKED
2449 */
2450 static void
2451 vnode_list_remove(vnode_t vp)
2452 {
2453 #if DIAGNOSTIC
2454 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
2455 #endif
2456 /*
2457 * we want to avoid taking the list lock
2458 * in the case where we're not on the free
2459 * list... this will be true for most
2460 * directories and any currently in use files
2461 *
2462 * we're guaranteed that we can't go from
2463 * the not-on-list state to the on-list
2464 * state since we hold the vnode lock...
2465 * all calls to vnode_list_add are done
2466 * under the vnode lock... so we can
2467 * check for that condition (the prevalent one)
2468 * without taking the list lock
2469 */
2470 if (VONLIST(vp)) {
2471 vnode_list_lock();
2472 /*
2473 * however, we're not guaranteed that
2474 * we won't go from the on-list state
2475 * to the not-on-list state until we
2476 * hold the vnode_list_lock... this
2477 * is due to "new_vnode" removing vnodes
2478 * from the free list under the list_lock
2479 * w/o the vnode lock... so we need to
2480 * check again whether we're currently
2481 * on the free list
2482 */
2483 vnode_list_remove_locked(vp);
2484
2485 vnode_list_unlock();
2486 }
2487 }
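/*
 * The pattern above, sketched generically (illustrative only), is the
 * classic unlocked-hint-plus-recheck idiom:
 *
 *	if (ON_LIST(obj)) {           // unlocked hint: cheap, may go stale
 *		list_lock();
 *		if (ON_LIST(obj)) {   // recheck now that racers are excluded
 *			list_remove(obj);
 *		}
 *		list_unlock();
 *	}
 *
 * The hint can only be trusted in one direction here: holding the vnode
 * lock prevents an off-list vnode from being added, but not an on-list
 * vnode from being removed by new_vnode.
 */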
2488
2489
2490 void
2491 vnode_rele(vnode_t vp)
2492 {
2493 vnode_rele_internal(vp, 0, 0, 0);
2494 }
2495
2496
2497 void
2498 vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
2499 {
2500 vnode_rele_internal(vp, fmode, dont_reenter, 0);
2501 }
2502
2503
2504 void
2505 vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
2506 {
2507 int32_t old_usecount;
2508
2509 if (!locked) {
2510 vnode_lock_spin(vp);
2511 }
2512 #if DIAGNOSTIC
2513 else {
2514 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
2515 }
2516 #endif
2517 /* Enable atomic ops on v_usecount without the vnode lock */
2518 old_usecount = os_atomic_dec_orig(&vp->v_usecount, relaxed);
2519 if (old_usecount < 1) {
2520 /*
2521 * Because we allow atomic ops on usecount (in lookup only, under
2522 * specific conditions of already having a usecount) it is
2523 * possible that when the vnode is examined, its usecount is
2524 * different than what will be printed in this panic message.
2525 */
2526 panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.",
2527 vp, old_usecount - 1, vp->v_tag, vp->v_type, vp->v_flag);
2528 }
2529
2530 if (fmode & FWRITE) {
2531 if (--vp->v_writecount < 0) {
2532 panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
2533 }
2534 }
2535 if (fmode & O_EVTONLY) {
2536 if (--vp->v_kusecount < 0) {
2537 panic("vnode_rele_ext: vp %p kusecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
2538 }
2539 }
2540 if (vp->v_kusecount > vp->v_usecount) {
2541 panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
2542 }
2543
2544 if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
2545 /*
2546 * vnode is still busy... if we're the last
2547 * usecount, mark for a future call to VNOP_INACTIVE
2548 * when the iocount finally drops to 0
2549 */
2550 if (vp->v_usecount == 0) {
2551 vp->v_lflag |= VL_NEEDINACTIVE;
2552 vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
2553 }
2554 goto done;
2555 }
2556 vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
2557
2558 if (ISSET(vp->v_lflag, VL_TERMINATE | VL_DEAD) || dont_reenter) {
2559 /*
2560 * vnode is being cleaned, or
2561 * we've requested that we don't reenter
2562 * the filesystem on this release...in
2563 * the latter case, we'll mark the vnode aged
2564 */
2565 if (dont_reenter) {
2566 if (!(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM))) {
2567 vp->v_lflag |= VL_NEEDINACTIVE;
2568
2569 if (vnode_on_reliable_media(vp) == FALSE || vp->v_flag & VISDIRTY) {
2570 vnode_async_list_add(vp);
2571 goto done;
2572 }
2573 }
2574 vp->v_flag |= VAGE;
2575 }
2576 vnode_list_add(vp);
2577
2578 goto done;
2579 }
2580 /*
2581 * at this point both the iocount and usecount
2582 * are zero
2583 * pick up an iocount so that we can call
2584 * VNOP_INACTIVE with the vnode lock unheld
2585 */
2586 vp->v_iocount++;
2587 #ifdef JOE_DEBUG
2588 record_vp(vp, 1);
2589 #endif
2590 vp->v_lflag &= ~VL_NEEDINACTIVE;
2591 vnode_unlock(vp);
2592
2593 VNOP_INACTIVE(vp, vfs_context_current());
2594
2595 vnode_lock_spin(vp);
2596 /*
2597 * because we dropped the vnode lock to call VNOP_INACTIVE
2598 * the state of the vnode may have changed... we may have
2599 * picked up an iocount, usecount or the MARKTERM may have
2600 * been set... we need to reevaluate the reference counts
2601 * to determine if we can call vnode_reclaim_internal at
2602 * this point... if the reference counts are up, we'll pick
2603 * up the MARKTERM state when they get subsequently dropped
2604 */
2605 if ((vp->v_iocount == 1) && (vp->v_usecount == 0) &&
2606 ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
2607 struct uthread *ut;
2608
2609 ut = get_bsdthread_info(current_thread());
2610
2611 if (ut->uu_defer_reclaims) {
2612 vp->v_defer_reclaimlist = ut->uu_vreclaims;
2613 ut->uu_vreclaims = vp;
2614 goto done;
2615 }
2616 vnode_lock_convert(vp);
2617 vnode_reclaim_internal(vp, 1, 1, 0);
2618 }
2619 vnode_dropiocount(vp);
2620 vnode_list_add(vp);
2621 done:
2622 if (vp->v_usecount == 0 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
2623 if (vp->v_ubcinfo) {
2624 vnode_lock_convert(vp);
2625 memory_object_mark_unused(vp->v_ubcinfo->ui_control, (vp->v_flag & VRAGE) == VRAGE);
2626 }
2627 }
2628 if (!locked) {
2629 vnode_unlock(vp);
2630 }
2631 return;
2632 }
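/*
 * Reference lifecycle sketch (illustrative): for a vnode holding one
 * usecount and no iocount, a plain vnode_rele() works through the
 * following sequence in the code above:
 *
 *	usecount 1 -> 0               // atomic decrement
 *	iocount  0 -> 1               // taken so the vnode lock can be dropped
 *	VNOP_INACTIVE(vp, ctx)        // filesystem's inactive hook runs unlocked
 *	iocount  1 -> 0               // vnode_dropiocount()
 *	vnode_list_add(vp)            // parked on the free (or dead) list
 */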
2633
2634 /*
2635 * Remove any vnodes in the vnode table belonging to mount point mp.
2636 *
2637 * If MNT_NOFORCE is specified, there should not be any active ones,
2638 * return error if any are found (nb: this is a user error, not a
2639 * system error). If MNT_FORCE is specified, detach any active vnodes
2640 * that are found.
2641 */
2642
2643 int
2644 vflush(struct mount *mp, struct vnode *skipvp, int flags)
2645 {
2646 struct vnode *vp;
2647 int busy = 0;
2648 int reclaimed = 0;
2649 int retval;
2650 unsigned int vid;
2651 bool first_try = true;
2652
2653 /*
2654 * See comments in vnode_iterate() for the rationale for this lock
2655 */
2656 mount_iterate_lock(mp);
2657
2658 mount_lock(mp);
2659 vnode_iterate_setup(mp);
2660 /*
2661 * On regular unmounts (not forced) do a
2662 * quick check for vnodes in use. This
2663 * preserves the caching of vnodes. The automounter
2664 * tries unmounting every so often to see whether
2665 * the filesystem is still busy or not.
2666 */
2667 if (((flags & FORCECLOSE) == 0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) {
2668 if (vnode_umount_preflight(mp, skipvp, flags)) {
2669 vnode_iterate_clear(mp);
2670 mount_unlock(mp);
2671 mount_iterate_unlock(mp);
2672 return EBUSY;
2673 }
2674 }
2675 loop:
2676 /* If it returns 0 then there is nothing to do */
2677 retval = vnode_iterate_prepare(mp);
2678
2679 if (retval == 0) {
2680 vnode_iterate_clear(mp);
2681 mount_unlock(mp);
2682 mount_iterate_unlock(mp);
2683 return retval;
2684 }
2685
2686 /* iterate over all the vnodes */
2687 while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
2688 vp = TAILQ_FIRST(&mp->mnt_workerqueue);
2689 TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
2690 TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
2691
2692 if ((vp->v_mount != mp) || (vp == skipvp)) {
2693 continue;
2694 }
2695 vid = vp->v_id;
2696 mount_unlock(mp);
2697
2698 vnode_lock_spin(vp);
2699
2700 // If vnode is already terminating, wait for it...
2701 while (vp->v_id == vid && ISSET(vp->v_lflag, VL_TERMINATE)) {
2702 vp->v_lflag |= VL_TERMWANT;
2703 msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vflush", NULL);
2704 }
2705
2706 if ((vp->v_id != vid) || ISSET(vp->v_lflag, VL_DEAD)) {
2707 vnode_unlock(vp);
2708 mount_lock(mp);
2709 continue;
2710 }
2711
2712 /*
2713 * If requested, skip over vnodes marked VSYSTEM.
2714 * Skip over all vnodes marked VNOFLUSH.
2715 */
2716 if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
2717 (vp->v_flag & VNOFLUSH))) {
2718 vnode_unlock(vp);
2719 mount_lock(mp);
2720 continue;
2721 }
2722 /*
2723 * If requested, skip over vnodes marked VSWAP.
2724 */
2725 if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
2726 vnode_unlock(vp);
2727 mount_lock(mp);
2728 continue;
2729 }
2730 /*
2731 * If requested, skip over vnodes marked VROOT.
2732 */
2733 if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
2734 vnode_unlock(vp);
2735 mount_lock(mp);
2736 continue;
2737 }
2738 /*
2739 * If WRITECLOSE is set, only flush out regular file
2740 * vnodes open for writing.
2741 */
2742 if ((flags & WRITECLOSE) &&
2743 (vp->v_writecount == 0 || vp->v_type != VREG)) {
2744 vnode_unlock(vp);
2745 mount_lock(mp);
2746 continue;
2747 }
2748 /*
2749 * If the real usecount is 0, all we need to do is clear
2750 * out the vnode data structures and we are done.
2751 */
2752 if (((vp->v_usecount == 0) ||
2753 ((vp->v_usecount - vp->v_kusecount) == 0))) {
2754 vnode_lock_convert(vp);
2755 vp->v_iocount++; /* so that drain waits for * other iocounts */
2756 #ifdef JOE_DEBUG
2757 record_vp(vp, 1);
2758 #endif
2759 vnode_reclaim_internal(vp, 1, 1, 0);
2760 vnode_dropiocount(vp);
2761 vnode_list_add(vp);
2762 vnode_unlock(vp);
2763
2764 reclaimed++;
2765 mount_lock(mp);
2766 continue;
2767 }
2768 /*
2769 * If FORCECLOSE is set, forcibly close the vnode.
2770 * For block or character devices, revert to an
2771 * anonymous device. For all other files, just kill them.
2772 */
2773 if (flags & FORCECLOSE) {
2774 vnode_lock_convert(vp);
2775
2776 if (vp->v_type != VBLK && vp->v_type != VCHR) {
2777 vp->v_iocount++; /* so that drain waits * for other iocounts */
2778 #ifdef JOE_DEBUG
2779 record_vp(vp, 1);
2780 #endif
2781 vnode_abort_advlocks(vp);
2782 vnode_reclaim_internal(vp, 1, 1, 0);
2783 vnode_dropiocount(vp);
2784 vnode_list_add(vp);
2785 vnode_unlock(vp);
2786 } else {
2787 vclean(vp, 0);
2788 vp->v_lflag &= ~VL_DEAD;
2789 vp->v_op = spec_vnodeop_p;
2790 vp->v_flag |= VDEVFLUSH;
2791 vnode_unlock(vp);
2792 }
2793 mount_lock(mp);
2794 continue;
2795 }
2796
2797 /* log vnodes blocking unforced unmounts */
2798 if (print_busy_vnodes && first_try && ((flags & FORCECLOSE) == 0)) {
2799 vprint("vflush - busy vnode", vp);
2800 }
2801
2802 vnode_unlock(vp);
2803 mount_lock(mp);
2804 busy++;
2805 }
2806
2807 /* At this point the worker queue is completed */
2808 if (busy && ((flags & FORCECLOSE) == 0) && reclaimed) {
2809 busy = 0;
2810 reclaimed = 0;
2811 (void)vnode_iterate_reloadq(mp);
2812 first_try = false;
2813 /* returned with mount lock held */
2814 goto loop;
2815 }
2816
2817 /* if new vnodes were created in between retry the reclaim */
2818 if (vnode_iterate_reloadq(mp) != 0) {
2819 if (!(busy && ((flags & FORCECLOSE) == 0))) {
2820 first_try = false;
2821 goto loop;
2822 }
2823 }
2824 vnode_iterate_clear(mp);
2825 mount_unlock(mp);
2826 mount_iterate_unlock(mp);
2827
2828 if (busy && ((flags & FORCECLOSE) == 0)) {
2829 return EBUSY;
2830 }
2831 return 0;
2832 }
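/*
 * Usage sketch (illustrative; the exact flags vary by filesystem): a
 * VFS unmount implementation typically flushes everything except its
 * private system vnodes, forcing only when MNT_FORCE was requested:
 *
 *	int flags = SKIPSYSTEM | SKIPSWAP;
 *	if (mntflags & MNT_FORCE) {
 *		flags |= FORCECLOSE;
 *	}
 *	error = vflush(mp, skipvp, flags);    // skipvp: e.g. the fs root vnode
 */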
2833
2834 long num_recycledvnodes = 0;
2835 /*
2836 * Disassociate the underlying file system from a vnode.
2837 * The vnode lock is held on entry.
2838 */
2839 static void
2840 vclean(vnode_t vp, int flags)
2841 {
2842 vfs_context_t ctx = vfs_context_current();
2843 int active;
2844 int need_inactive;
2845 int already_terminating;
2846 int clflags = 0;
2847 #if NAMEDSTREAMS
2848 int is_namedstream;
2849 #endif
2850
2851 /*
2852 * Check to see if the vnode is in use.
2853 * If so we have to reference it before we clean it out
2854 * so that its count cannot fall to zero and generate a
2855 * race against ourselves to recycle it.
2856 */
2857 active = vp->v_usecount;
2858
2859 /*
2860 * just in case we missed sending a needed
2861 * VNOP_INACTIVE, we'll do it now
2862 */
2863 need_inactive = (vp->v_lflag & VL_NEEDINACTIVE);
2864
2865 vp->v_lflag &= ~VL_NEEDINACTIVE;
2866
2867 /*
2868 * Prevent the vnode from being recycled or
2869 * brought into use while we clean it out.
2870 */
2871 already_terminating = (vp->v_lflag & VL_TERMINATE);
2872
2873 vp->v_lflag |= VL_TERMINATE;
2874
2875 #if NAMEDSTREAMS
2876 is_namedstream = vnode_isnamedstream(vp);
2877 #endif
2878
2879 vnode_unlock(vp);
2880
2881 OSAddAtomicLong(1, &num_recycledvnodes);
2882
2883 if (flags & DOCLOSE) {
2884 clflags |= IO_NDELAY;
2885 }
2886 if (flags & REVOKEALL) {
2887 clflags |= IO_REVOKE;
2888 }
2889
2890 #if CONFIG_MACF
2891 mac_vnode_notify_reclaim(vp);
2892 #endif
2893
2894 if (active && (flags & DOCLOSE)) {
2895 VNOP_CLOSE(vp, clflags, ctx);
2896 }
2897
2898 /*
2899 * Clean out any buffers associated with the vnode.
2900 */
2901 if (flags & DOCLOSE) {
2902 #if CONFIG_NFS_CLIENT
2903 if (vp->v_tag == VT_NFS) {
2904 nfs_vinvalbuf(vp, V_SAVE, ctx, 0);
2905 } else
2906 #endif /* CONFIG_NFS_CLIENT */
2907 {
2908 VNOP_FSYNC(vp, MNT_WAIT, ctx);
2909
2910 /*
2911 * If the vnode is still in use (by the journal for
2912 * example) we don't want to invalidate locked buffers
2913 * here. In that case, either the journal will tidy them
2914 * up, or we will deal with it when the usecount is
2915 * finally released in vnode_rele_internal.
2916 */
2917 buf_invalidateblks(vp, BUF_WRITE_DATA | (active ? 0 : BUF_INVALIDATE_LOCKED), 0, 0);
2918 }
2919 if (UBCINFOEXISTS(vp)) {
2920 /*
2921 * Clean the pages in VM.
2922 */
2923 (void)ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
2924 }
2925 }
2926 if (active || need_inactive) {
2927 VNOP_INACTIVE(vp, ctx);
2928 }
2929
2930 #if NAMEDSTREAMS
2931 if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
2932 vnode_t pvp = vp->v_parent;
2933
2934 /* Delete the shadow stream file before we reclaim its vnode */
2935 if (vnode_isshadow(vp)) {
2936 vnode_relenamedstream(pvp, vp);
2937 }
2938
2939 /*
2940 * No more streams associated with the parent. We
2941 * have a ref on it, so its identity is stable.
2942 * If the parent is on an opaque volume, then we need to know
2943 * whether it has associated named streams.
2944 */
2945 if (vfs_authopaque(pvp->v_mount)) {
2946 vnode_lock_spin(pvp);
2947 pvp->v_lflag &= ~VL_HASSTREAMS;
2948 vnode_unlock(pvp);
2949 }
2950 }
2951 #endif
2952
2953 /*
2954 * Destroy ubc named reference
2955 * cluster_release is done on this path
2956 * along with dropping the reference on the ucred
2957 * (and in the case of forced unmount of an mmap-ed file,
2958 * the ubc reference on the vnode is dropped here too).
2959 */
2960 ubc_destroy_named(vp);
2961
2962 #if CONFIG_TRIGGERS
2963 /*
2964 * cleanup trigger info from vnode (if any)
2965 */
2966 if (vp->v_resolve) {
2967 vnode_resolver_detach(vp);
2968 }
2969 #endif
2970
2971 /*
2972 * Reclaim the vnode.
2973 */
2974 if (VNOP_RECLAIM(vp, ctx)) {
2975 panic("vclean: cannot reclaim");
2976 }
2977
2978 // make sure the name & parent ptrs get cleaned out!
2979 vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE | VNODE_UPDATE_PURGEFIRMLINK);
2980
2981 vnode_lock(vp);
2982
2983 /*
2984 * Remove the vnode from any mount list it might be on. It is not
2985 * safe to do this any earlier because unmount needs to wait for
2986 * any vnodes to terminate and it cannot do that if it cannot find
2987 * them.
2988 */
2989 insmntque(vp, (struct mount *)0);
2990
2991 vp->v_mount = dead_mountp;
2992 vp->v_op = dead_vnodeop_p;
2993 vp->v_tag = VT_NON;
2994 vp->v_data = NULL;
2995
2996 vp->v_lflag |= VL_DEAD;
2997 vp->v_flag &= ~VISDIRTY;
2998
2999 if (already_terminating == 0) {
3000 vp->v_lflag &= ~VL_TERMINATE;
3001 /*
3002 * Done with purge, notify sleepers of the grim news.
3003 */
3004 if (vp->v_lflag & VL_TERMWANT) {
3005 vp->v_lflag &= ~VL_TERMWANT;
3006 wakeup(&vp->v_lflag);
3007 }
3008 }
3009 }
3010
3011 /*
3012 * Eliminate all activity associated with the requested vnode
3013 * and with all vnodes aliased to the requested vnode.
3014 */
3015 int
3016 #if DIAGNOSTIC
3017 vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
3018 #else
3019 vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
3020 #endif
3021 {
3022 struct vnode *vq;
3023 int vid;
3024
3025 #if DIAGNOSTIC
3026 if ((flags & REVOKEALL) == 0) {
3027 panic("vnop_revoke");
3028 }
3029 #endif
3030
3031 if (vnode_isaliased(vp)) {
3032 /*
3033 * If a vgone (or vclean) is already in progress,
3034 * return an immediate error
3035 */
3036 if (vp->v_lflag & VL_TERMINATE) {
3037 return ENOENT;
3038 }
3039
3040 /*
3041 * Ensure that vp will not be vgone'd while we
3042 * are eliminating its aliases.
3043 */
3044 SPECHASH_LOCK();
3045 while ((vp->v_specflags & SI_ALIASED)) {
3046 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
3047 if (vq->v_rdev != vp->v_rdev ||
3048 vq->v_type != vp->v_type || vp == vq) {
3049 continue;
3050 }
3051 vid = vq->v_id;
3052 SPECHASH_UNLOCK();
3053 if (vnode_getwithvid(vq, vid)) {
3054 SPECHASH_LOCK();
3055 break;
3056 }
3057 vnode_lock(vq);
3058 if (!(vq->v_lflag & VL_TERMINATE)) {
3059 vnode_reclaim_internal(vq, 1, 1, 0);
3060 }
3061 vnode_put_locked(vq);
3062 vnode_unlock(vq);
3063 SPECHASH_LOCK();
3064 break;
3065 }
3066 }
3067 SPECHASH_UNLOCK();
3068 }
3069 vnode_lock(vp);
3070 if (vp->v_lflag & VL_TERMINATE) {
3071 vnode_unlock(vp);
3072 return ENOENT;
3073 }
3074 vnode_reclaim_internal(vp, 1, 0, REVOKEALL);
3075 vnode_unlock(vp);
3076
3077 return 0;
3078 }
3079
3080 /*
3081 * Recycle an unused vnode to the front of the free list.
3082 * If busy, mark it for reclaim when its references drop; returns 1 if recycled now.
3083 */
3084 int
3085 vnode_recycle(struct vnode *vp)
3086 {
3087 vnode_lock_spin(vp);
3088
3089 if (vp->v_iocount || vp->v_usecount) {
3090 vp->v_lflag |= VL_MARKTERM;
3091 vnode_unlock(vp);
3092 return 0;
3093 }
3094 vnode_lock_convert(vp);
3095 vnode_reclaim_internal(vp, 1, 0, 0);
3096
3097 vnode_unlock(vp);
3098
3099 return 1;
3100 }
3101
3102 static int
3103 vnode_reload(vnode_t vp)
3104 {
3105 vnode_lock_spin(vp);
3106
3107 if ((vp->v_iocount > 1) || vp->v_usecount) {
3108 vnode_unlock(vp);
3109 return 0;
3110 }
3111 if (vp->v_iocount <= 0) {
3112 panic("vnode_reload with no iocount %d", vp->v_iocount);
3113 }
3114
3115 /* mark for release when iocount is dropped */
3116 vp->v_lflag |= VL_MARKTERM;
3117 vnode_unlock(vp);
3118
3119 return 1;
3120 }
3121
3122
3123 static void
3124 vgone(vnode_t vp, int flags)
3125 {
3126 struct vnode *vq;
3127 struct vnode *vx;
3128
3129 /*
3130 * Clean out the filesystem specific data.
3131 * vclean also takes care of removing the
3132 * vnode from any mount list it might be on
3133 */
3134 vclean(vp, flags | DOCLOSE);
3135
3136 /*
3137 * If special device, remove it from special device alias list
3138 * if it is on one.
3139 */
3140 if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
3141 SPECHASH_LOCK();
3142 if (*vp->v_hashchain == vp) {
3143 *vp->v_hashchain = vp->v_specnext;
3144 } else {
3145 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
3146 if (vq->v_specnext != vp) {
3147 continue;
3148 }
3149 vq->v_specnext = vp->v_specnext;
3150 break;
3151 }
3152 if (vq == NULL) {
3153 panic("missing bdev");
3154 }
3155 }
3156 if (vp->v_specflags & SI_ALIASED) {
3157 vx = NULL;
3158 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
3159 if (vq->v_rdev != vp->v_rdev ||
3160 vq->v_type != vp->v_type) {
3161 continue;
3162 }
3163 if (vx) {
3164 break;
3165 }
3166 vx = vq;
3167 }
3168 if (vx == NULL) {
3169 panic("missing alias");
3170 }
3171 if (vq == NULL) {
3172 vx->v_specflags &= ~SI_ALIASED;
3173 }
3174 vp->v_specflags &= ~SI_ALIASED;
3175 }
3176 SPECHASH_UNLOCK();
3177 {
3178 struct specinfo *tmp = vp->v_specinfo;
3179 vp->v_specinfo = NULL;
3180 zfree(specinfo_zone, tmp);
3181 }
3182 }
3183 }
3184
3185 /*
3186 * Lookup a vnode by device number and check whether a filesystem is mounted on it.
3187 */
3188 int
3189 check_mountedon(dev_t dev, enum vtype type, int *errorp)
3190 {
3191 vnode_t vp;
3192 int rc = 0;
3193 int vid;
3194
3195 loop:
3196 SPECHASH_LOCK();
3197 for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
3198 if (dev != vp->v_rdev || type != vp->v_type) {
3199 continue;
3200 }
3201 vid = vp->v_id;
3202 SPECHASH_UNLOCK();
3203 if (vnode_getwithvid(vp, vid)) {
3204 goto loop;
3205 }
3206 vnode_lock_spin(vp);
3207 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
3208 vnode_unlock(vp);
3209 if ((*errorp = vfs_mountedon(vp)) != 0) {
3210 rc = 1;
3211 }
3212 } else {
3213 vnode_unlock(vp);
3214 }
3215 vnode_put(vp);
3216 return rc;
3217 }
3218 SPECHASH_UNLOCK();
3219 return 0;
3220 }
3221
3222 /*
3223 * Calculate the total number of references to a special device.
3224 */
3225 int
3226 vcount(vnode_t vp)
3227 {
3228 vnode_t vq, vnext;
3229 int count;
3230 int vid;
3231
3232 if (!vnode_isspec(vp)) {
3233 return vp->v_usecount - vp->v_kusecount;
3234 }
3235
3236 loop:
3237 if (!vnode_isaliased(vp)) {
3238 return vp->v_specinfo->si_opencount;
3239 }
3240 count = 0;
3241
3242 SPECHASH_LOCK();
3243 /*
3244 * Grab first vnode and its vid.
3245 */
3246 vq = *vp->v_hashchain;
3247 vid = vq ? vq->v_id : 0;
3248
3249 SPECHASH_UNLOCK();
3250
3251 while (vq) {
3252 /*
3253 * Attempt to get the vnode outside the SPECHASH lock.
3254 */
3255 if (vnode_getwithvid(vq, vid)) {
3256 goto loop;
3257 }
3258 vnode_lock(vq);
3259
3260 if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) {
3261 if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) {
3262 /*
3263 * Alias, but not in use, so flush it out.
3264 */
3265 vnode_reclaim_internal(vq, 1, 1, 0);
3266 vnode_put_locked(vq);
3267 vnode_unlock(vq);
3268 goto loop;
3269 }
3270 count += vq->v_specinfo->si_opencount;
3271 }
3272 vnode_unlock(vq);
3273
3274 SPECHASH_LOCK();
3275 /*
3276 * must do this with the reference still held on 'vq'
3277 * so that it can't be destroyed while we're poking
3278 * through v_specnext
3279 */
3280 vnext = vq->v_specnext;
3281 vid = vnext ? vnext->v_id : 0;
3282
3283 SPECHASH_UNLOCK();
3284
3285 vnode_put(vq);
3286
3287 vq = vnext;
3288 }
3289
3290 return count;
3291 }
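/*
 * Usage sketch (illustrative): device close paths commonly consult
 * vcount() to decide whether this is the last close of a device,
 * since it totals si_opencount across every alias of the same dev_t:
 *
 *	if (vcount(vp) > 1) {
 *		return 0;     // other opens remain; leave the device up
 *	}
 *	...                   // last close: quiesce the hardware
 *
 * (The exact threshold depends on where in the close path the check
 * is made relative to the open-count decrement.)
 */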
3292
3293 int prtactive = 0; /* 1 => print out reclaim of active vnodes */
3294
3295 /*
3296 * Print out a description of a vnode.
3297 */
3298 static const char *typename[] =
3299 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
3300
3301 void
3302 vprint(const char *label, struct vnode *vp)
3303 {
3304 char sbuf[64];
3305
3306 if (label != NULL) {
3307 printf("%s: ", label);
3308 }
3309 printf("name %s type %s, usecount %d, writecount %d\n",
3310 vp->v_name, typename[vp->v_type],
3311 vp->v_usecount, vp->v_writecount);
3312 sbuf[0] = '\0';
3313 if (vp->v_flag & VROOT) {
3314 strlcat(sbuf, "|VROOT", sizeof(sbuf));
3315 }
3316 if (vp->v_flag & VTEXT) {
3317 strlcat(sbuf, "|VTEXT", sizeof(sbuf));
3318 }
3319 if (vp->v_flag & VSYSTEM) {
3320 strlcat(sbuf, "|VSYSTEM", sizeof(sbuf));
3321 }
3322 if (vp->v_flag & VNOFLUSH) {
3323 strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf));
3324 }
3325 if (vp->v_flag & VBWAIT) {
3326 strlcat(sbuf, "|VBWAIT", sizeof(sbuf));
3327 }
3328 if (vnode_isaliased(vp)) {
3329 strlcat(sbuf, "|VALIASED", sizeof(sbuf));
3330 }
3331 if (sbuf[0] != '\0') {
3332 printf("vnode flags (%s\n", &sbuf[1]);
3333 }
3334 }
3335
3336
3337 int
3338 vn_getpath(struct vnode *vp, char *pathbuf, int *len)
3339 {
3340 return build_path(vp, pathbuf, *len, len, BUILDPATH_NO_FS_ENTER, vfs_context_current());
3341 }
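/*
 * Usage sketch (illustrative): the length parameter carries the buffer
 * size in and the consumed length back out:
 *
 *	char *path = zalloc(ZV_NAMEI);    // MAXPATHLEN-sized buffer
 *	int len = MAXPATHLEN;
 *	if (vn_getpath(vp, path, &len) == 0) {
 *		printf("vnode path: %s\n", path);
 *	}
 *	zfree(ZV_NAMEI, path);
 */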
3342
3343 int
3344 vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
3345 {
3346 return build_path(vp, pathbuf, *len, len, 0, vfs_context_current());
3347 }
3348
3349 /*
3350 * vn_getpath_fsenter_with_parent will reenter the file system to find the path of the
3351 * vnode. It requires that there are IO counts on both the vnode and the directory vnode.
3352 *
3353 * vn_getpath_fsenter is called by MAC hooks to authorize operations for everything but
3354 * unlink, rmdir and rename. For those operations the MAC hook calls vn_getpath. This presents
3355 * problems where, if the path cannot be found from the name cache, those operations can
3356 * erroneously fail with EPERM even though the call should succeed. When removing or moving
3357 * file system objects with operations such as unlink or rename, those operations need to
3358 * take IO counts on the target and containing directory. Calling vn_getpath_fsenter from a
3359 * MAC hook from these operations during forced unmount operations can lead to
3360 * deadlock. This happens when the operation starts and IO counts are taken on the containing
3361 * directories and targets. Before the MAC hook is called, a forced unmount from another
3362 * thread takes place and blocks on the ongoing operation's directory vnode in vdrain.
3363 * After which, the MAC hook gets called and calls vn_getpath_fsenter. vn_getpath_fsenter
3364 * is called with the understanding that there is an IO count on the target. If in
3365 * build_path the directory vnode is no longer in the cache, then the parent object id is
3366 * obtained via vnode_getattr from the target and used to call VFS_VGET to get the parent
3367 * vnode. The file system's VFS_VGET then looks up by inode in its hash and tries to get
3368 * an IO count. But VFS_VGET "sees" the directory vnode is in vdrain and can block
3369 * depending on which version and how it calls the vnode_get family of interfaces.
3370 *
3371 * N.B. A reasonable interface to use is vnode_getwithvid. This interface was modified to
3372 * call vnode_getiocount with VNODE_DRAINO, so it will happily get an IO count and not
3373 * cause issues, but there is no guarantee that all or any file systems are doing that.
3374 *
3375 * vn_getpath_fsenter_with_parent can enter the file system safely since there is a known
3376 * IO count on the directory vnode by calling build_path_with_parent.
3377 */
3378
3379 int
3380 vn_getpath_fsenter_with_parent(struct vnode *dvp, struct vnode *vp, char *pathbuf, int *len)
3381 {
3382 return build_path_with_parent(vp, dvp, pathbuf, *len, len, NULL, 0, vfs_context_current());
3383 }
3384
3385 int
3386 vn_getpath_ext(struct vnode *vp, struct vnode *dvp, char *pathbuf, int *len, int flags)
3387 {
3388 int bpflags = (flags & VN_GETPATH_FSENTER) ? 0 : BUILDPATH_NO_FS_ENTER;
3389
3390 if (flags && (flags != VN_GETPATH_FSENTER)) {
3391 if (flags & VN_GETPATH_NO_FIRMLINK) {
3392 bpflags |= BUILDPATH_NO_FIRMLINK;
3393 }
3394 if (flags & VN_GETPATH_VOLUME_RELATIVE) {
3395 bpflags |= (BUILDPATH_VOLUME_RELATIVE | BUILDPATH_NO_FIRMLINK);
3396 }
3397 if (flags & VN_GETPATH_NO_PROCROOT) {
3398 bpflags |= BUILDPATH_NO_PROCROOT;
3399 }
3400 }
3401
3402 return build_path_with_parent(vp, dvp, pathbuf, *len, len, NULL, bpflags, vfs_context_current());
3403 }
3404
3405 int
3406 vn_getpath_no_firmlink(struct vnode *vp, char *pathbuf, int *len)
3407 {
3408 return vn_getpath_ext(vp, NULLVP, pathbuf, len, VN_GETPATH_NO_FIRMLINK);
3409 }
3410
3411 int
3412 vn_getpath_ext_with_mntlen(struct vnode *vp, struct vnode *dvp, char *pathbuf, size_t *len, size_t *mntlen, int flags)
3413 {
3414 int bpflags = (flags & VN_GETPATH_FSENTER) ? 0 : BUILDPATH_NO_FS_ENTER;
3415 int local_len;
3416 int error;
3417
3418 if (*len > INT_MAX) {
3419 return EINVAL;
3420 }
3421
3422 local_len = *len;
3423
3424 if (flags && (flags != VN_GETPATH_FSENTER)) {
3425 if (flags & VN_GETPATH_NO_FIRMLINK) {
3426 bpflags |= BUILDPATH_NO_FIRMLINK;
3427 }
3428 if (flags & VN_GETPATH_VOLUME_RELATIVE) {
3429 bpflags |= (BUILDPATH_VOLUME_RELATIVE | BUILDPATH_NO_FIRMLINK);
3430 }
3431 if (flags & VN_GETPATH_NO_PROCROOT) {
3432 bpflags |= BUILDPATH_NO_PROCROOT;
3433 }
3434 }
3435
3436 error = build_path_with_parent(vp, dvp, pathbuf, local_len, &local_len, mntlen, bpflags, vfs_context_current());
3437
3438 if (local_len >= 0 && local_len <= (int)*len) {
3439 *len = (size_t)local_len;
3440 }
3441
3442 return error;
3443 }
3444
3445 int
3446 vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
3447 {
3448 return ubc_cs_getcdhash(vp, offset, cdhash);
3449 }
3450
3451
3452 static char *extension_table = NULL;
3453 static int nexts;
3454 static int max_ext_width;
3455
3456 static int
3457 extension_cmp(const void *a, const void *b)
3458 {
3459 return (int)(strlen((const char *)a) - strlen((const char *)b));
3460 }
3461
3462
3463 //
3464 // This is the api LaunchServices uses to inform the kernel of
3465 // the list of package extensions to ignore.
3466 //
3467 // Internally we keep the list sorted by the length of
3468 // the extension (from shortest to longest). We sort the
3469 // list of extensions so that we can speed up our searches
3470 // when comparing file names -- we only compare extensions
3471 // that could possibly fit into the file name, not all of
3472 // them (i.e. a short 8 character name can't have an 8
3473 // character extension).
3474 //
3475 extern lck_mtx_t *pkg_extensions_lck;
3476
3477 __private_extern__ int
3478 set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
3479 {
3480 char *new_exts, *old_exts;
3481 int old_nentries = 0, old_maxwidth = 0;
3482 int error;
3483
3484 if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
3485 return EINVAL;
3486 }
3487
3488
3489 // allocate one byte extra so we can guarantee null termination
3490 new_exts = kheap_alloc(KHEAP_DATA_BUFFERS, (nentries * maxwidth) + 1,
3491 Z_WAITOK);
3492 if (new_exts == NULL) {
3493 return ENOMEM;
3494 }
3495
3496 error = copyin(data, new_exts, nentries * maxwidth);
3497 if (error) {
3498 kheap_free(KHEAP_DATA_BUFFERS, new_exts, (nentries * maxwidth) + 1);
3499 return error;
3500 }
3501
3502 new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block
3503
3504 qsort(new_exts, nentries, maxwidth, extension_cmp);
3505
3506 lck_mtx_lock(pkg_extensions_lck);
3507
3508 old_exts = extension_table;
3509 old_nentries = nexts;
3510 old_maxwidth = max_ext_width;
3511 extension_table = new_exts;
3512 nexts = nentries;
3513 max_ext_width = maxwidth;
3514
3515 lck_mtx_unlock(pkg_extensions_lck);
3516
3517 kheap_free(KHEAP_DATA_BUFFERS, old_exts,
3518 (old_nentries * old_maxwidth) + 1);
3519
3520 return 0;
3521 }
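/*
 * Layout sketch of the table copied in above (illustrative, with
 * nentries = 3 and maxwidth = 10): a flat nentries x maxwidth byte
 * array, one NUL-padded extension per row, plus one guard NUL byte:
 *
 *	row 0: "app\0\0\0\0\0\0\0"
 *	row 1: "bundle\0\0\0\0"
 *	row 2: "framework\0"
 *
 * is_package_name() walks it row by row with ptr += max_ext_width,
 * using strlen() on each row to find the real extension length.
 */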
3522
3523
3524 int
3525 is_package_name(const char *name, int len)
3526 {
3527 int i;
3528 size_t extlen;
3529 const char *ptr, *name_ext;
3530
3531 // if the name is 3 bytes or less it can't be of the
3532 // form A.B and if it begins with a "." then it is also
3533 // not a package.
3534 if (len <= 3 || name[0] == '.') {
3535 return 0;
3536 }
3537
3538 name_ext = NULL;
3539 for (ptr = name; *ptr != '\0'; ptr++) {
3540 if (*ptr == '.') {
3541 name_ext = ptr;
3542 }
3543 }
3544
3545 // if there is no "." extension, it can't match
3546 if (name_ext == NULL) {
3547 return 0;
3548 }
3549
3550 // advance over the "."
3551 name_ext++;
3552
3553 lck_mtx_lock(pkg_extensions_lck);
3554
3555 // now iterate over all the extensions to see if any match
3556 ptr = &extension_table[0];
3557 for (i = 0; i < nexts; i++, ptr += max_ext_width) {
3558 extlen = strlen(ptr);
3559 if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
3560 // aha, a match!
3561 lck_mtx_unlock(pkg_extensions_lck);
3562 return 1;
3563 }
3564 }
3565
3566 lck_mtx_unlock(pkg_extensions_lck);
3567
3568 // if we get here, no extension matched
3569 return 0;
3570 }
3571
3572 int
3573 vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
3574 {
3575 char *ptr, *end;
3576 int comp = 0;
3577
3578 if (pathlen < 0) {
3579 return EINVAL;
3580 }
3581
3582 *component = -1;
3583 if (*path != '/') {
3584 return EINVAL;
3585 }
3586
3587 end = path + 1;
3588 while (end < path + pathlen && *end != '\0') {
3589 while (end < path + pathlen && *end == '/' && *end != '\0') {
3590 end++;
3591 }
3592
3593 ptr = end;
3594
3595 while (end < path + pathlen && *end != '/' && *end != '\0') {
3596 end++;
3597 }
3598
3599 if (end > path + pathlen) {
3600 // hmm, string wasn't null terminated
3601 return EINVAL;
3602 }
3603
3604 *end = '\0';
3605 if (is_package_name(ptr, (int)(end - ptr))) {
3606 *component = comp;
3607 break;
3608 }
3609
3610 end++;
3611 comp++;
3612 }
3613
3614 return 0;
3615 }
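/*
 * Example (illustrative): with "app" registered as a package extension,
 * checking the path "/Applications/Mail.app/Contents/Info.plist" sets
 * *component to 1 -- "Mail.app" is the second slash-separated component,
 * and components are numbered from 0 after the leading '/'.
 */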
3616
3617 /*
3618 * Determine if a name is inappropriate for a searchfs query.
3619 * This list consists of /System currently.
3620 */
3621
3622 int
3623 vn_searchfs_inappropriate_name(const char *name, int len)
3624 {
3625 const char *bad_names[] = { "System" };
3626 int bad_len[] = { 6 };
3627 int i;
3628
3629 if (len < 0) {
3630 return EINVAL;
3631 }
3632
3633 for (i = 0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) {
3634 if (len == bad_len[i] && strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) {
3635 return 1;
3636 }
3637 }
3638
3639 // if we get here, no name matched
3640 return 0;
3641 }
3642
3643 /*
3644 * Top level filesystem related information gathering.
3645 */
3646 extern unsigned int vfs_nummntops;
3647
3648 /*
3649 * The VFS_NUMMNTOPS shouldn't be at name[1] since it
3650 * is a VFS generic variable. Since we no longer support
3651 * VT_UFS, we reserve its value to support this sysctl node.
3652 *
3653 * It should have been:
3654 * name[0]: VFS_GENERIC
3655 * name[1]: VFS_NUMMNTOPS
3656 */
3657 SYSCTL_INT(_vfs, VFS_NUMMNTOPS, nummntops,
3658 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
3659 &vfs_nummntops, 0, "");
3660
3661 int
3662 vfs_sysctl(int *name __unused, u_int namelen __unused,
3663 user_addr_t oldp __unused, size_t *oldlenp __unused,
3664 user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused);
3665
3666 int
3667 vfs_sysctl(int *name __unused, u_int namelen __unused,
3668 user_addr_t oldp __unused, size_t *oldlenp __unused,
3669 user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused)
3670 {
3671 return EINVAL;
3672 }
3673
3674
3675 //
3676 // The following code disallows specific sysctl's that came through
3677 // the direct sysctl interface (vfs_sysctl_node) instead of the newer
3678 // sysctl_vfs_ctlbyfsid() interface. We cannot allow these selectors
3679 // through vfs_sysctl_node() because it passes the user's oldp pointer
3680 // directly to the file system which (for these selectors) casts it
3681 // back to a struct sysctl_req and then proceeds to use SYSCTL_IN()
3682 // which jumps through an arbitrary function pointer. When called
3683 // through the sysctl_vfs_ctlbyfsid() interface this does not happen
3684 // and so it's safe.
3685 //
3686 // Unfortunately we have to pull in definitions from AFP and SMB and
3687 // perform explicit name checks on the file system to determine if
3688 // these selectors are being used.
3689 //
3690
3691 #define AFPFS_VFS_CTL_GETID 0x00020001
3692 #define AFPFS_VFS_CTL_NETCHANGE 0x00020002
3693 #define AFPFS_VFS_CTL_VOLCHANGE 0x00020003
3694
3695 #define SMBFS_SYSCTL_REMOUNT 1
3696 #define SMBFS_SYSCTL_REMOUNT_INFO 2
3697 #define SMBFS_SYSCTL_GET_SERVER_SHARE 3
3698
3699
3700 static int
3701 is_bad_sysctl_name(struct vfstable *vfsp, int selector_name)
3702 {
3703 switch (selector_name) {
3704 case VFS_CTL_QUERY:
3705 case VFS_CTL_TIMEO:
3706 case VFS_CTL_NOLOCKS:
3707 case VFS_CTL_NSTATUS:
3708 case VFS_CTL_SADDR:
3709 case VFS_CTL_DISC:
3710 case VFS_CTL_SERVERINFO:
3711 return 1;
3712
3713 default:
3714 break;
3715 }
3716
3717 // the more complicated check for some of SMB's special values
3718 if (strcmp(vfsp->vfc_name, "smbfs") == 0) {
3719 switch (selector_name) {
3720 case SMBFS_SYSCTL_REMOUNT:
3721 case SMBFS_SYSCTL_REMOUNT_INFO:
3722 case SMBFS_SYSCTL_GET_SERVER_SHARE:
3723 return 1;
3724 }
3725 } else if (strcmp(vfsp->vfc_name, "afpfs") == 0) {
3726 switch (selector_name) {
3727 case AFPFS_VFS_CTL_GETID:
3728 case AFPFS_VFS_CTL_NETCHANGE:
3729 case AFPFS_VFS_CTL_VOLCHANGE:
3730 return 1;
3731 }
3732 }
3733
3734 //
3735 // If we get here we passed all the checks so the selector is ok
3736 //
3737 return 0;
3738 }
3739
3740
3741 int vfs_sysctl_node SYSCTL_HANDLER_ARGS
3742 {
3743 int *name, namelen;
3744 struct vfstable *vfsp;
3745 int error;
3746 int fstypenum;
3747
3748 fstypenum = oidp->oid_number;
3749 name = arg1;
3750 namelen = arg2;
3751
3752 /* all sysctl names at this level should have at least one name slot for the FS */
3753 if (namelen < 1) {
3754 return EISDIR; /* overloaded */
3755 }
3756 mount_list_lock();
3757 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
3758 if (vfsp->vfc_typenum == fstypenum) {
3759 vfsp->vfc_refcount++;
3760 break;
3761 }
3762 }
3763 mount_list_unlock();
3764
3765 if (vfsp == NULL) {
3766 return ENOTSUP;
3767 }
3768
3769 if (is_bad_sysctl_name(vfsp, name[0])) {
3770 printf("vfs: bad selector 0x%.8x for old-style sysctl(). use the sysctl-by-fsid interface instead\n", name[0]);
3771 return EPERM;
3772 }
3773
3774 error = (vfsp->vfc_vfsops->vfs_sysctl)(name, namelen, req->oldptr, &req->oldlen, req->newptr, req->newlen, vfs_context_current());
3775
3776 mount_list_lock();
3777 vfsp->vfc_refcount--;
3778 mount_list_unlock();
3779
3780 return error;
3781 }
3782
3783 /*
3784 * Check to see if a filesystem is mounted on a block device.
3785 */
3786 int
3787 vfs_mountedon(struct vnode *vp)
3788 {
3789 struct vnode *vq;
3790 int error = 0;
3791
3792 SPECHASH_LOCK();
3793 if (vp->v_specflags & SI_MOUNTEDON) {
3794 error = EBUSY;
3795 goto out;
3796 }
3797 if (vp->v_specflags & SI_ALIASED) {
3798 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
3799 if (vq->v_rdev != vp->v_rdev ||
3800 vq->v_type != vp->v_type) {
3801 continue;
3802 }
3803 if (vq->v_specflags & SI_MOUNTEDON) {
3804 error = EBUSY;
3805 break;
3806 }
3807 }
3808 }
3809 out:
3810 SPECHASH_UNLOCK();
3811 return error;
3812 }
3813
3814 struct unmount_info {
3815 int u_errs; // Total failed unmounts
3816 int u_busy; // EBUSY failed unmounts
3817 };
3818
3819 static int
3820 unmount_callback(mount_t mp, void *arg)
3821 {
3822 int error;
3823 char *mntname;
3824 struct unmount_info *uip = arg;
3825
3826 mount_ref(mp, 0);
3827 mount_iterdrop(mp); // avoid vfs_iterate deadlock in dounmount()
3828
3829 mntname = zalloc(ZV_NAMEI);
3830 strlcpy(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN);
3831
3832 error = dounmount(mp, MNT_FORCE, 1, vfs_context_current());
3833 if (error) {
3834 uip->u_errs++;
3835 printf("Unmount of %s failed (%d)\n", mntname ? mntname:"?", error);
3836 if (error == EBUSY) {
3837 uip->u_busy++;
3838 }
3839 }
3840 if (mntname) {
3841 zfree(ZV_NAMEI, mntname);
3842 }
3843
3844 return VFS_RETURNED;
3845 }
3846
3847 /*
3848 * Unmount all filesystems. The list is traversed in reverse order
3849 * of mounting to avoid dependencies.
3850 * Busy mounts are retried.
3851 */
3852 __private_extern__ void
3853 vfs_unmountall(void)
3854 {
3855 int mounts, sec = 1;
3856 struct unmount_info ui;
3857
3858 vfs_unmountall_started = 1;
3859
3860 retry:
3861 ui.u_errs = ui.u_busy = 0;
3862 vfs_iterate(VFS_ITERATE_CB_DROPREF | VFS_ITERATE_TAIL_FIRST, unmount_callback, &ui);
3863 mounts = mount_getvfscnt();
3864 if (mounts == 0) {
3865 return;
3866 }
3867
3868 if (ui.u_busy > 0) { // Busy mounts - wait & retry
3869 tsleep(&nummounts, PVFS, "busy mount", sec * hz);
3870 sec *= 2;
3871 if (sec <= 32) {
3872 goto retry;
3873 }
3874 printf("Unmounting timed out\n");
3875 } else if (ui.u_errs < mounts) {
3876 // If the vfs_iterate missed mounts in progress - wait a bit
3877 tsleep(&nummounts, PVFS, "missed mount", 2 * hz);
3878 }
3879 }
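/*
 * The busy-mount retry above backs off exponentially: sleeps of 1, 2,
 * 4, 8, 16 and 32 seconds (roughly 63 seconds in total) before giving
 * up and reporting the timeout.
 */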
3880
3881 /*
3882 * This routine is called from vnode_pager_deallocate out of the VM
3883 * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
3884 * on a vnode that has a UBCINFO
3885 */
3886 __private_extern__ void
3887 vnode_pager_vrele(vnode_t vp)
3888 {
3889 struct ubc_info *uip;
3890
3891 vnode_lock_spin(vp);
3892
3893 vp->v_lflag &= ~VNAMED_UBC;
3894 if (vp->v_usecount != 0) {
3895 /*
3896 * At the eleventh hour, just before the ubcinfo is
3897 * destroyed, ensure the ubc-specific v_usecount
3898 * reference has gone. We use v_usecount != 0 as a hint;
3899 * ubc_unmap() does nothing if there's no mapping.
3900 *
3901 * This case is caused by coming here via forced unmount,
3902 * versus the usual vm_object_deallocate() path.
3903 * In the forced unmount case, ubc_destroy_named()
3904 * releases the pager before memory_object_last_unmap()
3905 * can be called.
3906 */
3907 vnode_unlock(vp);
3908 ubc_unmap(vp);
3909 vnode_lock_spin(vp);
3910 }
3911
3912 uip = vp->v_ubcinfo;
3913 vp->v_ubcinfo = UBC_INFO_NULL;
3914
3915 vnode_unlock(vp);
3916
3917 ubc_info_deallocate(uip);
3918 }
3919
3920
3921 #include <sys/disk.h>
3922
3923 u_int32_t rootunit = (u_int32_t)-1;
3924
3925 #if CONFIG_IOSCHED
3926 extern int lowpri_throttle_enabled;
3927 extern int iosched_enabled;
3928 #endif
3929
3930 errno_t
3931 vfs_init_io_attributes(vnode_t devvp, mount_t mp)
3932 {
3933 int error;
3934 off_t readblockcnt = 0;
3935 off_t writeblockcnt = 0;
3936 off_t readmaxcnt = 0;
3937 off_t writemaxcnt = 0;
3938 off_t readsegcnt = 0;
3939 off_t writesegcnt = 0;
3940 off_t readsegsize = 0;
3941 off_t writesegsize = 0;
3942 off_t alignment = 0;
3943 u_int32_t minsaturationbytecount = 0;
3944 u_int32_t ioqueue_depth = 0;
3945 u_int32_t blksize;
3946 u_int64_t temp;
3947 u_int32_t features;
3948 u_int64_t location = 0;
3949 vfs_context_t ctx = vfs_context_current();
3950 dk_corestorage_info_t cs_info;
3951 boolean_t cs_present = FALSE;
3952 int isssd = 0;
3953 int isvirtual = 0;
3954
3955
3956 VNOP_IOCTL(devvp, DKIOCGETTHROTTLEMASK, (caddr_t)&mp->mnt_throttle_mask, 0, NULL);
3957 /*
3958 * as a reasonable approximation, only use the lowest bit of the mask
3959 * to generate a disk unit number
3960 */
3961 mp->mnt_devbsdunit = num_trailing_0(mp->mnt_throttle_mask);
3962
3963 if (devvp == rootvp) {
3964 rootunit = mp->mnt_devbsdunit;
3965 }
3966
3967 if (mp->mnt_devbsdunit == rootunit) {
3968 /*
3969 * this mount point exists on the same device as the root
3970 * partition, so it comes under the hard throttle control...
3971 * this is true even for the root mount point itself
3972 */
3973 mp->mnt_kern_flag |= MNTK_ROOTDEV;
3974 }
3975 /*
3976 * force the spec device to re-cache
3977 * the underlying block size in case
3978 * the filesystem overrode the initial value
3979 */
3980 set_fsblocksize(devvp);
3981
3982
3983 if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE,
3984 (caddr_t)&blksize, 0, ctx))) {
3985 return error;
3986 }
3987
3988 mp->mnt_devblocksize = blksize;
3989
3990 /*
3991 * set the maximum possible I/O size
3992 * this may get clipped to a smaller value
3993 * based on which constraints are being advertised
3994 * and if those advertised constraints result in a smaller
3995 * limit for a given I/O
3996 */
3997 mp->mnt_maxreadcnt = MAX_UPL_SIZE_BYTES;
3998 mp->mnt_maxwritecnt = MAX_UPL_SIZE_BYTES;
3999
4000 if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
4001 if (isvirtual) {
4002 mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
4003 mp->mnt_flag |= MNT_REMOVABLE;
4004 }
4005 }
4006 if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) {
4007 if (isssd) {
4008 mp->mnt_kern_flag |= MNTK_SSD;
4009 }
4010 }
4011 if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
4012 (caddr_t)&features, 0, ctx))) {
4013 return error;
4014 }
4015
4016 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD,
4017 (caddr_t)&readblockcnt, 0, ctx))) {
4018 return error;
4019 }
4020
4021 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE,
4022 (caddr_t)&writeblockcnt, 0, ctx))) {
4023 return error;
4024 }
4025
4026 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD,
4027 (caddr_t)&readmaxcnt, 0, ctx))) {
4028 return error;
4029 }
4030
4031 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE,
4032 (caddr_t)&writemaxcnt, 0, ctx))) {
4033 return error;
4034 }
4035
4036 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD,
4037 (caddr_t)&readsegcnt, 0, ctx))) {
4038 return error;
4039 }
4040
4041 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE,
4042 (caddr_t)&writesegcnt, 0, ctx))) {
4043 return error;
4044 }
4045
4046 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD,
4047 (caddr_t)&readsegsize, 0, ctx))) {
4048 return error;
4049 }
4050
4051 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE,
4052 (caddr_t)&writesegsize, 0, ctx))) {
4053 return error;
4054 }
4055
4056 if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT,
4057 (caddr_t)&alignment, 0, ctx))) {
4058 return error;
4059 }
4060
4061 if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
4062 (caddr_t)&ioqueue_depth, 0, ctx))) {
4063 return error;
4064 }
4065
4066 if (readmaxcnt) {
4067 mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : (uint32_t)readmaxcnt;
4068 }
4069
4070 if (readblockcnt) {
4071 temp = readblockcnt * blksize;
4072 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
4073
4074 if (temp < mp->mnt_maxreadcnt) {
4075 mp->mnt_maxreadcnt = (u_int32_t)temp;
4076 }
4077 }
4078
4079 if (writemaxcnt) {
4080 mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : (uint32_t)writemaxcnt;
4081 }
4082
4083 if (writeblockcnt) {
4084 temp = writeblockcnt * blksize;
4085 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
4086
4087 if (temp < mp->mnt_maxwritecnt) {
4088 mp->mnt_maxwritecnt = (u_int32_t)temp;
4089 }
4090 }
4091
4092 if (readsegcnt) {
4093 temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
4094 } else {
4095 temp = mp->mnt_maxreadcnt / PAGE_SIZE;
4096
4097 if (temp > UINT16_MAX) {
4098 temp = UINT16_MAX;
4099 }
4100 }
4101 mp->mnt_segreadcnt = (u_int16_t)temp;
4102
4103 if (writesegcnt) {
4104 temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
4105 } else {
4106 temp = mp->mnt_maxwritecnt / PAGE_SIZE;
4107
4108 if (temp > UINT16_MAX) {
4109 temp = UINT16_MAX;
4110 }
4111 }
4112 mp->mnt_segwritecnt = (u_int16_t)temp;
4113
4114 if (readsegsize) {
4115 temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
4116 } else {
4117 temp = mp->mnt_maxreadcnt;
4118 }
4119 mp->mnt_maxsegreadsize = (u_int32_t)temp;
4120
4121 if (writesegsize) {
4122 temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize;
4123 } else {
4124 temp = mp->mnt_maxwritecnt;
4125 }
4126 mp->mnt_maxsegwritesize = (u_int32_t)temp;
4127
4128 if (alignment) {
4129 temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1;
4130 } else {
4131 temp = 0;
4132 }
4133 mp->mnt_alignmentmask = (uint32_t)temp;
4134
4135
4136 if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH) {
4137 temp = ioqueue_depth;
4138 } else {
4139 temp = MNT_DEFAULT_IOQUEUE_DEPTH;
4140 }
4141
4142 mp->mnt_ioqueue_depth = (uint32_t)temp;
4143 mp->mnt_ioscale = MNT_IOSCALE(mp->mnt_ioqueue_depth);
4144
4145 if (mp->mnt_ioscale > 1) {
4146 printf("ioqueue_depth = %d, ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
4147 }
4148
4149 if (features & DK_FEATURE_FORCE_UNIT_ACCESS) {
4150 mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
4151 }
4152
4153 if (VNOP_IOCTL(devvp, DKIOCGETIOMINSATURATIONBYTECOUNT, (caddr_t)&minsaturationbytecount, 0, ctx) == 0) {
4154 mp->mnt_minsaturationbytecount = minsaturationbytecount;
4155 } else {
4156 mp->mnt_minsaturationbytecount = 0;
4157 }
4158
4159 if (VNOP_IOCTL(devvp, DKIOCCORESTORAGE, (caddr_t)&cs_info, 0, ctx) == 0) {
4160 cs_present = TRUE;
4161 }
4162
4163 if (features & DK_FEATURE_UNMAP) {
4164 mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED;
4165
4166 if (cs_present == TRUE) {
4167 mp->mnt_ioflags |= MNT_IOFLAGS_CSUNMAP_SUPPORTED;
4168 }
4169 }
4170 if (cs_present == TRUE) {
4171 /*
4172 * for now we'll use the following test as a proxy for
4173 * the underlying drive being FUSION in nature
4174 */
4175 if ((cs_info.flags & DK_CORESTORAGE_PIN_YOUR_METADATA)) {
4176 mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE;
4177 }
4178 } else {
4179 /* Check for APFS Fusion */
4180 dk_apfs_flavour_t flavour;
4181 if ((VNOP_IOCTL(devvp, DKIOCGETAPFSFLAVOUR, (caddr_t)&flavour, 0, ctx) == 0) &&
4182 (flavour == DK_APFS_FUSION)) {
4183 mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE;
4184 }
4185 }
4186
4187 if (VNOP_IOCTL(devvp, DKIOCGETLOCATION, (caddr_t)&location, 0, ctx) == 0) {
4188 if (location & DK_LOCATION_EXTERNAL) {
4189 mp->mnt_ioflags |= MNT_IOFLAGS_PERIPHERAL_DRIVE;
4190 mp->mnt_flag |= MNT_REMOVABLE;
4191 }
4192 }
4193
4194 #if CONFIG_IOSCHED
4195 if (iosched_enabled && (features & DK_FEATURE_PRIORITY)) {
4196 mp->mnt_ioflags |= MNT_IOFLAGS_IOSCHED_SUPPORTED;
4197 throttle_info_disable_throttle(mp->mnt_devbsdunit, (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) != 0);
4198 }
4199 #endif /* CONFIG_IOSCHED */
4200 return error;
4201 }
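/*
 * To make two of the derivations above concrete -- the lowest set bit of the
 * throttle mask becoming the disk unit number, and a device's advertised
 * block-count limit being converted to bytes and clipped against the UPL
 * ceiling -- here is a minimal userspace sketch. It assumes num_trailing_0()
 * behaves like a plain trailing-zero count; the mask and limit values are
 * hypothetical stand-ins for what the DKIOC* ioctls would report.
 */
#if 0   /* illustrative example only */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_UPL_CEILING (8 * 1024 * 1024)   /* stand-in for MAX_UPL_SIZE_BYTES */

static uint32_t
num_trailing_0(uint64_t mask)                   /* assumed equivalent of the kernel helper */
{
	return mask ? (uint32_t)__builtin_ctzll(mask) : 64;
}

int
main(void)
{
	uint64_t throttle_mask = 0x8;           /* hypothetical DKIOCGETTHROTTLEMASK result */
	uint32_t blksize = 4096;                /* hypothetical DKIOCGETBLOCKSIZE result */
	uint64_t readblockcnt = 1024;           /* hypothetical DKIOCGETMAXBLOCKCOUNTREAD result */
	uint32_t maxreadcnt = EXAMPLE_UPL_CEILING;
	uint64_t temp = readblockcnt * blksize; /* block-count limit expressed in bytes: 4MB */

	if (temp > UINT32_MAX) {
		temp = UINT32_MAX;
	}
	if (temp < maxreadcnt) {
		maxreadcnt = (uint32_t)temp;    /* clipped from the 8MB ceiling down to 4MB */
	}
	printf("devbsdunit %u, maxreadcnt %u\n", num_trailing_0(throttle_mask), maxreadcnt);
	return 0;
}
#endif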
4202
4203 static struct klist fs_klist;
4204 lck_grp_t *fs_klist_lck_grp;
4205 lck_mtx_t *fs_klist_lock;
4206
4207 void
4208 vfs_event_init(void)
4209 {
4210 klist_init(&fs_klist);
4211 fs_klist_lck_grp = lck_grp_alloc_init("fs_klist", NULL);
4212 fs_klist_lock = lck_mtx_alloc_init(fs_klist_lck_grp, NULL);
4213 }
4214
4215 void
4216 vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data)
4217 {
4218 if (event == VQ_DEAD || event == VQ_NOTRESP) {
4219 struct mount *mp = vfs_getvfs(fsid);
4220 if (mp) {
4221 mount_lock_spin(mp);
4222 if (data) {
4223 mp->mnt_kern_flag &= ~MNT_LNOTRESP; // Now responding
4224 } else {
4225 mp->mnt_kern_flag |= MNT_LNOTRESP; // Not responding
4226 }
4227 mount_unlock(mp);
4228 }
4229 }
4230
4231 lck_mtx_lock(fs_klist_lock);
4232 KNOTE(&fs_klist, event);
4233 lck_mtx_unlock(fs_klist_lock);
4234 }
4235
4236 /*
4237 * return the number of mounted filesystems.
4238 */
4239 static int
4240 sysctl_vfs_getvfscnt(void)
4241 {
4242 return mount_getvfscnt();
4243 }
4244
4245
4246 static int
4247 mount_getvfscnt(void)
4248 {
4249 int ret;
4250
4251 mount_list_lock();
4252 ret = nummounts;
4253 mount_list_unlock();
4254 return ret;
4255 }
4256
4257
4258
4259 static int
4260 mount_fillfsids(fsid_t *fsidlst, int count)
4261 {
4262 struct mount *mp;
4263 int actual;
4264
4265 actual = 0;
4266 mount_list_lock();
4267 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4268 if (actual < count) {
4269 fsidlst[actual] = mp->mnt_vfsstat.f_fsid;
4270 actual++;
4271 }
4272 }
4273 mount_list_unlock();
4274 return actual;
4275 }
4276
4277 /*
4278 * Fill in the array of fsid_t's up to a max of 'count'; the actual
4279 * number filled in will be set in '*actual'. If there are more fsid_t's
4280 * than room in fsidlst, ENOMEM will be returned and '*actual' will
4281 * have the actual count.
4282 * Callers depend on '*actual' being filled out even in the error case.
4283 */
4284 static int
4285 sysctl_vfs_getvfslist(fsid_t *fsidlst, unsigned long count, unsigned long *actual)
4286 {
4287 struct mount *mp;
4288
4289 *actual = 0;
4290 mount_list_lock();
4291 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4292 (*actual)++;
4293 if (*actual <= count) {
4294 fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid;
4295 }
4296 }
4297 mount_list_unlock();
4298 return *actual <= count ? 0 : ENOMEM;
4299 }
4300
4301 static int
4302 sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1,
4303 __unused int arg2, struct sysctl_req *req)
4304 {
4305 unsigned long actual;
4306 int error;
4307 size_t space;
4308 fsid_t *fsidlst;
4309
4310 /* This is a readonly node. */
4311 if (req->newptr != USER_ADDR_NULL) {
4312 return EPERM;
4313 }
4314
4315 /* they are querying us so just return the space required. */
4316 if (req->oldptr == USER_ADDR_NULL) {
4317 req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
4318 return 0;
4319 }
4320 again:
4321 /*
4322 * Retrieve an accurate count of the amount of space required to copy
4323 * out all the fsids in the system.
4324 */
4325 space = req->oldlen;
4326 req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
4327
4328 /* they didn't give us enough space. */
4329 if (space < req->oldlen) {
4330 return ENOMEM;
4331 }
4332
4333 fsidlst = kheap_alloc(KHEAP_TEMP, req->oldlen, Z_WAITOK | Z_ZERO);
4334 if (fsidlst == NULL) {
4335 return ENOMEM;
4336 }
4337
4338 error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
4339 &actual);
4340 /*
4341 * If we get back ENOMEM, then another mount has been added while we
4342 * slept in the allocation above. If this is the case, then try again.
4343 */
4344 if (error == ENOMEM) {
4345 kheap_free(KHEAP_TEMP, fsidlst, req->oldlen);
4346 req->oldlen = space;
4347 goto again;
4348 }
4349 if (error == 0) {
4350 error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t));
4351 }
4352 kheap_free(KHEAP_TEMP, fsidlst, req->oldlen);
4353 return error;
4354 }
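/*
 * The handler above implements the usual two-pass sysctl sizing protocol:
 * a NULL oldp returns only the space required, and a mount racing in
 * between the passes surfaces as ENOMEM. A userspace caller, using the
 * vfs.generic.vfsidlist name exported further below, would drive it
 * roughly like this:
 */
#if 0   /* illustrative example only */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/mount.h>

int
main(void)
{
	size_t len = 0;
	fsid_t *list;

	/* pass 1: NULL oldp asks the kernel for the space required */
	if (sysctlbyname("vfs.generic.vfsidlist", NULL, &len, NULL, 0) == -1) {
		return 1;
	}
	if ((list = malloc(len)) == NULL) {
		return 1;
	}
	/* pass 2: fetch the fsids themselves */
	if (sysctlbyname("vfs.generic.vfsidlist", list, &len, NULL, 0) == -1) {
		free(list);
		return 1;
	}
	for (size_t i = 0; i < len / sizeof(fsid_t); i++) {
		printf("fsid %d:%d\n", list[i].val[0], list[i].val[1]);
	}
	free(list);
	return 0;
}
#endif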
4355
4356 /*
4357 * Do a sysctl by fsid.
4358 */
4359 static int
4360 sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
4361 struct sysctl_req *req)
4362 {
4363 union union_vfsidctl vc;
4364 struct mount *mp;
4365 struct vfsstatfs *sp;
4366 int *name, namelen;
4367 int flags = 0;
4368 int error = 0, gotref = 0;
4369 vfs_context_t ctx = vfs_context_current();
4370 proc_t p = req->p; /* XXX req->p != current_proc()? */
4371 boolean_t is_64_bit;
4372 union {
4373 struct statfs64 sfs64;
4374 struct user64_statfs osfs64;
4375 struct user32_statfs osfs32;
4376 } *sfsbuf;
4377
4378 if (req->newptr == USER_ADDR_NULL) {
4379 error = EINVAL;
4380 goto out;
4381 }
4382
4383 name = arg1;
4384 namelen = arg2;
4385 is_64_bit = proc_is64bit(p);
4386
4387 error = SYSCTL_IN(req, &vc, is_64_bit ? sizeof(vc.vc64) : sizeof(vc.vc32));
4388 if (error) {
4389 goto out;
4390 }
4391 if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
4392 error = EINVAL;
4393 goto out;
4394 }
4395 mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
4396 if (mp == NULL) {
4397 error = ENOENT;
4398 goto out;
4399 }
4400 gotref = 1;
4401 /* reset so that the fs specific code can fetch it. */
4402 req->newidx = 0;
4403 /*
4404 * Note if this is a VFS_CTL then we pass the actual sysctl req
4405 * in for "oldp" so that the lower layer can DTRT and use the
4406 * SYSCTL_IN/OUT routines.
4407 */
4408 if (mp->mnt_op->vfs_sysctl != NULL) {
4409 if (is_64_bit) {
4410 if (vfs_64bitready(mp)) {
4411 error = mp->mnt_op->vfs_sysctl(name, namelen,
4412 CAST_USER_ADDR_T(req),
4413 NULL, USER_ADDR_NULL, 0,
4414 ctx);
4415 } else {
4416 error = ENOTSUP;
4417 }
4418 } else {
4419 error = mp->mnt_op->vfs_sysctl(name, namelen,
4420 CAST_USER_ADDR_T(req),
4421 NULL, USER_ADDR_NULL, 0,
4422 ctx);
4423 }
4424 if (error != ENOTSUP) {
4425 goto out;
4426 }
4427 }
4428 switch (name[0]) {
4429 case VFS_CTL_UMOUNT:
4430 #if CONFIG_MACF
4431 error = mac_mount_check_umount(ctx, mp);
4432 if (error != 0) {
4433 goto out;
4434 }
4435 #endif
4436 req->newidx = 0;
4437 if (is_64_bit) {
4438 req->newptr = vc.vc64.vc_ptr;
4439 req->newlen = (size_t)vc.vc64.vc_len;
4440 } else {
4441 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
4442 req->newlen = vc.vc32.vc_len;
4443 }
4444 error = SYSCTL_IN(req, &flags, sizeof(flags));
4445 if (error) {
4446 break;
4447 }
4448
4449 mount_ref(mp, 0);
4450 mount_iterdrop(mp);
4451 gotref = 0;
4452 /* safedounmount consumes a ref */
4453 error = safedounmount(mp, flags, ctx);
4454 break;
4455 case VFS_CTL_OSTATFS:
4456 case VFS_CTL_STATFS64:
4457 #if CONFIG_MACF
4458 error = mac_mount_check_stat(ctx, mp);
4459 if (error != 0) {
4460 break;
4461 }
4462 #endif
4463 req->newidx = 0;
4464 if (is_64_bit) {
4465 req->newptr = vc.vc64.vc_ptr;
4466 req->newlen = (size_t)vc.vc64.vc_len;
4467 } else {
4468 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
4469 req->newlen = vc.vc32.vc_len;
4470 }
4471 error = SYSCTL_IN(req, &flags, sizeof(flags));
4472 if (error) {
4473 break;
4474 }
4475 sp = &mp->mnt_vfsstat;
4476 if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
4477 (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT))) {
4478 goto out;
4479 }
4480
4481 sfsbuf = kheap_alloc(KHEAP_TEMP, sizeof(*sfsbuf), Z_WAITOK);
4482
4483 if (name[0] == VFS_CTL_STATFS64) {
4484 struct statfs64 *sfs = &sfsbuf->sfs64;
4485
4486 vfs_get_statfs64(mp, sfs);
4487 error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
4488 } else if (is_64_bit) {
4489 struct user64_statfs *sfs = &sfsbuf->osfs64;
4490
4491 bzero(sfs, sizeof(*sfs));
4492 sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
4493 sfs->f_type = (short)mp->mnt_vtable->vfc_typenum;
4494 sfs->f_bsize = (user64_long_t)sp->f_bsize;
4495 sfs->f_iosize = (user64_long_t)sp->f_iosize;
4496 sfs->f_blocks = (user64_long_t)sp->f_blocks;
4497 sfs->f_bfree = (user64_long_t)sp->f_bfree;
4498 sfs->f_bavail = (user64_long_t)sp->f_bavail;
4499 sfs->f_files = (user64_long_t)sp->f_files;
4500 sfs->f_ffree = (user64_long_t)sp->f_ffree;
4501 sfs->f_fsid = sp->f_fsid;
4502 sfs->f_owner = sp->f_owner;
4503 #ifdef CONFIG_NFS_CLIENT
4504 if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
4505 strlcpy(&sfs->f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN);
4506 } else
4507 #endif /* CONFIG_NFS_CLIENT */
4508 {
4509 strlcpy(sfs->f_fstypename, sp->f_fstypename, MFSNAMELEN);
4510 }
4511 strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN);
4512 strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN);
4513
4514 error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
4515 } else {
4516 struct user32_statfs *sfs = &sfsbuf->osfs32;
4517 long temp;
4518
4519 bzero(sfs, sizeof(*sfs));
4520 sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
4521 sfs->f_type = (short)mp->mnt_vtable->vfc_typenum;
4522
4523 /*
4524 * It's possible for there to be more than 2^31 blocks in the filesystem, so we
4525 * have to fudge the numbers here in that case. We inflate the blocksize in order
4526 * to reflect the filesystem size as best we can.
4527 */
4528 if (sp->f_blocks > INT_MAX) {
4529 int shift;
4530
4531 /*
4532 * Work out how far we have to shift the block count down to make it fit.
4533 * Note that it's possible to have to shift so far that the resulting
4534 * blocksize would be unreportably large. At that point, we will clip
4535 * any values that don't fit.
4536 *
4537 * For safety's sake, we also ensure that f_iosize is never reported as
4538 * being smaller than f_bsize.
4539 */
4540 for (shift = 0; shift < 32; shift++) {
4541 if ((sp->f_blocks >> shift) <= INT_MAX) {
4542 break;
4543 }
4544 if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX) {
4545 break;
4546 }
4547 }
4548 #define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
4549 sfs->f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
4550 sfs->f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
4551 sfs->f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
4552 #undef __SHIFT_OR_CLIP
4553 sfs->f_bsize = (user32_long_t)(sp->f_bsize << shift);
4554 temp = lmax(sp->f_iosize, sp->f_bsize);
4555 if (temp > INT32_MAX) {
4556 error = EINVAL;
4557 kheap_free(KHEAP_TEMP, sfsbuf, sizeof(*sfsbuf));
4558 goto out;
4559 }
4560 sfs->f_iosize = (user32_long_t)temp;
4561 } else {
4562 sfs->f_bsize = (user32_long_t)sp->f_bsize;
4563 sfs->f_iosize = (user32_long_t)sp->f_iosize;
4564 sfs->f_blocks = (user32_long_t)sp->f_blocks;
4565 sfs->f_bfree = (user32_long_t)sp->f_bfree;
4566 sfs->f_bavail = (user32_long_t)sp->f_bavail;
4567 }
4568 sfs->f_files = (user32_long_t)sp->f_files;
4569 sfs->f_ffree = (user32_long_t)sp->f_ffree;
4570 sfs->f_fsid = sp->f_fsid;
4571 sfs->f_owner = sp->f_owner;
4572
4573 #ifdef CONFIG_NFS_CLIENT
4574 if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
4575 strlcpy(&sfs->f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN);
4576 } else
4577 #endif /* CONFIG_NFS_CLIENT */
4578 {
4579 strlcpy(sfs->f_fstypename, sp->f_fstypename, MFSNAMELEN);
4580 }
4581 strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN);
4582 strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN);
4583
4584 error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
4585 }
4586 kheap_free(KHEAP_TEMP, sfsbuf, sizeof(*sfsbuf));
4587 break;
4588 default:
4589 error = ENOTSUP;
4590 goto out;
4591 }
4592 out:
4593 if (gotref != 0) {
4594 mount_iterdrop(mp);
4595 }
4596 return error;
4597 }
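/*
 * The >2^31-block fudging in the VFS_CTL_OSTATFS path above is easiest to
 * follow with numbers. A standalone sketch of the same shift loop, assuming
 * a hypothetical volume of 2^32 4KB blocks (16TB):
 */
#if 0   /* illustrative example only */
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	long long f_blocks = 1LL << 32;         /* hypothetical block count */
	long long f_bsize = 4096;
	int shift;

	/*
	 * Shift the block count down until it fits in an int, as long as
	 * the correspondingly inflated blocksize itself still fits.
	 */
	for (shift = 0; shift < 32; shift++) {
		if ((f_blocks >> shift) <= INT_MAX) {
			break;
		}
		if ((f_bsize << (shift + 1)) > INT_MAX) {
			break;
		}
	}
	/* shift == 2 here: report 2^30 blocks of 16KB -- the same 16TB total */
	printf("shift %d: %lld blocks of %lld bytes\n",
	    shift, f_blocks >> shift, f_bsize << shift);
	return 0;
}
#endif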
4598
4599 static int filt_fsattach(struct knote *kn, struct kevent_qos_s *kev);
4600 static void filt_fsdetach(struct knote *kn);
4601 static int filt_fsevent(struct knote *kn, long hint);
4602 static int filt_fstouch(struct knote *kn, struct kevent_qos_s *kev);
4603 static int filt_fsprocess(struct knote *kn, struct kevent_qos_s *kev);
4604 SECURITY_READ_ONLY_EARLY(struct filterops) fs_filtops = {
4605 .f_attach = filt_fsattach,
4606 .f_detach = filt_fsdetach,
4607 .f_event = filt_fsevent,
4608 .f_touch = filt_fstouch,
4609 .f_process = filt_fsprocess,
4610 };
4611
4612 static int
4613 filt_fsattach(struct knote *kn, __unused struct kevent_qos_s *kev)
4614 {
4615 kn->kn_flags |= EV_CLEAR; /* automatic */
4616 kn->kn_sdata = 0; /* incoming data is ignored */
4617
4618 lck_mtx_lock(fs_klist_lock);
4619 KNOTE_ATTACH(&fs_klist, kn);
4620 lck_mtx_unlock(fs_klist_lock);
4621
4622 /*
4623 * filter only sees future events,
4624 * so it can't be fired already.
4625 */
4626 return 0;
4627 }
4628
4629 static void
4630 filt_fsdetach(struct knote *kn)
4631 {
4632 lck_mtx_lock(fs_klist_lock);
4633 KNOTE_DETACH(&fs_klist, kn);
4634 lck_mtx_unlock(fs_klist_lock);
4635 }
4636
4637 static int
4638 filt_fsevent(struct knote *kn, long hint)
4639 {
4640 /*
4641 * Backwards compatibility:
4642 * Other filters would do nothing if kn->kn_sfflags == 0
4643 */
4644
4645 if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
4646 kn->kn_fflags |= hint;
4647 }
4648
4649 return kn->kn_fflags != 0;
4650 }
4651
4652 static int
4653 filt_fstouch(struct knote *kn, struct kevent_qos_s *kev)
4654 {
4655 int res;
4656
4657 lck_mtx_lock(fs_klist_lock);
4658
4659 kn->kn_sfflags = kev->fflags;
4660
4661 /*
4662 * The above filter function sets bits even if nobody is looking for them.
4663 * Just preserve those bits even if the new mask is more selective
4664 * than before.
4665 *
4666 * For compatibility with previous implementations, we leave kn_fflags
4667 * as they were before.
4668 */
4669 //if (kn->kn_sfflags)
4670 // kn->kn_fflags &= kn->kn_sfflags;
4671 res = (kn->kn_fflags != 0);
4672
4673 lck_mtx_unlock(fs_klist_lock);
4674
4675 return res;
4676 }
4677
4678 static int
4679 filt_fsprocess(struct knote *kn, struct kevent_qos_s *kev)
4680 {
4681 int res = 0;
4682
4683 lck_mtx_lock(fs_klist_lock);
4684 if (kn->kn_fflags) {
4685 knote_fill_kevent(kn, kev, 0);
4686 res = 1;
4687 }
4688 lck_mtx_unlock(fs_klist_lock);
4689 return res;
4690 }
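/*
 * Together these filter ops back the EVFILT_FS kqueue filter: attach hooks
 * the knote into fs_klist, vfs_event_signal() fires it, and event bits
 * accumulate in kn_fflags until the process callback delivers them. A
 * userspace consumer looks roughly like this (VQ_* bits from <sys/mount.h>):
 */
#if 0   /* illustrative example only */
#include <stdio.h>
#include <unistd.h>
#include <sys/event.h>
#include <sys/mount.h>

int
main(void)
{
	struct kevent kev;
	int kq = kqueue();

	if (kq == -1) {
		return 1;
	}
	/* the ident is unused for EVFILT_FS; fflags == 0 means "all events" */
	EV_SET(&kev, 0, EVFILT_FS, EV_ADD | EV_CLEAR, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1) {
		return 1;
	}
	/* wait for one filesystem event and report it */
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1) {
		if (kev.fflags & VQ_MOUNT) {
			printf("a filesystem was mounted\n");
		}
		if (kev.fflags & VQ_UNMOUNT) {
			printf("a filesystem was unmounted\n");
		}
	}
	close(kq);
	return 0;
}
#endif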
4691
4692 static int
4693 sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp,
4694 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4695 {
4696 int out, error;
4697 pid_t pid;
4698 proc_t p;
4699
4700 /* We need a pid. */
4701 if (req->newptr == USER_ADDR_NULL) {
4702 return EINVAL;
4703 }
4704
4705 error = SYSCTL_IN(req, &pid, sizeof(pid));
4706 if (error) {
4707 return error;
4708 }
4709
4710 p = proc_find(pid < 0 ? -pid : pid);
4711 if (p == NULL) {
4712 return ESRCH;
4713 }
4714
4715 /*
4716 * Fetching the value is ok, but we only fetch if the old
4717 * pointer is given.
4718 */
4719 if (req->oldptr != USER_ADDR_NULL) {
4720 out = ((p->p_flag & P_NOREMOTEHANG) != 0);
4721 proc_rele(p);
4722 error = SYSCTL_OUT(req, &out, sizeof(out));
4723 return error;
4724 }
4725
4726 /* cansignal offers us enough security. */
4727 if (p != req->p && proc_suser(req->p) != 0) {
4728 proc_rele(p);
4729 return EPERM;
4730 }
4731
4732 if (pid < 0) {
4733 OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
4734 } else {
4735 OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
4736 }
4737 proc_rele(p);
4738
4739 return 0;
4740 }
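/*
 * Driving the handler above from userspace: writing a positive pid sets
 * P_NOREMOTEHANG on that process, a negative pid clears it, and supplying
 * an old pointer queries the current value instead of setting it. A sketch,
 * operating on the caller itself (which needs no extra privilege):
 */
#if 0   /* illustrative example only */
#include <stdio.h>
#include <unistd.h>
#include <sys/sysctl.h>

int
main(void)
{
	pid_t pid = getpid();
	int val = 0;
	size_t len = sizeof(val);

	/* set the flag on ourselves (pass -pid to clear it instead) */
	if (sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &pid, sizeof(pid)) == -1) {
		return 1;
	}
	/* query: the old pointer makes the handler fetch rather than set */
	if (sysctlbyname("vfs.generic.noremotehang", &val, &len, &pid, sizeof(pid)) == -1) {
		return 1;
	}
	printf("P_NOREMOTEHANG is %s\n", val ? "set" : "clear");
	return 0;
}
#endif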
4741
4742 static int
4743 sysctl_vfs_generic_conf SYSCTL_HANDLER_ARGS
4744 {
4745 int *name, namelen;
4746 struct vfstable *vfsp;
4747 struct vfsconf vfsc = {};
4748
4749 (void)oidp;
4750 name = arg1;
4751 namelen = arg2;
4752
4753 if (namelen < 1) {
4754 return EISDIR;
4755 } else if (namelen > 1) {
4756 return ENOTDIR;
4757 }
4758
4759 mount_list_lock();
4760 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
4761 if (vfsp->vfc_typenum == name[0]) {
4762 break;
4763 }
4764 }
4765
4766 if (vfsp == NULL) {
4767 mount_list_unlock();
4768 return ENOTSUP;
4769 }
4770
4771 vfsc.vfc_reserved1 = 0;
4772 bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
4773 vfsc.vfc_typenum = vfsp->vfc_typenum;
4774 vfsc.vfc_refcount = vfsp->vfc_refcount;
4775 vfsc.vfc_flags = vfsp->vfc_flags;
4776 vfsc.vfc_reserved2 = 0;
4777 vfsc.vfc_reserved3 = 0;
4778
4779 mount_list_unlock();
4780 return SYSCTL_OUT(req, &vfsc, sizeof(struct vfsconf));
4781 }
4782
4783 /* the vfs.generic. branch. */
4784 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs generic hinge");
4785 /* retrieve a list of mounted filesystem fsid_t */
4786 SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist,
4787 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
4788 NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
4789 /* perform operations on filesystem via fsid_t */
4790 SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW | CTLFLAG_LOCKED,
4791 sysctl_vfs_ctlbyfsid, "ctlbyfsid");
4792 SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW | CTLFLAG_ANYBODY,
4793 NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
4794 SYSCTL_INT(_vfs_generic, VFS_MAXTYPENUM, maxtypenum,
4795 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
4796 &maxvfstypenum, 0, "");
4797 SYSCTL_INT(_vfs_generic, OID_AUTO, sync_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &sync_timeout_seconds, 0, "");
4798 SYSCTL_NODE(_vfs_generic, VFS_CONF, conf,
4799 CTLFLAG_RD | CTLFLAG_LOCKED,
4800 sysctl_vfs_generic_conf, "");
4801 #if DEVELOPMENT || DEBUG
4802 SYSCTL_INT(_vfs_generic, OID_AUTO, print_busy_vnodes,
4803 CTLTYPE_INT | CTLFLAG_RW,
4804 &print_busy_vnodes, 0,
4805 "VFS log busy vnodes blocking unmount");
4806 #endif
4807
4808 /* Indicate that the root file system unmounted cleanly */
4809 static int vfs_root_unmounted_cleanly = 0;
4810 SYSCTL_INT(_vfs_generic, OID_AUTO, root_unmounted_cleanly, CTLFLAG_RD, &vfs_root_unmounted_cleanly, 0, "Root filesystem was unmounted cleanly");
4811
4812 void
4813 vfs_set_root_unmounted_cleanly(void)
4814 {
4815 vfs_root_unmounted_cleanly = 1;
4816 }
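/*
 * The flag is surfaced through the read-only sysctl declared above, so a
 * userspace readout (it reports whether vfs_set_root_unmounted_cleanly()
 * has run, which happens at root unmount time) is a one-call sketch:
 */
#if 0   /* illustrative example only */
#include <stdio.h>
#include <sys/sysctl.h>

int
main(void)
{
	int clean = 0;
	size_t len = sizeof(clean);

	if (sysctlbyname("vfs.generic.root_unmounted_cleanly", &clean, &len, NULL, 0) == 0) {
		printf("root filesystem has%s been unmounted cleanly\n", clean ? "" : " not");
	}
	return 0;
}
#endif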
4817
4818 /*
4819 * Print vnode state.
4820 */
4821 void
4822 vn_print_state(struct vnode *vp, const char *fmt, ...)
4823 {
4824 va_list ap;
4825 char perm_str[] = "(VM_KERNEL_ADDRPERM pointer)";
4826 char fs_name[MFSNAMELEN];
4827
4828 va_start(ap, fmt);
4829 vprintf(fmt, ap);
4830 va_end(ap);
4831 printf("vp 0x%0llx %s: ", (uint64_t)VM_KERNEL_ADDRPERM(vp), perm_str);
4832 printf("tag %d, type %d\n", vp->v_tag, vp->v_type);
4833 /* Counts .. */
4834 printf(" iocount %d, usecount %d, kusecount %d references %d\n",
4835 vp->v_iocount, vp->v_usecount, vp->v_kusecount, vp->v_references);
4836 printf(" writecount %d, numoutput %d\n", vp->v_writecount,
4837 vp->v_numoutput);
4838 /* Flags */
4839 printf(" flag 0x%x, lflag 0x%x, listflag 0x%x\n", vp->v_flag,
4840 vp->v_lflag, vp->v_listflag);
4841
4842 if (vp->v_mount == NULL || vp->v_mount == dead_mountp) {
4843 strlcpy(fs_name, "deadfs", MFSNAMELEN);
4844 } else {
4845 vfs_name(vp->v_mount, fs_name);
4846 }
4847
4848 printf(" v_data 0x%0llx %s\n",
4849 (vp->v_data ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_data) : 0),
4850 perm_str);
4851 printf(" v_mount 0x%0llx %s vfs_name %s\n",
4852 (vp->v_mount ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_mount) : 0),
4853 perm_str, fs_name);
4854 }
4855
4856 long num_reusedvnodes = 0;
4857
4858
4859 static vnode_t
4860 process_vp(vnode_t vp, int want_vp, int *deferred)
4861 {
4862 unsigned int vpid;
4863
4864 *deferred = 0;
4865
4866 vpid = vp->v_id;
4867
4868 vnode_list_remove_locked(vp);
4869
4870 vnode_list_unlock();
4871
4872 vnode_lock_spin(vp);
4873
4874 /*
4875 * We may have to wait for the vnode_lock after removing the vp from the freelist,
4876 * and the vid is bumped only at the very end of reclaim. So it is possible
4877 * that we are looking at a vnode that is being terminated. If so, skip it.
4878 */
4879 if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
4880 VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
4881 /*
4882 * we lost the race between dropping the list lock
4883 * and picking up the vnode_lock... someone else
4884 * used this vnode and it is now in a new state
4885 */
4886 vnode_unlock(vp);
4887
4888 return NULLVP;
4889 }
4890 if ((vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE) {
4891 /*
4892 * we did a vnode_rele_ext that asked for
4893 * us not to reenter the filesystem during
4894 * the release even though VL_NEEDINACTIVE was
4895 * set... we'll do it here by doing a
4896 * vnode_get/vnode_put
4897 *
4898 * pick up an iocount so that we can call
4899 * vnode_put and drive the VNOP_INACTIVE...
4900 * vnode_put will either leave us off
4901 * the freelist if a new ref comes in,
4902 * or put us back on the end of the freelist
4903 * or recycle us if we were marked for termination...
4904 * so we'll just go grab a new candidate
4905 */
4906 vp->v_iocount++;
4907 #ifdef JOE_DEBUG
4908 record_vp(vp, 1);
4909 #endif
4910 vnode_put_locked(vp);
4911 vnode_unlock(vp);
4912
4913 return NULLVP;
4914 }
4915 /*
4916 * Checks for anyone racing us for recycle
4917 */
4918 if (vp->v_type != VBAD) {
4919 if (want_vp && (vnode_on_reliable_media(vp) == FALSE || (vp->v_flag & VISDIRTY))) {
4920 vnode_async_list_add(vp);
4921 vnode_unlock(vp);
4922
4923 *deferred = 1;
4924
4925 return NULLVP;
4926 }
4927 if (vp->v_lflag & VL_DEAD) {
4928 panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
4929 }
4930
4931 vnode_lock_convert(vp);
4932 (void)vnode_reclaim_internal(vp, 1, want_vp, 0);
4933
4934 if (want_vp) {
4935 if ((VONLIST(vp))) {
4936 panic("new_vnode(%p): vp on list", vp);
4937 }
4938 if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
4939 (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH))) {
4940 panic("new_vnode(%p): free vnode still referenced", vp);
4941 }
4942 if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0)) {
4943 panic("new_vnode(%p): vnode seems to be on mount list", vp);
4944 }
4945 if (!LIST_EMPTY(&vp->v_nclinks) || !TAILQ_EMPTY(&vp->v_ncchildren)) {
4946 panic("new_vnode(%p): vnode still hooked into the name cache", vp);
4947 }
4948 } else {
4949 vnode_unlock(vp);
4950 vp = NULLVP;
4951 }
4952 }
4953 return vp;
4954 }
4955
4956 __attribute__((noreturn))
4957 static void
4958 async_work_continue(void)
4959 {
4960 struct async_work_lst *q;
4961 int deferred;
4962 vnode_t vp;
4963
4964 q = &vnode_async_work_list;
4965
4966 for (;;) {
4967 vnode_list_lock();
4968
4969 if (TAILQ_EMPTY(q)) {
4970 assert_wait(q, (THREAD_UNINT));
4971
4972 vnode_list_unlock();
4973
4974 thread_block((thread_continue_t)async_work_continue);
4975
4976 continue;
4977 }
4978 async_work_handled++;
4979
4980 vp = TAILQ_FIRST(q);
4981
4982 vp = process_vp(vp, 0, &deferred);
4983
4984 if (vp != NULLVP) {
4985 panic("found VBAD vp (%p) on async queue", vp);
4986 }
4987 }
4988 }
4989
4990
4991 static int
4992 new_vnode(vnode_t *vpp)
4993 {
4994 vnode_t vp;
4995 uint32_t retries = 0, max_retries = 100; /* retry in case of tablefull */
4996 uint32_t bdevvp_vnodes = 0;
4997 int force_alloc = 0, walk_count = 0;
4998 boolean_t need_reliable_vp = FALSE;
4999 int deferred;
5000 struct timeval initial_tv;
5001 struct timeval current_tv;
5002 proc_t curproc = current_proc();
5003
5004 initial_tv.tv_sec = 0;
5005 retry:
5006 vp = NULLVP;
5007
5008 vnode_list_lock();
5009
5010 if (need_reliable_vp == TRUE) {
5011 async_work_timed_out++;
5012 }
5013
5014 if ((numvnodes - deadvnodes) < desiredvnodes || force_alloc) {
5015 struct timespec ts;
5016
5017 if (!TAILQ_EMPTY(&vnode_dead_list)) {
5018 /*
5019 * Can always reuse a dead one
5020 */
5021 vp = TAILQ_FIRST(&vnode_dead_list);
5022 goto steal_this_vp;
5023 }
5024 /*
5025 * no dead vnodes available... if we're under
5026 * the limit, we'll create a new vnode
5027 */
5028 numvnodes++;
5029 vnode_list_unlock();
5030
5031 vp = zalloc_flags(vnode_zone, Z_WAITOK | Z_ZERO);
5032 VLISTNONE(vp); /* avoid double queue removal */
5033 lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr);
5034
5035 TAILQ_INIT(&vp->v_ncchildren);
5036
5037 klist_init(&vp->v_knotes);
5038 nanouptime(&ts);
5039 vp->v_id = (uint32_t)ts.tv_nsec;
5040 vp->v_flag = VSTANDARD;
5041
5042 #if CONFIG_MACF
5043 if (mac_vnode_label_init_needed(vp)) {
5044 mac_vnode_label_init(vp);
5045 }
5046 #endif /* MAC */
5047
5048 vp->v_iocount = 1;
5049 goto done;
5050 }
5051 microuptime(&current_tv);
5052
5053 #define MAX_WALK_COUNT 1000
5054
5055 if (!TAILQ_EMPTY(&vnode_rage_list) &&
5056 (ragevnodes >= rage_limit ||
5057 (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
5058 TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
5059 if (!(vp->v_listflag & VLIST_RAGE)) {
5060 panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
5061 }
5062
5063 /*
5064 * skip free vnodes created by bdevvp as they are
5065 * typically not fully constructed and may have issues
5066 * in getting reclaimed.
5067 */
5068 if (vp->v_flag & VBDEVVP) {
5069 bdevvp_vnodes++;
5070 continue;
5071 }
5072
5073 // if we're a dependency-capable process, skip vnodes that can
5074 // cause recycling deadlocks. (i.e. this process is diskimages
5075 // helper and the vnode is in a disk image). Querying the
5076 // mnt_kern_flag for the mount's virtual device status
5077 // is safer than checking the mnt_dependent_process, which
5078 // may not be updated if there are multiple devnode layers
5079 // in between the disk image and the final consumer.
5080
5081 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
5082 (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
5083 /*
5084 * if need_reliable_vp == TRUE, then we've already sent one or more
5085 * non-reliable vnodes to the async thread for processing and timed
5086 * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT
5087 * mechanism to first scan for a reliable vnode before forcing
5088 * a new vnode to be created
5089 */
5090 if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) {
5091 break;
5092 }
5093 }
5094
5095 // don't iterate more than MAX_WALK_COUNT vnodes to
5096 // avoid keeping the vnode list lock held for too long.
5097
5098 if (walk_count++ > MAX_WALK_COUNT) {
5099 vp = NULL;
5100 break;
5101 }
5102 }
5103 }
5104
5105 if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
5106 /*
5107 * Pick the first vp for possible reuse
5108 */
5109 walk_count = 0;
5110 TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
5111 /*
5112 * skip free vnodes created by bdevvp as they are
5113 * typically not fully constructed and may have issues
5114 * in getting reclaimed.
5115 */
5116 if (vp->v_flag & VBDEVVP) {
5117 bdevvp_vnodes++;
5118 continue;
5119 }
5120
5121 // if we're a dependency-capable process, skip vnodes that can
5122 // cause recycling deadlocks. (i.e. this process is diskimages
5123 // helper and the vnode is in a disk image). Querying the
5124 // mnt_kern_flag for the mount's virtual device status
5125 // is safer than checking the mnt_dependent_process, which
5126 // may not be updated if there are multiple devnode layers
5127 // in between the disk image and the final consumer.
5128
5129 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
5130 (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
5131 /*
5132 * if need_reliable_vp == TRUE, then we've already sent one or more
5133 * non-reliable vnodes to the async thread for processing and timed
5134 * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT
5135 * mechanism to first scan for a reliable vnode before forcing
5136 * a new vnode to be created
5137 */
5138 if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) {
5139 break;
5140 }
5141 }
5142
5143 // don't iterate more than MAX_WALK_COUNT vnodes to
5144 // avoid keeping the vnode list lock held for too long.
5145
5146 if (walk_count++ > MAX_WALK_COUNT) {
5147 vp = NULL;
5148 break;
5149 }
5150 }
5151 }
5152
5153 //
5154 // if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
5155 // then we're trying to create a vnode on behalf of a
5156 // process like diskimages-helper that has file systems
5157 // mounted on top of itself (and thus we can't reclaim
5158 // vnodes in the file systems on top of us). if we can't
5159 // find a vnode to reclaim then we'll just have to force
5160 // the allocation.
5161 //
5162 if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
5163 force_alloc = 1;
5164 vnode_list_unlock();
5165 goto retry;
5166 }
5167
5168 if (vp == NULL) {
5169 /*
5170 * we've reached the system-imposed maximum number of vnodes
5171 * but there isn't a single one available...
5172 * wait a bit and then retry... if we can't get a vnode
5173 * after our target number of retries, then log a complaint
5174 */
5175 if (++retries <= max_retries) {
5176 vnode_list_unlock();
5177 delay_for_interval(1, 1000 * 1000);
5178 goto retry;
5179 }
5180
5181 vnode_list_unlock();
5182 tablefull("vnode");
5183 log(LOG_EMERG, "%d desired, %ld numvnodes, "
5184 "%ld free, %ld dead, %ld async, %d rage %d bdevvp\n",
5185 desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes, bdevvp_vnodes);
5186 #if CONFIG_JETSAM
5187
5188 #if DEVELOPMENT || DEBUG
5189 if (bootarg_no_vnode_jetsam) {
5190 panic("vnode table is full\n");
5191 }
5192 #endif /* DEVELOPMENT || DEBUG */
5193
5194 /*
5195 * Running out of vnodes tends to make a system unusable. Start killing
5196 * processes that jetsam knows are killable.
5197 */
5198 if (memorystatus_kill_on_vnode_limit() == FALSE) {
5199 /*
5200 * If jetsam can't find any more processes to kill and there
5201 * still aren't any free vnodes, panic. Hopefully we'll get a
5202 * panic log to tell us why we ran out.
5203 */
5204 panic("vnode table is full\n");
5205 }
5206
5207 /*
5208 * Now that we've killed someone, wait a bit and continue looking
5209 * (with fewer retries before trying another kill).
5210 */
5211 delay_for_interval(3, 1000 * 1000);
5212 retries = 0;
5213 max_retries = 10;
5214 goto retry;
5215 #endif
5216
5217 *vpp = NULL;
5218 return ENFILE;
5219 }
5220 steal_this_vp:
5221 if ((vp = process_vp(vp, 1, &deferred)) == NULLVP) {
5222 if (deferred) {
5223 int elapsed_msecs;
5224 struct timeval elapsed_tv;
5225
5226 if (initial_tv.tv_sec == 0) {
5227 microuptime(&initial_tv);
5228 }
5229
5230 vnode_list_lock();
5231
5232 dead_vnode_waited++;
5233 dead_vnode_wanted++;
5234
5235 /*
5236 * note that we're only going to explicitly wait 10ms
5237 * for a dead vnode to become available, since even if one
5238 * isn't available, a reliable vnode might now be available
5239 * at the head of the VRAGE or free lists... if so, we
5240 * can satisfy the new_vnode request with less latency than waiting
5241 * for the full 100ms duration we're ultimately willing to tolerate
5242 */
5243 assert_wait_timeout((caddr_t)&dead_vnode_wanted, (THREAD_INTERRUPTIBLE), 10000, NSEC_PER_USEC);
5244
5245 vnode_list_unlock();
5246
5247 thread_block(THREAD_CONTINUE_NULL);
5248
5249 microuptime(&elapsed_tv);
5250
5251 timevalsub(&elapsed_tv, &initial_tv);
5252 elapsed_msecs = (int)(elapsed_tv.tv_sec * 1000 + elapsed_tv.tv_usec / 1000);
5253
5254 if (elapsed_msecs >= 100) {
5255 /*
5256 * we've waited long enough... 100ms is
5257 * somewhat arbitrary for this case, but the
5258 * normal worst case latency used for UI
5259 * interaction is 100ms, so I've chosen to
5260 * go with that.
5261 *
5262 * setting need_reliable_vp to TRUE
5263 * forces us to find a reliable vnode
5264 * that we can process synchronously, or
5265 * to create a new one if the scan for
5266 * a reliable one hits the scan limit
5267 */
5268 need_reliable_vp = TRUE;
5269 }
5270 }
5271 goto retry;
5272 }
5273 OSAddAtomicLong(1, &num_reusedvnodes);
5274
5275
5276 #if CONFIG_MACF
5277 /*
5278 * We should never see VL_LABELWAIT or VL_LABEL here,
5279 * as those operations hold a reference.
5280 */
5281 assert((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
5282 assert((vp->v_lflag & VL_LABEL) != VL_LABEL);
5283 if (vp->v_lflag & VL_LABELED || vp->v_label != NULL) {
5284 vnode_lock_convert(vp);
5285 mac_vnode_label_recycle(vp);
5286 } else if (mac_vnode_label_init_needed(vp)) {
5287 vnode_lock_convert(vp);
5288 mac_vnode_label_init(vp);
5289 }
5290
5291 #endif /* MAC */
5292
5293 vp->v_iocount = 1;
5294 vp->v_lflag = 0;
5295 vp->v_writecount = 0;
5296 vp->v_references = 0;
5297 vp->v_iterblkflags = 0;
5298 vp->v_flag = VSTANDARD;
5299 /* vbad vnodes can point to dead_mountp */
5300 vp->v_mount = NULL;
5301 vp->v_defer_reclaimlist = (vnode_t)0;
5302
5303 vnode_unlock(vp);
5304
5305 done:
5306 *vpp = vp;
5307
5308 return 0;
5309 }
5310
5311 void
5312 vnode_lock(vnode_t vp)
5313 {
5314 lck_mtx_lock(&vp->v_lock);
5315 }
5316
5317 void
5318 vnode_lock_spin(vnode_t vp)
5319 {
5320 lck_mtx_lock_spin(&vp->v_lock);
5321 }
5322
5323 void
5324 vnode_unlock(vnode_t vp)
5325 {
5326 lck_mtx_unlock(&vp->v_lock);
5327 }
5328
5329
5330
5331 int
5332 vnode_get(struct vnode *vp)
5333 {
5334 int retval;
5335
5336 vnode_lock_spin(vp);
5337 retval = vnode_get_locked(vp);
5338 vnode_unlock(vp);
5339
5340 return retval;
5341 }
5342
5343 int
5344 vnode_get_locked(struct vnode *vp)
5345 {
5346 #if DIAGNOSTIC
5347 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
5348 #endif
5349 if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
5350 return ENOENT;
5351 }
5352
5353 if (os_add_overflow(vp->v_iocount, 1, &vp->v_iocount)) {
5354 panic("v_iocount overflow");
5355 }
5356
5357 #ifdef JOE_DEBUG
5358 record_vp(vp, 1);
5359 #endif
5360 return 0;
5361 }
5362
5363 /*
5364 * vnode_getwithvid() cuts in line in front of a vnode drain (that is,
5365 * while the vnode is draining, but at no point after that) to prevent
5366 * deadlocks when getting vnodes from filesystem hashes while holding
5367 * resources that may prevent other iocounts from being released.
5368 */
5369 int
5370 vnode_getwithvid(vnode_t vp, uint32_t vid)
5371 {
5372 return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID | VNODE_DRAINO));
5373 }
5374
5375 /*
5376 * vnode_getwithvid_drainok() is like vnode_getwithvid(), but *does* block behind a vnode
5377 * drain; it exists for use in the VFS name cache, where we really do want to block behind
5378 * vnode drain to prevent holding off an unmount.
5379 */
5380 int
5381 vnode_getwithvid_drainok(vnode_t vp, uint32_t vid)
5382 {
5383 return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID));
5384 }
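/*
 * A typical consumer of this interface is a filesystem hash lookup: capture
 * the vnode's vid while the hash lock pins its identity, drop the lock, then
 * let vnode_getwithvid() fail with ENOENT if the vnode was recycled in the
 * meantime. A sketch -- my_hash_lock/my_hash_find and friends are
 * hypothetical stand-ins for a filesystem's own hash:
 */
#if 0   /* illustrative example only */
static vnode_t
lookup_and_get(struct my_hash *hash, uint64_t key)
{
	vnode_t vp;
	uint32_t vid;

	my_hash_lock(hash);
	vp = my_hash_find(hash, key);           /* hypothetical lookup */
	if (vp == NULLVP) {
		my_hash_unlock(hash);
		return NULLVP;
	}
	vid = vnode_vid(vp);                    /* capture identity before unlocking */
	my_hash_unlock(hash);

	if (vnode_getwithvid(vp, vid) != 0) {
		return NULLVP;                  /* recycled or draining: retry from the top */
	}
	return vp;                              /* caller must vnode_put() when done */
}
#endif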
5385
5386 int
5387 vnode_getwithref(vnode_t vp)
5388 {
5389 return vget_internal(vp, 0, 0);
5390 }
5391
5392
5393 __private_extern__ int
5394 vnode_getalways(vnode_t vp)
5395 {
5396 return vget_internal(vp, 0, VNODE_ALWAYS);
5397 }
5398
5399 __private_extern__ int
5400 vnode_getalways_from_pager(vnode_t vp)
5401 {
5402 return vget_internal(vp, 0, VNODE_ALWAYS | VNODE_PAGER);
5403 }
5404
5405 static inline void
5406 vn_set_dead(vnode_t vp)
5407 {
5408 vp->v_mount = NULL;
5409 vp->v_op = dead_vnodeop_p;
5410 vp->v_tag = VT_NON;
5411 vp->v_data = NULL;
5412 vp->v_type = VBAD;
5413 vp->v_lflag |= VL_DEAD;
5414 }
5415
5416 static int
5417 vnode_put_internal_locked(vnode_t vp, bool from_pager)
5418 {
5419 vfs_context_t ctx = vfs_context_current(); /* hoist outside loop */
5420
5421 #if DIAGNOSTIC
5422 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
5423 #endif
5424 retry:
5425 if (vp->v_iocount < 1) {
5426 panic("vnode_put(%p): iocount < 1", vp);
5427 }
5428
5429 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
5430 vnode_dropiocount(vp);
5431 return 0;
5432 }
5433
5434 if (((vp->v_lflag & (VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE)) {
5435 vp->v_lflag &= ~VL_NEEDINACTIVE;
5436 vnode_unlock(vp);
5437
5438 VNOP_INACTIVE(vp, ctx);
5439
5440 vnode_lock_spin(vp);
5441 /*
5442 * because we had to drop the vnode lock before calling
5443 * VNOP_INACTIVE, the state of this vnode may have changed...
5444 * we may pick up both VL_MARKTERM and either
5445 * an iocount or a usecount while in the VNOP_INACTIVE call...
5446 * we don't want to call vnode_reclaim_internal on a vnode
5447 * that has active references on it... so loop back around
5448 * and reevaluate the state
5449 */
5450 goto retry;
5451 }
5452 vp->v_lflag &= ~VL_NEEDINACTIVE;
5453
5454 if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
5455 if (from_pager) {
5456 /*
5457 * We can't initiate reclaim when called from the pager
5458 * because it will deadlock with itself so we hand it
5459 * off to the async cleaner thread.
5460 */
5461 if (VONLIST(vp)) {
5462 if (!(vp->v_listflag & VLIST_ASYNC_WORK)) {
5463 vnode_list_lock();
5464 vnode_list_remove_locked(vp);
5465 vnode_async_list_add_locked(vp);
5466 vnode_list_unlock();
5467 }
5468 wakeup(&vnode_async_work_list);
5469 } else {
5470 vnode_async_list_add(vp);
5471 }
5472 } else {
5473 vnode_lock_convert(vp);
5474 vnode_reclaim_internal(vp, 1, 1, 0);
5475 }
5476 }
5477 vnode_dropiocount(vp);
5478 vnode_list_add(vp);
5479
5480 return 0;
5481 }
5482
5483 int
5484 vnode_put_locked(vnode_t vp)
5485 {
5486 return vnode_put_internal_locked(vp, false);
5487 }
5488
5489 int
5490 vnode_put(vnode_t vp)
5491 {
5492 int retval;
5493
5494 vnode_lock_spin(vp);
5495 retval = vnode_put_internal_locked(vp, false);
5496 vnode_unlock(vp);
5497
5498 return retval;
5499 }
5500
5501 int
5502 vnode_put_from_pager(vnode_t vp)
5503 {
5504 int retval;
5505
5506 vnode_lock_spin(vp);
5507 /* Cannot initiate reclaim while paging */
5508 retval = vnode_put_internal_locked(vp, true);
5509 vnode_unlock(vp);
5510
5511 return retval;
5512 }
5513
5514 /* is vnode_t in use by others? */
5515 int
5516 vnode_isinuse(vnode_t vp, int refcnt)
5517 {
5518 return vnode_isinuse_locked(vp, refcnt, 0);
5519 }
5520
5521 int
5522 vnode_usecount(vnode_t vp)
5523 {
5524 return vp->v_usecount;
5525 }
5526
5527 int
5528 vnode_iocount(vnode_t vp)
5529 {
5530 return vp->v_iocount;
5531 }
5532
5533 int
5534 vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
5535 {
5536 int retval = 0;
5537
5538 if (!locked) {
5539 vnode_lock_spin(vp);
5540 }
5541 if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
5542 retval = 1;
5543 goto out;
5544 }
5545 if (vp->v_type == VREG) {
5546 retval = ubc_isinuse_locked(vp, refcnt, 1);
5547 }
5548
5549 out:
5550 if (!locked) {
5551 vnode_unlock(vp);
5552 }
5553 return retval;
5554 }
5555
5556
5557 /* resume vnode_t */
5558 errno_t
5559 vnode_resume(vnode_t vp)
5560 {
5561 if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
5562 vnode_lock_spin(vp);
5563 vp->v_lflag &= ~VL_SUSPENDED;
5564 vp->v_owner = NULL;
5565 vnode_unlock(vp);
5566
5567 wakeup(&vp->v_iocount);
5568 }
5569 return 0;
5570 }
5571
5572 /* suspend vnode_t
5573 * Please do not use on more than one vnode at a time as it may
5574 * cause deadlocks.
5575 * xxx should we explicitly prevent this from happening?
5576 */
5577
5578 errno_t
5579 vnode_suspend(vnode_t vp)
5580 {
5581 if (vp->v_lflag & VL_SUSPENDED) {
5582 return EBUSY;
5583 }
5584
5585 vnode_lock_spin(vp);
5586
5587 /*
5588 * xxx is this sufficient to check if a vnode_drain is
5589 * in progress?
5590 */
5591
5592 if (vp->v_owner == NULL) {
5593 vp->v_lflag |= VL_SUSPENDED;
5594 vp->v_owner = current_thread();
5595 }
5596 vnode_unlock(vp);
5597
5598 return 0;
5599 }
5600
5601 /*
5602 * Release any blocked locking requests on the vnode.
5603 * Used for forced-unmounts.
5604 *
5605 * XXX What about network filesystems?
5606 */
5607 static void
5608 vnode_abort_advlocks(vnode_t vp)
5609 {
5610 if (vp->v_flag & VLOCKLOCAL) {
5611 lf_abort_advlocks(vp);
5612 }
5613 }
5614
5615
5616 static errno_t
5617 vnode_drain(vnode_t vp)
5618 {
5619 if (vp->v_lflag & VL_DRAIN) {
5620 panic("vnode_drain: recursive drain");
5621 return ENOENT;
5622 }
5623 vp->v_lflag |= VL_DRAIN;
5624 vp->v_owner = current_thread();
5625
5626 while (vp->v_iocount > 1) {
5627 if (bootarg_no_vnode_drain) {
5628 struct timespec ts = {.tv_sec = 10, .tv_nsec = 0};
5629 int error;
5630
5631 if (vfs_unmountall_started) {
5632 ts.tv_sec = 1;
5633 }
5634
5635 error = msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain_with_timeout", &ts);
5636
5637 /* Try to deal with leaked iocounts under the bootarg while shutting down */
5638 if (vp->v_iocount > 1 && error == EWOULDBLOCK &&
5639 ts.tv_sec == 1 && vp->v_numoutput == 0) {
5640 vp->v_iocount = 1;
5641 break;
5642 }
5643 } else {
5644 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);
5645 }
5646 }
5647
5648 vp->v_lflag &= ~VL_DRAIN;
5649
5650 return 0;
5651 }
5652
5653
5654 /*
5655 * if the number of recent references via vnode_getwithvid or vnode_getwithref
5656 * exceeds this threshold, then 'UN-AGE' the vnode by removing it from
5657 * the LRU list if it's currently on it... once the iocount and usecount both drop
5658 * to 0, it will get put back on the end of the list, effectively making it younger...
5659 * this allows us to keep actively referenced vnodes in the list without having
5660 * to constantly remove and add to the list each time a vnode w/o a usecount is
5661 * referenced, which costs us taking and dropping a global lock twice.
5662 * However, if the vnode is marked DIRTY, we want to pull it out much earlier
5663 */
5664 #define UNAGE_THRESHHOLD 25
5665 #define UNAGE_DIRTYTHRESHHOLD 6
5666
5667 errno_t
5668 vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
5669 {
5670 int nodead = vflags & VNODE_NODEAD;
5671 int nosusp = vflags & VNODE_NOSUSPEND;
5672 int always = vflags & VNODE_ALWAYS;
5673 int beatdrain = vflags & VNODE_DRAINO;
5674 int withvid = vflags & VNODE_WITHID;
5675 int forpager = vflags & VNODE_PAGER;
5676
5677 for (;;) {
5678 int sleepflg = 0;
5679
5680 /*
5681 * if it is a dead vnode with deadfs
5682 */
5683 if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
5684 return ENOENT;
5685 }
5686 /*
5687 * will return VL_DEAD ones
5688 */
5689 if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0) {
5690 break;
5691 }
5692 /*
5693 * if suspended vnodes are to be failed
5694 */
5695 if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
5696 return ENOENT;
5697 }
5698 /*
5699 * if you are the owner of the drain/suspend/termination, you can acquire an iocount;
5700 * check for VL_TERMINATE, as it does not set the owner
5701 */
5702 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
5703 (vp->v_owner == current_thread())) {
5704 break;
5705 }
5706
5707 if (always != 0) {
5708 break;
5709 }
5710
5711 /*
5712 * If this vnode is getting drained, there are some cases where
5713 * we can't block or, in case of tty vnodes, want to be
5714 * interruptible.
5715 */
5716 if (vp->v_lflag & VL_DRAIN) {
5717 /*
5718 * In some situations, we want to get an iocount
5719 * even if the vnode is draining to prevent deadlock,
5720 * e.g. if we're in the filesystem, potentially holding
5721 * resources that could prevent other iocounts from
5722 * being released.
5723 */
5724 if (beatdrain) {
5725 break;
5726 }
5727 /*
5728 * Don't block if the vnode's mount point is unmounting as
5729 * we may be the thread the unmount itself is waiting on.
5730 * Only callers who pass in vids (at this point, we've already
5731 * handled nosusp and nodead) are expecting error returns
5732 * from this function, so we can only return errors for
5733 * those. ENODEV is intended to inform callers that the call
5734 * failed because an unmount is in progress.
5735 */
5736 if (withvid && (vp->v_mount) && vfs_isunmount(vp->v_mount)) {
5737 return ENODEV;
5738 }
5739
5740 if (vnode_istty(vp)) {
5741 sleepflg = PCATCH;
5742 }
5743 }
5744
5745 vnode_lock_convert(vp);
5746
5747 if (vp->v_lflag & VL_TERMINATE) {
5748 int error;
5749
5750 vp->v_lflag |= VL_TERMWANT;
5751
5752 error = msleep(&vp->v_lflag, &vp->v_lock,
5753 (PVFS | sleepflg), "vnode getiocount", NULL);
5754 if (error) {
5755 return error;
5756 }
5757 } else {
5758 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
5759 }
5760 }
5761 if (withvid && vid != vp->v_id) {
5762 return ENOENT;
5763 }
5764 if (!forpager && (++vp->v_references >= UNAGE_THRESHHOLD ||
5765 (vp->v_flag & VISDIRTY && vp->v_references >= UNAGE_DIRTYTHRESHHOLD))) {
5766 vp->v_references = 0;
5767 vnode_list_remove(vp);
5768 }
5769 vp->v_iocount++;
5770 #ifdef JOE_DEBUG
5771 record_vp(vp, 1);
5772 #endif
5773 return 0;
5774 }
5775
5776 static void
5777 vnode_dropiocount(vnode_t vp)
5778 {
5779 if (vp->v_iocount < 1) {
5780 panic("vnode_dropiocount(%p): v_iocount < 1", vp);
5781 }
5782
5783 vp->v_iocount--;
5784 #ifdef JOE_DEBUG
5785 record_vp(vp, -1);
5786 #endif
5787 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1)) {
5788 wakeup(&vp->v_iocount);
5789 }
5790 }
5791
5792
5793 void
5794 vnode_reclaim(struct vnode * vp)
5795 {
5796 vnode_reclaim_internal(vp, 0, 0, 0);
5797 }
5798
5799 __private_extern__
5800 void
5801 vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
5802 {
5803 int isfifo = 0;
5804 bool clear_tty_revoke = false;
5805
5806 if (!locked) {
5807 vnode_lock(vp);
5808 }
5809
5810 if (vp->v_lflag & VL_TERMINATE) {
5811 panic("vnode reclaim in progress");
5812 }
5813 vp->v_lflag |= VL_TERMINATE;
5814
5815 vn_clearunionwait(vp, 1);
5816
5817 /*
5818 * We have to force any terminals in reads to return and give up
5819 * their iocounts. It's important to do this after VL_TERMINATE
5820 * has been set to ensure new reads are blocked while the
5821 * revoke is in progress.
5822 */
5823 if (vnode_istty(vp) && (flags & REVOKEALL) && (vp->v_iocount > 1)) {
5824 vnode_unlock(vp);
5825 VNOP_IOCTL(vp, TIOCREVOKE, (caddr_t)NULL, 0, vfs_context_kernel());
5826 clear_tty_revoke = true;
5827 vnode_lock(vp);
5828 }
5829
5830 vnode_drain(vp);
5831
5832 if (clear_tty_revoke) {
5833 vnode_unlock(vp);
5834 VNOP_IOCTL(vp, TIOCREVOKECLEAR, (caddr_t)NULL, 0, vfs_context_kernel());
5835 vnode_lock(vp);
5836 }
5837
5838 isfifo = (vp->v_type == VFIFO);
5839
5840 if (vp->v_type != VBAD) {
5841 vgone(vp, flags); /* clean and reclaim the vnode */
5842 }
5843 /*
5844 * give the vnode a new identity so that vnode_getwithvid will fail
5845 * on any stale cache accesses...
5846 * grab the list_lock so that if we're in "new_vnode"
5847 * behind the list_lock trying to steal this vnode, the v_id is stable...
5848 * once new_vnode drops the list_lock, it will block trying to take
5849 * the vnode lock until we release it... at that point it will evaluate
5850 * whether the v_id has changed
5851 * also need to make sure that the vnode isn't on a list where "new_vnode"
5852 * can find it after the v_id has been bumped until we are completely done
5853 * with the vnode (i.e. putting it back on a list has to be the very last
5854 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
5855 * are holding an io_count on the vnode... they need to drop the io_count
5856 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
5857 * they are completely done with the vnode
5858 */
5859 vnode_list_lock();
5860
5861 vnode_list_remove_locked(vp);
5862 vp->v_id++;
5863
5864 vnode_list_unlock();
5865
5866 if (isfifo) {
5867 struct fifoinfo * fip;
5868
5869 fip = vp->v_fifoinfo;
5870 vp->v_fifoinfo = NULL;
5871 kheap_free(KHEAP_DEFAULT, fip, sizeof(struct fifoinfo));
5872 }
5873 vp->v_type = VBAD;
5874
5875 if (vp->v_data) {
5876 panic("vnode_reclaim_internal: cleaned vnode isn't");
5877 }
5878 if (vp->v_numoutput) {
5879 panic("vnode_reclaim_internal: clean vnode has pending I/O's");
5880 }
5881 if (UBCINFOEXISTS(vp)) {
5882 panic("vnode_reclaim_internal: ubcinfo not cleaned");
5883 }
5884 if (vp->v_parent) {
5885 panic("vnode_reclaim_internal: vparent not removed");
5886 }
5887 if (vp->v_name) {
5888 panic("vnode_reclaim_internal: vname not removed");
5889 }
5890
5891 vp->v_socket = NULL;
5892
5893 vp->v_lflag &= ~VL_TERMINATE;
5894 vp->v_owner = NULL;
5895
5896 KNOTE(&vp->v_knotes, NOTE_REVOKE);
5897
5898 /* Make sure that when we reuse the vnode, no knotes left over */
5899 klist_init(&vp->v_knotes);
5900
5901 if (vp->v_lflag & VL_TERMWANT) {
5902 vp->v_lflag &= ~VL_TERMWANT;
5903 wakeup(&vp->v_lflag);
5904 }
5905 if (!reuse) {
5906 /*
5907 * make sure we get on the
5908 * dead list if appropriate
5909 */
5910 vnode_list_add(vp);
5911 }
5912 if (!locked) {
5913 vnode_unlock(vp);
5914 }
5915 }
5916
5917 static int
5918 vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp,
5919 int init_vnode)
5920 {
5921 int error;
5922 int insert = 1;
5923 int existing_vnode;
5924 vnode_t vp;
5925 vnode_t nvp;
5926 vnode_t dvp;
5927 struct uthread *ut;
5928 struct componentname *cnp;
5929 struct vnode_fsparam *param = (struct vnode_fsparam *)data;
5930 #if CONFIG_TRIGGERS
5931 struct vnode_trigger_param *tinfo = NULL;
5932 #endif
5933 if (*vpp) {
5934 vp = *vpp;
5935 *vpp = NULLVP;
5936 existing_vnode = 1;
5937 } else {
5938 existing_vnode = 0;
5939 }
5940
5941 if (init_vnode) {
5942 /* Do quick sanity check on the parameters. */
5943 if ((param == NULL) || (param->vnfs_vtype == VBAD)) {
5944 error = EINVAL;
5945 goto error_out;
5946 }
5947
5948 #if CONFIG_TRIGGERS
5949 if ((flavor == VNCREATE_TRIGGER) && (size == VNCREATE_TRIGGER_SIZE)) {
5950 tinfo = (struct vnode_trigger_param *)data;
5951
5952 /* Validate trigger vnode input */
5953 if ((param->vnfs_vtype != VDIR) ||
5954 (tinfo->vnt_resolve_func == NULL) ||
5955 (tinfo->vnt_flags & ~VNT_VALID_MASK)) {
5956 error = EINVAL;
5957 goto error_out;
5958 }
5959 /* Fall through to a normal create (params will be the same) */
5960 flavor = VNCREATE_FLAVOR;
5961 size = VCREATESIZE;
5962 }
5963 #endif
5964 if ((flavor != VNCREATE_FLAVOR) || (size != VCREATESIZE)) {
5965 error = EINVAL;
5966 goto error_out;
5967 }
5968 }
5969
5970 if (!existing_vnode) {
5971 if ((error = new_vnode(&vp))) {
5972 return error;
5973 }
5974 if (!init_vnode) {
5975 /* Make it so that it can be released by a vnode_put */
5976 vn_set_dead(vp);
5977 *vpp = vp;
5978 return 0;
5979 }
5980 } else {
5981 /*
5982 * A vnode obtained by vnode_create_empty has been passed to
5983 * vnode_initialize - Unset VL_DEAD set by vn_set_dead. After
5984 * this point, it is set back on any error.
5985 *
5986 * N.B. vnode locking - We make the same assumptions as the
5987 * "unsplit" vnode_create did - i.e. it is safe to update the
5988 * vnode's fields without the vnode lock. This vnode has been
5989 * out and about with the filesystem and hopefully nothing
5990 * was done to the vnode between the vnode_create_empty and
5991 * now when it has come in through vnode_initialize.
5992 */
5993 vp->v_lflag &= ~VL_DEAD;
5994 }
5995
5996 dvp = param->vnfs_dvp;
5997 cnp = param->vnfs_cnp;
5998
5999 vp->v_op = param->vnfs_vops;
6000 vp->v_type = (uint16_t)param->vnfs_vtype;
6001 vp->v_data = param->vnfs_fsnode;
6002
6003 if (param->vnfs_markroot) {
6004 vp->v_flag |= VROOT;
6005 }
6006 if (param->vnfs_marksystem) {
6007 vp->v_flag |= VSYSTEM;
6008 }
6009 if (vp->v_type == VREG) {
6010 error = ubc_info_init_withsize(vp, param->vnfs_filesize);
6011 if (error) {
6012 #ifdef JOE_DEBUG
6013 record_vp(vp, 1);
6014 #endif
6015 vn_set_dead(vp);
6016
6017 vnode_put(vp);
6018 return error;
6019 }
6020 if (param->vnfs_mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED) {
6021 memory_object_mark_io_tracking(vp->v_ubcinfo->ui_control);
6022 }
6023 }
6024 #ifdef JOE_DEBUG
6025 record_vp(vp, 1);
6026 #endif
6027
6028 #if CONFIG_FIRMLINKS
6029 vp->v_fmlink = NULLVP;
6030 #endif
6031 vp->v_flag &= ~VFMLINKTARGET;
6032
6033 #if CONFIG_TRIGGERS
6034 /*
6035 * For trigger vnodes, attach trigger info to vnode
6036 */
6037 if ((vp->v_type == VDIR) && (tinfo != NULL)) {
6038 /*
6039 * Note: has a side effect of incrementing trigger count on the
6040 * mount if successful, which we would need to undo on a
6041 * subsequent failure.
6042 */
6043 #ifdef JOE_DEBUG
6044 record_vp(vp, -1);
6045 #endif
6046 error = vnode_resolver_create(param->vnfs_mp, vp, tinfo, FALSE);
6047 if (error) {
6048 printf("vnode_create: vnode_resolver_create() err %d\n", error);
6049 vn_set_dead(vp);
6050 #ifdef JOE_DEBUG
6051 record_vp(vp, 1);
6052 #endif
6053 vnode_put(vp);
6054 return error;
6055 }
6056 }
6057 #endif
6058 if (vp->v_type == VCHR || vp->v_type == VBLK) {
6059 vp->v_tag = VT_DEVFS; /* callers will reset if needed (bdevvp) */
6060
6061 if ((nvp = checkalias(vp, param->vnfs_rdev))) {
6062 /*
6063 * if checkalias returns a vnode, it will be locked
6064 *
6065 * first get rid of the unneeded vnode we acquired
6066 */
6067 vp->v_data = NULL;
6068 vp->v_op = spec_vnodeop_p;
6069 vp->v_type = VBAD;
6070 vp->v_lflag = VL_DEAD;
6071 vp->v_data = NULL;
6072 vp->v_tag = VT_NON;
6073 vnode_put(vp);
6074
6075 /*
6076 * switch to aliased vnode and finish
6077 * preparing it
6078 */
6079 vp = nvp;
6080
6081 vclean(vp, 0);
6082 vp->v_op = param->vnfs_vops;
6083 vp->v_type = (uint16_t)param->vnfs_vtype;
6084 vp->v_data = param->vnfs_fsnode;
6085 vp->v_lflag = 0;
6086 vp->v_mount = NULL;
6087 insmntque(vp, param->vnfs_mp);
6088 insert = 0;
6089 vnode_unlock(vp);
6090 }
6091
6092 if (VCHR == vp->v_type) {
6093 u_int maj = major(vp->v_rdev);
6094
6095 if (maj < (u_int)nchrdev && cdevsw[maj].d_type == D_TTY) {
6096 vp->v_flag |= VISTTY;
6097 }
6098 }
6099 }
6100
6101 if (vp->v_type == VFIFO) {
6102 struct fifoinfo *fip;
6103
6104 fip = kheap_alloc(KHEAP_DEFAULT, sizeof(struct fifoinfo),
6105 Z_WAITOK | Z_ZERO);
6106 vp->v_fifoinfo = fip;
6107 }
6108 /* The file systems must pass the address of the location where
6109 * they store the vnode pointer. When we add the vnode into the mount
6110 * list and name cache they become discoverable. So the file system node
6111 * must have its connection to the vnode set up by then.
6112 */
6113 *vpp = vp;
6114
6115 /* Add fs named reference. */
6116 if (param->vnfs_flags & VNFS_ADDFSREF) {
6117 vp->v_lflag |= VNAMED_FSHASH;
6118 }
6119 if (param->vnfs_mp) {
6120 if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL) {
6121 vp->v_flag |= VLOCKLOCAL;
6122 }
6123 if (insert) {
6124 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
6125 panic("insmntque: vp on the free list\n");
6126 }
6127
6128 /*
6129 * enter in mount vnode list
6130 */
6131 insmntque(vp, param->vnfs_mp);
6132 }
6133 }
6134 if (dvp && vnode_ref(dvp) == 0) {
6135 vp->v_parent = dvp;
6136 }
6137 if (cnp) {
6138 if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
6139 /*
6140 * enter into name cache
6141 * we've got the info to enter it into the name cache now
6142 * cache_enter_create will pick up an extra reference on
6143 * the name entered into the string cache
6144 */
6145 vp->v_name = cache_enter_create(dvp, vp, cnp);
6146 } else {
6147 vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
6148 }
6149
6150 if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED) {
6151 vp->v_flag |= VISUNION;
6152 }
6153 }
6154 if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
6155 /*
6156 * this vnode is being created as cacheable in the name cache
6157 * this allows us to re-enter it in the cache
6158 */
6159 vp->v_flag |= VNCACHEABLE;
6160 }
6161 ut = get_bsdthread_info(current_thread());
6162
6163 if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
6164 (ut->uu_flag & (UT_RAGE_VNODES | UT_KERN_RAGE_VNODES))) {
6165 /*
6166 * process has indicated that it wants any
6167 * vnodes created on its behalf to be rapidly
6168 * aged to reduce the impact on the cached set
6169 * of vnodes
6170 *
6171 * if UT_KERN_RAGE_VNODES is set, then the
6172 * kernel internally wants vnodes to be rapidly
6173 * aged, even if the process hasn't requested
6174 * this
6175 */
6176 vp->v_flag |= VRAGE;
6177 }
6178
6179 #if CONFIG_SECLUDED_MEMORY
6180 switch (secluded_for_filecache) {
6181 case 0:
6182 /*
6183 * secluded_for_filecache == 0:
6184 * + no file contents in secluded pool
6185 */
6186 break;
6187 case 1:
6188 /*
6189 * secluded_for_filecache == 1:
6190 * + no files from /
6191 * + files from /Applications/ are OK
6192 * + files from /Applications/Camera are not OK
6193 * + no files that are open for write
6194 */
6195 if (vnode_vtype(vp) == VREG &&
6196 vnode_mount(vp) != NULL &&
6197 (!(vfs_flags(vnode_mount(vp)) & MNT_ROOTFS))) {
6198 /* not from root filesystem: eligible for secluded pages */
6199 memory_object_mark_eligible_for_secluded(
6200 ubc_getobject(vp, UBC_FLAGS_NONE),
6201 TRUE);
6202 }
6203 break;
6204 case 2:
6205 /*
6206 * secluded_for_filecache == 2:
6207 * + all read-only files OK, except:
6208 * + dyld_shared_cache_arm64*
6209 * + Camera
6210 * + mediaserverd
6211 */
6212 if (vnode_vtype(vp) == VREG) {
6213 memory_object_mark_eligible_for_secluded(
6214 ubc_getobject(vp, UBC_FLAGS_NONE),
6215 TRUE);
6216 }
6217 break;
6218 default:
6219 break;
6220 }
6221 #endif /* CONFIG_SECLUDED_MEMORY */
6222
6223 return 0;
6224
6225 error_out:
6226 if (existing_vnode) {
6227 vnode_put(vp);
6228 }
6229 return error;
6230 }
6231
6232 /* USAGE:
6233 * The following API creates a vnode, associates all the parameters specified in the vnode_fsparam
6234 * structure with it, and returns a vnode handle with a reference. Device aliasing is handled here, so checkalias
6235 * is obsoleted by this.
6236 */
6237 int
6238 vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
6239 {
6240 *vpp = NULLVP;
6241 return vnode_create_internal(flavor, size, data, vpp, 1);
6242 }
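/*
 * Example (an illustrative sketch only; the "myfs_*" names and the fsnode
 * layout are hypothetical, not part of this KPI): a filesystem create path
 * typically fills in a vnode_fsparam and calls vnode_create, then releases
 * the returned iocount with vnode_put() when done with the vnode:
 *
 *	struct vnode_fsparam vfsp;
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	bzero(&vfsp, sizeof(vfsp));
 *	vfsp.vnfs_mp = mp;
 *	vfsp.vnfs_vtype = VREG;
 *	vfsp.vnfs_str = "myfs";
 *	vfsp.vnfs_dvp = dvp;
 *	vfsp.vnfs_fsnode = np;
 *	vfsp.vnfs_vops = myfs_vnodeop_p;
 *	vfsp.vnfs_filesize = filesize;
 *	vfsp.vnfs_cnp = cnp;
 *	vfsp.vnfs_flags = VNFS_ADDFSREF;
 *
 *	error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vp);
 */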
6243
6244 int
6245 vnode_create_empty(vnode_t *vpp)
6246 {
6247 *vpp = NULLVP;
6248 return vnode_create_internal(VNCREATE_FLAVOR, VCREATESIZE, NULL,
6249 vpp, 0);
6250 }
6251
6252 int
6253 vnode_initialize(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
6254 {
6255 if (*vpp == NULLVP) {
6256 panic("NULL vnode passed to vnode_initialize");
6257 }
6258 #if DEVELOPMENT || DEBUG
6259 /*
6260 * We lock to check that the vnode is fit for unlocked use in
6261 * vnode_create_internal.
6262 */
6263 vnode_lock_spin(*vpp);
6264 VNASSERT(((*vpp)->v_iocount == 1), *vpp,
6265 ("vnode_initialize : iocount not 1, is %d", (*vpp)->v_iocount));
6266 VNASSERT(((*vpp)->v_usecount == 0), *vpp,
6267 ("vnode_initialize : usecount not 0, is %d", (*vpp)->v_usecount));
6268 VNASSERT(((*vpp)->v_lflag & VL_DEAD), *vpp,
6269 ("vnode_initialize : v_lflag does not have VL_DEAD, is 0x%x",
6270 (*vpp)->v_lflag));
6271 VNASSERT(((*vpp)->v_data == NULL), *vpp,
6272 ("vnode_initialize : v_data not NULL"));
6273 vnode_unlock(*vpp);
6274 #endif
6275 return vnode_create_internal(flavor, size, data, vpp, 1);
6276 }
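/*
 * Example (a minimal sketch of the split create path): a filesystem that
 * must publish a vnode before its fsnode is fully constructed can use
 * vnode_create_empty() followed by vnode_initialize(); the vnode_fsparam
 * setup ("vfsp" below) is the same as for vnode_create():
 *
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	error = vnode_create_empty(&vp);
 *	if (error == 0) {
 *		... build the fsnode; vp is VL_DEAD until initialized ...
 *		error = vnode_initialize(VNCREATE_FLAVOR, VCREATESIZE,
 *		    &vfsp, &vp);
 *	}
 *
 * On a vnode_initialize() failure the iocount has already been dropped
 * inside vnode_create_internal(), so the caller must not vnode_put()
 * the vnode again.
 */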
6277
6278 int
6279 vnode_addfsref(vnode_t vp)
6280 {
6281 vnode_lock_spin(vp);
6282 if (vp->v_lflag & VNAMED_FSHASH) {
6283 panic("add_fsref: vp already has named reference");
6284 }
6285 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
6286 panic("addfsref: vp on the free list\n");
6287 }
6288 vp->v_lflag |= VNAMED_FSHASH;
6289 vnode_unlock(vp);
6290 return 0;
6291 }
6292 int
6293 vnode_removefsref(vnode_t vp)
6294 {
6295 vnode_lock_spin(vp);
6296 if ((vp->v_lflag & VNAMED_FSHASH) == 0) {
6297 panic("remove_fsref: no named reference");
6298 }
6299 vp->v_lflag &= ~VNAMED_FSHASH;
6300 vnode_unlock(vp);
6301 return 0;
6302 }
6303
6304
6305 int
6306 vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg)
6307 {
6308 mount_t mp;
6309 int ret = 0;
6310 fsid_t * fsid_list;
6311 int count, actualcount, i;
6312 void * allocmem;
6313 int indx_start, indx_stop, indx_incr;
6314 int cb_dropref = (flags & VFS_ITERATE_CB_DROPREF);
6315 int noskip_unmount = (flags & VFS_ITERATE_NOSKIP_UNMOUNT);
6316
6317 count = mount_getvfscnt();
6318 count += 10;
6319
6320 fsid_list = kheap_alloc(KHEAP_TEMP, count * sizeof(fsid_t), Z_WAITOK);
6321 allocmem = (void *)fsid_list;
6322
6323 actualcount = mount_fillfsids(fsid_list, count);
6324
6325 /*
6326 * Establish the iteration direction
6327 * VFS_ITERATE_TAIL_FIRST overrides default head first order (oldest first)
6328 */
6329 if (flags & VFS_ITERATE_TAIL_FIRST) {
6330 indx_start = actualcount - 1;
6331 indx_stop = -1;
6332 indx_incr = -1;
6333 } else { /* Head first by default */
6334 indx_start = 0;
6335 indx_stop = actualcount;
6336 indx_incr = 1;
6337 }
6338
6339 for (i = indx_start; i != indx_stop; i += indx_incr) {
6340 /* obtain the mount point with iteration reference */
6341 mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);
6342
6343 if (mp == (struct mount *)0) {
6344 continue;
6345 }
6346 mount_lock(mp);
6347 if ((mp->mnt_lflag & MNT_LDEAD) ||
6348 (!noskip_unmount && (mp->mnt_lflag & MNT_LUNMOUNT))) {
6349 mount_unlock(mp);
6350 mount_iterdrop(mp);
6351 continue;
6352 }
6353 mount_unlock(mp);
6354
6355 /* iterate over all the vnodes */
6356 ret = callout(mp, arg);
6357
6358 /*
6359 * Drop the iterref here if the callback didn't do it.
6360 * Note: If cb_dropref is set the mp may no longer exist.
6361 */
6362 if (!cb_dropref) {
6363 mount_iterdrop(mp);
6364 }
6365
6366 switch (ret) {
6367 case VFS_RETURNED:
6368 case VFS_RETURNED_DONE:
6369 if (ret == VFS_RETURNED_DONE) {
6370 ret = 0;
6371 goto out;
6372 }
6373 break;
6374
6375 case VFS_CLAIMED_DONE:
6376 ret = 0;
6377 goto out;
6378 case VFS_CLAIMED:
6379 default:
6380 break;
6381 }
6382 ret = 0;
6383 }
6384
6385 out:
6386 kheap_free(KHEAP_TEMP, allocmem, (count * sizeof(fsid_t)));
6387 return ret;
6388 }
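/*
 * Example (hedged sketch; "example_count_mounts" is a hypothetical name):
 * the callout is invoked once per live mount; returning VFS_RETURNED
 * continues the iteration and lets vfs_iterate() drop the iteration
 * reference, while VFS_RETURNED_DONE stops after the current mount:
 *
 *	static int
 *	example_count_mounts(mount_t mp, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return VFS_RETURNED;
 *	}
 *
 *	int nmounts = 0;
 *	(void) vfs_iterate(0, example_count_mounts, &nmounts);
 */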
6389
6390 /*
6391 * Update the vfsstatfs structure in the mountpoint.
6392 * MAC: Parameter eventtype added, indicating whether the event that
6393 * triggered this update came from user space, via a system call
6394 * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
6395 */
6396 int
6397 vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
6398 {
6399 struct vfs_attr va;
6400 int error;
6401
6402 /*
6403 * Request the attributes we want to propagate into
6404 * the per-mount vfsstat structure.
6405 */
6406 VFSATTR_INIT(&va);
6407 VFSATTR_WANTED(&va, f_iosize);
6408 VFSATTR_WANTED(&va, f_blocks);
6409 VFSATTR_WANTED(&va, f_bfree);
6410 VFSATTR_WANTED(&va, f_bavail);
6411 VFSATTR_WANTED(&va, f_bused);
6412 VFSATTR_WANTED(&va, f_files);
6413 VFSATTR_WANTED(&va, f_ffree);
6414 VFSATTR_WANTED(&va, f_bsize);
6415 VFSATTR_WANTED(&va, f_fssubtype);
6416
6417 if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
6418 KAUTH_DEBUG("STAT - filesystem returned error %d", error);
6419 return error;
6420 }
6421 #if CONFIG_MACF
6422 if (eventtype == VFS_USER_EVENT) {
6423 error = mac_mount_check_getattr(ctx, mp, &va);
6424 if (error != 0) {
6425 return error;
6426 }
6427 }
6428 #endif
6429 /*
6430 * Unpack into the per-mount structure.
6431 *
6432 * We only overwrite these fields, which are likely to change:
6433 * f_blocks
6434 * f_bfree
6435 * f_bavail
6436 * f_bused
6437 * f_files
6438 * f_ffree
6439 *
6440 * And these which are not, but which the FS has no other way
6441 * of providing to us:
6442 * f_bsize
6443 * f_iosize
6444 * f_fssubtype
6445 *
6446 */
6447 if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
6448 /* 4822056 - protect against malformed server mount */
6449 mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
6450 } else {
6451 mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */
6452 }
6453 if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
6454 mp->mnt_vfsstat.f_iosize = va.f_iosize;
6455 } else {
6456 mp->mnt_vfsstat.f_iosize = 1024 * 1024; /* 1MB sensible I/O size */
6457 }
6458 if (VFSATTR_IS_SUPPORTED(&va, f_blocks)) {
6459 mp->mnt_vfsstat.f_blocks = va.f_blocks;
6460 }
6461 if (VFSATTR_IS_SUPPORTED(&va, f_bfree)) {
6462 mp->mnt_vfsstat.f_bfree = va.f_bfree;
6463 }
6464 if (VFSATTR_IS_SUPPORTED(&va, f_bavail)) {
6465 mp->mnt_vfsstat.f_bavail = va.f_bavail;
6466 }
6467 if (VFSATTR_IS_SUPPORTED(&va, f_bused)) {
6468 mp->mnt_vfsstat.f_bused = va.f_bused;
6469 }
6470 if (VFSATTR_IS_SUPPORTED(&va, f_files)) {
6471 mp->mnt_vfsstat.f_files = va.f_files;
6472 }
6473 if (VFSATTR_IS_SUPPORTED(&va, f_ffree)) {
6474 mp->mnt_vfsstat.f_ffree = va.f_ffree;
6475 }
6476
6477 /* this is unlikely to change, but has to be queried for */
6478 if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype)) {
6479 mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;
6480 }
6481
6482 return 0;
6483 }
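/*
 * Example (illustrative only): statfs-style callers typically refresh
 * the cached statistics and then read the per-mount copy via
 * vfs_statfs():
 *
 *	error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT);
 *	if (error == 0) {
 *		struct vfsstatfs *sp = vfs_statfs(mp);
 *		... sp->f_blocks, sp->f_bavail, etc. are now current ...
 *	}
 */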
6484
6485 int
6486 mount_list_add(mount_t mp)
6487 {
6488 int res;
6489
6490 mount_list_lock();
6491 if (get_system_inshutdown() != 0) {
6492 res = -1;
6493 } else {
6494 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
6495 nummounts++;
6496 res = 0;
6497 }
6498 mount_list_unlock();
6499
6500 return res;
6501 }
6502
6503 void
6504 mount_list_remove(mount_t mp)
6505 {
6506 mount_list_lock();
6507 TAILQ_REMOVE(&mountlist, mp, mnt_list);
6508 nummounts--;
6509 mp->mnt_list.tqe_next = NULL;
6510 mp->mnt_list.tqe_prev = NULL;
6511 mount_list_unlock();
6512 }
6513
6514 mount_t
6515 mount_lookupby_volfsid(int volfs_id, int withref)
6516 {
6517 mount_t cur_mount = (mount_t)0;
6518 mount_t mp;
6519
6520 mount_list_lock();
6521 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
6522 if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
6523 (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
6524 (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
6525 cur_mount = mp;
6526 if (withref) {
6527 if (mount_iterref(cur_mount, 1)) {
6528 cur_mount = (mount_t)0;
6529 mount_list_unlock();
6530 goto out;
6531 }
6532 }
6533 break;
6534 }
6535 }
6536 mount_list_unlock();
6537 if (withref && (cur_mount != (mount_t)0)) {
6538 mp = cur_mount;
6539 if (vfs_busy(mp, LK_NOWAIT) != 0) {
6540 cur_mount = (mount_t)0;
6541 }
6542 mount_iterdrop(mp);
6543 }
6544 out:
6545 return cur_mount;
6546 }
6547
6548 mount_t
6549 mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
6550 {
6551 mount_t retmp = (mount_t)0;
6552 mount_t mp;
6553
6554 if (!locked) {
6555 mount_list_lock();
6556 }
6557 TAILQ_FOREACH(mp, &mountlist, mnt_list)
6558 if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
6559 mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
6560 retmp = mp;
6561 if (withref) {
6562 if (mount_iterref(retmp, 1)) {
6563 retmp = (mount_t)0;
6564 }
6565 }
6566 goto out;
6567 }
6568 out:
6569 if (!locked) {
6570 mount_list_unlock();
6571 }
6572 return retmp;
6573 }
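/*
 * Example (a minimal sketch): a lookup with withref set takes an
 * iteration reference that keeps the mount from being reclaimed; the
 * caller must drop it with mount_iterdrop() when done:
 *
 *	mount_t mp = mount_list_lookupby_fsid(&fsid, 0, 1);
 *	if (mp != NULL) {
 *		... use mp ...
 *		mount_iterdrop(mp);
 *	}
 */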
6574
6575 errno_t
6576 vnode_lookupat(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx,
6577 vnode_t start_dvp)
6578 {
6579 struct nameidata *ndp;
6580 int error = 0;
6581 u_int32_t ndflags = 0;
6582
6583 if (ctx == NULL) {
6584 return EINVAL;
6585 }
6586
6587 ndp = kheap_alloc(KHEAP_TEMP, sizeof(struct nameidata), Z_WAITOK);
6588 if (!ndp) {
6589 return ENOMEM;
6590 }
6591
6592 if (flags & VNODE_LOOKUP_NOFOLLOW) {
6593 ndflags = NOFOLLOW;
6594 } else {
6595 ndflags = FOLLOW;
6596 }
6597
6598 if (flags & VNODE_LOOKUP_NOCROSSMOUNT) {
6599 ndflags |= NOCROSSMOUNT;
6600 }
6601
6602 if (flags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
6603 ndflags |= CN_NBMOUNTLOOK;
6604 }
6605
6606 /* XXX AUDITVNPATH1 needed ? */
6607 NDINIT(ndp, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE,
6608 CAST_USER_ADDR_T(path), ctx);
6609
6610 if (start_dvp && (path[0] != '/')) {
6611 ndp->ni_dvp = start_dvp;
6612 ndp->ni_cnd.cn_flags |= USEDVP;
6613 }
6614
6615 if ((error = namei(ndp))) {
6616 goto out_free;
6617 }
6618
6619 ndp->ni_cnd.cn_flags &= ~USEDVP;
6620
6621 *vpp = ndp->ni_vp;
6622 nameidone(ndp);
6623
6624 out_free:
6625 kheap_free(KHEAP_TEMP, ndp, sizeof(struct nameidata));
6626 return error;
6627 }
6628
6629 errno_t
6630 vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
6631 {
6632 return vnode_lookupat(path, flags, vpp, ctx, NULLVP);
6633 }
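/*
 * Example (the path is hypothetical): a successful vnode_lookup()
 * returns a vnode holding an iocount, which the caller releases with
 * vnode_put():
 *
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	error = vnode_lookup("/private/var/tmp/example",
 *	    VNODE_LOOKUP_NOFOLLOW, &vp, vfs_context_current());
 *	if (error == 0) {
 *		... use vp ...
 *		vnode_put(vp);
 *	}
 */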
6634
6635 errno_t
6636 vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
6637 {
6638 struct nameidata *ndp = NULL;
6639 int error;
6640 u_int32_t ndflags = 0;
6641 int lflags = flags;
6642
6643 if (ctx == NULL) { /* XXX technically an error */
6644 ctx = vfs_context_current();
6645 }
6646
6647 ndp = kheap_alloc(KHEAP_TEMP, sizeof(struct nameidata), Z_WAITOK);
6648 if (!ndp) {
6649 return ENOMEM;
6650 }
6651
6652 if (fmode & O_NOFOLLOW) {
6653 lflags |= VNODE_LOOKUP_NOFOLLOW;
6654 }
6655
6656 if (lflags & VNODE_LOOKUP_NOFOLLOW) {
6657 ndflags = NOFOLLOW;
6658 } else {
6659 ndflags = FOLLOW;
6660 }
6661
6662 if (lflags & VNODE_LOOKUP_NOCROSSMOUNT) {
6663 ndflags |= NOCROSSMOUNT;
6664 }
6665
6666 if (lflags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
6667 ndflags |= CN_NBMOUNTLOOK;
6668 }
6669
6670 /* XXX AUDITVNPATH1 needed ? */
6671 NDINIT(ndp, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE,
6672 CAST_USER_ADDR_T(path), ctx);
6673
6674 if ((error = vn_open(ndp, fmode, cmode))) {
6675 *vpp = NULL;
6676 } else {
6677 *vpp = ndp->ni_vp;
6678 }
6679
6680 kheap_free(KHEAP_TEMP, ndp, sizeof(struct nameidata));
6681 return error;
6682 }
6683
6684 errno_t
6685 vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
6686 {
6687 int error;
6688
6689 if (ctx == NULL) {
6690 ctx = vfs_context_current();
6691 }
6692
6693 error = vn_close(vp, flags, ctx);
6694 vnode_put(vp);
6695 return error;
6696 }
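/*
 * Example (a sketch; the path, flags, and mode are illustrative):
 * vnode_open() and vnode_close() pair as a kernel-internal open/close;
 * vnode_close() also drops the iocount taken by vnode_open():
 *
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	error = vnode_open("/private/var/log/example.log",
 *	    O_CREAT | FWRITE, 0644, 0, &vp, ctx);
 *	if (error == 0) {
 *		... vn_rdwr() or similar I/O against vp ...
 *		error = vnode_close(vp, FWRITE, ctx);
 *	}
 */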
6697
6698 errno_t
6699 vnode_mtime(vnode_t vp, struct timespec *mtime, vfs_context_t ctx)
6700 {
6701 struct vnode_attr va;
6702 int error;
6703
6704 VATTR_INIT(&va);
6705 VATTR_WANTED(&va, va_modify_time);
6706 error = vnode_getattr(vp, &va, ctx);
6707 if (!error) {
6708 *mtime = va.va_modify_time;
6709 }
6710 return error;
6711 }
6712
6713 errno_t
6714 vnode_flags(vnode_t vp, uint32_t *flags, vfs_context_t ctx)
6715 {
6716 struct vnode_attr va;
6717 int error;
6718
6719 VATTR_INIT(&va);
6720 VATTR_WANTED(&va, va_flags);
6721 error = vnode_getattr(vp, &va, ctx);
6722 if (!error) {
6723 *flags = va.va_flags;
6724 }
6725 return error;
6726 }
6727
6728 /*
6729 * Returns: 0 Success
6730 * vnode_getattr:???
6731 */
6732 errno_t
6733 vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
6734 {
6735 struct vnode_attr va;
6736 int error;
6737
6738 VATTR_INIT(&va);
6739 VATTR_WANTED(&va, va_data_size);
6740 error = vnode_getattr(vp, &va, ctx);
6741 if (!error) {
6742 *sizep = va.va_data_size;
6743 }
6744 return error;
6745 }
6746
6747 errno_t
6748 vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
6749 {
6750 struct vnode_attr va;
6751
6752 VATTR_INIT(&va);
6753 VATTR_SET(&va, va_data_size, size);
6754 va.va_vaflags = ioflag & 0xffff;
6755 return vnode_setattr(vp, &va, ctx);
6756 }
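/*
 * Example (a generic sketch of the pattern shared by the wrappers
 * above): any single attribute can be fetched with VATTR_INIT /
 * VATTR_WANTED / vnode_getattr, checking that the filesystem actually
 * supplied the value before using it:
 *
 *	struct vnode_attr va;
 *	uid_t uid = 0;
 *	int error;
 *
 *	VATTR_INIT(&va);
 *	VATTR_WANTED(&va, va_uid);
 *	error = vnode_getattr(vp, &va, ctx);
 *	if ((error == 0) && VATTR_IS_SUPPORTED(&va, va_uid)) {
 *		uid = va.va_uid;
 *	}
 */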
6757
6758 int
6759 vnode_setdirty(vnode_t vp)
6760 {
6761 vnode_lock_spin(vp);
6762 vp->v_flag |= VISDIRTY;
6763 vnode_unlock(vp);
6764 return 0;
6765 }
6766
6767 int
6768 vnode_cleardirty(vnode_t vp)
6769 {
6770 vnode_lock_spin(vp);
6771 vp->v_flag &= ~VISDIRTY;
6772 vnode_unlock(vp);
6773 return 0;
6774 }
6775
6776 int
6777 vnode_isdirty(vnode_t vp)
6778 {
6779 int dirty;
6780
6781 vnode_lock_spin(vp);
6782 dirty = (vp->v_flag & VISDIRTY) ? 1 : 0;
6783 vnode_unlock(vp);
6784
6785 return dirty;
6786 }
6787
6788 static int
6789 vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
6790 {
6791 /* Only use compound VNOP for compound operation */
6792 if (vnode_compound_open_available(dvp) && ((flags & VN_CREATE_DOOPEN) != 0)) {
6793 *vpp = NULLVP;
6794 return VNOP_COMPOUND_OPEN(dvp, vpp, ndp, O_CREAT, fmode, statusp, vap, ctx);
6795 } else {
6796 return VNOP_CREATE(dvp, vpp, &ndp->ni_cnd, vap, ctx);
6797 }
6798 }
6799
6800 /*
6801 * Create a filesystem object of arbitrary type with arbitrary attributes in
6802 * the specified directory with the specified name.
6803 *
6804 * Parameters: dvp Pointer to the vnode of the directory
6805 * in which to create the object.
6806 * vpp Pointer to the area into which to
6807 * return the vnode of the created object.
6808 * cnp Component name pointer from the namei
6809 * data structure, containing the name to
6810 * use for the create object.
6811 * vap Pointer to the vnode_attr structure
6812 * describing the object to be created,
6813 * including the type of object.
6814 * flags VN_* flags controlling ACL inheritance
6815 * and whether or not authorization is to
6816 * be required for the operation.
6817 *
6818 * Returns: 0 Success
6819 * !0 errno value
6820 *
6821 * Implicit: *vpp Contains the vnode of the object that
6822 * was created, if successful.
6823 * *cnp May be modified by the underlying VFS.
6824 * *vap May be modified by the underlying VFS;
6825 * it is also modified by either ACL inheritance or
6826 * attribute defaulting before the create is
6827 * attempted, so the caller's copy may
6828 * be modified, even if the operation is
6829 * unsuccessful.
6830 *
6831 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
6832 *
6833 * Modification of '*cnp' and '*vap' by the underlying VFS is
6834 * strongly discouraged.
6835 *
6836 * XXX: This function is a 'vn_*' function; it belongs in vfs_vnops.c
6837 *
6838 * XXX: We should enumerate the possible errno values here, and where
6839 * in the code they originated.
6840 */
6841 errno_t
6842 vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
6843 {
6844 errno_t error, old_error;
6845 vnode_t vp = (vnode_t)0;
6846 boolean_t batched;
6847 struct componentname *cnp;
6848 uint32_t defaulted;
6849
6850 cnp = &ndp->ni_cnd;
6851 error = 0;
6852 batched = namei_compound_available(dvp, ndp) ? TRUE : FALSE;
6853
6854 KAUTH_DEBUG("%p CREATE - '%s'", dvp, cnp->cn_nameptr);
6855
6856 if (flags & VN_CREATE_NOINHERIT) {
6857 vap->va_vaflags |= VA_NOINHERIT;
6858 }
6859 if (flags & VN_CREATE_NOAUTH) {
6860 vap->va_vaflags |= VA_NOAUTH;
6861 }
6862 /*
6863 * Handle ACL inheritance, initialize vap.
6864 */
6865 error = vn_attribute_prepare(dvp, vap, &defaulted, ctx);
6866 if (error) {
6867 return error;
6868 }
6869
6870 if (vap->va_type != VREG && (fmode != 0 || (flags & VN_CREATE_DOOPEN) || statusp)) {
6871 panic("Open parameters, but not a regular file.");
6872 }
6873 if ((fmode != 0) && ((flags & VN_CREATE_DOOPEN) == 0)) {
6874 panic("Mode for open, but not trying to open...");
6875 }
6876
6877
6878 /*
6879 * Create the requested node.
6880 */
6881 switch (vap->va_type) {
6882 case VREG:
6883 error = vn_create_reg(dvp, vpp, ndp, vap, flags, fmode, statusp, ctx);
6884 break;
6885 case VDIR:
6886 error = vn_mkdir(dvp, vpp, ndp, vap, ctx);
6887 break;
6888 case VSOCK:
6889 case VFIFO:
6890 case VBLK:
6891 case VCHR:
6892 error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
6893 break;
6894 default:
6895 panic("vnode_create: unknown vtype %d", vap->va_type);
6896 }
6897 if (error != 0) {
6898 KAUTH_DEBUG("%p CREATE - error %d returned by filesystem", dvp, error);
6899 goto out;
6900 }
6901
6902 vp = *vpp;
6903 old_error = error;
6904
6905 /*
6906 * If some of the requested attributes weren't handled by the VNOP,
6907 * use our fallback code.
6908 */
6909 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap) && *vpp) {
6910 KAUTH_DEBUG(" CREATE - doing fallback with ACL %p", vap->va_acl);
6911 error = vnode_setattr_fallback(*vpp, vap, ctx);
6912 }
6913
6914 #if CONFIG_MACF
6915 if ((error == 0) && !(flags & VN_CREATE_NOLABEL)) {
6916 error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
6917 }
6918 #endif
6919
6920 if ((error != 0) && (vp != (vnode_t)0)) {
6921 /* If we've done a compound open, close */
6922 if (batched && (old_error == 0) && (vap->va_type == VREG)) {
6923 VNOP_CLOSE(vp, fmode, ctx);
6924 }
6925
6926 /* Need to provide notifications if a create succeeded */
6927 if (!batched) {
6928 *vpp = (vnode_t) 0;
6929 vnode_put(vp);
6930 vp = NULLVP;
6931 }
6932 }
6933
6934 /*
6935 * For creation VNOPs, this is the equivalent of
6936 * lookup_handle_found_vnode.
6937 */
6938 if (kdebug_enable && *vpp) {
6939 kdebug_lookup(*vpp, cnp);
6940 }
6941
6942 out:
6943 vn_attribute_cleanup(vap, defaulted);
6944
6945 return error;
6946 }
6947
6948 static kauth_scope_t vnode_scope;
6949 static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
6950 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
6951 static int vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx,
6952 vnode_t vp, vnode_t dvp, int *errorp);
6953
6954 typedef struct _vnode_authorize_context {
6955 vnode_t vp;
6956 struct vnode_attr *vap;
6957 vnode_t dvp;
6958 struct vnode_attr *dvap;
6959 vfs_context_t ctx;
6960 int flags;
6961 int flags_valid;
6962 #define _VAC_IS_OWNER (1<<0)
6963 #define _VAC_IN_GROUP (1<<1)
6964 #define _VAC_IS_DIR_OWNER (1<<2)
6965 #define _VAC_IN_DIR_GROUP (1<<3)
6966 #define _VAC_NO_VNODE_POINTERS (1<<4)
6967 } *vauth_ctx;
6968
6969 void
6970 vnode_authorize_init(void)
6971 {
6972 vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
6973 }
6974
6975 #define VATTR_PREPARE_DEFAULTED_UID 0x1
6976 #define VATTR_PREPARE_DEFAULTED_GID 0x2
6977 #define VATTR_PREPARE_DEFAULTED_MODE 0x4
6978
6979 int
6980 vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
6981 {
6982 kauth_acl_t nacl = NULL, oacl = NULL;
6983 int error;
6984
6985 /*
6986 * Handle ACL inheritance.
6987 */
6988 if (!(vap->va_vaflags & VA_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
6989 /* save the original filesec */
6990 if (VATTR_IS_ACTIVE(vap, va_acl)) {
6991 oacl = vap->va_acl;
6992 }
6993
6994 vap->va_acl = NULL;
6995 if ((error = kauth_acl_inherit(dvp,
6996 oacl,
6997 &nacl,
6998 vap->va_type == VDIR,
6999 ctx)) != 0) {
7000 KAUTH_DEBUG("%p CREATE - error %d processing inheritance", dvp, error);
7001 return error;
7002 }
7003
7004 /*
7005 * If the generated ACL is NULL, then we can save ourselves some effort
7006 * by clearing the active bit.
7007 */
7008 if (nacl == NULL) {
7009 VATTR_CLEAR_ACTIVE(vap, va_acl);
7010 } else {
7011 vap->va_base_acl = oacl;
7012 VATTR_SET(vap, va_acl, nacl);
7013 }
7014 }
7015
7016 error = vnode_authattr_new_internal(dvp, vap, (vap->va_vaflags & VA_NOAUTH), defaulted_fieldsp, ctx);
7017 if (error) {
7018 vn_attribute_cleanup(vap, *defaulted_fieldsp);
7019 }
7020
7021 return error;
7022 }
7023
7024 void
7025 vn_attribute_cleanup(struct vnode_attr *vap, uint32_t defaulted_fields)
7026 {
7027 /*
7028 * If the caller supplied a filesec in vap, it has been replaced
7029 * now by the post-inheritance copy. We need to put the original back
7030 * and free the inherited product.
7031 */
7032 kauth_acl_t nacl, oacl;
7033
7034 if (VATTR_IS_ACTIVE(vap, va_acl)) {
7035 nacl = vap->va_acl;
7036 oacl = vap->va_base_acl;
7037
7038 if (oacl) {
7039 VATTR_SET(vap, va_acl, oacl);
7040 vap->va_base_acl = NULL;
7041 } else {
7042 VATTR_CLEAR_ACTIVE(vap, va_acl);
7043 }
7044
7045 if (nacl != NULL) {
7046 kauth_acl_free(nacl);
7047 }
7048 }
7049
7050 if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_MODE) != 0) {
7051 VATTR_CLEAR_ACTIVE(vap, va_mode);
7052 }
7053 if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_GID) != 0) {
7054 VATTR_CLEAR_ACTIVE(vap, va_gid);
7055 }
7056 if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_UID) != 0) {
7057 VATTR_CLEAR_ACTIVE(vap, va_uid);
7058 }
7059
7060 return;
7061 }
7062
7063 int
7064 vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, __unused void *reserved)
7065 {
7066 #if !CONFIG_MACF
7067 #pragma unused(cnp)
7068 #endif
7069 int error = 0;
7070
7071 /*
7072 * Normally, unlinking of directories is not supported.
7073 * However, some file systems may have limited support.
7074 */
7075 if ((vp->v_type == VDIR) &&
7076 !(vp->v_mount->mnt_kern_flag & MNTK_DIR_HARDLINKS)) {
7077 return EPERM; /* POSIX */
7078 }
7079
7080 /* authorize the delete operation */
7081 #if CONFIG_MACF
7082 if (!error) {
7083 error = mac_vnode_check_unlink(ctx, dvp, vp, cnp);
7084 }
7085 #endif /* MAC */
7086 if (!error) {
7087 error = vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
7088 }
7089
7090 return error;
7091 }
7092
7093 int
7094 vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs_context_t ctx, void *reserved)
7095 {
7096 /* Open of existing case */
7097 kauth_action_t action;
7098 int error = 0;
7099 if (cnp->cn_ndp == NULL) {
7100 panic("NULL ndp");
7101 }
7102 if (reserved != NULL) {
7103 panic("reserved not NULL.");
7104 }
7105
7106 #if CONFIG_MACF
7107 /* XXX may do duplicate work here, but ignore that for now (idempotent) */
7108 if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) {
7109 error = vnode_label(vnode_mount(vp), NULL, vp, NULL, 0, ctx);
7110 if (error) {
7111 return error;
7112 }
7113 }
7114 #endif
7115
7116 if ((fmode & O_DIRECTORY) && vp->v_type != VDIR) {
7117 return ENOTDIR;
7118 }
7119
7120 if (vp->v_type == VSOCK && vp->v_tag != VT_FDESC) {
7121 return EOPNOTSUPP; /* Operation not supported on socket */
7122 }
7123
7124 if (vp->v_type == VLNK && (fmode & O_NOFOLLOW) != 0) {
7125 return ELOOP; /* O_NOFOLLOW was specified and the target is a symbolic link */
7126 }
7127
7128 /* disallow write operations on directories */
7129 if (vnode_isdir(vp) && (fmode & (FWRITE | O_TRUNC))) {
7130 return EISDIR;
7131 }
7132
7133 if ((cnp->cn_ndp->ni_flag & NAMEI_TRAILINGSLASH)) {
7134 if (vp->v_type != VDIR) {
7135 return ENOTDIR;
7136 }
7137 }
7138
7139 #if CONFIG_MACF
7140 /* If a file being opened is a shadow file containing
7141 * namedstream data, ignore the MACF checks because it
7142 * is a kernel-internal file and access should always
7143 * be allowed.
7144 */
7145 if (!(vnode_isshadow(vp) && vnode_isnamedstream(vp))) {
7146 error = mac_vnode_check_open(ctx, vp, fmode);
7147 if (error) {
7148 return error;
7149 }
7150 }
7151 #endif
7152
7153 /* compute action to be authorized */
7154 action = 0;
7155 if (fmode & FREAD) {
7156 action |= KAUTH_VNODE_READ_DATA;
7157 }
7158 if (fmode & (FWRITE | O_TRUNC)) {
7159 /*
7160 * If we are writing, appending, and not truncating,
7161 * indicate that we are appending so that if the
7162 * UF_APPEND or SF_APPEND bits are set, we do not deny
7163 * the open.
7164 */
7165 if ((fmode & O_APPEND) && !(fmode & O_TRUNC)) {
7166 action |= KAUTH_VNODE_APPEND_DATA;
7167 } else {
7168 action |= KAUTH_VNODE_WRITE_DATA;
7169 }
7170 }
7171 error = vnode_authorize(vp, NULL, action, ctx);
7172 #if NAMEDSTREAMS
7173 if (error == EACCES) {
7174 /*
7175 * Shadow files may exist on-disk with a different UID/GID
7176 * than that of the current context. Verify that this file
7177 * is really a shadow file. If it was created successfully
7178 * then it should be authorized.
7179 */
7180 if (vnode_isshadow(vp) && vnode_isnamedstream(vp)) {
7181 error = vnode_verifynamedstream(vp);
7182 }
7183 }
7184 #endif
7185
7186 return error;
7187 }
7188
7189 int
7190 vn_authorize_create(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
7191 {
7192 #if !CONFIG_MACF
7193 #pragma unused(vap)
7194 #endif
7195 /* Creation case */
7196 int error;
7197
7198 if (cnp->cn_ndp == NULL) {
7199 panic("NULL cn_ndp");
7200 }
7201 if (reserved != NULL) {
7202 panic("reserved not NULL.");
7203 }
7204
7205 /* Only validate path for creation if we didn't do a complete lookup */
7206 if (cnp->cn_ndp->ni_flag & NAMEI_UNFINISHED) {
7207 error = lookup_validate_creation_path(cnp->cn_ndp);
7208 if (error) {
7209 return error;
7210 }
7211 }
7212
7213 #if CONFIG_MACF
7214 error = mac_vnode_check_create(ctx, dvp, cnp, vap);
7215 if (error) {
7216 return error;
7217 }
7218 #endif /* CONFIG_MACF */
7219
7220 return vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx);
7221 }
7222
7223 int
7224 vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
7225 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
7226 vfs_context_t ctx, void *reserved)
7227 {
7228 return vn_authorize_renamex(fdvp, fvp, fcnp, tdvp, tvp, tcnp, ctx, 0, reserved);
7229 }
7230
7231 int
7232 vn_authorize_renamex(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
7233 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
7234 vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
7235 {
7236 return vn_authorize_renamex_with_paths(fdvp, fvp, fcnp, NULL, tdvp, tvp, tcnp, NULL, ctx, flags, reserved);
7237 }
7238
7239 int
7240 vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, const char *from_path,
7241 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, const char *to_path,
7242 vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
7243 {
7244 int error = 0;
7245 int moving = 0;
7246 bool swap = flags & VFS_RENAME_SWAP;
7247
7248 if (reserved != NULL) {
7249 panic("Passed something other than NULL as reserved field!");
7250 }
7251
7252 /*
7253 * Avoid renaming "." and "..".
7254 *
7255 * XXX No need to check for this in the FS. We should always have the leaves
7256 * in VFS in this case.
7257 */
7258 if (fvp->v_type == VDIR &&
7259 ((fdvp == fvp) ||
7260 (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
7261 ((fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT))) {
7262 error = EINVAL;
7263 goto out;
7264 }
7265
7266 if (tvp == NULLVP && vnode_compound_rename_available(tdvp)) {
7267 error = lookup_validate_creation_path(tcnp->cn_ndp);
7268 if (error) {
7269 goto out;
7270 }
7271 }
7272
7273 /***** <MACF> *****/
7274 #if CONFIG_MACF
7275 error = mac_vnode_check_rename(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp);
7276 if (error) {
7277 goto out;
7278 }
7279 if (swap) {
7280 error = mac_vnode_check_rename(ctx, tdvp, tvp, tcnp, fdvp, fvp, fcnp);
7281 if (error) {
7282 goto out;
7283 }
7284 }
7285 #endif
7286 /***** </MACF> *****/
7287
7288 /***** <MiscChecks> *****/
7289 if (tvp != NULL) {
7290 if (!swap) {
7291 if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
7292 error = ENOTDIR;
7293 goto out;
7294 } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
7295 error = EISDIR;
7296 goto out;
7297 }
7298 }
7299 } else if (swap) {
7300 /*
7301 * Caller should have already checked this and returned
7302 * ENOENT. If we send back ENOENT here, the caller will retry,
7303 * which isn't what we want, so we send back EINVAL here
7304 * instead.
7305 */
7306 error = EINVAL;
7307 goto out;
7308 }
7309
7310 if (fvp == tdvp) {
7311 error = EINVAL;
7312 goto out;
7313 }
7314
7315 /*
7316 * The following edge case is caught here:
7317 * (to cannot be a descendant of from)
7318 *
7319 * o fdvp
7320 * /
7321 * /
7322 * o fvp
7323 * \
7324 * \
7325 * o tdvp
7326 * /
7327 * /
7328 * o tvp
7329 */
7330 if (tdvp->v_parent == fvp) {
7331 error = EINVAL;
7332 goto out;
7333 }
7334
7335 if (swap && fdvp->v_parent == tvp) {
7336 error = EINVAL;
7337 goto out;
7338 }
7339 /***** </MiscChecks> *****/
7340
7341 /***** <Kauth> *****/
7342
7343 /*
7344 * As part of the Kauth step, we call out to allow 3rd-party
7345 * fileop notification of "about to rename". This is needed
7346 * in the event that 3rd-parties need to know that the DELETE
7347 * authorization is actually part of a rename. It's important
7348 * that we guarantee that the DELETE call-out will always be
7349 * made if the WILL_RENAME call-out is made. Another fileop
7350 * call-out will be performed once the operation is completed.
7351 * We can ignore the result of kauth_authorize_fileop().
7352 *
7353 * N.B. We are passing the vnode and *both* paths to each
7354 * call; kauth_authorize_fileop() extracts the "from" path
7355 * when posting a KAUTH_FILEOP_WILL_RENAME notification.
7356 * As such, we only post these notifications if all of the
7357 * information we need is provided.
7358 */
7359
7360 if (swap) {
7361 kauth_action_t f = 0, t = 0;
7362
7363 /*
7364 * Directories changing parents need ...ADD_SUBDIR... to
7365 * permit changing ".."
7366 */
7367 if (fdvp != tdvp) {
7368 if (vnode_isdir(fvp)) {
7369 f = KAUTH_VNODE_ADD_SUBDIRECTORY;
7370 }
7371 if (vnode_isdir(tvp)) {
7372 t = KAUTH_VNODE_ADD_SUBDIRECTORY;
7373 }
7374 }
7375 if (to_path != NULL) {
7376 kauth_authorize_fileop(vfs_context_ucred(ctx),
7377 KAUTH_FILEOP_WILL_RENAME,
7378 (uintptr_t)fvp,
7379 (uintptr_t)to_path);
7380 }
7381 error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | f, ctx);
7382 if (error) {
7383 goto out;
7384 }
7385 if (from_path != NULL) {
7386 kauth_authorize_fileop(vfs_context_ucred(ctx),
7387 KAUTH_FILEOP_WILL_RENAME,
7388 (uintptr_t)tvp,
7389 (uintptr_t)from_path);
7390 }
7391 error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE | t, ctx);
7392 if (error) {
7393 goto out;
7394 }
7395 f = vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;
7396 t = vnode_isdir(tvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;
7397 if (fdvp == tdvp) {
7398 error = vnode_authorize(fdvp, NULL, f | t, ctx);
7399 } else {
7400 error = vnode_authorize(fdvp, NULL, t, ctx);
7401 if (error) {
7402 goto out;
7403 }
7404 error = vnode_authorize(tdvp, NULL, f, ctx);
7405 }
7406 if (error) {
7407 goto out;
7408 }
7409 } else {
7410 error = 0;
7411 if ((tvp != NULL) && vnode_isdir(tvp)) {
7412 if (tvp != fdvp) {
7413 moving = 1;
7414 }
7415 } else if (tdvp != fdvp) {
7416 moving = 1;
7417 }
7418
7419 /*
7420 * must have delete rights to remove the old name even in
7421 * the simple case of fdvp == tdvp.
7422 *
7423 * If fvp is a directory and we are changing its parent,
7424 * then we also need rights to rewrite its ".." entry.
7425 */
7426 if (to_path != NULL) {
7427 kauth_authorize_fileop(vfs_context_ucred(ctx),
7428 KAUTH_FILEOP_WILL_RENAME,
7429 (uintptr_t)fvp,
7430 (uintptr_t)to_path);
7431 }
7432 if (vnode_isdir(fvp)) {
7433 if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
7434 goto out;
7435 }
7436 } else {
7437 if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx)) != 0) {
7438 goto out;
7439 }
7440 }
7441 if (moving) {
7442 /* moving into tdvp or tvp, must have rights to add */
7443 if ((error = vnode_authorize(((tvp != NULL) && vnode_isdir(tvp)) ? tvp : tdvp,
7444 NULL,
7445 vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE,
7446 ctx)) != 0) {
7447 goto out;
7448 }
7449 } else {
7450 /* node staying in same directory, must be allowed to add new name */
7451 if ((error = vnode_authorize(fdvp, NULL,
7452 vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, ctx)) != 0) {
7453 goto out;
7454 }
7455 }
7456 /* overwriting tvp */
7457 if ((tvp != NULL) && !vnode_isdir(tvp) &&
7458 ((error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx)) != 0)) {
7459 goto out;
7460 }
7461 }
7462
7463 /***** </Kauth> *****/
7464
7465 /* XXX more checks? */
7466 out:
7467 return error;
7468 }
7469
7470 int
7471 vn_authorize_mkdir(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
7472 {
7473 #if !CONFIG_MACF
7474 #pragma unused(vap)
7475 #endif
7476 int error;
7477
7478 if (reserved != NULL) {
7479 panic("reserved not NULL in vn_authorize_mkdir()");
7480 }
7481
7482 /* XXX A hack for now, to make shadow files work */
7483 if (cnp->cn_ndp == NULL) {
7484 return 0;
7485 }
7486
7487 if (vnode_compound_mkdir_available(dvp)) {
7488 error = lookup_validate_creation_path(cnp->cn_ndp);
7489 if (error) {
7490 goto out;
7491 }
7492 }
7493
7494 #if CONFIG_MACF
7495 error = mac_vnode_check_create(ctx,
7496 dvp, cnp, vap);
7497 if (error) {
7498 goto out;
7499 }
7500 #endif
7501
7502 /* authorize addition of a directory to the parent */
7503 if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
7504 goto out;
7505 }
7506
7507 out:
7508 return error;
7509 }
7510
7511 int
7512 vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved)
7513 {
7514 #if CONFIG_MACF
7515 int error;
7516 #else
7517 #pragma unused(cnp)
7518 #endif
7519 if (reserved != NULL) {
7520 panic("Non-NULL reserved argument to vn_authorize_rmdir()");
7521 }
7522
7523 if (vp->v_type != VDIR) {
7524 /*
7525 * rmdir only deals with directories
7526 */
7527 return ENOTDIR;
7528 }
7529
7530 if (dvp == vp) {
7531 /*
7532 * No rmdir "." please.
7533 */
7534 return EINVAL;
7535 }
7536
7537 #if CONFIG_MACF
7538 error = mac_vnode_check_unlink(ctx, dvp,
7539 vp, cnp);
7540 if (error) {
7541 return error;
7542 }
7543 #endif
7544
7545 return vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
7546 }
7547
7548 /*
7549 * Authorizer for directory cloning. This does not use vnodes but instead
7550 * uses prefilled vnode attributes from the filesystem.
7551 *
7552 * The same function is called to set up the attributes required, perform the
7553 * authorization and cleanup (if required)
7554 */
7555 int
7556 vnode_attr_authorize_dir_clone(struct vnode_attr *vap, kauth_action_t action,
7557 struct vnode_attr *dvap, __unused vnode_t sdvp, mount_t mp,
7558 dir_clone_authorizer_op_t vattr_op, uint32_t flags, vfs_context_t ctx,
7559 __unused void *reserved)
7560 {
7561 int error;
7562 int is_suser = vfs_context_issuser(ctx);
7563
7564 if (vattr_op == OP_VATTR_SETUP) {
7565 VATTR_INIT(vap);
7566
7567 /*
7568 * When ACL inheritance is implemented, both vap->va_acl and
7569 * dvap->va_acl will be required (even as superuser).
7570 */
7571 VATTR_WANTED(vap, va_type);
7572 VATTR_WANTED(vap, va_mode);
7573 VATTR_WANTED(vap, va_flags);
7574 VATTR_WANTED(vap, va_uid);
7575 VATTR_WANTED(vap, va_gid);
7576 if (dvap) {
7577 VATTR_INIT(dvap);
7578 VATTR_WANTED(dvap, va_flags);
7579 }
7580
7581 if (!is_suser) {
7582 /*
7583 * If not superuser, we have to evaluate ACLs and
7584 * need the target directory gid to set the initial
7585 * gid of the new object.
7586 */
7587 VATTR_WANTED(vap, va_acl);
7588 if (dvap) {
7589 VATTR_WANTED(dvap, va_gid);
7590 }
7591 } else if (dvap && (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
7592 VATTR_WANTED(dvap, va_gid);
7593 }
7594 return 0;
7595 } else if (vattr_op == OP_VATTR_CLEANUP) {
7596 return 0; /* Nothing to do for now */
7597 }
7598
7599 /* dvap isn't used for authorization */
7600 error = vnode_attr_authorize(vap, NULL, mp, action, ctx);
7601
7602 if (error) {
7603 return error;
7604 }
7605
7606 /*
7607 * vn_attribute_prepare should be able to accept attributes as well as
7608 * vnodes but for now we do this inline.
7609 */
7610 if (!is_suser || (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
7611 /*
7612 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit
7613 * owner is set, that owner takes ownership of all new files.
7614 */
7615 if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
7616 (mp->mnt_fsowner != KAUTH_UID_NONE)) {
7617 VATTR_SET(vap, va_uid, mp->mnt_fsowner);
7618 } else {
7619 /* default owner is current user */
7620 VATTR_SET(vap, va_uid,
7621 kauth_cred_getuid(vfs_context_ucred(ctx)));
7622 }
7623
7624 if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
7625 (mp->mnt_fsgroup != KAUTH_GID_NONE)) {
7626 VATTR_SET(vap, va_gid, mp->mnt_fsgroup);
7627 } else {
7628 /*
7629 * default group comes from parent object,
7630 * fallback to current user
7631 */
7632 if (VATTR_IS_SUPPORTED(dvap, va_gid)) {
7633 VATTR_SET(vap, va_gid, dvap->va_gid);
7634 } else {
7635 VATTR_SET(vap, va_gid,
7636 kauth_cred_getgid(vfs_context_ucred(ctx)));
7637 }
7638 }
7639 }
7640
7641 /* Inherit SF_RESTRICTED bit from destination directory only */
7642 if (VATTR_IS_ACTIVE(vap, va_flags)) {
7643 VATTR_SET(vap, va_flags,
7644 ((vap->va_flags & ~(UF_DATAVAULT | SF_RESTRICTED)))); /* Turn off from source */
7645 if (VATTR_IS_ACTIVE(dvap, va_flags)) {
7646 VATTR_SET(vap, va_flags,
7647 vap->va_flags | (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
7648 }
7649 } else if (VATTR_IS_ACTIVE(dvap, va_flags)) {
7650 VATTR_SET(vap, va_flags, (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
7651 }
7652
7653 return 0;
7654 }
7655
7656
7657 /*
7658 * Authorize an operation on a vnode.
7659 *
7660 * This is KPI, but here because it needs vnode_scope.
7661 *
7662 * Returns: 0 Success
7663 * kauth_authorize_action:EPERM ...
7664 * xlate => EACCES Permission denied
7665 * kauth_authorize_action:0 Success
7666 * kauth_authorize_action: Depends on callback return; this is
7667 * usually only vnode_authorize_callback(),
7668 * but may include other listeners, if any
7669 * exist.
7670 * EROFS
7671 * EACCES
7672 * EPERM
7673 * ???
7674 */
7675 int
7676 vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
7677 {
7678 int error, result;
7679
7680 /*
7681 * We can't authorize against a dead vnode; allow all operations through so that
7682 * the correct error can be returned.
7683 */
7684 if (vp->v_type == VBAD) {
7685 return 0;
7686 }
7687
7688 error = 0;
7689 result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
7690 (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
7691 if (result == EPERM) { /* traditional behaviour */
7692 result = EACCES;
7693 }
7694 /* did the lower layers give a better error return? */
7695 if ((result != 0) && (error != 0)) {
7696 return error;
7697 }
7698 return result;
7699 }
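/*
 * Example (illustrative): checking write access before modifying a
 * file, mirroring the open path above:
 *
 *	error = vnode_authorize(vp, NULL, KAUTH_VNODE_WRITE_DATA, ctx);
 *	if (error != 0) {
 *		return error;
 *	}
 *
 * For namespace operations the parent directory is supplied as well,
 * e.g. vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx) as used by
 * vn_authorize_unlink() above.
 */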
7700
7701 /*
7702 * Test for vnode immutability.
7703 *
7704 * The 'append' flag is set when the authorization request is constrained
7705 * to operations which only request the right to append to a file.
7706 *
7707 * The 'ignore' flag is set when an operation modifying the immutability flags
7708 * is being authorized. We check the system securelevel to determine which
7709 * immutability flags we can ignore.
7710 */
7711 static int
7712 vnode_immutable(struct vnode_attr *vap, int append, int ignore)
7713 {
7714 int mask;
7715
7716 /* start with all bits precluding the operation */
7717 mask = IMMUTABLE | APPEND;
7718
7719 /* if appending only, remove the append-only bits */
7720 if (append) {
7721 mask &= ~APPEND;
7722 }
7723
7724 /* ignore only set when authorizing flags changes */
7725 if (ignore) {
7726 if (securelevel <= 0) {
7727 /* in insecure state, flags do not inhibit changes */
7728 mask = 0;
7729 } else {
7730 /* in secure state, user flags don't inhibit */
7731 mask &= ~(UF_IMMUTABLE | UF_APPEND);
7732 }
7733 }
7734 KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
7735 if ((vap->va_flags & mask) != 0) {
7736 return EPERM;
7737 }
7738 return 0;
7739 }
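/*
 * Worked example (values assumed for illustration): with securelevel > 0
 * and a file whose va_flags include UF_APPEND, an append-only request
 * (append == 1, ignore == 0) trims the mask from IMMUTABLE | APPEND down
 * to IMMUTABLE, so UF_APPEND no longer precludes the operation and we
 * return 0; the same file with append == 0 keeps APPEND in the mask and
 * we return EPERM.
 */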
7740
7741 static int
7742 vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
7743 {
7744 int result;
7745
7746 /* default assumption is not-owner */
7747 result = 0;
7748
7749 /*
7750 * If the filesystem has given us a UID, we treat this as authoritative.
7751 */
7752 if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
7753 result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
7754 }
7755 /* we could test the owner UUID here if we had a policy for it */
7756
7757 return result;
7758 }
7759
7760 /*
7761 * vauth_node_group
7762 *
7763 * Description: Ask if a cred is a member of the group owning the vnode object
7764 *
7765 * Parameters: vap vnode attribute
7766 * vap->va_gid group owner of vnode object
7767 * cred credential to check
7768 * ismember pointer to where to put the answer
7769 * idontknow Return this if we can't get an answer
7770 *
7771 * Returns: 0 Success
7772 * idontknow Can't get information
7773 * kauth_cred_ismember_gid:? Error from kauth subsystem
7774 * kauth_cred_ismember_gid:? Error from kauth subsystem
7775 */
7776 static int
7777 vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow)
7778 {
7779 int error;
7780 int result;
7781
7782 error = 0;
7783 result = 0;
7784
7785 /*
7786 * The caller is expected to have asked the filesystem for a group
7787 * at some point prior to calling this function. The answer may
7788 * have been that there is no group ownership supported for the
7789 * vnode object, in which case we return 0 (not a member).
7790 */
7791 if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
7792 error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
7793 /*
7794 * Credentials which are opted into external group membership
7795 * resolution which are not known to the external resolver
7796 * will result in an ENOENT error. We translate this into
7797 * the appropriate 'idontknow' response for our caller.
7798 *
7799 * XXX We do not make a distinction here between an ENOENT
7800 * XXX arising from a response from the external resolver,
7801 * XXX and an ENOENT which is internally generated. This is
7802 * XXX a deficiency of the published kauth_cred_ismember_gid()
7803 * XXX KPI which can not be overcome without new KPI. For
7804 * XXX all currently known cases, however, this will result
7805 * XXX in correct behaviour.
7806 */
7807 if (error == ENOENT) {
7808 error = idontknow;
7809 }
7810 }
7811 /*
7812 * XXX We could test the group UUID here if we had a policy for it,
7813 * XXX but this is problematic from the perspective of synchronizing
7814 * XXX group UUID and POSIX GID ownership of a file and keeping the
7815 * XXX values coherent over time. The problem is that the local
7816 * XXX system will vend transient group UUIDs for unknown POSIX GID
7817 * XXX values, and these are not persistent, whereas storage of values
7818 * XXX is persistent. One potential solution to this is a local
7819 * XXX (persistent) replica of remote directory entries and vended
7820 * XXX local ids in a local directory server (think in terms of a
7821 * XXX caching DNS server).
7822 */
7823
7824 if (!error) {
7825 *ismember = result;
7826 }
7827 return error;
7828 }
7829
7830 static int
7831 vauth_file_owner(vauth_ctx vcp)
7832 {
7833 int result;
7834
7835 if (vcp->flags_valid & _VAC_IS_OWNER) {
7836 result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
7837 } else {
7838 result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);
7839
7840 /* cache our result */
7841 vcp->flags_valid |= _VAC_IS_OWNER;
7842 if (result) {
7843 vcp->flags |= _VAC_IS_OWNER;
7844 } else {
7845 vcp->flags &= ~_VAC_IS_OWNER;
7846 }
7847 }
7848 return result;
7849 }
7850
7851
7852 /*
7853 * vauth_file_ingroup
7854 *
7855 * Description: Ask if a user is a member of the group owning the file
7856 *
7857 * Parameters: vcp The vnode authorization context that
7858 * contains the user and file info
7859 * vcp->flags_valid Valid flags
7860 * vcp->flags Flags values
7861 * vcp->vap File vnode attributes
7862 * vcp->ctx VFS Context (for user)
7863 * ismember pointer to where to put the answer
7864 * idontknow Return this if we can't get an answer
7865 *
7866 * Returns: 0 Success
7867 * vauth_node_group:? Error from vauth_node_group()
7868 *
7869 * Implicit returns: *ismember 0 The user is not a group member
7870 * 1 The user is a group member
7871 */
7872 static int
7873 vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
7874 {
7875 int error;
7876
7877 /* Check for a cached answer first, to avoid the check if possible */
7878 if (vcp->flags_valid & _VAC_IN_GROUP) {
7879 *ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
7880 error = 0;
7881 } else {
7882 /* Otherwise, go look for it */
7883 error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember, idontknow);
7884
7885 if (!error) {
7886 /* cache our result */
7887 vcp->flags_valid |= _VAC_IN_GROUP;
7888 if (*ismember) {
7889 vcp->flags |= _VAC_IN_GROUP;
7890 } else {
7891 vcp->flags &= ~_VAC_IN_GROUP;
7892 }
7893 }
7894 }
7895 return error;
7896 }
7897
7898 static int
7899 vauth_dir_owner(vauth_ctx vcp)
7900 {
7901 int result;
7902
7903 if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
7904 result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
7905 } else {
7906 result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);
7907
7908 /* cache our result */
7909 vcp->flags_valid |= _VAC_IS_DIR_OWNER;
7910 if (result) {
7911 vcp->flags |= _VAC_IS_DIR_OWNER;
7912 } else {
7913 vcp->flags &= ~_VAC_IS_DIR_OWNER;
7914 }
7915 }
7916 return result;
7917 }
7918
7919 /*
7920 * vauth_dir_ingroup
7921 *
7922 * Description: Ask if a user is a member of the group owning the directory
7923 *
7924 * Parameters: vcp The vnode authorization context that
7925 * contains the user and directory info
7926 * vcp->flags_valid Valid flags
7927 * vcp->flags Flags values
7928 * vcp->dvap Dir vnode attributes
7929 * vcp->ctx VFS Context (for user)
7930 * ismember pointer to where to put the answer
7931 * idontknow Return this if we can't get an answer
7932 *
7933 * Returns: 0 Success
7934 * vauth_node_group:? Error from vauth_node_group()
7935 *
7936 * Implicit returns: *ismember 0 The user is not a group member
7937 * 1 The user is a group member
7938 */
7939 static int
7940 vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
7941 {
7942 int error;
7943
7944 /* Check for a cached answer first, to avoid the check if possible */
7945 if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
7946 *ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
7947 error = 0;
7948 } else {
7949 /* Otherwise, go look for it */
7950 error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember, idontknow);
7951
7952 if (!error) {
7953 /* cache our result */
7954 vcp->flags_valid |= _VAC_IN_DIR_GROUP;
7955 if (*ismember) {
7956 vcp->flags |= _VAC_IN_DIR_GROUP;
7957 } else {
7958 vcp->flags &= ~_VAC_IN_DIR_GROUP;
7959 }
7960 }
7961 }
7962 return error;
7963 }
7964
7965 /*
7966 * Test the posix permissions in (vap) to determine whether (credential)
7967 * may perform (action)
7968 */
7969 static int
7970 vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
7971 {
7972 struct vnode_attr *vap;
7973 int needed, error, owner_ok, group_ok, world_ok, ismember;
7974 #ifdef KAUTH_DEBUG_ENABLE
7975 const char *where = "uninitialized";
7976 # define _SETWHERE(c) where = c;
7977 #else
7978 # define _SETWHERE(c)
7979 #endif
7980
7981 /* checking file or directory? */
7982 if (on_dir) {
7983 vap = vcp->dvap;
7984 } else {
7985 vap = vcp->vap;
7986 }
7987
7988 error = 0;
7989
7990 /*
7991 * We want to do as little work here as possible. So first we check
7992 * which sets of permissions grant us the access we need, and avoid checking
7993 * whether specific permissions grant access when more generic ones would.
7994 */
7995
7996 /* owner permissions */
7997 needed = 0;
7998 if (action & VREAD) {
7999 needed |= S_IRUSR;
8000 }
8001 if (action & VWRITE) {
8002 needed |= S_IWUSR;
8003 }
8004 if (action & VEXEC) {
8005 needed |= S_IXUSR;
8006 }
8007 owner_ok = (needed & vap->va_mode) == needed;
8008
8009 /* group permissions */
8010 needed = 0;
8011 if (action & VREAD) {
8012 needed |= S_IRGRP;
8013 }
8014 if (action & VWRITE) {
8015 needed |= S_IWGRP;
8016 }
8017 if (action & VEXEC) {
8018 needed |= S_IXGRP;
8019 }
8020 group_ok = (needed & vap->va_mode) == needed;
8021
8022 /* world permissions */
8023 needed = 0;
8024 if (action & VREAD) {
8025 needed |= S_IROTH;
8026 }
8027 if (action & VWRITE) {
8028 needed |= S_IWOTH;
8029 }
8030 if (action & VEXEC) {
8031 needed |= S_IXOTH;
8032 }
8033 world_ok = (needed & vap->va_mode) == needed;
8034
8035 /* If granted/denied by all three, we're done */
8036 if (owner_ok && group_ok && world_ok) {
8037 _SETWHERE("all");
8038 goto out;
8039 }
8040 if (!owner_ok && !group_ok && !world_ok) {
8041 _SETWHERE("all");
8042 error = EACCES;
8043 goto out;
8044 }
8045
8046 /* Check ownership (relatively cheap) */
8047 if ((on_dir && vauth_dir_owner(vcp)) ||
8048 (!on_dir && vauth_file_owner(vcp))) {
8049 _SETWHERE("user");
8050 if (!owner_ok) {
8051 error = EACCES;
8052 }
8053 goto out;
8054 }
8055
8056 /* Not owner; if group and world both grant it we're done */
8057 if (group_ok && world_ok) {
8058 _SETWHERE("group/world");
8059 goto out;
8060 }
8061 if (!group_ok && !world_ok) {
8062 _SETWHERE("group/world");
8063 error = EACCES;
8064 goto out;
8065 }
8066
8067 /* Check group membership (most expensive) */
8068 ismember = 0; /* Default to allow, if the target has no group owner */
8069
8070 /*
8071 * In the case we can't get an answer about the user from the call to
8072 * vauth_dir_ingroup() or vauth_file_ingroup(), we want to fail on
8073 * the side of caution, rather than simply granting access, or we will
8074 * fail to correctly implement exclusion groups, so we set the third
8075 * parameter on the basis of the state of 'group_ok'.
8076 */
8077 if (on_dir) {
8078 error = vauth_dir_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
8079 } else {
8080 error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
8081 }
8082 if (error) {
8083 if (!group_ok) {
8084 ismember = 1;
8085 }
8086 error = 0;
8087 }
8088 if (ismember) {
8089 _SETWHERE("group");
8090 if (!group_ok) {
8091 error = EACCES;
8092 }
8093 goto out;
8094 }
8095
8096 /* Not owner, not in group, use world result */
8097 _SETWHERE("world");
8098 if (!world_ok) {
8099 error = EACCES;
8100 }
8101
8102 /* FALLTHROUGH */
8103
8104 out:
8105 KAUTH_DEBUG("%p %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
8106 vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
8107 (action & VREAD) ? "r" : "-",
8108 (action & VWRITE) ? "w" : "-",
8109 (action & VEXEC) ? "x" : "-",
8110 needed,
8111 (vap->va_mode & S_IRUSR) ? "r" : "-",
8112 (vap->va_mode & S_IWUSR) ? "w" : "-",
8113 (vap->va_mode & S_IXUSR) ? "x" : "-",
8114 (vap->va_mode & S_IRGRP) ? "r" : "-",
8115 (vap->va_mode & S_IWGRP) ? "w" : "-",
8116 (vap->va_mode & S_IXGRP) ? "x" : "-",
8117 (vap->va_mode & S_IROTH) ? "r" : "-",
8118 (vap->va_mode & S_IWOTH) ? "w" : "-",
8119 (vap->va_mode & S_IXOTH) ? "x" : "-",
8120 kauth_cred_getuid(vcp->ctx->vc_ucred),
8121 on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
8122 on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
8123 return error;
8124 }
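/*
 * Illustrative sketch (not part of the original file): the class-selection
 * logic of vnode_authorize_posix() in miniature. For each of owner, group
 * and other we compute the mode bits the action needs, then test them
 * against the mode. This is a hedged userspace example; the XREAD/XWRITE/
 * XEXEC macros are hypothetical stand-ins for the kernel's VREAD/VWRITE/
 * VEXEC.
 */
#if 0 /* example only, excluded from the build */
#include <sys/stat.h>
#include <stdio.h>

#define XREAD   0x1
#define XWRITE  0x2
#define XEXEC   0x4

/* Mode bits one permission class must contain to satisfy 'action'. */
static mode_t
needed_bits(int action, mode_t r, mode_t w, mode_t x)
{
	mode_t needed = 0;

	if (action & XREAD) {
		needed |= r;
	}
	if (action & XWRITE) {
		needed |= w;
	}
	if (action & XEXEC) {
		needed |= x;
	}
	return needed;
}

int
main(void)
{
	mode_t mode = 0754;               /* rwxr-xr-- */
	int action = XREAD | XEXEC;       /* e.g. a directory search */
	mode_t u = needed_bits(action, S_IRUSR, S_IWUSR, S_IXUSR);
	mode_t g = needed_bits(action, S_IRGRP, S_IWGRP, S_IXGRP);
	mode_t o = needed_bits(action, S_IROTH, S_IWOTH, S_IXOTH);

	/* prints "owner 1 group 1 world 0" for the values above */
	printf("owner %d group %d world %d\n",
	    (mode & u) == u, (mode & g) == g, (mode & o) == o);
	return 0;
}
#endif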
8125
8126 /*
8127 * Authorize the deletion of the node vp from the directory dvp.
8128 *
8129 * We assume that:
8130 * - Neither the node nor the directory are immutable.
8131 * - The user is not the superuser.
8132 *
8133 * The precedence of factors for authorizing or denying delete for a credential
8134 *
8135 * 1) Explicit ACE on the node. (allow or deny DELETE)
8136 * 2) Explicit ACE on the directory (allow or deny DELETE_CHILD).
8137 *
8138 * If there are conflicting ACEs on the node and the directory, the node
8139 * ACE wins.
8140 *
8141 * 3) Sticky bit on the directory.
8142 * Deletion is not permitted if the directory is sticky and the caller is
8143 * not the owner of the node or directory. The sticky bit rules act like a
8144 * deny-delete ACE, but rank lower in priority than ACLs that either allow
8145 * or deny delete.
8146 *
8147 * 4) POSIX permissions on the directory.
8148 *
8149 * As an optimization, we cache whether or not delete child is permitted
8150 * on directories. This enables us to skip directory ACL and POSIX checks
8151 * as we already have the result from those checks. However, we always check the
8152 * node ACL and, if the directory has the sticky bit set, we always check its
8153 * ACL (even for a directory with an authorized delete child). Furthermore,
8154 * caching the delete child authorization is independent of the sticky bit
8155 * being set as it is only applicable in determining whether the node can be
8156 * deleted or not.
8157 */
8158 static int
8159 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
8160 {
8161 struct vnode_attr *vap = vcp->vap;
8162 struct vnode_attr *dvap = vcp->dvap;
8163 kauth_cred_t cred = vcp->ctx->vc_ucred;
8164 struct kauth_acl_eval eval;
8165 int error, ismember;
8166
8167 /* Check the ACL on the node first */
8168 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
8169 eval.ae_requested = KAUTH_VNODE_DELETE;
8170 eval.ae_acl = &vap->va_acl->acl_ace[0];
8171 eval.ae_count = vap->va_acl->acl_entrycount;
8172 eval.ae_options = 0;
8173 if (vauth_file_owner(vcp)) {
8174 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
8175 }
8176 /*
8177 * We use ENOENT as a marker to indicate we could not get
8178 * information in order to delay evaluation until after we
8179 * have the ACL evaluation answer. Previously, we would
8180 * always deny the operation at this point.
8181 */
8182 if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
8183 return error;
8184 }
8185 if (error == ENOENT) {
8186 eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
8187 } else if (ismember) {
8188 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
8189 }
8190 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
8191 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
8192 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
8193 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
8194
8195 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
8196 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
8197 return error;
8198 }
8199
8200 switch (eval.ae_result) {
8201 case KAUTH_RESULT_DENY:
8202 KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp);
8203 return EACCES;
8204 case KAUTH_RESULT_ALLOW:
8205 KAUTH_DEBUG("%p ALLOWED - granted by ACL", vcp->vp);
8206 return 0;
8207 case KAUTH_RESULT_DEFER:
8208 default:
8209 /* Defer to directory */
8210 KAUTH_DEBUG("%p DEFERRED - by file ACL", vcp->vp);
8211 break;
8212 }
8213 }
8214
8215 /*
8216 * Without a sticky bit, a previously authorized delete child is
8217 * sufficient to authorize this delete.
8218 *
8219 * If the sticky bit is set, a directory ACL which allows delete child
8220 * overrides a (potential) sticky bit deny. The authorized delete child
8221 * cannot tell us if it was authorized because of an explicit delete
8222 * child allow ACE or because of POSIX permissions, so we have to check
8223 * the directory ACL every time the directory has the sticky bit set.
8224 */
8225 if (!(dvap->va_mode & S_ISTXT) && cached_delete_child) {
8226 KAUTH_DEBUG("%p ALLOWED - granted by directory ACL or POSIX permissions and no sticky bit on directory", vcp->vp);
8227 return 0;
8228 }
8229
8230 /* check the ACL on the directory */
8231 if (VATTR_IS_NOT(dvap, va_acl, NULL)) {
8232 eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
8233 eval.ae_acl = &dvap->va_acl->acl_ace[0];
8234 eval.ae_count = dvap->va_acl->acl_entrycount;
8235 eval.ae_options = 0;
8236 if (vauth_dir_owner(vcp)) {
8237 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
8238 }
8239 /*
8240 * We use ENOENT as a marker to indicate we could not get
8241 * information in order to delay evaluation until after we
8242 * have the ACL evaluation answer. Previously, we would
8243 * always deny the operation at this point.
8244 */
8245 if ((error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
8246 return error;
8247 }
8248 if (error == ENOENT) {
8249 eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
8250 } else if (ismember) {
8251 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
8252 }
8253 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
8254 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
8255 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
8256 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
8257
8258 /*
8259 * If there is no entry, we are going to defer to other
8260 * authorization mechanisms.
8261 */
8262 error = kauth_acl_evaluate(cred, &eval);
8263
8264 if (error != 0) {
8265 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
8266 return error;
8267 }
8268 switch (eval.ae_result) {
8269 case KAUTH_RESULT_DENY:
8270 KAUTH_DEBUG("%p DENIED - denied by directory ACL", vcp->vp);
8271 return EACCES;
8272 case KAUTH_RESULT_ALLOW:
8273 KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp);
8274 if (!cached_delete_child && vcp->dvp) {
8275 vnode_cache_authorized_action(vcp->dvp,
8276 vcp->ctx, KAUTH_VNODE_DELETE_CHILD);
8277 }
8278 return 0;
8279 case KAUTH_RESULT_DEFER:
8280 default:
8281 /* Deferred by directory ACL */
8282 KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
8283 break;
8284 }
8285 }
8286
8287 /*
8288 * From this point, we can't explicitly allow and if we reach the end
8289 * of the function without a denial, then the delete is authorized.
8290 */
8291 if (!cached_delete_child) {
8292 if (vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */) != 0) {
8293 KAUTH_DEBUG("%p DENIED - denied by posix permisssions", vcp->vp);
8294 return EACCES;
8295 }
8296 /*
8297 * Cache the authorized action on the vnode if allowed by the
8298 * directory ACL or POSIX permissions. It is correct to cache
8299 * this action even if sticky bit would deny deleting the node.
8300 */
8301 if (vcp->dvp) {
8302 vnode_cache_authorized_action(vcp->dvp, vcp->ctx,
8303 KAUTH_VNODE_DELETE_CHILD);
8304 }
8305 }
8306
8307 /* enforce sticky bit behaviour */
8308 if ((dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
8309 KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)",
8310 vcp->vp, cred->cr_posix.cr_uid, vap->va_uid, dvap->va_uid);
8311 return EACCES;
8312 }
8313
8314 /* not denied, must be OK */
8315 return 0;
8316 }
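/*
 * Illustrative sketch (not part of the original file): the precedence that
 * vnode_authorize_delete() implements, condensed into one hypothetical
 * helper. ACE verdicts come first (node over directory), then the POSIX
 * write check on the directory, then the sticky-bit veto; note a directory
 * ACE allow deliberately short-circuits the sticky check, mirroring the
 * comment above. All names below are made up for the example.
 */
#if 0 /* example only, excluded from the build */
#include <errno.h>

enum verdict { V_ALLOW, V_DENY, V_DEFER };

static int /* 0 if allowed, EACCES if denied */
delete_precedence(enum verdict node_ace, enum verdict dir_ace,
    int dir_sticky, int owns_node_or_dir, int posix_write_on_dir)
{
	if (node_ace != V_DEFER) {              /* 1) node ACE wins outright */
		return (node_ace == V_ALLOW) ? 0 : EACCES;
	}
	if (dir_ace != V_DEFER) {               /* 2) then the directory ACE */
		return (dir_ace == V_ALLOW) ? 0 : EACCES;
	}
	if (!posix_write_on_dir) {              /* 4) POSIX write on the dir */
		return EACCES;
	}
	if (dir_sticky && !owns_node_or_dir) {  /* 3) sticky-bit veto */
		return EACCES;
	}
	return 0;
}
#endif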
8317
8318
8319 /*
8320 * Authorize an operation based on the node's attributes.
8321 */
8322 static int
8323 vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
8324 {
8325 struct vnode_attr *vap = vcp->vap;
8326 kauth_cred_t cred = vcp->ctx->vc_ucred;
8327 struct kauth_acl_eval eval;
8328 int error, ismember;
8329 mode_t posix_action;
8330
8331 /*
8332 * If we are the file owner, we automatically have some rights.
8333 *
8334 * Do we need to expand this to support group ownership?
8335 */
8336 if (vauth_file_owner(vcp)) {
8337 acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);
8338 }
8339
8340 /*
8341 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
8342 * mask the latter. If TAKE_OWNERSHIP is requested the caller is about to
8343 * change ownership to themselves, and WRITE_SECURITY is implicitly
8344 * granted to the owner. We need to do this because at this point
8345 * WRITE_SECURITY may not be granted as the caller is not currently
8346 * the owner.
8347 */
8348 if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
8349 (acl_rights & KAUTH_VNODE_WRITE_SECURITY)) {
8350 acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;
8351 }
8352
8353 if (acl_rights == 0) {
8354 KAUTH_DEBUG("%p ALLOWED - implicit or no rights required", vcp->vp);
8355 return 0;
8356 }
8357
8358 /* if we have an ACL, evaluate it */
8359 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
8360 eval.ae_requested = acl_rights;
8361 eval.ae_acl = &vap->va_acl->acl_ace[0];
8362 eval.ae_count = vap->va_acl->acl_entrycount;
8363 eval.ae_options = 0;
8364 if (vauth_file_owner(vcp)) {
8365 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
8366 }
8367 /*
8368 * We use ENOENT as a marker to indicate we could not get
8369 * information in order to delay evaluation until after we
8370 * have the ACL evaluation answer. Previously, we would
8371 * always deny the operation at this point.
8372 */
8373 if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
8374 return error;
8375 }
8376 if (error == ENOENT) {
8377 eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
8378 } else if (ismember) {
8379 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
8380 }
8381 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
8382 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
8383 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
8384 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
8385
8386 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
8387 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
8388 return error;
8389 }
8390
8391 switch (eval.ae_result) {
8392 case KAUTH_RESULT_DENY:
8393 KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp);
8394 return EACCES; /* deny, deny, counter-allege */
8395 case KAUTH_RESULT_ALLOW:
8396 KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp);
8397 return 0;
8398 case KAUTH_RESULT_DEFER:
8399 default:
8400 /* Defer to the residual-rights evaluation below */
8401 KAUTH_DEBUG("%p DEFERRED - by file ACL", vcp->vp);
8402 break;
8403 }
8404
8405 *found_deny = eval.ae_found_deny;
8406
8407 /* fall through and evaluate residual rights */
8408 } else {
8409 /* no ACL, everything is residual */
8410 eval.ae_residual = acl_rights;
8411 }
8412
8413 /*
8414 * Grant residual rights that have been pre-authorized.
8415 */
8416 eval.ae_residual &= ~preauth_rights;
8417
8418 /*
8419 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
8420 */
8421 if (vauth_file_owner(vcp)) {
8422 eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;
8423 }
8424
8425 if (eval.ae_residual == 0) {
8426 KAUTH_DEBUG("%p ALLOWED - rights already authorized", vcp->vp);
8427 return 0;
8428 }
8429
8430 /*
8431 * Bail if we have residual rights that can't be granted by posix permissions,
8432 * or aren't presumed granted at this point.
8433 *
8434 * XXX these can be collapsed for performance
8435 */
8436 if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
8437 KAUTH_DEBUG("%p DENIED - CHANGE_OWNER not permitted", vcp->vp);
8438 return EACCES;
8439 }
8440 if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
8441 KAUTH_DEBUG("%p DENIED - WRITE_SECURITY not permitted", vcp->vp);
8442 return EACCES;
8443 }
8444
8445 #if DIAGNOSTIC
8446 if (eval.ae_residual & KAUTH_VNODE_DELETE) {
8447 panic("vnode_authorize: can't be checking delete permission here");
8448 }
8449 #endif
8450
8451 /*
8452 * Compute the fallback posix permissions that will satisfy the remaining
8453 * rights.
8454 */
8455 posix_action = 0;
8456 if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
8457 KAUTH_VNODE_LIST_DIRECTORY |
8458 KAUTH_VNODE_READ_EXTATTRIBUTES)) {
8459 posix_action |= VREAD;
8460 }
8461 if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
8462 KAUTH_VNODE_ADD_FILE |
8463 KAUTH_VNODE_ADD_SUBDIRECTORY |
8464 KAUTH_VNODE_DELETE_CHILD |
8465 KAUTH_VNODE_WRITE_ATTRIBUTES |
8466 KAUTH_VNODE_WRITE_EXTATTRIBUTES)) {
8467 posix_action |= VWRITE;
8468 }
8469 if (eval.ae_residual & (KAUTH_VNODE_EXECUTE |
8470 KAUTH_VNODE_SEARCH)) {
8471 posix_action |= VEXEC;
8472 }
8473
8474 if (posix_action != 0) {
8475 return vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */);
8476 } else {
8477 KAUTH_DEBUG("%p ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
8478 vcp->vp,
8479 (eval.ae_residual & KAUTH_VNODE_READ_DATA)
8480 ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
8481 (eval.ae_residual & KAUTH_VNODE_WRITE_DATA)
8482 ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
8483 (eval.ae_residual & KAUTH_VNODE_EXECUTE)
8484 ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
8485 (eval.ae_residual & KAUTH_VNODE_DELETE)
8486 ? " DELETE" : "",
8487 (eval.ae_residual & KAUTH_VNODE_APPEND_DATA)
8488 ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
8489 (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD)
8490 ? " DELETE_CHILD" : "",
8491 (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES)
8492 ? " READ_ATTRIBUTES" : "",
8493 (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES)
8494 ? " WRITE_ATTRIBUTES" : "",
8495 (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES)
8496 ? " READ_EXTATTRIBUTES" : "",
8497 (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES)
8498 ? " WRITE_EXTATTRIBUTES" : "",
8499 (eval.ae_residual & KAUTH_VNODE_READ_SECURITY)
8500 ? " READ_SECURITY" : "",
8501 (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY)
8502 ? " WRITE_SECURITY" : "",
8503 (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE)
8504 ? " CHECKIMMUTABLE" : "",
8505 (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER)
8506 ? " CHANGE_OWNER" : "");
8507 }
8508
8509 /*
8510 * Lack of required POSIX permissions implies no reason to deny access.
8511 */
8512 return 0;
8513 }
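/*
 * Illustrative sketch (not part of the original file): the residual-rights
 * funnel in vnode_authorize_simple(), reduced to bit arithmetic. Rights the
 * ACL did not settle are stripped of anything pre-authorized or owner-
 * implied, and whatever survives is mapped onto a POSIX action. The R_*
 * and P_* macros are hypothetical stand-ins for the KAUTH_VNODE_* and
 * VREAD/VWRITE/VEXEC bits.
 */
#if 0 /* example only, excluded from the build */
#include <stdint.h>

#define R_READ_DATA          0x01
#define R_WRITE_DATA         0x02
#define R_SEARCH             0x04
#define R_WRITE_ATTRIBUTES   0x08

#define P_READ   04
#define P_WRITE  02
#define P_EXEC   01

static int
residual_to_posix(uint32_t residual, uint32_t preauth, int is_owner)
{
	int posix_action = 0;

	residual &= ~preauth;                    /* already authorized */
	if (is_owner) {
		residual &= ~R_WRITE_ATTRIBUTES; /* implied by ownership */
	}
	if (residual & R_READ_DATA) {
		posix_action |= P_READ;
	}
	if (residual & R_WRITE_DATA) {
		posix_action |= P_WRITE;
	}
	if (residual & R_SEARCH) {
		posix_action |= P_EXEC;
	}
	return posix_action;   /* 0 means nothing left to check */
}
#endif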
8514
8515 /*
8516 * Check for file immutability.
8517 */
8518 static int
8519 vnode_authorize_checkimmutable(mount_t mp, struct vnode_attr *vap, int rights, int ignore)
8520 {
8521 int error;
8522 int append;
8523
8524 /*
8525 * Perform immutability checks for operations that change data.
8526 *
8527 * Sockets, fifos and devices require special handling.
8528 */
8529 switch (vap->va_type) {
8530 case VSOCK:
8531 case VFIFO:
8532 case VBLK:
8533 case VCHR:
8534 /*
8535 * Writing to these nodes does not change the filesystem data,
8536 * so forget that it's being tried.
8537 */
8538 rights &= ~KAUTH_VNODE_WRITE_DATA;
8539 break;
8540 default:
8541 break;
8542 }
8543
8544 error = 0;
8545 if (rights & KAUTH_VNODE_WRITE_RIGHTS) {
8546 /* check per-filesystem options if possible */
8547 if (mp != NULL) {
8548 /* check for no-EA filesystems */
8549 if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) &&
8550 (vfs_flags(mp) & MNT_NOUSERXATTR)) {
8551 KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vap);
8552 error = EACCES; /* User attributes disabled */
8553 goto out;
8554 }
8555 }
8556
8557 /*
8558 * check for file immutability. first, check if the requested rights are
8559 * allowable for a UF_APPEND file.
8560 */
8561 append = 0;
8562 if (vap->va_type == VDIR) {
8563 if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights) {
8564 append = 1;
8565 }
8566 } else {
8567 if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights) {
8568 append = 1;
8569 }
8570 }
8571 if ((error = vnode_immutable(vap, append, ignore)) != 0) {
8572 KAUTH_DEBUG("%p DENIED - file is immutable", vap);
8573 goto out;
8574 }
8575 }
8576 out:
8577 return error;
8578 }
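/*
 * Illustrative sketch (not part of the original file): the UF_APPEND test
 * above is a subset check - the request passes the "append" gate only if
 * every requested bit is an append-safe bit. Hypothetical R_* stand-ins
 * again.
 */
#if 0 /* example only, excluded from the build */
#include <stdint.h>

#define R_ADD_FILE        0x01
#define R_ADD_SUBDIR      0x02
#define R_APPEND_DATA     0x04
#define R_WRITE_EXTATTR   0x08
#define R_WRITE_DATA      0x10

static int
rights_are_append_only(uint32_t rights, int is_dir)
{
	uint32_t ok = is_dir
	    ? (R_ADD_FILE | R_ADD_SUBDIR | R_WRITE_EXTATTR)
	    : (R_APPEND_DATA | R_WRITE_EXTATTR);

	/* (rights & ok) == rights <=> no bit outside the append-safe set */
	return (rights & ok) == rights;
}
/*
 * rights_are_append_only(R_APPEND_DATA, 0)                == 1
 * rights_are_append_only(R_APPEND_DATA | R_WRITE_DATA, 0) == 0
 */
#endif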
8579
8580 /*
8581 * Handle authorization actions for filesystems that advertise that the
8582 * server will be enforcing.
8583 *
8584 * Returns: 0 Authorization should be handled locally
8585 * 1 Authorization was handled by the FS
8586 *
8587 * Note: Imputed returns will only occur if the authorization request
8588 * was handled by the FS.
8589 *
8590 * Imputed: *resultp, modified Return code from FS when the request is
8591 * handled by the FS.
8592 * VNOP_ACCESS:???
8593 * VNOP_OPEN:???
8594 */
8595 static int
8596 vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
8597 {
8598 int error;
8599
8600 /*
8601 * If the vp is a device node, socket or FIFO it actually represents a local
8602 * endpoint, so we need to handle it locally.
8603 */
8604 switch (vp->v_type) {
8605 case VBLK:
8606 case VCHR:
8607 case VSOCK:
8608 case VFIFO:
8609 return 0;
8610 default:
8611 break;
8612 }
8613
8614 /*
8615 * In the advisory request case, if the filesystem doesn't think it's reliable
8616 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
8617 */
8618 if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount)) {
8619 return 0;
8620 }
8621
8622 /*
8623 * Let the filesystem have a say in the matter. It's OK for it to not implement
8624 * VNOP_ACCESS, as most will authorise inline with the actual request.
8625 */
8626 if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
8627 *resultp = error;
8628 KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
8629 return 1;
8630 }
8631
8632 /*
8633 * Typically opaque filesystems do authorisation in-line, but exec is a special case. In
8634 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
8635 */
8636 if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
8637 /* try a VNOP_OPEN for readonly access */
8638 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
8639 *resultp = error;
8640 KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp);
8641 return 1;
8642 }
8643 VNOP_CLOSE(vp, FREAD, ctx);
8644 }
8645
8646 /*
8647 * We don't have any reason to believe that the request has to be denied at this point,
8648 * so go ahead and allow it.
8649 */
8650 *resultp = 0;
8651 KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp);
8652 return 1;
8653 }
8654
8655
8656
8657
8658 /*
8659 * Returns: KAUTH_RESULT_ALLOW
8660 * KAUTH_RESULT_DENY
8661 *
8662 * Imputed: *arg3, modified Error code in the deny case
8663 * EROFS Read-only file system
8664 * EACCES Permission denied
8665 * EPERM Operation not permitted [no execute]
8666 * vnode_getattr:ENOMEM Not enough space [only if has filesec]
8667 * vnode_getattr:???
8668 * vnode_authorize_opaque:*arg2 ???
8669 * vnode_authorize_checkimmutable:???
8670 * vnode_authorize_delete:???
8671 * vnode_authorize_simple:???
8672 */
8673
8674
8675 static int
8676 vnode_authorize_callback(__unused kauth_cred_t cred, __unused void *idata,
8677 kauth_action_t action, uintptr_t arg0, uintptr_t arg1, uintptr_t arg2,
8678 uintptr_t arg3)
8679 {
8680 vfs_context_t ctx;
8681 vnode_t cvp = NULLVP;
8682 vnode_t vp, dvp;
8683 int result = KAUTH_RESULT_DENY;
8684 int parent_iocount = 0;
8685 int parent_action; /* In case we need to use namedstream's data fork for cached rights*/
8686
8687 ctx = (vfs_context_t)arg0;
8688 vp = (vnode_t)arg1;
8689 dvp = (vnode_t)arg2;
8690
8691 /*
8692 * if there are 2 vnodes passed in, we don't know at
8693 * this point which rights to look at based on the
8694 * combined action being passed in... defer until later...
8695 * otherwise check the kauth 'rights' cache hung
8696 * off of the vnode we're interested in... if we've already
8697 * been granted the right we're currently interested in,
8698 * we can just return success... otherwise we'll go through
8699 * the process of authorizing the requested right(s)... if that
8700 * succeeds, we'll add the right(s) to the cache.
8701 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
8702 */
8703 if (dvp && vp) {
8704 goto defer;
8705 }
8706 if (dvp) {
8707 cvp = dvp;
8708 } else {
8709 /*
8710 * For named streams on local-authorization volumes, rights are cached on the parent;
8711 * authorization is determined by looking at the parent's properties anyway, so storing
8712 * on the parent means that we don't recompute for the named stream and that if
8713 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
8714 * stream to flush its cache separately. If we miss in the cache, then we authorize
8715 * as if there were no cached rights (passing the named stream vnode and desired rights to
8716 * vnode_authorize_callback_int()).
8717 *
8718 * On an opaquely authorized volume, we don't know the relationship between the
8719 * data fork's properties and the rights granted on a stream. Thus, named stream vnodes
8720 * on such a volume are authorized directly (rather than using the parent) and have their
8721 * own caches. When a named stream vnode is created, we mark the parent as having a named
8722 * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
8723 * find the stream and flush its cache.
8724 */
8725 if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
8726 cvp = vnode_getparent(vp);
8727 if (cvp != NULLVP) {
8728 parent_iocount = 1;
8729 } else {
8730 cvp = NULL;
8731 goto defer; /* If we can't use the parent, take the slow path */
8732 }
8733
8734 /* Have to translate some actions */
8735 parent_action = action;
8736 if (parent_action & KAUTH_VNODE_READ_DATA) {
8737 parent_action &= ~KAUTH_VNODE_READ_DATA;
8738 parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
8739 }
8740 if (parent_action & KAUTH_VNODE_WRITE_DATA) {
8741 parent_action &= ~KAUTH_VNODE_WRITE_DATA;
8742 parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
8743 }
8744 } else {
8745 cvp = vp;
8746 }
8747 }
8748
8749 if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
8750 result = KAUTH_RESULT_ALLOW;
8751 goto out;
8752 }
8753 defer:
8754 result = vnode_authorize_callback_int(action, ctx, vp, dvp, (int *)arg3);
8755
8756 if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP) {
8757 KAUTH_DEBUG("%p - caching action = %x", cvp, action);
8758 vnode_cache_authorized_action(cvp, ctx, action);
8759 }
8760
8761 out:
8762 if (parent_iocount) {
8763 vnode_put(cvp);
8764 }
8765
8766 return result;
8767 }
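/*
 * Illustrative sketch (not part of the original file): the shape of the
 * check-cache / slow-path / populate-cache pattern that
 * vnode_authorize_callback() applies above. Every identifier below is
 * hypothetical.
 */
#if 0 /* example only, excluded from the build */
struct rights_cache;

extern int  cache_lookup(struct rights_cache *c, int action); /* 1 on hit */
extern void cache_insert(struct rights_cache *c, int action);

static int
authorize_cached(struct rights_cache *c, int action, int (*slow_path)(int))
{
	int error;

	if (cache_lookup(c, action)) {
		return 0;                /* previously granted, skip the work */
	}
	error = slow_path(action);       /* full ACL/POSIX evaluation */
	if (error == 0) {
		cache_insert(c, action); /* remember the grant for next time */
	}
	return error;
}
#endif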
8768
8769 static int
8770 vnode_attr_authorize_internal(vauth_ctx vcp, mount_t mp,
8771 kauth_ace_rights_t rights, int is_suser, boolean_t *found_deny,
8772 int noimmutable, int parent_authorized_for_delete_child)
8773 {
8774 int result;
8775
8776 /*
8777 * Check for immutability.
8778 *
8779 * In the deletion case, parent directory immutability vetoes specific
8780 * file rights.
8781 */
8782 if ((result = vnode_authorize_checkimmutable(mp, vcp->vap, rights,
8783 noimmutable)) != 0) {
8784 goto out;
8785 }
8786
8787 if ((rights & KAUTH_VNODE_DELETE) &&
8788 !parent_authorized_for_delete_child) {
8789 result = vnode_authorize_checkimmutable(mp, vcp->dvap,
8790 KAUTH_VNODE_DELETE_CHILD, 0);
8791 if (result) {
8792 goto out;
8793 }
8794 }
8795
8796 /*
8797 * Clear rights that have been authorized by reaching this point, bail if nothing left to
8798 * check.
8799 */
8800 rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE);
8801 if (rights == 0) {
8802 goto out;
8803 }
8804
8805 /*
8806 * If we're not the superuser, authorize based on file properties;
8807 * note that even if parent_authorized_for_delete_child is TRUE, we
8808 * need to check on the node itself.
8809 */
8810 if (!is_suser) {
8811 /* process delete rights */
8812 if ((rights & KAUTH_VNODE_DELETE) &&
8813 ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0)) {
8814 goto out;
8815 }
8816
8817 /* process remaining rights */
8818 if ((rights & ~KAUTH_VNODE_DELETE) &&
8819 (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, found_deny)) != 0) {
8820 goto out;
8821 }
8822 } else {
8823 /*
8824 * Execute is only granted to root if one of the x bits is set. This check only
8825 * makes sense if the posix mode bits are actually supported.
8826 */
8827 if ((rights & KAUTH_VNODE_EXECUTE) &&
8828 (vcp->vap->va_type == VREG) &&
8829 VATTR_IS_SUPPORTED(vcp->vap, va_mode) &&
8830 !(vcp->vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
8831 result = EPERM;
8832 KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vcp, vcp->vap->va_mode);
8833 goto out;
8834 }
8835
8836 /* Assume that there were DENYs so we don't wrongly cache KAUTH_VNODE_SEARCHBYANYONE */
8837 *found_deny = TRUE;
8838
8839 KAUTH_DEBUG("%p ALLOWED - caller is superuser", vcp);
8840 }
8841 out:
8842 return result;
8843 }
8844
8845 static int
8846 vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx,
8847 vnode_t vp, vnode_t dvp, int *errorp)
8848 {
8849 struct _vnode_authorize_context auth_context;
8850 vauth_ctx vcp;
8851 kauth_cred_t cred;
8852 kauth_ace_rights_t rights;
8853 struct vnode_attr va, dva;
8854 int result;
8855 int noimmutable;
8856 boolean_t parent_authorized_for_delete_child = FALSE;
8857 boolean_t found_deny = FALSE;
8858 boolean_t parent_ref = FALSE;
8859 boolean_t is_suser = FALSE;
8860
8861 vcp = &auth_context;
8862 vcp->ctx = ctx;
8863 vcp->vp = vp;
8864 vcp->dvp = dvp;
8865 /*
8866 * Note that we authorize against the context, not the passed cred
8867 * (the same thing anyway)
8868 */
8869 cred = ctx->vc_ucred;
8870
8871 VATTR_INIT(&va);
8872 vcp->vap = &va;
8873 VATTR_INIT(&dva);
8874 vcp->dvap = &dva;
8875
8876 vcp->flags = vcp->flags_valid = 0;
8877
8878 #if DIAGNOSTIC
8879 if ((ctx == NULL) || (vp == NULL) || (cred == NULL)) {
8880 panic("vnode_authorize: bad arguments (context %p vp %p cred %p)", ctx, vp, cred);
8881 }
8882 #endif
8883
8884 KAUTH_DEBUG("%p AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
8885 vp, vfs_context_proc(ctx)->p_comm,
8886 (action & KAUTH_VNODE_ACCESS) ? "access" : "auth",
8887 (action & KAUTH_VNODE_READ_DATA) ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
8888 (action & KAUTH_VNODE_WRITE_DATA) ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
8889 (action & KAUTH_VNODE_EXECUTE) ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
8890 (action & KAUTH_VNODE_DELETE) ? " DELETE" : "",
8891 (action & KAUTH_VNODE_APPEND_DATA) ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
8892 (action & KAUTH_VNODE_DELETE_CHILD) ? " DELETE_CHILD" : "",
8893 (action & KAUTH_VNODE_READ_ATTRIBUTES) ? " READ_ATTRIBUTES" : "",
8894 (action & KAUTH_VNODE_WRITE_ATTRIBUTES) ? " WRITE_ATTRIBUTES" : "",
8895 (action & KAUTH_VNODE_READ_EXTATTRIBUTES) ? " READ_EXTATTRIBUTES" : "",
8896 (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES) ? " WRITE_EXTATTRIBUTES" : "",
8897 (action & KAUTH_VNODE_READ_SECURITY) ? " READ_SECURITY" : "",
8898 (action & KAUTH_VNODE_WRITE_SECURITY) ? " WRITE_SECURITY" : "",
8899 (action & KAUTH_VNODE_CHANGE_OWNER) ? " CHANGE_OWNER" : "",
8900 (action & KAUTH_VNODE_NOIMMUTABLE) ? " (noimmutable)" : "",
8901 vnode_isdir(vp) ? "directory" : "file",
8902 vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);
8903
8904 /*
8905 * Extract the control bits from the action, everything else is
8906 * requested rights.
8907 */
8908 noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
8909 rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);
8910
8911 if (rights & KAUTH_VNODE_DELETE) {
8912 #if DIAGNOSTIC
8913 if (dvp == NULL) {
8914 panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
8915 }
8916 #endif
8917 /*
8918 * check to see if we've already authorized the parent
8919 * directory for deletion of its children... if so, we
8920 * can skip a whole bunch of work... we will still have to
8921 * authorize that this specific child can be removed
8922 */
8923 if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE) {
8924 parent_authorized_for_delete_child = TRUE;
8925 }
8926 } else {
8927 vcp->dvp = NULLVP;
8928 vcp->dvap = NULL;
8929 }
8930
8931 /*
8932 * Check for read-only filesystems.
8933 */
8934 if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
8935 (vp->v_mount->mnt_flag & MNT_RDONLY) &&
8936 ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
8937 (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
8938 (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) {
8939 result = EROFS;
8940 goto out;
8941 }
8942
8943 /*
8944 * Check for noexec filesystems.
8945 */
8946 if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
8947 result = EACCES;
8948 goto out;
8949 }
8950
8951 /*
8952 * Handle cases related to filesystems with non-local enforcement.
8953 * This call can return 0, in which case we will fall through to perform a
8954 * check based on VNOP_GETATTR data. Otherwise it returns 1 and sets
8955 * an appropriate result, at which point we can return immediately.
8956 */
8957 if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx)) {
8958 goto out;
8959 }
8960
8961 /*
8962 * If the vnode is a namedstream (extended attribute) data vnode (eg.
8963 * a resource fork), *_DATA becomes *_EXTATTRIBUTES.
8964 */
8965 if (vnode_isnamedstream(vp)) {
8966 if (rights & KAUTH_VNODE_READ_DATA) {
8967 rights &= ~KAUTH_VNODE_READ_DATA;
8968 rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
8969 }
8970 if (rights & KAUTH_VNODE_WRITE_DATA) {
8971 rights &= ~KAUTH_VNODE_WRITE_DATA;
8972 rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
8973 }
8974
8975 /*
8976 * Point 'vp' to the namedstream's parent for ACL checking
8977 */
8978 if ((vp->v_parent != NULL) &&
8979 (vget_internal(vp->v_parent, 0, VNODE_NODEAD | VNODE_DRAINO) == 0)) {
8980 parent_ref = TRUE;
8981 vcp->vp = vp = vp->v_parent;
8982 }
8983 }
8984
8985 if (vfs_context_issuser(ctx)) {
8986 /*
8987 * if we're not asking for execute permissions or modifications,
8988 * then we're done, this action is authorized.
8989 */
8990 if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) {
8991 goto success;
8992 }
8993
8994 is_suser = TRUE;
8995 }
8996
8997 /*
8998 * Get vnode attributes and extended security information for the vnode
8999 * and directory if required.
9000 *
9001 * If we're root we only want mode bits and flags for checking
9002 * execute and immutability.
9003 */
9004 VATTR_WANTED(&va, va_mode);
9005 VATTR_WANTED(&va, va_flags);
9006 if (!is_suser) {
9007 VATTR_WANTED(&va, va_uid);
9008 VATTR_WANTED(&va, va_gid);
9009 VATTR_WANTED(&va, va_acl);
9010 }
9011 if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
9012 KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result);
9013 goto out;
9014 }
9015 VATTR_WANTED(&va, va_type);
9016 VATTR_RETURN(&va, va_type, vnode_vtype(vp));
9017
9018 if (vcp->dvp) {
9019 VATTR_WANTED(&dva, va_mode);
9020 VATTR_WANTED(&dva, va_flags);
9021 if (!is_suser) {
9022 VATTR_WANTED(&dva, va_uid);
9023 VATTR_WANTED(&dva, va_gid);
9024 VATTR_WANTED(&dva, va_acl);
9025 }
9026 if ((result = vnode_getattr(vcp->dvp, &dva, ctx)) != 0) {
9027 KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp, result);
9028 goto out;
9029 }
9030 VATTR_WANTED(&dva, va_type);
9031 VATTR_RETURN(&dva, va_type, vnode_vtype(vcp->dvp));
9032 }
9033
9034 result = vnode_attr_authorize_internal(vcp, vp->v_mount, rights, is_suser,
9035 &found_deny, noimmutable, parent_authorized_for_delete_child);
9036 out:
9037 if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) {
9038 kauth_acl_free(va.va_acl);
9039 }
9040 if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL)) {
9041 kauth_acl_free(dva.va_acl);
9042 }
9043
9044 if (result) {
9045 if (parent_ref) {
9046 vnode_put(vp);
9047 }
9048 *errorp = result;
9049 KAUTH_DEBUG("%p DENIED - auth denied", vp);
9050 return KAUTH_RESULT_DENY;
9051 }
9052 if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
9053 /*
9054 * if we were successfully granted the right to search this directory
9055 * and there were NO ACL DENYs for search and the posix permissions also don't
9056 * deny execute, we can synthesize a global right that allows anyone to
9057 * traverse this directory during a pathname lookup without having to
9058 * match the credential associated with this cache of rights.
9059 *
9060 * Note that we can correctly cache KAUTH_VNODE_SEARCHBYANYONE
9061 * only if we actually check ACLs which we don't for root. As
9062 * a workaround, the lookup fast path checks for root.
9063 */
9064 if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
9065 ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
9066 (S_IXUSR | S_IXGRP | S_IXOTH))) {
9067 vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
9068 }
9069 }
9070 success:
9071 if (parent_ref) {
9072 vnode_put(vp);
9073 }
9074
9075 /*
9076 * Note that this implies that we will allow requests for no rights, as well as
9077 * for rights that we do not recognise. There should be none of these.
9078 */
9079 KAUTH_DEBUG("%p ALLOWED - auth granted", vp);
9080 return KAUTH_RESULT_ALLOW;
9081 }
9082
9083 int
9084 vnode_attr_authorize_init(struct vnode_attr *vap, struct vnode_attr *dvap,
9085 kauth_action_t action, vfs_context_t ctx)
9086 {
9087 VATTR_INIT(vap);
9088 VATTR_WANTED(vap, va_type);
9089 VATTR_WANTED(vap, va_mode);
9090 VATTR_WANTED(vap, va_flags);
9091 if (dvap) {
9092 VATTR_INIT(dvap);
9093 if (action & KAUTH_VNODE_DELETE) {
9094 VATTR_WANTED(dvap, va_type);
9095 VATTR_WANTED(dvap, va_mode);
9096 VATTR_WANTED(dvap, va_flags);
9097 }
9098 } else if (action & KAUTH_VNODE_DELETE) {
9099 return EINVAL;
9100 }
9101
9102 if (!vfs_context_issuser(ctx)) {
9103 VATTR_WANTED(vap, va_uid);
9104 VATTR_WANTED(vap, va_gid);
9105 VATTR_WANTED(vap, va_acl);
9106 if (dvap && (action & KAUTH_VNODE_DELETE)) {
9107 VATTR_WANTED(dvap, va_uid);
9108 VATTR_WANTED(dvap, va_gid);
9109 VATTR_WANTED(dvap, va_acl);
9110 }
9111 }
9112
9113 return 0;
9114 }
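/*
 * Hedged usage sketch (not part of the original file): how a caller might
 * pair vnode_attr_authorize_init() with vnode_attr_authorize() below for a
 * delete check. The init call only marks which attributes are wanted; the
 * assumption here is that the caller populates them with vnode_getattr()
 * on the node and its parent before the authorize call.
 */
#if 0 /* example only, excluded from the build */
static int
delete_authorized(vnode_t vp, vnode_t dvp, mount_t mp, vfs_context_t ctx)
{
	struct vnode_attr va, dva;
	int error;

	error = vnode_attr_authorize_init(&va, &dva, KAUTH_VNODE_DELETE, ctx);
	if (error != 0) {
		return error;
	}
	/* fill in the wanted attributes for the node and its parent */
	if ((error = vnode_getattr(vp, &va, ctx)) != 0 ||
	    (error = vnode_getattr(dvp, &dva, ctx)) != 0) {
		return error;
	}
	return vnode_attr_authorize(&va, &dva, mp, KAUTH_VNODE_DELETE, ctx);
}
#endif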
9115
9116 int
9117 vnode_attr_authorize(struct vnode_attr *vap, struct vnode_attr *dvap, mount_t mp,
9118 kauth_action_t action, vfs_context_t ctx)
9119 {
9120 struct _vnode_authorize_context auth_context;
9121 vauth_ctx vcp;
9122 kauth_ace_rights_t rights;
9123 int noimmutable;
9124 boolean_t found_deny;
9125 boolean_t is_suser = FALSE;
9126 int result = 0;
9127
9128 vcp = &auth_context;
9129 vcp->ctx = ctx;
9130 vcp->vp = NULLVP;
9131 vcp->vap = vap;
9132 vcp->dvp = NULLVP;
9133 vcp->dvap = dvap;
9134 vcp->flags = vcp->flags_valid = 0;
9135
9136 noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
9137 rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);
9138
9139 /*
9140 * Check for read-only filesystems.
9141 */
9142 if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
9143 mp && (mp->mnt_flag & MNT_RDONLY) &&
9144 ((vap->va_type == VREG) || (vap->va_type == VDIR) ||
9145 (vap->va_type == VLNK) || (rights & KAUTH_VNODE_DELETE) ||
9146 (rights & KAUTH_VNODE_DELETE_CHILD))) {
9147 result = EROFS;
9148 goto out;
9149 }
9150
9151 /*
9152 * Check for noexec filesystems.
9153 */
9154 if ((rights & KAUTH_VNODE_EXECUTE) &&
9155 (vap->va_type == VREG) && mp && (mp->mnt_flag & MNT_NOEXEC)) {
9156 result = EACCES;
9157 goto out;
9158 }
9159
9160 if (vfs_context_issuser(ctx)) {
9161 /*
9162 * if we're not asking for execute permissions or modifications,
9163 * then we're done, this action is authorized.
9164 */
9165 if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) {
9166 goto out;
9167 }
9168 is_suser = TRUE;
9169 } else {
9170 if (!VATTR_IS_SUPPORTED(vap, va_uid) ||
9171 !VATTR_IS_SUPPORTED(vap, va_gid) ||
9172 (mp && vfs_extendedsecurity(mp) && !VATTR_IS_SUPPORTED(vap, va_acl))) {
9173 panic("vnode attrs not complete for vnode_attr_authorize\n");
9174 }
9175 }
9176
9177 if (mp) {
9178 vnode_attr_handle_mnt_ignore_ownership(vap, mp, ctx);
9179 }
9180
9181 result = vnode_attr_authorize_internal(vcp, mp, rights, is_suser,
9182 &found_deny, noimmutable, FALSE);
9183
9184 if (result == EPERM) {
9185 result = EACCES;
9186 }
9187 out:
9188 return result;
9189 }
9190
9191
9192 int
9193 vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
9194 {
9195 return vnode_authattr_new_internal(dvp, vap, noauth, NULL, ctx);
9196 }
9197
9198 /*
9199 * Check that the attribute information in vattr can be legally applied to
9200 * a new file by the context.
9201 */
9202 static int
9203 vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
9204 {
9205 int error;
9206 int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode;
9207 uint32_t inherit_flags;
9208 kauth_cred_t cred;
9209 guid_t changer;
9210 mount_t dmp;
9211 struct vnode_attr dva;
9212
9213 error = 0;
9214
9215 if (defaulted_fieldsp) {
9216 *defaulted_fieldsp = 0;
9217 }
9218
9219 defaulted_owner = defaulted_group = defaulted_mode = 0;
9220
9221 inherit_flags = 0;
9222
9223 /*
9224 * Require that the filesystem support extended security to apply any.
9225 */
9226 if (!vfs_extendedsecurity(dvp->v_mount) &&
9227 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
9228 error = EINVAL;
9229 goto out;
9230 }
9231
9232 /*
9233 * Default some fields.
9234 */
9235 dmp = dvp->v_mount;
9236
9237 /*
9238 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
9239 * owner takes ownership of all new files.
9240 */
9241 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) {
9242 VATTR_SET(vap, va_uid, dmp->mnt_fsowner);
9243 defaulted_owner = 1;
9244 } else {
9245 if (!VATTR_IS_ACTIVE(vap, va_uid)) {
9246 /* default owner is current user */
9247 VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx)));
9248 defaulted_owner = 1;
9249 }
9250 }
9251
9252 /*
9253 * We need the dvp's va_flags and *may* need the gid of the directory,
9254 * so we ask for both here.
9255 */
9256 VATTR_INIT(&dva);
9257 VATTR_WANTED(&dva, va_gid);
9258 VATTR_WANTED(&dva, va_flags);
9259 if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) {
9260 goto out;
9261 }
9262
9263 /*
9264 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that
9265 * group takes ownership of all new files.
9266 */
9267 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) {
9268 VATTR_SET(vap, va_gid, dmp->mnt_fsgroup);
9269 defaulted_group = 1;
9270 } else {
9271 if (!VATTR_IS_ACTIVE(vap, va_gid)) {
9272 /* default group comes from parent object, fallback to current user */
9273 if (VATTR_IS_SUPPORTED(&dva, va_gid)) {
9274 VATTR_SET(vap, va_gid, dva.va_gid);
9275 } else {
9276 VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx)));
9277 }
9278 defaulted_group = 1;
9279 }
9280 }
9281
9282 if (!VATTR_IS_ACTIVE(vap, va_flags)) {
9283 VATTR_SET(vap, va_flags, 0);
9284 }
9285
9286 /* Determine if SF_RESTRICTED should be inherited from the parent
9287 * directory. */
9288 if (VATTR_IS_SUPPORTED(&dva, va_flags)) {
9289 inherit_flags = dva.va_flags & (UF_DATAVAULT | SF_RESTRICTED);
9290 }
9291
9292 /* default mode is everything, masked with current umask */
9293 if (!VATTR_IS_ACTIVE(vap, va_mode)) {
9294 VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd->fd_cmask);
9295 KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o", vap->va_mode, vfs_context_proc(ctx)->p_fd->fd_cmask);
9296 defaulted_mode = 1;
9297 }
9298 /* set timestamps to now */
9299 if (!VATTR_IS_ACTIVE(vap, va_create_time)) {
9300 nanotime(&vap->va_create_time);
9301 VATTR_SET_ACTIVE(vap, va_create_time);
9302 }
9303
9304 /*
9305 * Check for attempts to set nonsensical fields.
9306 */
9307 if (vap->va_active & ~VNODE_ATTR_NEWOBJ) {
9308 error = EINVAL;
9309 KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
9310 vap->va_active & ~VNODE_ATTR_NEWOBJ);
9311 goto out;
9312 }
9313
9314 /*
9315 * Quickly check for the applicability of any enforcement here.
9316 * Tests below maintain the integrity of the local security model.
9317 */
9318 if (vfs_authopaque(dvp->v_mount)) {
9319 goto out;
9320 }
9321
9322 /*
9323 * We need to know if the caller is the superuser, or if the work is
9324 * otherwise already authorised.
9325 */
9326 cred = vfs_context_ucred(ctx);
9327 if (noauth) {
9328 /* doing work for the kernel */
9329 has_priv_suser = 1;
9330 } else {
9331 has_priv_suser = vfs_context_issuser(ctx);
9332 }
9333
9334
9335 if (VATTR_IS_ACTIVE(vap, va_flags)) {
9336 vap->va_flags &= ~SF_SYNTHETIC;
9337 if (has_priv_suser) {
9338 if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) {
9339 error = EPERM;
9340 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
9341 goto out;
9342 }
9343 } else {
9344 if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) {
9345 error = EPERM;
9346 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
9347 goto out;
9348 }
9349 }
9350 }
9351
9352 /* if not superuser, validate legality of new-item attributes */
9353 if (!has_priv_suser) {
9354 if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) {
9355 /* setgid? */
9356 if (vap->va_mode & S_ISGID) {
9357 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
9358 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
9359 goto out;
9360 }
9361 if (!ismember) {
9362 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", vap->va_gid);
9363 error = EPERM;
9364 goto out;
9365 }
9366 }
9367
9368 /* setuid? */
9369 if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) {
9370 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
9371 error = EPERM;
9372 goto out;
9373 }
9374 }
9375 if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) {
9376 KAUTH_DEBUG(" DENIED - cannot create new item owned by %d", vap->va_uid);
9377 error = EPERM;
9378 goto out;
9379 }
9380 if (!defaulted_group) {
9381 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
9382 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
9383 goto out;
9384 }
9385 if (!ismember) {
9386 KAUTH_DEBUG(" DENIED - cannot create new item with group %d - not a member", vap->va_gid);
9387 error = EPERM;
9388 goto out;
9389 }
9390 }
9391
9392 /* initialising owner/group UUID */
9393 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
9394 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
9395 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
9396 /* XXX ENOENT here - no GUID - should perhaps become EPERM */
9397 goto out;
9398 }
9399 if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
9400 KAUTH_DEBUG(" ERROR - cannot create item with supplied owner UUID - not us");
9401 error = EPERM;
9402 goto out;
9403 }
9404 }
9405 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
9406 if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
9407 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
9408 goto out;
9409 }
9410 if (!ismember) {
9411 KAUTH_DEBUG(" ERROR - cannot create item with supplied group UUID - not a member");
9412 error = EPERM;
9413 goto out;
9414 }
9415 }
9416 }
9417 out:
9418 if (inherit_flags) {
9419 /* Apply SF_RESTRICTED to the file if its parent directory was
9420 * restricted. This is done at the end so that root is not
9421 * required if this flag is only set due to inheritance. */
9422 VATTR_SET(vap, va_flags, (vap->va_flags | inherit_flags));
9423 }
9424 if (defaulted_fieldsp) {
9425 if (defaulted_mode) {
9426 *defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_MODE;
9427 }
9428 if (defaulted_group) {
9429 *defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_GID;
9430 }
9431 if (defaulted_owner) {
9432 *defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_UID;
9433 }
9434 }
9435 return error;
9436 }
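/*
 * Illustrative sketch (not part of the original file): the default-mode
 * rule applied above, in miniature - ACCESSPERMS masked by the process
 * umask. ACCESSPERMS (0777) comes from <sys/stat.h>.
 */
#if 0 /* example only, excluded from the build */
#include <sys/stat.h>

static mode_t
default_mode(mode_t cmask)
{
	/* 0777 & ~022 == 0755 (rwxr-xr-x) for a typical 022 umask */
	return ACCESSPERMS & ~cmask;
}
#endif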
9437
9438 /*
9439 * Check that the attribute information in vap can be legally written by the
9440 * context.
9441 *
9442 * Call this when you're not sure about the vnode_attr; either its contents
9443 * have come from an unknown source, or when they are variable.
9444 *
9445 * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
9446 * must be authorized to be permitted to write the vattr.
9447 */
9448 int
9449 vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
9450 {
9451 struct vnode_attr ova;
9452 kauth_action_t required_action;
9453 int error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid;
9454 guid_t changer;
9455 gid_t group;
9456 uid_t owner;
9457 mode_t newmode;
9458 kauth_cred_t cred;
9459 uint32_t fdelta;
9460
9461 VATTR_INIT(&ova);
9462 required_action = 0;
9463 error = 0;
9464
9465 /*
9466 * Quickly check for enforcement applicability.
9467 */
9468 if (vfs_authopaque(vp->v_mount)) {
9469 goto out;
9470 }
9471
9472 /*
9473 * Check for attempts to set nonsensical fields.
9474 */
9475 if (vap->va_active & VNODE_ATTR_RDONLY) {
9476 KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
9477 error = EINVAL;
9478 goto out;
9479 }
9480
9481 /*
9482 * We need to know if the caller is the superuser.
9483 */
9484 cred = vfs_context_ucred(ctx);
9485 has_priv_suser = kauth_cred_issuser(cred);
9486
9487 /*
9488 * If any of the following are changing, we need information from the old file:
9489 * va_uid
9490 * va_gid
9491 * va_mode
9492 * va_uuuid
9493 * va_guuid
9494 */
9495 if (VATTR_IS_ACTIVE(vap, va_uid) ||
9496 VATTR_IS_ACTIVE(vap, va_gid) ||
9497 VATTR_IS_ACTIVE(vap, va_mode) ||
9498 VATTR_IS_ACTIVE(vap, va_uuuid) ||
9499 VATTR_IS_ACTIVE(vap, va_guuid)) {
9500 VATTR_WANTED(&ova, va_mode);
9501 VATTR_WANTED(&ova, va_uid);
9502 VATTR_WANTED(&ova, va_gid);
9503 VATTR_WANTED(&ova, va_uuuid);
9504 VATTR_WANTED(&ova, va_guuid);
9505 KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
9506 }
9507
9508 /*
9509 * If timestamps are being changed, we need to know who the file is owned
9510 * by.
9511 */
9512 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
9513 VATTR_IS_ACTIVE(vap, va_change_time) ||
9514 VATTR_IS_ACTIVE(vap, va_modify_time) ||
9515 VATTR_IS_ACTIVE(vap, va_access_time) ||
9516 VATTR_IS_ACTIVE(vap, va_backup_time) ||
9517 VATTR_IS_ACTIVE(vap, va_addedtime)) {
9518 VATTR_WANTED(&ova, va_uid);
9519 #if 0 /* enable this when we support UUIDs as official owners */
9520 VATTR_WANTED(&ova, va_uuuid);
9521 #endif
9522 KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
9523 }
9524
9525 /*
9526 * If flags are being changed, we need the old flags.
9527 */
9528 if (VATTR_IS_ACTIVE(vap, va_flags)) {
9529 KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
9530 VATTR_WANTED(&ova, va_flags);
9531 }
9532
9533 /*
9534 * If ACLs are being changed, we need the old ACLs.
9535 */
9536 if (VATTR_IS_ACTIVE(vap, va_acl)) {
9537 KAUTH_DEBUG("ATTR - acl changing, fetching old flags");
9538 VATTR_WANTED(&ova, va_acl);
9539 }
9540
9541 /*
9542 * If the size is being set, make sure it's not a directory.
9543 */
9544 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
9545 /* size is only meaningful on regular files, don't permit otherwise */
9546 if (!vnode_isreg(vp)) {
9547 KAUTH_DEBUG("ATTR - ERROR: size change requested on non-file");
9548 error = vnode_isdir(vp) ? EISDIR : EINVAL;
9549 goto out;
9550 }
9551 }
9552
9553 /*
9554 * Get old data.
9555 */
9556 KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active);
9557 if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
9558 KAUTH_DEBUG(" ERROR - got %d trying to get attributes", error);
9559 goto out;
9560 }
9561
9562 /*
9563 * Size changes require write access to the file data.
9564 */
9565 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
9566 /* if we can't get the size, or it's different, we need write access */
9567 KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
9568 required_action |= KAUTH_VNODE_WRITE_DATA;
9569 }
9570
9571 /*
9572 * Changing timestamps?
9573 *
9574 * Note that we are only called to authorize user-requested time changes;
9575 * side-effect time changes are not authorized. Authorisation is only
9576 * required for existing files.
9577 *
9578 * Non-owners are not permitted to change the time on an existing
9579 * file to anything other than the current time.
9580 */
9581 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
9582 VATTR_IS_ACTIVE(vap, va_change_time) ||
9583 VATTR_IS_ACTIVE(vap, va_modify_time) ||
9584 VATTR_IS_ACTIVE(vap, va_access_time) ||
9585 VATTR_IS_ACTIVE(vap, va_backup_time) ||
9586 VATTR_IS_ACTIVE(vap, va_addedtime)) {
9587 /*
9588 * The owner and root may set any timestamps they like,
9589 * provided that the file is not immutable. The owner still needs
9590 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
9591 */
9592 if (has_priv_suser || vauth_node_owner(&ova, cred)) {
9593 KAUTH_DEBUG("ATTR - root or owner changing timestamps");
9594 required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES;
9595 } else {
9596 /* just setting the current time? */
9597 if (vap->va_vaflags & VA_UTIMES_NULL) {
9598 KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
9599 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
9600 } else {
9601 KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
9602 error = EACCES;
9603 goto out;
9604 }
9605 }
9606 }
9607
9608 /*
9609 * Changing file mode?
9610 */
9611 if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) {
9612 KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode);
9613
9614 /*
9615 * Mode changes always have the same basic auth requirements.
9616 */
9617 if (has_priv_suser) {
9618 KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
9619 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
9620 } else {
9621 /* need WRITE_SECURITY */
9622 KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
9623 required_action |= KAUTH_VNODE_WRITE_SECURITY;
9624 }
9625
9626 /*
9627 * Can't set the setgid bit if you're not in the group and not root. Have to have
9628 * existing group information in the case we're not setting it right now.
9629 */
9630 if (vap->va_mode & S_ISGID) {
9631 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
9632 if (!has_priv_suser) {
9633 if (VATTR_IS_ACTIVE(vap, va_gid)) {
9634 group = vap->va_gid;
9635 } else if (VATTR_IS_SUPPORTED(&ova, va_gid)) {
9636 group = ova.va_gid;
9637 } else {
9638 KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
9639 error = EINVAL;
9640 goto out;
9641 }
9642 /*
9643 * This might be too restrictive; WRITE_SECURITY might be implied by
9644 * membership in this case, rather than being an additional requirement.
9645 */
9646 if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) {
9647 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
9648 goto out;
9649 }
9650 if (!ismember) {
9651 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", group);
9652 error = EPERM;
9653 goto out;
9654 }
9655 }
9656 }
9657
9658 /*
9659 * Can't set the setuid bit unless you're root or the file's owner.
9660 */
9661 if (vap->va_mode & S_ISUID) {
9662 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
9663 if (!has_priv_suser) {
9664 if (VATTR_IS_ACTIVE(vap, va_uid)) {
9665 owner = vap->va_uid;
9666 } else if (VATTR_IS_SUPPORTED(&ova, va_uid)) {
9667 owner = ova.va_uid;
9668 } else {
9669 KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
9670 error = EINVAL;
9671 goto out;
9672 }
9673 if (owner != kauth_cred_getuid(cred)) {
9674 /*
9675 * We could allow this if WRITE_SECURITY is permitted, perhaps.
9676 */
9677 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
9678 error = EPERM;
9679 goto out;
9680 }
9681 }
9682 }
9683 }
9684
9685 /*
9686 * Validate/mask flags changes. This checks that only the flags in
9687 * the UF_SETTABLE mask are being set, and preserves the flags in
9688 * the SF_SETTABLE case.
9689 *
9690 * Since flags changes may be made in conjunction with other changes,
9691 * we will ask the auth code to ignore immutability in the case that
9692 * the SF_* flags are not set and we are only manipulating the file flags.
9693 *
9694 */
9695 if (VATTR_IS_ACTIVE(vap, va_flags)) {
9696 /* compute changing flags bits */
9697 vap->va_flags &= ~SF_SYNTHETIC;
9698 ova.va_flags &= ~SF_SYNTHETIC;
9699 if (VATTR_IS_SUPPORTED(&ova, va_flags)) {
9700 fdelta = vap->va_flags ^ ova.va_flags;
9701 } else {
9702 fdelta = vap->va_flags;
9703 }
9704
9705 if (fdelta != 0) {
9706 KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
9707 required_action |= KAUTH_VNODE_WRITE_SECURITY;
9708
9709 /* check that changing bits are legal */
9710 if (has_priv_suser) {
9711 /*
9712 * The immutability check will prevent us from clearing the SF_*
9713 * flags unless the system securelevel permits it, so just check
9714 * for legal flags here.
9715 */
9716 if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) {
9717 error = EPERM;
9718 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
9719 goto out;
9720 }
9721 } else {
9722 if (fdelta & ~UF_SETTABLE) {
9723 error = EPERM;
9724 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
9725 goto out;
9726 }
9727 }
9728 /*
9729 * If the caller has the ability to manipulate file flags,
9730 * security is not reduced by ignoring them for this operation.
9731 *
9732 * A more complete test here would consider the 'after' states of the flags
9733 * to determine whether it would permit the operation, but this becomes
9734 * very complex.
9735 *
9736 * Ignoring immutability is conditional on securelevel; this does not bypass
9737 * the SF_* flags if securelevel > 0.
9738 */
9739 required_action |= KAUTH_VNODE_NOIMMUTABLE;
9740 }
9741 }
9742
9743 /*
9744 * Validate ownership information.
9745 */
9746 chowner = 0;
9747 chgroup = 0;
9748 clear_suid = 0;
9749 clear_sgid = 0;
9750
9751 /*
9752 * uid changing
9753 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
9754 * support them in general, and will ignore it if/when we try to set it.
9755 * We might want to clear the uid out of vap completely here.
9756 */
9757 if (VATTR_IS_ACTIVE(vap, va_uid)) {
9758 if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) {
9759 if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) {
9760 KAUTH_DEBUG(" DENIED - non-superuser cannot change ownershipt to a third party");
9761 error = EPERM;
9762 goto out;
9763 }
9764 chowner = 1;
9765 }
9766 clear_suid = 1;
9767 }
9768
9769 /*
9770 * gid changing
9771 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
9772 * support them in general, and will ignore it if/when we try to set it.
9773 * We might want to clear the gid out of vap completely here.
9774 */
9775 if (VATTR_IS_ACTIVE(vap, va_gid)) {
9776 if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) {
9777 if (!has_priv_suser) {
9778 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
9779 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
9780 goto out;
9781 }
9782 if (!ismember) {
9783 KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group",
9784 ova.va_gid, vap->va_gid);
9785 error = EPERM;
9786 goto out;
9787 }
9788 }
9789 chgroup = 1;
9790 }
9791 clear_sgid = 1;
9792 }
9793
9794 /*
9795 * Owner UUID being set or changed.
9796 */
9797 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
9798 /* if the owner UUID is not actually changing ... */
9799 if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
9800 if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid)) {
9801 goto no_uuuid_change;
9802 }
9803
9804 /*
9805 * If the current owner UUID is a null GUID, check
9806 * it against the UUID corresponding to the owner UID.
9807 */
9808 if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
9809 VATTR_IS_SUPPORTED(&ova, va_uid)) {
9810 guid_t uid_guid;
9811
9812 if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
9813 kauth_guid_equal(&vap->va_uuuid, &uid_guid)) {
9814 goto no_uuuid_change;
9815 }
9816 }
9817 }
9818
9819 /*
9820 * The owner UUID cannot be set by a non-superuser to anything other than
9821 * their own or a null GUID (to "unset" the owner UUID).
9822 * Note that file systems must be prepared to handle the
9823 * null UUID case in a manner appropriate for that file
9824 * system.
9825 */
9826 if (!has_priv_suser) {
9827 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
9828 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
9829 /* XXX ENOENT here - no UUID - should perhaps become EPERM */
9830 goto out;
9831 }
9832 if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
9833 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
9834 KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us / null");
9835 error = EPERM;
9836 goto out;
9837 }
9838 }
9839 chowner = 1;
9840 clear_suid = 1;
9841 }
9842 no_uuuid_change:
9843 /*
9844 * Group UUID being set or changed.
9845 */
9846 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
9847 /* if the group UUID is not actually changing ... */
9848 if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
9849 if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid)) {
9850 goto no_guuid_change;
9851 }
9852
9853 /*
9854 * If the current group UUID is a null UUID, check
9855 * it against the UUID corresponding to the group GID.
9856 */
9857 if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
9858 VATTR_IS_SUPPORTED(&ova, va_gid)) {
9859 guid_t gid_guid;
9860
9861 if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
9862 kauth_guid_equal(&vap->va_guuid, &gid_guid)) {
9863 goto no_guuid_change;
9864 }
9865 }
9866 }
9867
9868 /*
9869 * The group UUID cannot be set by a non-superuser to anything other than
9870 * one of which they are a member or a null GUID (to "unset"
9871 * the group UUID).
9872 * Note that file systems must be prepared to handle the
9873 * null UUID case in a manner appropriate for that file
9874 * system.
9875 */
9876 if (!has_priv_suser) {
9877 if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
9878 ismember = 1;
9879 } else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
9880 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
9881 goto out;
9882 }
9883 if (!ismember) {
9884 KAUTH_DEBUG(" ERROR - cannot set supplied group UUID - not a member / null");
9885 error = EPERM;
9886 goto out;
9887 }
9888 }
9889 chgroup = 1;
9890 }
9891 no_guuid_change:
9892
9893 /*
9894 * Compute authorisation for group/ownership changes.
9895 */
9896 if (chowner || chgroup || clear_suid || clear_sgid) {
9897 if (has_priv_suser) {
9898 KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
9899 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
9900 } else {
9901 if (chowner) {
9902 KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
9903 required_action |= KAUTH_VNODE_TAKE_OWNERSHIP;
9904 }
9905 if (chgroup && !chowner) {
9906 KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
9907 required_action |= KAUTH_VNODE_WRITE_SECURITY;
9908 }
9909 }
9910
9911 /*
9912 * clear set-uid and set-gid bits. POSIX only requires this for
9913 * non-privileged processes but we do it even for root.
9914 */
9915 if (VATTR_IS_ACTIVE(vap, va_mode)) {
9916 newmode = vap->va_mode;
9917 } else if (VATTR_IS_SUPPORTED(&ova, va_mode)) {
9918 newmode = ova.va_mode;
9919 } else {
9920 KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
9921 newmode = 0;
9922 }
9923
9924 /* chown always clears setuid/gid bits. An exception is made for
9925 * setattrlist, which can set uid, gid and mode on a file at the same
9926 * time: setattrlist is allowed to set the new mode on the file and
9927 * change (chown) the uid/gid.
9928 */
9929 if (newmode & (S_ISUID | S_ISGID)) {
9930 if (!VATTR_IS_ACTIVE(vap, va_mode)) {
9931 KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o",
9932 newmode, newmode & ~(S_ISUID | S_ISGID));
9933 newmode &= ~(S_ISUID | S_ISGID);
9934 }
9935 VATTR_SET(vap, va_mode, newmode);
9936 }
9937 }
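/*
 * Illustrative note (an editorial addition, not in the original source):
 * a plain chown(2) of a 04755 binary leaves it 0755, because va_mode is
 * not active on that path and the S_ISUID/S_ISGID bits are masked above;
 * a setattrlist(2) that supplies uid, gid and mode together keeps the
 * mode it requested.
 */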
9938
9939 /*
9940 * Authorise changes in the ACL.
9941 */
9942 if (VATTR_IS_ACTIVE(vap, va_acl)) {
9943 /* no existing ACL */
9944 if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) {
9945 /* adding an ACL */
9946 if (vap->va_acl != NULL) {
9947 required_action |= KAUTH_VNODE_WRITE_SECURITY;
9948 KAUTH_DEBUG("CHMOD - adding ACL");
9949 }
9950
9951 /* removing an existing ACL */
9952 } else if (vap->va_acl == NULL) {
9953 required_action |= KAUTH_VNODE_WRITE_SECURITY;
9954 KAUTH_DEBUG("CHMOD - removing ACL");
9955
9956 /* updating an existing ACL */
9957 } else {
9958 if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) {
9959 /* entry count changed, must be different */
9960 required_action |= KAUTH_VNODE_WRITE_SECURITY;
9961 KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
9962 } else if (vap->va_acl->acl_entrycount > 0) {
9963 /* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
9964 if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0],
9965 sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount)) {
9966 required_action |= KAUTH_VNODE_WRITE_SECURITY;
9967 KAUTH_DEBUG("CHMOD - changing ACL entries");
9968 }
9969 }
9970 }
9971 }
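/*
 * Illustrative note (an editorial addition, not in the original source):
 * two ACLs carrying, say, three ACEs each compare equal only if all
 * 3 * sizeof(struct kauth_ace) bytes match, so even reordering the same
 * entries is treated as a change and requires WRITE_SECURITY.
 */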
9972
9973 /*
9974 * Other attributes that require authorisation.
9975 */
9976 if (VATTR_IS_ACTIVE(vap, va_encoding)) {
9977 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
9978 }
9979
9980 out:
9981 if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL)) {
9982 kauth_acl_free(ova.va_acl);
9983 }
9984 if (error == 0) {
9985 *actionp = required_action;
9986 }
9987 return error;
9988 }
9989
9990 static int
9991 setlocklocal_callback(struct vnode *vp, __unused void *cargs)
9992 {
9993 vnode_lock_spin(vp);
9994 vp->v_flag |= VLOCKLOCAL;
9995 vnode_unlock(vp);
9996
9997 return VNODE_RETURNED;
9998 }
9999
10000 void
10001 vfs_setlocklocal(mount_t mp)
10002 {
10003 mount_lock_spin(mp);
10004 mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
10005 mount_unlock(mp);
10006
10007 /*
10008 * The number of active vnodes is expected to be
10009 * very small when vfs_setlocklocal is invoked.
10010 */
10011 vnode_iterate(mp, 0, setlocklocal_callback, NULL);
10012 }
10013
10014 void
10015 vfs_setcompoundopen(mount_t mp)
10016 {
10017 mount_lock_spin(mp);
10018 mp->mnt_compound_ops |= COMPOUND_VNOP_OPEN;
10019 mount_unlock(mp);
10020 }
10021
10022 void
10023 vnode_setswapmount(vnode_t vp)
10024 {
10025 mount_lock(vp->v_mount);
10026 vp->v_mount->mnt_kern_flag |= MNTK_SWAP_MOUNT;
10027 mount_unlock(vp->v_mount);
10028 }
10029
10030
10031 int64_t
10032 vnode_getswappin_avail(vnode_t vp)
10033 {
10034 int64_t max_swappin_avail = 0;
10035
10036 mount_lock(vp->v_mount);
10037 if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_SWAPPIN_SUPPORTED) {
10038 max_swappin_avail = vp->v_mount->mnt_max_swappin_available;
10039 }
10040 mount_unlock(vp->v_mount);
10041
10042 return max_swappin_avail;
10043 }
10044
10045
10046 void
10047 vn_setunionwait(vnode_t vp)
10048 {
10049 vnode_lock_spin(vp);
10050 vp->v_flag |= VISUNION;
10051 vnode_unlock(vp);
10052 }
10053
10054
10055 void
10056 vn_checkunionwait(vnode_t vp)
10057 {
10058 vnode_lock_spin(vp);
10059 while ((vp->v_flag & VISUNION) == VISUNION) {
10060 msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
10061 }
10062 vnode_unlock(vp);
10063 }
10064
10065 void
10066 vn_clearunionwait(vnode_t vp, int locked)
10067 {
10068 if (!locked) {
10069 vnode_lock_spin(vp);
10070 }
10071 if ((vp->v_flag & VISUNION) == VISUNION) {
10072 vp->v_flag &= ~VISUNION;
10073 wakeup((caddr_t)&vp->v_flag);
10074 }
10075 if (!locked) {
10076 vnode_unlock(vp);
10077 }
10078 }
10079
10080 int
10081 vnode_materialize_dataless_file(vnode_t vp, uint64_t op_type)
10082 {
10083 int error;
10084
10085 /* Swap files are special; ignore them */
10086 if (vnode_isswap(vp)) {
10087 return 0;
10088 }
10089
10090 error = resolve_nspace_item(vp,
10091 op_type | NAMESPACE_HANDLER_NSPACE_EVENT);
10092
10093 /*
10094 * The file resolver owns the logic about what error to return
10095 * to the caller. We only need to handle a couple of special
10096 * cases here:
10097 */
10098 if (error == EJUSTRETURN) {
10099 /*
10100 * The requesting process is allowed to interact with
10101 * dataless objects. Make a couple of sanity-checks
10102 * here to ensure the action makes sense.
10103 */
10104 switch (op_type) {
10105 case NAMESPACE_HANDLER_WRITE_OP:
10106 case NAMESPACE_HANDLER_TRUNCATE_OP:
10107 case NAMESPACE_HANDLER_RENAME_OP:
10108 /*
10109 * This handles the case of the resolver itself
10110 * writing data to the file (or throwing it
10111 * away).
10112 */
10113 error = 0;
10114 break;
10115 case NAMESPACE_HANDLER_READ_OP:
10116 /*
10117 * This handles the case of the resolver needing
10118 * to look up inside of a dataless directory while
10119 * it's in the process of materializing it (for
10120 * example, creating files or directories).
10121 */
10122 error = (vnode_vtype(vp) == VDIR) ? 0 : EBADF;
10123 break;
10124 default:
10125 error = EBADF;
10126 break;
10127 }
10128 }
10129
10130 return error;
10131 }
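#if 0
/*
 * Illustrative sketch only (an editorial addition, not in the original
 * source): a hypothetical write path materializing a dataless file before
 * touching its contents.
 */
static int
example_write_prepare(vnode_t vp)
{
	int error;

	/* fault in the file's data; a no-op for swap files, per above */
	error = vnode_materialize_dataless_file(vp, NAMESPACE_HANDLER_WRITE_OP);
	if (error) {
		return error;
	}
	/* ... proceed with the write ... */
	return 0;
}
#endif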
10132
10133 /*
10134 * Removes orphaned apple double files during a rmdir
10135 * Works by:
10136 * 1. vnode_suspend().
10137 * 2. Call VNOP_READDIR() till the end of directory is reached.
10138 * 3. Check if the directory entries returned are regular files with names starting with "._". If not, return ENOTEMPTY.
10139 * 4. Continue (2) and (3) till end of directory is reached.
10140 * 5. If all the entries in the directory were files with "._" name, delete all the files.
10141 * 6. vnode_resume()
10142 * 7. If deletion of all files succeeded, call VNOP_RMDIR() again.
10143 */
10144
10145 errno_t
10146 rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int * restart_flag)
10147 {
10148 #define UIO_BUFF_SIZE 2048
10149 uio_t auio = NULL;
10150 int eofflag, siz = UIO_BUFF_SIZE, alloc_size = 0, nentries = 0;
10151 int open_flag = 0, full_erase_flag = 0;
10152 char uio_buf[UIO_SIZEOF(1)];
10153 char *rbuf = NULL;
10154 void *dir_pos;
10155 void *dir_end;
10156 struct dirent *dp;
10157 errno_t error;
10158
10159 error = vnode_suspend(vp);
10160
10161 /*
10162 * restart_flag is set so that the calling rmdir sleeps and resets
10163 */
10164 if (error == EBUSY) {
10165 *restart_flag = 1;
10166 }
10167 if (error != 0) {
10168 return error;
10169 }
10170
10171 /*
10172 * Prevent dataless fault materialization while we have
10173 * a suspended vnode.
10174 */
10175 uthread_t ut = get_bsdthread_info(current_thread());
10176 bool saved_nodatalessfaults =
10177 (ut->uu_flag & UT_NSPACE_NODATALESSFAULTS) ? true : false;
10178 ut->uu_flag |= UT_NSPACE_NODATALESSFAULTS;
10179
10180 /*
10181 * set up UIO
10182 */
10183 rbuf = kheap_alloc(KHEAP_DATA_BUFFERS, siz, Z_WAITOK);
10184 alloc_size = siz;
10185 if (rbuf) {
10186 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
10187 &uio_buf[0], sizeof(uio_buf));
10188 }
10189 if (!rbuf || !auio) {
10190 error = ENOMEM;
10191 goto outsc;
10192 }
10193
10194 uio_setoffset(auio, 0);
10195
10196 eofflag = 0;
10197
10198 if ((error = VNOP_OPEN(vp, FREAD, ctx))) {
10199 goto outsc;
10200 } else {
10201 open_flag = 1;
10202 }
10203
10204 /*
10205 * First pass checks if all files are appleDouble files.
10206 */
10207
10208 do {
10209 siz = UIO_BUFF_SIZE;
10210 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
10211 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
10212
10213 if ((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx))) {
10214 goto outsc;
10215 }
10216
10217 if (uio_resid(auio) != 0) {
10218 siz -= uio_resid(auio);
10219 }
10220
10221 /*
10222 * Iterate through directory
10223 */
10224 dir_pos = (void*) rbuf;
10225 dir_end = (void*) (rbuf + siz);
10226 dp = (struct dirent*) (dir_pos);
10227
10228 if (dir_pos == dir_end) {
10229 eofflag = 1;
10230 }
10231
10232 while (dir_pos < dir_end) {
10233 /*
10234 * Check for . and .. as well as directories
10235 */
10236 if (dp->d_ino != 0 &&
10237 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
10238 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) {
10239 /*
10240 * Check for irregular files and ._ files
10241 * If there is a ._._ file abort the op
10242 */
10243 if (dp->d_namlen < 2 ||
10244 strncmp(dp->d_name, "._", 2) ||
10245 (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._", 2))) {
10246 error = ENOTEMPTY;
10247 goto outsc;
10248 }
10249 }
10250 dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
10251 dp = (struct dirent*)dir_pos;
10252 }
10253
10254 /*
10255 * workaround for HFS/NFS setting eofflag before end of file
10256 */
10257 if (vp->v_tag == VT_HFS && nentries > 2) {
10258 eofflag = 0;
10259 }
10260
10261 if (vp->v_tag == VT_NFS) {
10262 if (eofflag && !full_erase_flag) {
10263 full_erase_flag = 1;
10264 eofflag = 0;
10265 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
10266 } else if (!eofflag && full_erase_flag) {
10267 full_erase_flag = 0;
10268 }
10269 }
10270 } while (!eofflag);
10271 /*
10272 * If we've made it here all the files in the dir are ._ files.
10273 * We can delete the files even though the node is suspended
10274 * because we are the owner of the file.
10275 */
10276
10277 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
10278 eofflag = 0;
10279 full_erase_flag = 0;
10280
10281 do {
10282 siz = UIO_BUFF_SIZE;
10283 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
10284 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
10285
10286 error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);
10287
10288 if (error != 0) {
10289 goto outsc;
10290 }
10291
10292 if (uio_resid(auio) != 0) {
10293 siz -= uio_resid(auio);
10294 }
10295
10296 /*
10297 * Iterate through directory
10298 */
10299 dir_pos = (void*) rbuf;
10300 dir_end = (void*) (rbuf + siz);
10301 dp = (struct dirent*) dir_pos;
10302
10303 if (dir_pos == dir_end) {
10304 eofflag = 1;
10305 }
10306
10307 while (dir_pos < dir_end) {
10308 /*
10309 * Check for . and .. as well as directories
10310 */
10311 if (dp->d_ino != 0 &&
10312 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
10313 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
10314 ) {
10315 error = unlink1(ctx, vp,
10316 CAST_USER_ADDR_T(dp->d_name), UIO_SYSSPACE,
10317 VNODE_REMOVE_SKIP_NAMESPACE_EVENT |
10318 VNODE_REMOVE_NO_AUDIT_PATH);
10319
10320 if (error && error != ENOENT) {
10321 goto outsc;
10322 }
10323 }
10324 dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
10325 dp = (struct dirent*)dir_pos;
10326 }
10327
10328 /*
10329 * workaround for HFS/NFS setting eofflag before end of file
10330 */
10331 if (vp->v_tag == VT_HFS && nentries > 2) {
10332 eofflag = 0;
10333 }
10334
10335 if (vp->v_tag == VT_NFS) {
10336 if (eofflag && !full_erase_flag) {
10337 full_erase_flag = 1;
10338 eofflag = 0;
10339 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
10340 } else if (!eofflag && full_erase_flag) {
10341 full_erase_flag = 0;
10342 }
10343 }
10344 } while (!eofflag);
10345
10346
10347 error = 0;
10348
10349 outsc:
10350 if (open_flag) {
10351 VNOP_CLOSE(vp, FREAD, ctx);
10352 }
10353
10354 if (auio) {
10355 uio_free(auio);
10356 }
10357 kheap_free(KHEAP_DATA_BUFFERS, rbuf, alloc_size);
10358
10359 if (saved_nodatalessfaults == false) {
10360 ut->uu_flag &= ~UT_NSPACE_NODATALESSFAULTS;
10361 }
10362
10363 vnode_resume(vp);
10364
10365 return error;
10366 }
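#if 0
/*
 * Illustrative sketch only (an editorial addition, not in the original
 * source): a hypothetical rmdir path honouring the restart_flag contract
 * described in the block comment above.
 */
static errno_t
example_rmdir_orphans(vnode_t dvp, vfs_context_t ctx)
{
	errno_t error;
	int restart;

	do {
		restart = 0;
		/* a real caller would sleep briefly before retrying */
		error = rmdir_remove_orphaned_appleDouble(dvp, ctx, &restart);
	} while (error == EBUSY && restart);

	return error;
}
#endif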
10367
10368
10369 void
10370 lock_vnode_and_post(vnode_t vp, int kevent_num)
10371 {
10372 /* Only take the lock if there's something there! */
10373 if (vp->v_knotes.slh_first != NULL) {
10374 vnode_lock(vp);
10375 KNOTE(&vp->v_knotes, kevent_num);
10376 vnode_unlock(vp);
10377 }
10378 }
10379
10380 void panic_print_vnodes(void);
10381
10382 /* define PANIC_PRINTS_VNODES only if investigation is required. */
10383 #ifdef PANIC_PRINTS_VNODES
10384
10385 static const char *
10386 __vtype(uint16_t vtype)
10387 {
10388 switch (vtype) {
10389 case VREG:
10390 return "R";
10391 case VDIR:
10392 return "D";
10393 case VBLK:
10394 return "B";
10395 case VCHR:
10396 return "C";
10397 case VLNK:
10398 return "L";
10399 case VSOCK:
10400 return "S";
10401 case VFIFO:
10402 return "F";
10403 case VBAD:
10404 return "x";
10405 case VSTR:
10406 return "T";
10407 case VCPLX:
10408 return "X";
10409 default:
10410 return "?";
10411 }
10412 }
10413
10414 /*
10415 * build a path from the bottom up
10416 * NOTE: called from the panic path - no alloc'ing of memory and no locks!
10417 */
10418 static char *
10419 __vpath(vnode_t vp, char *str, int len, int depth)
10420 {
10421 int vnm_len;
10422 const char *src;
10423 char *dst;
10424
10425 if (len <= 0) {
10426 return str;
10427 }
10428 /* str + len is the start of the string we created */
10429 if (!vp->v_name) {
10430 return str + len;
10431 }
10432
10433 /* follow mount vnodes to get the full path */
10434 if ((vp->v_flag & VROOT)) {
10435 if (vp->v_mount != NULL && vp->v_mount->mnt_vnodecovered) {
10436 return __vpath(vp->v_mount->mnt_vnodecovered,
10437 str, len, depth + 1);
10438 }
10439 return str + len;
10440 }
10441
10442 src = vp->v_name;
10443 vnm_len = strlen(src);
10444 if (vnm_len > len) {
10445 /* truncate the name to fit in the string */
10446 src += (vnm_len - len);
10447 vnm_len = len;
10448 }
10449
10450 /* start from the back and copy just characters (no NULLs) */
10451
10452 /* this will chop off leaf path (file) names */
10453 if (depth > 0) {
10454 dst = str + len - vnm_len;
10455 memcpy(dst, src, vnm_len);
10456 len -= vnm_len;
10457 } else {
10458 dst = str + len;
10459 }
10460
10461 if (vp->v_parent && len > 1) {
10462 /* follow parents up the chain */
10463 len--;
10464 *(dst - 1) = '/';
10465 return __vpath(vp->v_parent, str, len, depth + 1);
10466 }
10467
10468 return dst;
10469 }
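/*
 * Worked example (an editorial addition, not in the original source): for a
 * vnode named "log" under "/private/var", __vpath(vp, str, len, 0) skips the
 * depth-0 leaf name, walks v_parent upward, and returns a pointer into the
 * tail of str reading "private/var/"; the leaf is chopped as noted above.
 */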
10470
10471 #define SANE_VNODE_PRINT_LIMIT 5000
10472 void
10473 panic_print_vnodes(void)
10474 {
10475 mount_t mnt;
10476 vnode_t vp;
10477 int nvnodes = 0;
10478 const char *type;
10479 char *nm;
10480 char vname[257];
10481
10482 paniclog_append_noflush("\n***** VNODES *****\n"
10483 "TYPE UREF ICNT PATH\n");
10484
10485 /* NULL-terminate the path name */
10486 vname[sizeof(vname) - 1] = '\0';
10487
10488 /*
10489 * iterate all vnodelist items in all mounts (mntlist) -> mnt_vnodelist
10490 */
10491 TAILQ_FOREACH(mnt, &mountlist, mnt_list) {
10492 if (!ml_validate_nofault((vm_offset_t)mnt, sizeof(mount_t))) {
10493 paniclog_append_noflush("Unable to iterate the mount list %p - encountered an invalid mount pointer %p \n",
10494 &mountlist, mnt);
10495 break;
10496 }
10497
10498 TAILQ_FOREACH(vp, &mnt->mnt_vnodelist, v_mntvnodes) {
10499 if (!ml_validate_nofault((vm_offset_t)vp, sizeof(vnode_t))) {
10500 paniclog_append_noflush("Unable to iterate the vnode list %p - encountered an invalid vnode pointer %p \n",
10501 &mnt->mnt_vnodelist, vp);
10502 break;
10503 }
10504
10505 if (++nvnodes > SANE_VNODE_PRINT_LIMIT) {
10506 return;
10507 }
10508 type = __vtype(vp->v_type);
10509 nm = __vpath(vp, vname, sizeof(vname) - 1, 0);
10510 paniclog_append_noflush("%s %0d %0d %s\n",
10511 type, vp->v_usecount, vp->v_iocount, nm);
10512 }
10513 }
10514 }
10515
10516 #else /* !PANIC_PRINTS_VNODES */
10517 void
10518 panic_print_vnodes(void)
10519 {
10520 return;
10521 }
10522 #endif
10523
10524
10525 #ifdef JOE_DEBUG
10526 static void
10527 record_vp(vnode_t vp, int count)
10528 {
10529 struct uthread *ut;
10530
10531 #if CONFIG_TRIGGERS
10532 if (vp->v_resolve) {
10533 return;
10534 }
10535 #endif
10536 if ((vp->v_flag & VSYSTEM)) {
10537 return;
10538 }
10539
10540 ut = get_bsdthread_info(current_thread());
10541 ut->uu_iocount += count;
10542
10543 if (count == 1) {
10544 if (ut->uu_vpindex < 32) {
10545 OSBacktrace((void **)&ut->uu_pcs[ut->uu_vpindex][0], 10);
10546
10547 ut->uu_vps[ut->uu_vpindex] = vp;
10548 ut->uu_vpindex++;
10549 }
10550 }
10551 }
10552 #endif
10553
10554
10555 #if CONFIG_TRIGGERS
10556
10557 #define TRIG_DEBUG 0
10558
10559 #if TRIG_DEBUG
10560 #define TRIG_LOG(...) do { printf("%s: ", __FUNCTION__); printf(__VA_ARGS__); } while (0)
10561 #else
10562 #define TRIG_LOG(...)
10563 #endif
10564
10565 /*
10566 * Resolver result functions
10567 */
10568
10569 resolver_result_t
10570 vfs_resolver_result(uint32_t seq, enum resolver_status stat, int aux)
10571 {
10572 /*
10573 * |<--- 32 --->|<--- 28 --->|<- 4 ->|
10574 * sequence auxiliary status
10575 */
10576 return (((uint64_t)seq) << 32) |
10577 (((uint64_t)(aux & 0x0fffffff)) << 4) |
10578 (uint64_t)(stat & 0x0000000F);
10579 }
10580
10581 enum resolver_status
10582 vfs_resolver_status(resolver_result_t result)
10583 {
10584 /* the lower 4 bits are the status */
10585 return result & 0x0000000F;
10586 }
10587
10588 uint32_t
10589 vfs_resolver_sequence(resolver_result_t result)
10590 {
10591 /* the upper 32 bits are the sequence */
10592 return (uint32_t)(result >> 32);
10593 }
10594
10595 int
10596 vfs_resolver_auxiliary(resolver_result_t result)
10597 {
10598 /* 28 bits of auxiliary */
10599 return (int)(((uint32_t)(result & 0xFFFFFFF0)) >> 4);
10600 }
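#if 0
/*
 * Illustrative sketch only (an editorial addition, not in the original
 * source): packing and unpacking a resolver result per the bit layout
 * documented above.
 */
static void
example_resolver_result_roundtrip(void)
{
	resolver_result_t result;

	result = vfs_resolver_result(42, RESOLVER_RESOLVED, ENOENT);

	assert(vfs_resolver_sequence(result) == 42);
	assert(vfs_resolver_status(result) == RESOLVER_RESOLVED);
	assert(vfs_resolver_auxiliary(result) == ENOENT);
}
#endif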
10601
10602 /*
10603 * SPI
10604 * Call in for resolvers to update vnode trigger state
10605 */
10606 int
10607 vnode_trigger_update(vnode_t vp, resolver_result_t result)
10608 {
10609 vnode_resolve_t rp;
10610 uint32_t seq;
10611 enum resolver_status stat;
10612
10613 if (vp->v_resolve == NULL) {
10614 return EINVAL;
10615 }
10616
10617 stat = vfs_resolver_status(result);
10618 seq = vfs_resolver_sequence(result);
10619
10620 if ((stat != RESOLVER_RESOLVED) && (stat != RESOLVER_UNRESOLVED)) {
10621 return EINVAL;
10622 }
10623
10624 rp = vp->v_resolve;
10625 lck_mtx_lock(&rp->vr_lock);
10626
10627 if (seq > rp->vr_lastseq) {
10628 if (stat == RESOLVER_RESOLVED) {
10629 rp->vr_flags |= VNT_RESOLVED;
10630 } else {
10631 rp->vr_flags &= ~VNT_RESOLVED;
10632 }
10633
10634 rp->vr_lastseq = seq;
10635 }
10636
10637 lck_mtx_unlock(&rp->vr_lock);
10638
10639 return 0;
10640 }
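#if 0
/*
 * Illustrative sketch only (an editorial addition, not in the original
 * source): a hypothetical resolver reporting a completed resolution
 * through this SPI.
 */
static void
example_report_resolved(vnode_t vp, uint32_t next_seq)
{
	resolver_result_t result;

	/* only sequence numbers greater than vr_lastseq take effect */
	result = vfs_resolver_result(next_seq, RESOLVER_RESOLVED, 0);
	(void) vnode_trigger_update(vp, result);
}
#endif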
10641
10642 static int
10643 vnode_resolver_attach(vnode_t vp, vnode_resolve_t rp, boolean_t ref)
10644 {
10645 int error;
10646
10647 vnode_lock_spin(vp);
10648 if (vp->v_resolve != NULL) {
10649 vnode_unlock(vp);
10650 return EINVAL;
10651 } else {
10652 vp->v_resolve = rp;
10653 }
10654 vnode_unlock(vp);
10655
10656 if (ref) {
10657 error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE);
10658 if (error != 0) {
10659 panic("VNODE_REF_FORCE didn't help...");
10660 }
10661 }
10662
10663 return 0;
10664 }
10665
10666 /*
10667 * VFS internal interfaces for vnode triggers
10668 *
10669 * vnode must already have an io count on entry
10670 * v_resolve is stable when io count is non-zero
10671 */
10672 static int
10673 vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, boolean_t external)
10674 {
10675 vnode_resolve_t rp;
10676 int result;
10677 char byte;
10678
10679 #if 1
10680 /* minimum pointer test (debugging) */
10681 if (tinfo->vnt_data) {
10682 byte = *((char *)tinfo->vnt_data);
10683 }
10684 #endif
10685 rp = kheap_alloc(KHEAP_DEFAULT, sizeof(struct vnode_resolve), Z_WAITOK);
10686 if (rp == NULL) {
10687 return ENOMEM;
10688 }
10689
10690 lck_mtx_init(&rp->vr_lock, trigger_vnode_lck_grp, trigger_vnode_lck_attr);
10691
10692 rp->vr_resolve_func = tinfo->vnt_resolve_func;
10693 rp->vr_unresolve_func = tinfo->vnt_unresolve_func;
10694 rp->vr_rearm_func = tinfo->vnt_rearm_func;
10695 rp->vr_reclaim_func = tinfo->vnt_reclaim_func;
10696 rp->vr_data = tinfo->vnt_data;
10697 rp->vr_lastseq = 0;
10698 rp->vr_flags = tinfo->vnt_flags & VNT_VALID_MASK;
10699 if (external) {
10700 rp->vr_flags |= VNT_EXTERNAL;
10701 }
10702
10703 result = vnode_resolver_attach(vp, rp, external);
10704 if (result != 0) {
10705 goto out;
10706 }
10707
10708 if (mp) {
10709 OSAddAtomic(1, &mp->mnt_numtriggers);
10710 }
10711
10712 return result;
10713
10714 out:
10715 kheap_free(KHEAP_DEFAULT, rp, sizeof(struct vnode_resolve));
10716 return result;
10717 }
10718
10719 static void
10720 vnode_resolver_release(vnode_resolve_t rp)
10721 {
10722 /*
10723 * Give them a chance to free any private data
10724 */
10725 if (rp->vr_data && rp->vr_reclaim_func) {
10726 rp->vr_reclaim_func(NULLVP, rp->vr_data);
10727 }
10728
10729 lck_mtx_destroy(&rp->vr_lock, trigger_vnode_lck_grp);
10730 kheap_free(KHEAP_DEFAULT, rp, sizeof(struct vnode_resolve));
10731 }
10732
10733 /* Called after the vnode has been drained */
10734 static void
10735 vnode_resolver_detach(vnode_t vp)
10736 {
10737 vnode_resolve_t rp;
10738 mount_t mp;
10739
10740 mp = vnode_mount(vp);
10741
10742 vnode_lock(vp);
10743 rp = vp->v_resolve;
10744 vp->v_resolve = NULL;
10745 vnode_unlock(vp);
10746
10747 if ((rp->vr_flags & VNT_EXTERNAL) != 0) {
10748 vnode_rele_ext(vp, O_EVTONLY, 1);
10749 }
10750
10751 vnode_resolver_release(rp);
10752
10753 /* Keep count of active trigger vnodes per mount */
10754 OSAddAtomic(-1, &mp->mnt_numtriggers);
10755 }
10756
10757 __private_extern__
10758 void
10759 vnode_trigger_rearm(vnode_t vp, vfs_context_t ctx)
10760 {
10761 vnode_resolve_t rp;
10762 resolver_result_t result;
10763 enum resolver_status status;
10764 uint32_t seq;
10765
10766 if ((vp->v_resolve == NULL) ||
10767 (vp->v_resolve->vr_rearm_func == NULL) ||
10768 (vp->v_resolve->vr_flags & VNT_AUTO_REARM) == 0) {
10769 return;
10770 }
10771
10772 rp = vp->v_resolve;
10773 lck_mtx_lock(&rp->vr_lock);
10774
10775 /*
10776 * Check if VFS initiated this unmount. If so, we'll catch it after the unresolve completes.
10777 */
10778 if (rp->vr_flags & VNT_VFS_UNMOUNTED) {
10779 lck_mtx_unlock(&rp->vr_lock);
10780 return;
10781 }
10782
10783 /* Check if this vnode is already armed */
10784 if ((rp->vr_flags & VNT_RESOLVED) == 0) {
10785 lck_mtx_unlock(&rp->vr_lock);
10786 return;
10787 }
10788
10789 lck_mtx_unlock(&rp->vr_lock);
10790
10791 result = rp->vr_rearm_func(vp, 0, rp->vr_data, ctx);
10792 status = vfs_resolver_status(result);
10793 seq = vfs_resolver_sequence(result);
10794
10795 lck_mtx_lock(&rp->vr_lock);
10796 if (seq > rp->vr_lastseq) {
10797 if (status == RESOLVER_UNRESOLVED) {
10798 rp->vr_flags &= ~VNT_RESOLVED;
10799 }
10800 rp->vr_lastseq = seq;
10801 }
10802 lck_mtx_unlock(&rp->vr_lock);
10803 }
10804
10805 __private_extern__
10806 int
10807 vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx)
10808 {
10809 vnode_resolve_t rp;
10810 enum path_operation op;
10811 resolver_result_t result;
10812 enum resolver_status status;
10813 uint32_t seq;
10814
10815 /*
10816 * N.B. we cannot call vfs_context_can_resolve_triggers()
10817 * here because we really only want to suppress that in
10818 * the event the trigger will be resolved by something in
10819 * user-space. Any triggers that are resolved by the kernel
10820 * do not pose a threat of deadlock.
10821 */
10822
10823 /* Only trigger on topmost vnodes */
10824 if ((vp->v_resolve == NULL) ||
10825 (vp->v_resolve->vr_resolve_func == NULL) ||
10826 (vp->v_mountedhere != NULL)) {
10827 return 0;
10828 }
10829
10830 rp = vp->v_resolve;
10831 lck_mtx_lock(&rp->vr_lock);
10832
10833 /* Check if this vnode is already resolved */
10834 if (rp->vr_flags & VNT_RESOLVED) {
10835 lck_mtx_unlock(&rp->vr_lock);
10836 return 0;
10837 }
10838
10839 lck_mtx_unlock(&rp->vr_lock);
10840
10841 #if CONFIG_MACF
10842 if ((rp->vr_flags & VNT_KERN_RESOLVE) == 0) {
10843 /*
10844 * VNT_KERN_RESOLVE indicates this trigger has no parameters
10845 * at the discretion of the accessing process other than
10846 * the act of access. All other triggers must be checked.
10847 */
10848 int rv = mac_vnode_check_trigger_resolve(ctx, vp, &ndp->ni_cnd);
10849 if (rv != 0) {
10850 return rv;
10851 }
10852 }
10853 #endif
10854
10855 /*
10856 * XXX
10857 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
10858 * is there any way to know this???
10859 * there can also be other legitimate lookups in parallel
10860 *
10861 * XXX - should we call this on a separate thread with a timeout?
10862 *
10863 * XXX - should we use ISLASTCN to pick the op value??? Perhaps only leafs should
10864 * get the richer set and non-leafs should get generic OP_LOOKUP? TBD
10865 */
10866 op = (ndp->ni_op < OP_MAXOP) ? ndp->ni_op: OP_LOOKUP;
10867
10868 result = rp->vr_resolve_func(vp, &ndp->ni_cnd, op, 0, rp->vr_data, ctx);
10869 status = vfs_resolver_status(result);
10870 seq = vfs_resolver_sequence(result);
10871
10872 lck_mtx_lock(&rp->vr_lock);
10873 if (seq > rp->vr_lastseq) {
10874 if (status == RESOLVER_RESOLVED) {
10875 rp->vr_flags |= VNT_RESOLVED;
10876 }
10877 rp->vr_lastseq = seq;
10878 }
10879 lck_mtx_unlock(&rp->vr_lock);
10880
10881 /* On resolver errors, propagate the error back up */
10882 return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
10883 }
10884
10885 static int
10886 vnode_trigger_unresolve(vnode_t vp, int flags, vfs_context_t ctx)
10887 {
10888 vnode_resolve_t rp;
10889 resolver_result_t result;
10890 enum resolver_status status;
10891 uint32_t seq;
10892
10893 if ((vp->v_resolve == NULL) || (vp->v_resolve->vr_unresolve_func == NULL)) {
10894 return 0;
10895 }
10896
10897 rp = vp->v_resolve;
10898 lck_mtx_lock(&rp->vr_lock);
10899
10900 /* Check if this vnode is already resolved */
10901 if ((rp->vr_flags & VNT_RESOLVED) == 0) {
10902 printf("vnode_trigger_unresolve: not currently resolved\n");
10903 lck_mtx_unlock(&rp->vr_lock);
10904 return 0;
10905 }
10906
10907 rp->vr_flags |= VNT_VFS_UNMOUNTED;
10908
10909 lck_mtx_unlock(&rp->vr_lock);
10910
10911 /*
10912 * XXX
10913 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
10914 * there can also be other legitimate lookups in parallel
10915 *
10916 * XXX - should we call this on a separate thread with a timeout?
10917 */
10918
10919 result = rp->vr_unresolve_func(vp, flags, rp->vr_data, ctx);
10920 status = vfs_resolver_status(result);
10921 seq = vfs_resolver_sequence(result);
10922
10923 lck_mtx_lock(&rp->vr_lock);
10924 if (seq > rp->vr_lastseq) {
10925 if (status == RESOLVER_UNRESOLVED) {
10926 rp->vr_flags &= ~VNT_RESOLVED;
10927 }
10928 rp->vr_lastseq = seq;
10929 }
10930 rp->vr_flags &= ~VNT_VFS_UNMOUNTED;
10931 lck_mtx_unlock(&rp->vr_lock);
10932
10933 /* On resolver errors, propagate the error back up */
10934 return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
10935 }
10936
10937 static int
10938 triggerisdescendant(mount_t mp, mount_t rmp)
10939 {
10940 int match = FALSE;
10941
10942 /*
10943 * walk up vnode covered chain looking for a match
10944 */
10945 name_cache_lock_shared();
10946
10947 while (1) {
10948 vnode_t vp;
10949
10950 /* did we encounter "/" ? */
10951 if (mp->mnt_flag & MNT_ROOTFS) {
10952 break;
10953 }
10954
10955 vp = mp->mnt_vnodecovered;
10956 if (vp == NULLVP) {
10957 break;
10958 }
10959
10960 mp = vp->v_mount;
10961 if (mp == rmp) {
10962 match = TRUE;
10963 break;
10964 }
10965 }
10966
10967 name_cache_unlock();
10968
10969 return match;
10970 }
10971
10972 struct trigger_unmount_info {
10973 vfs_context_t ctx;
10974 mount_t top_mp;
10975 vnode_t trigger_vp;
10976 mount_t trigger_mp;
10977 uint32_t trigger_vid;
10978 int flags;
10979 };
10980
10981 static int
10982 trigger_unmount_callback(mount_t mp, void * arg)
10983 {
10984 struct trigger_unmount_info * infop = (struct trigger_unmount_info *)arg;
10985 boolean_t mountedtrigger = FALSE;
10986
10987 /*
10988 * When we encounter the top level mount we're done
10989 */
10990 if (mp == infop->top_mp) {
10991 return VFS_RETURNED_DONE;
10992 }
10993
10994 if ((mp->mnt_vnodecovered == NULL) ||
10995 (vnode_getwithref(mp->mnt_vnodecovered) != 0)) {
10996 return VFS_RETURNED;
10997 }
10998
10999 if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
11000 (mp->mnt_vnodecovered->v_resolve != NULL) &&
11001 (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_RESOLVED)) {
11002 mountedtrigger = TRUE;
11003 }
11004 vnode_put(mp->mnt_vnodecovered);
11005
11006 /*
11007 * When we encounter a mounted trigger, check if it's under the top level mount
11008 */
11009 if (!mountedtrigger || !triggerisdescendant(mp, infop->top_mp)) {
11010 return VFS_RETURNED;
11011 }
11012
11013 /*
11014 * Process any pending nested mount (now that it's not referenced)
11015 */
11016 if ((infop->trigger_vp != NULLVP) &&
11017 (vnode_getwithvid(infop->trigger_vp, infop->trigger_vid) == 0)) {
11018 vnode_t vp = infop->trigger_vp;
11019 int error;
11020
11021 infop->trigger_vp = NULLVP;
11022
11023 if (mp == vp->v_mountedhere) {
11024 vnode_put(vp);
11025 printf("trigger_unmount_callback: unexpected match '%s'\n",
11026 mp->mnt_vfsstat.f_mntonname);
11027 return VFS_RETURNED;
11028 }
11029 if (infop->trigger_mp != vp->v_mountedhere) {
11030 vnode_put(vp);
11031 printf("trigger_unmount_callback: trigger mnt changed! (%p != %p)\n",
11032 infop->trigger_mp, vp->v_mountedhere);
11033 goto savenext;
11034 }
11035
11036 error = vnode_trigger_unresolve(vp, infop->flags, infop->ctx);
11037 vnode_put(vp);
11038 if (error) {
11039 printf("unresolving: '%s', err %d\n",
11040 vp->v_mountedhere ? vp->v_mountedhere->mnt_vfsstat.f_mntonname :
11041 "???", error);
11042 return VFS_RETURNED_DONE; /* stop iteration on errors */
11043 }
11044 }
11045 savenext:
11046 /*
11047 * We can't call resolver here since we hold a mount iter
11048 * ref on mp so save its covered vp for later processing
11049 */
11050 infop->trigger_vp = mp->mnt_vnodecovered;
11051 if ((infop->trigger_vp != NULLVP) &&
11052 (vnode_getwithref(infop->trigger_vp) == 0)) {
11053 if (infop->trigger_vp->v_mountedhere == mp) {
11054 infop->trigger_vid = infop->trigger_vp->v_id;
11055 infop->trigger_mp = mp;
11056 }
11057 vnode_put(infop->trigger_vp);
11058 }
11059
11060 return VFS_RETURNED;
11061 }
11062
11063 /*
11064 * Attempt to unmount any trigger mounts nested underneath a mount.
11065 * This is a best effort attempt and no retries are performed here.
11066 *
11067 * Note: mp->mnt_rwlock is held exclusively on entry (so be careful)
11068 */
11069 __private_extern__
11070 void
11071 vfs_nested_trigger_unmounts(mount_t mp, int flags, vfs_context_t ctx)
11072 {
11073 struct trigger_unmount_info info;
11074
11075 /* Must have trigger vnodes */
11076 if (mp->mnt_numtriggers == 0) {
11077 return;
11078 }
11079 /* Avoid recursive requests (by checking covered vnode) */
11080 if ((mp->mnt_vnodecovered != NULL) &&
11081 (vnode_getwithref(mp->mnt_vnodecovered) == 0)) {
11082 boolean_t recursive = FALSE;
11083
11084 if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
11085 (mp->mnt_vnodecovered->v_resolve != NULL) &&
11086 (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_VFS_UNMOUNTED)) {
11087 recursive = TRUE;
11088 }
11089 vnode_put(mp->mnt_vnodecovered);
11090 if (recursive) {
11091 return;
11092 }
11093 }
11094
11095 /*
11096 * Attempt to unmount any nested trigger mounts (best effort)
11097 */
11098 info.ctx = ctx;
11099 info.top_mp = mp;
11100 info.trigger_vp = NULLVP;
11101 info.trigger_vid = 0;
11102 info.trigger_mp = NULL;
11103 info.flags = flags;
11104
11105 (void) vfs_iterate(VFS_ITERATE_TAIL_FIRST, trigger_unmount_callback, &info);
11106
11107 /*
11108 * Process remaining nested mount (now that it's not referenced)
11109 */
11110 if ((info.trigger_vp != NULLVP) &&
11111 (vnode_getwithvid(info.trigger_vp, info.trigger_vid) == 0)) {
11112 vnode_t vp = info.trigger_vp;
11113
11114 if (info.trigger_mp == vp->v_mountedhere) {
11115 (void) vnode_trigger_unresolve(vp, flags, ctx);
11116 }
11117 vnode_put(vp);
11118 }
11119 }
11120
11121 int
11122 vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, vfs_context_t ctx)
11123 {
11124 struct nameidata *ndp;
11125 int res;
11126 vnode_t rvp, vp;
11127 struct vnode_trigger_param vtp;
11128
11129 /*
11130 * Must be called for a trigger callback, with the rwlock held
11131 */
11132 lck_rw_assert(&mp->mnt_rwlock, LCK_RW_ASSERT_HELD);
11133
11134 TRIG_LOG("Adding trigger at %s\n", relpath);
11135 TRIG_LOG("Trying VFS_ROOT\n");
11136
11137 ndp = kheap_alloc(KHEAP_TEMP, sizeof(struct nameidata), Z_WAITOK);
11138 if (!ndp) {
11139 return ENOMEM;
11140 }
11141
11142 /*
11143 * We do a lookup starting at the root of the mountpoint, unwilling
11144 * to cross into other mountpoints.
11145 */
11146 res = VFS_ROOT(mp, &rvp, ctx);
11147 if (res != 0) {
11148 goto out;
11149 }
11150
11151 TRIG_LOG("Trying namei\n");
11152
11153 NDINIT(ndp, LOOKUP, OP_LOOKUP, USEDVP | NOCROSSMOUNT | FOLLOW, UIO_SYSSPACE,
11154 CAST_USER_ADDR_T(relpath), ctx);
11155 ndp->ni_dvp = rvp;
11156 res = namei(ndp);
11157 if (res != 0) {
11158 vnode_put(rvp);
11159 goto out;
11160 }
11161
11162 vp = ndp->ni_vp;
11163 nameidone(ndp);
11164 vnode_put(rvp);
11165
11166 TRIG_LOG("Trying vnode_resolver_create()\n");
11167
11168 /*
11169 * Set up blob. vnode_create() takes a larger structure
11170 * with creation info, and we needed something different
11171 * for this case. One needs to win, or we need to munge both;
11172 * vnode_create() wins.
11173 */
11174 bzero(&vtp, sizeof(vtp));
11175 vtp.vnt_resolve_func = vtip->vti_resolve_func;
11176 vtp.vnt_unresolve_func = vtip->vti_unresolve_func;
11177 vtp.vnt_rearm_func = vtip->vti_rearm_func;
11178 vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
11180 vtp.vnt_data = vtip->vti_data;
11181 vtp.vnt_flags = vtip->vti_flags;
11182
11183 res = vnode_resolver_create(mp, vp, &vtp, TRUE);
11184 vnode_put(vp);
11185 out:
11186 kheap_free(KHEAP_TEMP, ndp, sizeof(struct nameidata));
11187 TRIG_LOG("Returning %d\n", res);
11188 return res;
11189 }
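#if 0
/*
 * Illustrative sketch only (an editorial addition, not in the original
 * source): a hypothetical filesystem registering a trigger at a path
 * relative to its root; the example_* callbacks are placeholders.
 */
static int
example_add_trigger(mount_t mp, vfs_context_t ctx)
{
	struct vnode_trigger_info vti;

	bzero(&vti, sizeof(vti));
	vti.vti_resolve_func = example_resolve;     /* hypothetical */
	vti.vti_unresolve_func = example_unresolve; /* hypothetical */
	vti.vti_reclaim_func = example_reclaim;     /* hypothetical */
	vti.vti_data = NULL;
	vti.vti_flags = 0;

	/* mp->mnt_rwlock must be held, per the assert in vfs_addtrigger() */
	return vfs_addtrigger(mp, "net", &vti, ctx);
}
#endif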
11190
11191 #endif /* CONFIG_TRIGGERS */
11192
11193 vm_offset_t
11194 kdebug_vnode(vnode_t vp)
11195 {
11196 return VM_KERNEL_ADDRPERM(vp);
11197 }
11198
11199 static int flush_cache_on_write = 0;
11200 SYSCTL_INT(_kern, OID_AUTO, flush_cache_on_write,
11201 CTLFLAG_RW | CTLFLAG_LOCKED, &flush_cache_on_write, 0,
11202 "always flush the drive cache on writes to uncached files");
11203
11204 int
11205 vnode_should_flush_after_write(vnode_t vp, int ioflag)
11206 {
11207 return flush_cache_on_write
11208 && (ISSET(ioflag, IO_NOCACHE) || vnode_isnocache(vp));
11209 }
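/*
 * Illustrative usage (an editorial addition, not in the original source):
 * the sysctl above can be toggled from user space, e.g.
 *
 *	sysctl -w kern.flush_cache_on_write=1
 *
 * after which this predicate reports that writes to uncached files should
 * be followed by a drive cache flush.
 */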
11210
11211 /*
11212 * sysctl for use by disk I/O tracing tools to get the list of existing
11213 * vnodes' paths
11214 */
11215
11216 #define NPATH_WORDS (MAXPATHLEN / sizeof(unsigned long))
11217 struct vnode_trace_paths_context {
11218 uint64_t count;
11219 /*
11220 * Must be a multiple of 4, then -1, for tracing!
11221 */
11222 unsigned long path[NPATH_WORDS + (4 - (NPATH_WORDS % 4)) - 1];
11223 };
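/*
 * Worked example (an editorial addition, not in the original source),
 * assuming MAXPATHLEN is 1024 and sizeof(unsigned long) is 8: NPATH_WORDS
 * is 128, already a multiple of 4, so the array holds 128 + 4 - 1 = 131
 * words and the tracer can consume the path four words at a time.
 */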
11224
11225 static int
11226 vnode_trace_path_callback(struct vnode *vp, void *vctx)
11227 {
11228 struct vnode_trace_paths_context *ctx = vctx;
11229 size_t path_len = sizeof(ctx->path);
11230
11231 int getpath_len = (int)path_len;
11232 if (vn_getpath(vp, (char *)ctx->path, &getpath_len) == 0) {
11233 /* vn_getpath() NUL-terminates, and len includes the NUL. */
11234 assert(getpath_len >= 0);
11235 path_len = (size_t)getpath_len;
11236
11237 assert(path_len <= sizeof(ctx->path));
11238 kdebug_vfs_lookup(ctx->path, (int)path_len, vp,
11239 KDBG_VFS_LOOKUP_FLAG_LOOKUP | KDBG_VFS_LOOKUP_FLAG_NOPROCFILT);
11240
11241 if (++(ctx->count) == 1000) {
11242 thread_yield_to_preemption();
11243 ctx->count = 0;
11244 }
11245 }
11246
11247 return VNODE_RETURNED;
11248 }
11249
11250 static int
11251 vfs_trace_paths_callback(mount_t mp, void *arg)
11252 {
11253 if (mp->mnt_flag & MNT_LOCAL) {
11254 vnode_iterate(mp, VNODE_ITERATE_ALL, vnode_trace_path_callback, arg);
11255 }
11256
11257 return VFS_RETURNED;
11258 }
11259
11260 static int sysctl_vfs_trace_paths SYSCTL_HANDLER_ARGS {
11261 struct vnode_trace_paths_context ctx;
11262
11263 (void)oidp;
11264 (void)arg1;
11265 (void)arg2;
11266 (void)req;
11267
11268 if (!kauth_cred_issuser(kauth_cred_get())) {
11269 return EPERM;
11270 }
11271
11272 if (!kdebug_enable || !kdebug_debugid_enabled(VFS_LOOKUP)) {
11273 return EINVAL;
11274 }
11275
11276 bzero(&ctx, sizeof(struct vnode_trace_paths_context));
11277
11278 vfs_iterate(0, vfs_trace_paths_callback, &ctx);
11279
11280 return 0;
11281 }
11282
11283 SYSCTL_PROC(_vfs_generic, OID_AUTO, trace_paths, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, NULL, 0, &sysctl_vfs_trace_paths, "-", "trace_paths");