/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

/*
 * External virtual filesystem routines
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/time.h>
#include <sys/lock.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf_internal.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/uio_internal.h>
#include <sys/uio.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/ubc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/event.h>
#include <sys/kdebug.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/kern_memorystatus.h>
#include <miscfs/fifofs/fifo.h>

#include <string.h>
#include <machine/spl.h>


#include <kern/assert.h>

#include <miscfs/specfs/specdev.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>

#include <kern/kalloc.h>	/* kalloc()/kfree() */
#include <kern/clock.h>		/* delay_for_interval() */
#include <libkern/OSAtomic.h>	/* OSAddAtomic() */


#include <vm/vm_protos.h>	/* vnode_pager_vrele() */

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

extern lck_grp_t *vnode_lck_grp;
extern lck_attr_t *vnode_lck_attr;


extern lck_mtx_t * mnt_list_mtx_lock;

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/* XXX next prototype should be from <nfs/nfs.h> */
extern int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int);

/* XXX next prototype should be from <libsa/stdlib.h> but conflicts with libkern */
__private_extern__ void qsort(
	void * array,
	size_t nmembers,
	size_t member_size,
	int (*)(const void *, const void *));

extern kern_return_t adjust_vm_object_cache(vm_size_t oval, vm_size_t nval);
__private_extern__ void vntblinit(void);
__private_extern__ kern_return_t reset_vmobjectcache(unsigned int val1,
		unsigned int val2);
__private_extern__ int unlink1(vfs_context_t, struct nameidata *, int);

extern int system_inshutdown;

static void vnode_list_add(vnode_t);
static void vnode_list_remove(vnode_t);
static void vnode_list_remove_locked(vnode_t);

static errno_t vnode_drain(vnode_t);
static void vgone(vnode_t, int flags);
static void vclean(vnode_t vp, int flag);
static void vnode_reclaim_internal(vnode_t, int, int, int);

static void vnode_dropiocount(vnode_t);
static errno_t vnode_getiocount(vnode_t vp, unsigned int vid, int vflags);

static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
static int vnode_reload(vnode_t);
static int vnode_isinuse_locked(vnode_t, int, int);

static void insmntque(vnode_t vp, mount_t mp);
static int mount_getvfscnt(void);
static int mount_fillfsids(fsid_t *, int );
static void vnode_iterate_setup(mount_t);
int vnode_umount_preflight(mount_t, vnode_t, int);
static int vnode_iterate_prepare(mount_t);
static int vnode_iterate_reloadq(mount_t);
static void vnode_iterate_clear(mount_t);
static mount_t vfs_getvfs_locked(fsid_t *);

errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);

#ifdef JOE_DEBUG
static void record_vp(vnode_t vp, int count);
#endif

TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
TAILQ_HEAD(deadlst, vnode) vnode_dead_list;	/* vnode dead list */

TAILQ_HEAD(ragelst, vnode) vnode_rage_list;	/* vnode rapid age list */
struct timeval rage_tv;
int rage_limit = 0;
int ragevnodes = 0;

#define RAGE_LIMIT_MIN	100
#define RAGE_TIME_LIMIT	5

struct mntlist mountlist;			/* mounted filesystem list */
static int nummounts = 0;

#if DIAGNOSTIC
#define VLISTCHECK(fun, vp, list)	\
	if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
		panic("%s: %s vnode not on %slist", (fun), (list), (list));
#else
#define VLISTCHECK(fun, vp, list)
#endif /* DIAGNOSTIC */

#define VLISTNONE(vp)	\
	do {	\
		(vp)->v_freelist.tqe_next = (struct vnode *)0;	\
		(vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb;	\
	} while(0)

#define VONLIST(vp)	\
	((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)

/* remove a vnode from free vnode list */
#define VREMFREE(fun, vp)	\
	do {	\
		VLISTCHECK((fun), (vp), "free");	\
		TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist);	\
		VLISTNONE((vp));	\
		freevnodes--;	\
	} while(0)



/* remove a vnode from dead vnode list */
#define VREMDEAD(fun, vp)	\
	do {	\
		VLISTCHECK((fun), (vp), "dead");	\
		TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist);	\
		VLISTNONE((vp));	\
		vp->v_listflag &= ~VLIST_DEAD;	\
		deadvnodes--;	\
	} while(0)


/* remove a vnode from rage vnode list */
#define VREMRAGE(fun, vp)	\
	do {	\
		if ( !(vp->v_listflag & VLIST_RAGE))	\
			panic("VREMRAGE: vp not on rage list");	\
		VLISTCHECK((fun), (vp), "rage");	\
		TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist);	\
		VLISTNONE((vp));	\
		vp->v_listflag &= ~VLIST_RAGE;	\
		ragevnodes--;	\
	} while(0)


/*
 * vnodetarget hasn't been used in a long time, but
 * it was exported for some reason... I'm leaving it in
 * place for now... it should be deprecated out of the
 * exports and removed eventually.
 */
u_int32_t vnodetarget;		/* target for vnreclaim() */
#define VNODE_FREE_TARGET	20	/* Default value for vnodetarget */

/*
 * We need quite a few vnodes on the free list to sustain the
 * rapid stat() the compilation process does, and still benefit from the name
 * cache. Having too few vnodes on the free list causes serious disk
 * thrashing as we cycle through them.
 */
#define VNODE_FREE_MIN		CONFIG_VNODE_FREE_MIN	/* freelist should have at least this many */

/*
 * Initialize the vnode management data structures.
 */
__private_extern__ void
vntblinit(void)
{
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_rage_list);
	TAILQ_INIT(&vnode_dead_list);
	TAILQ_INIT(&mountlist);

	if (!vnodetarget)
		vnodetarget = VNODE_FREE_TARGET;

	microuptime(&rage_tv);
	rage_limit = desiredvnodes / 100;

	if (rage_limit < RAGE_LIMIT_MIN)
		rage_limit = RAGE_LIMIT_MIN;

	/*
	 * Scale the vm_object_cache to accommodate the vnodes
	 * we want to cache
	 */
	(void) adjust_vm_object_cache(0, desiredvnodes - VNODE_FREE_MIN);
}

/* Reset the VM Object Cache with the values passed in */
__private_extern__ kern_return_t
reset_vmobjectcache(unsigned int val1, unsigned int val2)
{
	vm_size_t oval = val1 - VNODE_FREE_MIN;
	vm_size_t nval;

	if (val1 == val2) {
		return KERN_SUCCESS;
	}

	if (val2 < VNODE_FREE_MIN)
		nval = 0;
	else
		nval = val2 - VNODE_FREE_MIN;

	return (adjust_vm_object_cache(oval, nval));
}


/* the timeout is in 10 msecs */
int
vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg) {
	int error = 0;
	struct timespec ts;

	KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);

	if (vp->v_numoutput > output_target) {

		slpflag |= PDROP;

		vnode_lock_spin(vp);

		while ((vp->v_numoutput > output_target) && error == 0) {
			if (output_target)
				vp->v_flag |= VTHROTTLED;
			else
				vp->v_flag |= VBWAIT;

			ts.tv_sec = (slptimeout / 100);
			/* slptimeout is in 10 msec ticks; convert the sub-second remainder */
			ts.tv_nsec = (slptimeout % 100) * 10 * NSEC_PER_USEC * 1000;
			error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);

			vnode_lock_spin(vp);
		}
		vnode_unlock(vp);
	}
	KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);

	return error;
}
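
/*
 * Example (illustrative sketch, compiled out): a caller that wants to
 * throttle itself until this vnode's in-flight writes drain below some
 * threshold can use vnode_waitforwrites() directly.  The threshold and
 * timeout chosen here are arbitrary assumptions, not values used by xnu.
 */
#if 0
static int
example_throttle_writer(vnode_t vp)
{
	/* wait for fewer than 16 pending writes; 100 ticks == 1 second */
	return (vnode_waitforwrites(vp, 16, 0, 100, "example_throttle_writer"));
}
#endif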


void
vnode_startwrite(vnode_t vp) {

	OSAddAtomic(1, &vp->v_numoutput);
}


void
vnode_writedone(vnode_t vp)
{
	if (vp) {
		OSAddAtomic(-1, &vp->v_numoutput);

		if (vp->v_numoutput <= 1) {
			int need_wakeup = 0;

			vnode_lock_spin(vp);

			if (vp->v_numoutput < 0)
				panic("vnode_writedone: numoutput < 0");

			if ((vp->v_flag & VTHROTTLED) && (vp->v_numoutput <= 1)) {
				vp->v_flag &= ~VTHROTTLED;
				need_wakeup = 1;
			}
			if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
				vp->v_flag &= ~VBWAIT;
				need_wakeup = 1;
			}
			vnode_unlock(vp);

			if (need_wakeup)
				wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}
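
/*
 * Example (illustrative sketch, compiled out): vnode_startwrite() and
 * vnode_writedone() must bracket each in-flight write so that
 * v_numoutput stays balanced for vnode_waitforwrites().  The submit and
 * completion routines named here are hypothetical stand-ins.
 */
#if 0
static void
example_async_write(vnode_t vp, buf_t bp)
{
	vnode_startwrite(vp);	/* account for the write before issuing it */
	example_submit_io(bp);	/* hypothetical async submission */
}

static void
example_io_complete(vnode_t vp)
{
	vnode_writedone(vp);	/* drops v_numoutput and wakes any waiters */
}
#endif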



int
vnode_hasdirtyblks(vnode_t vp)
{
	struct cl_writebehind *wbp;

	/*
	 * Not taking the buf_mtxp as there is little
	 * point doing it. Even if the lock is taken the
	 * state can change right after that. If there
	 * needs to be synchronization, it must be driven
	 * by the caller
	 */
	if (vp->v_dirtyblkhd.lh_first)
		return (1);

	if (!UBCINFOEXISTS(vp))
		return (0);

	wbp = vp->v_ubcinfo->cl_wbehind;

	if (wbp && (wbp->cl_number || wbp->cl_scmap))
		return (1);

	return (0);
}

int
vnode_hascleanblks(vnode_t vp)
{
	/*
	 * Not taking the buf_mtxp as there is little
	 * point doing it. Even if the lock is taken the
	 * state can change right after that. If there
	 * needs to be synchronization, it must be driven
	 * by the caller
	 */
	if (vp->v_cleanblkhd.lh_first)
		return (1);
	return (0);
}
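
/*
 * Example (illustrative sketch, compiled out): since the two checks
 * above are unsynchronized hints, a typical use is to skip needless
 * work rather than to prove cleanliness, e.g. avoiding a VNOP_FSYNC
 * when nothing appears dirty.
 */
#if 0
static int
example_fsync_if_dirty(vnode_t vp, vfs_context_t ctx)
{
	if (!vnode_hasdirtyblks(vp))
		return (0);	/* probably clean; state may change at any time */

	return (VNOP_FSYNC(vp, MNT_WAIT, ctx));
}
#endif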

void
vnode_iterate_setup(mount_t mp)
{
	while (mp->mnt_lflag & MNT_LITER) {
		mp->mnt_lflag |= MNT_LITERWAIT;
		msleep((caddr_t)mp, &mp->mnt_mlock, PVFS, "vnode_iterate_setup", NULL);
	}

	mp->mnt_lflag |= MNT_LITER;

}

int
vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
{
	vnode_t vp;

	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		/* disable preflight only for udf, a hack to be removed after 4073176 is fixed */
		if (vp->v_tag == VT_UDF)
			return 0;
		if (vp->v_type == VDIR)
			continue;
		if (vp == skipvp)
			continue;
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
		    (vp->v_flag & VNOFLUSH)))
			continue;
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP))
			continue;
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG))
			continue;
		/* Look for busy vnode */
		if (((vp->v_usecount != 0) &&
		    ((vp->v_usecount - vp->v_kusecount) != 0)))
			return (1);
	}

	return (0);
}

/*
 * This routine prepares iteration by moving all the vnodes to the worker queue;
 * called with the mount lock held.
 */
int
vnode_iterate_prepare(mount_t mp)
{
	vnode_t vp;

	if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
		/* nothing to do */
		return (0);
	}

	vp = TAILQ_FIRST(&mp->mnt_vnodelist);
	vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
	mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
	mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;

	TAILQ_INIT(&mp->mnt_vnodelist);
	if (mp->mnt_newvnodes.tqh_first != NULL)
		panic("vnode_iterate_prepare: newvnode when entering vnode");
	TAILQ_INIT(&mp->mnt_newvnodes);

	return (1);
}


/* called with mount lock held */
int
vnode_iterate_reloadq(mount_t mp)
{
	int moved = 0;

	/* add the remaining entries in workerq to the end of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		struct vnode * mvp;
		mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);

		/* Join the worker queue entries to the mount vnode list */
		if (mvp)
			mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
		else
			mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
		mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
		mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
		TAILQ_INIT(&mp->mnt_workerqueue);
	}

	/* add the newvnodes to the head of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
		struct vnode * nlvp;
		nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);

		mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
		nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
		if (mp->mnt_vnodelist.tqh_first)
			mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
		else
			mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
		mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
		TAILQ_INIT(&mp->mnt_newvnodes);
		moved = 1;
	}

	return (moved);
}


void
vnode_iterate_clear(mount_t mp)
{
	mp->mnt_lflag &= ~MNT_LITER;
	if (mp->mnt_lflag & MNT_LITERWAIT) {
		mp->mnt_lflag &= ~MNT_LITERWAIT;
		wakeup(mp);
	}
}


int
vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
	      void *arg)
{
	struct vnode *vp;
	int vid, retval;
	int ret = 0;

	mount_lock(mp);

	vnode_iterate_setup(mp);

	/* if it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		return (ret);
	}

	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
		vid = vp->v_id;
		if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
			continue;
		}
		mount_unlock(mp);

		if ( vget_internal(vp, vid, (flags | VNODE_NODEAD | VNODE_WITHID | VNODE_NOSUSPEND))) {
			mount_lock(mp);
			continue;
		}
		if (flags & VNODE_RELOAD) {
			/*
			 * we're reloading the filesystem
			 * cast out any inactive vnodes...
			 */
			if (vnode_reload(vp)) {
				/* vnode will be recycled on the refcount drop */
				vnode_put(vp);
				mount_lock(mp);
				continue;
			}
		}

		retval = callout(vp, arg);

		switch (retval) {
		case VNODE_RETURNED:
		case VNODE_RETURNED_DONE:
			vnode_put(vp);
			if (retval == VNODE_RETURNED_DONE) {
				mount_lock(mp);
				ret = 0;
				goto out;
			}
			break;

		case VNODE_CLAIMED_DONE:
			mount_lock(mp);
			ret = 0;
			goto out;
		case VNODE_CLAIMED:
		default:
			break;
		}
		mount_lock(mp);
	}

out:
	(void)vnode_iterate_reloadq(mp);
	vnode_iterate_clear(mp);
	mount_unlock(mp);
	return (ret);
}
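
/*
 * Example (illustrative sketch, compiled out): a vnode_iterate() callout
 * sees each vnode with an iocount held.  Returning VNODE_RETURNED hands
 * the iocount back to vnode_iterate; VNODE_CLAIMED means the callout
 * took responsibility for it.  This regular-file counter is hypothetical.
 */
#if 0
static int
example_count_regular(struct vnode *vp, void *arg)
{
	int *countp = (int *)arg;

	if (vp->v_type == VREG)
		(*countp)++;

	return (VNODE_RETURNED);	/* vnode_iterate drops the iocount */
}

static int
example_count_mount(mount_t mp)
{
	int count = 0;

	(void) vnode_iterate(mp, 0, example_count_regular, &count);
	return (count);
}
#endif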

void
mount_lock_renames(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_renamelock);
}

void
mount_unlock_renames(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_renamelock);
}

void
mount_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_mlock);
}

void
mount_lock_spin(mount_t mp)
{
	lck_mtx_lock_spin(&mp->mnt_mlock);
}

void
mount_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_mlock);
}


void
mount_ref(mount_t mp, int locked)
{
	if ( !locked)
		mount_lock_spin(mp);

	mp->mnt_count++;

	if ( !locked)
		mount_unlock(mp);
}


void
mount_drop(mount_t mp, int locked)
{
	if ( !locked)
		mount_lock_spin(mp);

	mp->mnt_count--;

	if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN))
		wakeup(&mp->mnt_lflag);

	if ( !locked)
		mount_unlock(mp);
}


int
mount_iterref(mount_t mp, int locked)
{
	int retval = 0;

	if (!locked)
		mount_list_lock();
	if (mp->mnt_iterref < 0) {
		retval = 1;
	} else {
		mp->mnt_iterref++;
	}
	if (!locked)
		mount_list_unlock();
	return (retval);
}

int
mount_isdrained(mount_t mp, int locked)
{
	int retval;

	if (!locked)
		mount_list_lock();
	if (mp->mnt_iterref < 0)
		retval = 1;
	else
		retval = 0;
	if (!locked)
		mount_list_unlock();
	return (retval);
}

void
mount_iterdrop(mount_t mp)
{
	mount_list_lock();
	mp->mnt_iterref--;
	wakeup(&mp->mnt_iterref);
	mount_list_unlock();
}

void
mount_iterdrain(mount_t mp)
{
	mount_list_lock();
	while (mp->mnt_iterref)
		msleep((caddr_t)&mp->mnt_iterref, mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL);
	/* mount iterations drained */
	mp->mnt_iterref = -1;
	mount_list_unlock();
}
void
mount_iterreset(mount_t mp)
{
	mount_list_lock();
	if (mp->mnt_iterref == -1)
		mp->mnt_iterref = 0;
	mount_list_unlock();
}
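
/*
 * Example (illustrative sketch, compiled out): mount_iterref() and
 * mount_iterdrop() bracket a walk over a mount so that mount_iterdrain()
 * can wait out, and then fence off, iterators during unmount.  A nonzero
 * return from mount_iterref() means the mount is already drained.
 */
#if 0
static int
example_inspect_mount(mount_t mp)
{
	if (mount_iterref(mp, 0))
		return (ENOENT);	/* being drained/unmounted */

	/* ... it is now safe to examine mp ... */

	mount_iterdrop(mp);
	return (0);
}
#endif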

/* always called with mount lock held */
int
mount_refdrain(mount_t mp)
{
	if (mp->mnt_lflag & MNT_LDRAIN)
		panic("already in drain");
	mp->mnt_lflag |= MNT_LDRAIN;

	while (mp->mnt_count)
		msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL);

	if (mp->mnt_vnodelist.tqh_first != NULL)
		panic("mount_refdrain: dangling vnode");

	mp->mnt_lflag &= ~MNT_LDRAIN;

	return (0);
}


/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 */
int
vfs_busy(mount_t mp, int flags)
{

restart:
	if (mp->mnt_lflag & MNT_LDEAD)
		return (ENOENT);

	if (mp->mnt_lflag & MNT_LUNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);

		mount_lock(mp);

		if (mp->mnt_lflag & MNT_LDEAD) {
			mount_unlock(mp);
			return (ENOENT);
		}
		if (mp->mnt_lflag & MNT_LUNMOUNT) {
			mp->mnt_lflag |= MNT_LWAIT;
			/*
			 * Since all busy locks are shared except the exclusive
			 * lock granted when unmounting, the only place that a
			 * wakeup needs to be done is at the release of the
			 * exclusive lock at the end of dounmount.
			 */
			msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
			return (ENOENT);
		}
		mount_unlock(mp);
	}

	lck_rw_lock_shared(&mp->mnt_rwlock);

	/*
	 * until we are granted the rwlock, it's possible for the mount point to
	 * change state, so reevaluate before granting the vfs_busy
	 */
	if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
		lck_rw_done(&mp->mnt_rwlock);
		goto restart;
	}
	return (0);
}

/*
 * Free a busy filesystem.
 */

void
vfs_unbusy(mount_t mp)
{
	lck_rw_done(&mp->mnt_rwlock);
}
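
/*
 * Example (illustrative sketch, compiled out): code that must not race
 * an unmount takes a shared busy reference around its work.  With
 * LK_NOWAIT, vfs_busy() fails immediately instead of sleeping on an
 * in-progress unmount.
 */
#if 0
static int
example_with_busy_mount(mount_t mp)
{
	int error;

	if ((error = vfs_busy(mp, LK_NOWAIT)))
		return (error);		/* mount is dead or unmounting */

	/* ... operate on the mount; unmount is held off ... */

	vfs_unbusy(mp);
	return (0);
}
#endif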



static void
vfs_rootmountfailed(mount_t mp) {

	mount_list_lock();
	mp->mnt_vtable->vfc_refcount--;
	mount_list_unlock();

	vfs_unbusy(mp);

	mount_lock_destroy(mp);

#if CONFIG_MACF
	mac_mount_label_destroy(mp);
#endif

	FREE_ZONE(mp, sizeof(struct mount), M_MOUNT);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
static mount_t
vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname)
{
	mount_t mp;

	mp = _MALLOC_ZONE(sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, sizeof(struct mount));

	/* Initialize the default IO constraints */
	mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
	mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
	mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
	mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
	mp->mnt_devblocksize = DEV_BSIZE;
	mp->mnt_alignmentmask = PAGE_MASK;
	mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
	mp->mnt_ioscale = 1;
	mp->mnt_ioflags = 0;
	mp->mnt_realrootvp = NULLVP;
	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;

	mount_lock_init(mp);
	(void)vfs_busy(mp, LK_NOWAIT);

	TAILQ_INIT(&mp->mnt_vnodelist);
	TAILQ_INIT(&mp->mnt_workerqueue);
	TAILQ_INIT(&mp->mnt_newvnodes);

	mp->mnt_vtable = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS;
	mp->mnt_vnodecovered = NULLVP;
	//mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;

	mount_list_lock();
	vfsp->vfc_refcount++;
	mount_list_unlock();

	strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
	mp->mnt_vfsstat.f_mntonname[0] = '/';
	/* XXX const poisoning layering violation */
	(void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);

#if CONFIG_MACF
	mac_mount_label_init(mp);
	mac_mount_label_associate(vfs_context_kernel(), mp);
#endif
	return (mp);
}

errno_t
vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp)
{
	struct vfstable *vfsp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strncmp(vfsp->vfc_name, fstypename,
			     sizeof(vfsp->vfc_name)))
			break;
	if (vfsp == NULL)
		return (ENODEV);

	*mpp = vfs_rootmountalloc_internal(vfsp, devname);

	if (*mpp)
		return (0);

	return (ENOMEM);
}


/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
extern int (*mountroot)(void);

int
vfs_mountroot(void)
{
#if CONFIG_MACF
	struct vnode *vp;
#endif
	struct vfstable *vfsp;
	vfs_context_t ctx = vfs_context_kernel();
	struct vfs_attr vfsattr;
	int error;
	mount_t mp;
	vnode_t bdevvp_rootvp;

	if (mountroot != NULL) {
		/*
		 * used for netboot which follows a different set of rules
		 */
		error = (*mountroot)();
		return (error);
	}
	if ((error = bdevvp(rootdev, &rootvp))) {
		printf("vfs_mountroot: can't setup bdevvp\n");
		return (error);
	}
	/*
	 * 4951998 - code we call in vfc_mountroot may replace rootvp
	 * so keep a local copy for some housekeeping.
	 */
	bdevvp_rootvp = rootvp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL)
			continue;

		mp = vfs_rootmountalloc_internal(vfsp, "root_device");
		mp->mnt_devvp = rootvp;

		if ((error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx)) == 0) {
			if ( bdevvp_rootvp != rootvp ) {
				/*
				 * rootvp changed...
				 * bump the iocount and fix up mnt_devvp for the
				 * new rootvp (it will already have a usecount taken)...
				 * drop the iocount and the usecount on the original
				 * since we are no longer going to use it...
				 */
				vnode_getwithref(rootvp);
				mp->mnt_devvp = rootvp;

				vnode_rele(bdevvp_rootvp);
				vnode_put(bdevvp_rootvp);
			}
			mp->mnt_devvp->v_specflags |= SI_MOUNTEDON;

			vfs_unbusy(mp);

			mount_list_add(mp);

			/*
			 * cache the IO attributes for the underlying physical media...
			 * an error return indicates the underlying driver doesn't
			 * support all the queries necessary... however, reasonable
			 * defaults will have been set, so no reason to bail or care
			 */
			vfs_init_io_attributes(rootvp, mp);

			/*
			 * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
			 */
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
				mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
			}
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) {
				mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
			}

			/*
			 * Probe root file system for additional features.
			 */
			(void)VFS_START(mp, 0, ctx);

			VFSATTR_INIT(&vfsattr);
			VFSATTR_WANTED(&vfsattr, f_capabilities);
			if (vfs_getattr(mp, &vfsattr, ctx) == 0 &&
			    VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
					mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
				}
#if NAMEDSTREAMS
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) {
					mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
				}
#endif
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) {
					mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
				}
			}

			/*
			 * get rid of iocount reference returned
			 * by bdevvp (or picked up by us on the substituted
			 * rootvp)... it (or we) will have also taken
			 * a usecount reference which we want to keep
			 */
			vnode_put(rootvp);

#if CONFIG_MACF
			if ((vfs_flags(mp) & MNT_MULTILABEL) == 0)
				return (0);

			error = VFS_ROOT(mp, &vp, ctx);
			if (error) {
				printf("%s() VFS_ROOT() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}
			error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
			/*
			 * get rid of reference provided by VFS_ROOT
			 */
			vnode_put(vp);

			if (error) {
				printf("%s() vnode_label() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}
#endif
			return (0);
		}
#if CONFIG_MACF
fail:
#endif
		vfs_rootmountfailed(mp);

		if (error != EINVAL)
			printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
	}
	return (ENODEV);
}

/*
 * Lookup a mount point by filesystem identifier.
 */

struct mount *
vfs_getvfs(fsid_t *fsid)
{
	return (mount_list_lookupby_fsid(fsid, 0, 0));
}

static struct mount *
vfs_getvfs_locked(fsid_t *fsid)
{
	return (mount_list_lookupby_fsid(fsid, 1, 0));
}

struct mount *
vfs_getvfs_by_mntonname(char *path)
{
	mount_t retmp = (mount_t)0;
	mount_t mp;

	mount_list_lock();
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!strncmp(mp->mnt_vfsstat.f_mntonname, path,
		    sizeof(mp->mnt_vfsstat.f_mntonname))) {
			retmp = mp;
			goto out;
		}
	}
out:
	mount_list_unlock();
	return (retmp);
}
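
/*
 * Example (illustrative sketch, compiled out): neither lookup above
 * takes a reference on the returned mount, so the result is only as
 * stable as the caller's own synchronization (e.g. a follow-up
 * mount_iterref()).
 */
#if 0
static int
example_find_mount(fsid_t *fsid)
{
	mount_t mp;

	if ((mp = vfs_getvfs(fsid)) == NULL)
		return (ENOENT);

	/* mp is unreferenced; pin it before any extended use */
	return (0);
}
#endif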

/* generation number for creation of new fsids */
u_short mntid_gen = 0;
/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{

	fsid_t tfsid;
	int mtype;
	mount_t nmp;

	mount_list_lock();

	/* generate a new fsid */
	mtype = mp->mnt_vtable->vfc_typenum;
	if (++mntid_gen == 0)
		mntid_gen++;
	tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
	tfsid.val[1] = mtype;

	TAILQ_FOREACH(nmp, &mountlist, mnt_list) {
		while (vfs_getvfs_locked(&tfsid)) {
			if (++mntid_gen == 0)
				mntid_gen++;
			tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
		}
	}
	mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1];
	mount_list_unlock();
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)(void *);
long numvnodes, freevnodes, deadvnodes;


/*
 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vnode_t vp, mount_t mp)
{
	mount_t lmp;
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ( (lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
		if ((vp->v_lflag & VNAMED_MOUNT) == 0)
			panic("insmntque: vp not in mount vnode list");
		vp->v_lflag &= ~VNAMED_MOUNT;

		mount_lock_spin(lmp);

		mount_drop(lmp, 1);

		if (vp->v_mntvnodes.tqe_next == NULL) {
			if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp)
				TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
			else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp)
				TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
			else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp)
				TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
		} else {
			vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
			*vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
		}
		vp->v_mntvnodes.tqe_next = NULL;
		vp->v_mntvnodes.tqe_prev = NULL;
		mount_unlock(lmp);
		return;
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL) {
		mount_lock_spin(mp);
		if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0))
			panic("vp already in mount list");
		if (mp->mnt_lflag & MNT_LITER)
			TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
		else
			TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
		if (vp->v_lflag & VNAMED_MOUNT)
			panic("insmntque: vp already in mount vnode list");
		vp->v_lflag |= VNAMED_MOUNT;
		mount_ref(mp, 1);
		mount_unlock(mp);
	}
}


/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, vnode_t *vpp)
{
	vnode_t nvp;
	int error;
	struct vnode_fsparam vfsp;
	struct vfs_context context;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (ENODEV);
	}

	context.vc_thread = current_thread();
	context.vc_ucred = FSCRED;

	vfsp.vnfs_mp = (struct mount *)0;
	vfsp.vnfs_vtype = VBLK;
	vfsp.vnfs_str = "bdevvp";
	vfsp.vnfs_dvp = NULL;
	vfsp.vnfs_fsnode = NULL;
	vfsp.vnfs_cnp = NULL;
	vfsp.vnfs_vops = spec_vnodeop_p;
	vfsp.vnfs_rdev = dev;
	vfsp.vnfs_filesize = 0;

	vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;

	vfsp.vnfs_marksystem = 0;
	vfsp.vnfs_markroot = 0;

	if ( (error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp)) ) {
		*vpp = NULLVP;
		return (error);
	}
	vnode_lock_spin(nvp);
	nvp->v_flag |= VBDEVVP;
	nvp->v_tag = VT_NON;	/* set this to VT_NON so during aliasing it can be replaced */
	vnode_unlock(nvp);
	if ( (error = vnode_ref(nvp)) ) {
		panic("bdevvp failed: vnode_ref");
		return (error);
	}
	if ( (error = VNOP_FSYNC(nvp, MNT_WAIT, &context)) ) {
		panic("bdevvp failed: fsync");
		return (error);
	}
	if ( (error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0)) ) {
		panic("bdevvp failed: invalidateblks");
		return (error);
	}

#if CONFIG_MACF
	/*
	 * XXXMAC: We can't put a MAC check here, the system will
	 * panic without this vnode.
	 */
#endif /* MAC */

	if ( (error = VNOP_OPEN(nvp, FREAD, &context)) ) {
		panic("bdevvp failed: open");
		return (error);
	}
	*vpp = nvp;

	return (0);
}
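
/*
 * Example (illustrative sketch, compiled out): bdevvp() returns with
 * both an iocount and a usecount on the new vnode; vfs_mountroot()
 * above shows the real pattern of keeping the usecount and dropping
 * the iocount with vnode_put() once setup is finished.
 */
#if 0
static int
example_setup_blockdev(dev_t dev, vnode_t *vpp)
{
	int error;

	if ((error = bdevvp(dev, vpp)))
		return (error);

	/* ... use *vpp ... then drop the iocount, keeping the usecount */
	vnode_put(*vpp);
	return (0);
}
#endif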

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
static vnode_t
checkalias(struct vnode *nvp, dev_t nvp_rdev)
{
	struct vnode *vp;
	struct vnode **vpp;
	struct specinfo *sin = NULL;
	int vid = 0;

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	SPECHASH_LOCK();

	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
			vid = vp->v_id;
			break;
		}
	}
	SPECHASH_UNLOCK();

	if (vp) {
found_alias:
		if (vnode_getwithvid(vp, vid)) {
			goto loop;
		}
		/*
		 * Termination state is checked in vnode_getwithvid
		 */
		vnode_lock(vp);

		/*
		 * Alias, but not in use, so flush it out.
		 */
		if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_put_locked(vp);
			vnode_unlock(vp);
			goto loop;
		}

	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		if (sin == NULL) {
			MALLOC_ZONE(sin, struct specinfo *, sizeof(struct specinfo),
			    M_SPECINFO, M_WAITOK);
		}

		nvp->v_specinfo = sin;
		bzero(nvp->v_specinfo, sizeof(struct specinfo));
		nvp->v_rdev = nvp_rdev;
		nvp->v_specflags = 0;
		nvp->v_speclastr = -1;

		SPECHASH_LOCK();

		/* We dropped the lock, someone could have added */
		if (vp == NULLVP) {
			for (vp = *vpp; vp; vp = vp->v_specnext) {
				if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
					vid = vp->v_id;
					SPECHASH_UNLOCK();
					goto found_alias;
				}
			}
		}

		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		*vpp = nvp;

		if (vp != NULLVP) {
			nvp->v_specflags |= SI_ALIASED;
			vp->v_specflags |= SI_ALIASED;
			SPECHASH_UNLOCK();
			vnode_put_locked(vp);
			vnode_unlock(vp);
		} else {
			SPECHASH_UNLOCK();
		}

		return (NULLVP);
	}

	if (sin) {
		FREE_ZONE(sin, sizeof(struct specinfo), M_SPECINFO);
	}

	if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0)
		return (vp);

	panic("checkalias with VT_NON vp that shouldn't: %p", vp);

	return (vp);
}


/*
 * Get a reference on a particular vnode and lock it if requested.
 * If the vnode was on the inactive list, remove it from the list.
 * If the vnode was on the free list, remove it from the list and
 * move it to inactive list as needed.
 * The vnode lock bit is set if the vnode is being eliminated in
 * vgone. The process is awakened when the transition is completed,
 * and an error returned to indicate that the vnode is no longer
 * usable (possibly having been changed to a new file system type).
 */
int
vget_internal(vnode_t vp, int vid, int vflags)
{
	int error = 0;
	int vpid;

	vnode_lock_spin(vp);

	if (vflags & VNODE_WITHID)
		vpid = vid;
	else
		vpid = vp->v_id;	// save off the original v_id

	if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0))
		/*
		 * vnode to be returned only if it has writers opened
		 */
		error = EINVAL;
	else
		error = vnode_getiocount(vp, vpid, vflags);

	vnode_unlock(vp);

	return (error);
}

/*
 * Returns:	0			Success
 *		ENOENT			No such file or directory [terminating]
 */
int
vnode_ref(vnode_t vp)
{

	return (vnode_ref_ext(vp, 0));
}

/*
 * Returns:	0			Success
 *		ENOENT			No such file or directory [terminating]
 */
int
vnode_ref_ext(vnode_t vp, int fmode)
{
	int error = 0;

	vnode_lock_spin(vp);

	/*
	 * once all the current call sites have been fixed to ensure they have
	 * taken an iocount, we can toughen this assert up and insist that the
	 * iocount is non-zero... a non-zero usecount doesn't ensure correctness
	 */
	if (vp->v_iocount <= 0 && vp->v_usecount <= 0)
		panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);

	/*
	 * if you are the owner of drain/termination, can acquire usecount
	 */
	if ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) {
		if (vp->v_owner != current_thread()) {
			error = ENOENT;
			goto out;
		}
	}
	vp->v_usecount++;

	if (fmode & FWRITE) {
		if (++vp->v_writecount <= 0)
			panic("vnode_ref_ext: v_writecount");
	}
	if (fmode & O_EVTONLY) {
		if (++vp->v_kusecount <= 0)
			panic("vnode_ref_ext: v_kusecount");
	}
	if (vp->v_flag & VRAGE) {
		struct uthread *ut;

		ut = get_bsdthread_info(current_thread());

		if ( !(current_proc()->p_lflag & P_LRAGE_VNODES) &&
		    !(ut->uu_flag & UT_RAGE_VNODES)) {
			/*
			 * a 'normal' process accessed this vnode
			 * so make sure it's no longer marked
			 * for rapid aging... also, make sure
			 * it gets removed from the rage list...
			 * when v_usecount drops back to 0, it
			 * will be put back on the real free list
			 */
			vp->v_flag &= ~VRAGE;
			vp->v_references = 0;
			vnode_list_remove(vp);
		}
	}
out:
	vnode_unlock(vp);

	return (error);
}
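
/*
 * Example (illustrative sketch, compiled out): per the comment in
 * vnode_ref_ext(), callers should hold an iocount when adding a
 * usecount.  A typical sequence takes the iocount with
 * vnode_getwithref(), adds the long-term reference, then drops the
 * iocount; the reference is later balanced with vnode_rele().
 */
#if 0
static int
example_take_longterm_ref(vnode_t vp)
{
	int error;

	if ((error = vnode_getwithref(vp)))	/* iocount */
		return (error);

	error = vnode_ref(vp);			/* long-term usecount */
	vnode_put(vp);				/* drop the iocount */

	return (error);
}
#endif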


/*
 * put the vnode on appropriate free list.
 * called with vnode LOCKED
 */
static void
vnode_list_add(vnode_t vp)
{
#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
	/*
	 * if it is already on a list or non zero references return
	 */
	if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE))
		return;

	vnode_list_lock();

	if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
		/*
		 * add the new guy to the appropriate end of the RAGE list
		 */
		if ((vp->v_flag & VAGE))
			TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
		else
			TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);

		vp->v_listflag |= VLIST_RAGE;
		ragevnodes++;

		/*
		 * reset the timestamp for the last inserted vp on the RAGE
		 * queue to let new_vnode know that it's not ok to start stealing
		 * from this list... as long as we're actively adding to this list
		 * we'll push out the vnodes we want to donate to the real free list
		 * once we stop pushing, we'll let some time elapse before we start
		 * stealing them in the new_vnode routine
		 */
		microuptime(&rage_tv);
	} else {
		/*
		 * if VL_DEAD, insert it at head of the dead list
		 * else insert at tail of LRU list or at head if VAGE is set
		 */
		if ( (vp->v_lflag & VL_DEAD)) {
			TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
			vp->v_listflag |= VLIST_DEAD;
			deadvnodes++;
		} else if ((vp->v_flag & VAGE)) {
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
			vp->v_flag &= ~VAGE;
			freevnodes++;
		} else {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			freevnodes++;
		}
	}
	vnode_list_unlock();
}


/*
 * remove the vnode from appropriate free list.
 * called with vnode LOCKED and
 * the list lock held
 */
static void
vnode_list_remove_locked(vnode_t vp)
{
	if (VONLIST(vp)) {
		/*
		 * the v_listflag field is
		 * protected by the vnode_list_lock
		 */
		if (vp->v_listflag & VLIST_RAGE)
			VREMRAGE("vnode_list_remove", vp);
		else if (vp->v_listflag & VLIST_DEAD)
			VREMDEAD("vnode_list_remove", vp);
		else
			VREMFREE("vnode_list_remove", vp);
	}
}


/*
 * remove the vnode from appropriate free list.
 * called with vnode LOCKED
 */
static void
vnode_list_remove(vnode_t vp)
{
#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
	/*
	 * we want to avoid taking the list lock
	 * in the case where we're not on the free
	 * list... this will be true for most
	 * directories and any currently in use files
	 *
	 * we're guaranteed that we can't go from
	 * the not-on-list state to the on-list
	 * state since we hold the vnode lock...
	 * all calls to vnode_list_add are done
	 * under the vnode lock... so we can
	 * check for that condition (the prevalent one)
	 * without taking the list lock
	 */
	if (VONLIST(vp)) {
		vnode_list_lock();
		/*
		 * however, we're not guaranteed that
		 * we won't go from the on-list state
		 * to the not-on-list state until we
		 * hold the vnode_list_lock... this
		 * is due to "new_vnode" removing vnodes
		 * from the free list under the list_lock
		 * w/o the vnode lock... so we need to
		 * check again whether we're currently
		 * on the free list
		 */
		vnode_list_remove_locked(vp);

		vnode_list_unlock();
	}
}


void
vnode_rele(vnode_t vp)
{
	vnode_rele_internal(vp, 0, 0, 0);
}


void
vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
{
	vnode_rele_internal(vp, fmode, dont_reenter, 0);
}


void
vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
{
	if ( !locked)
		vnode_lock_spin(vp);
#if DIAGNOSTIC
	else
		lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
	if (--vp->v_usecount < 0)
		panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);

	if (fmode & FWRITE) {
		if (--vp->v_writecount < 0)
			panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
	}
	if (fmode & O_EVTONLY) {
		if (--vp->v_kusecount < 0)
			panic("vnode_rele_ext: vp %p kusecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
	}
	if (vp->v_kusecount > vp->v_usecount)
		panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);

	if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
		/*
		 * vnode is still busy... if we're the last
		 * usecount, mark for a future call to VNOP_INACTIVE
		 * when the iocount finally drops to 0
		 */
		if (vp->v_usecount == 0) {
			vp->v_lflag |= VL_NEEDINACTIVE;
			vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
		}
		if ( !locked)
			vnode_unlock(vp);
		return;
	}
	vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);

	if ( (vp->v_lflag & (VL_TERMINATE | VL_DEAD)) || dont_reenter) {
		/*
		 * vnode is being cleaned, or
		 * we've requested that we don't reenter
		 * the filesystem on this release... in
		 * this case, we'll mark the vnode aged
		 * if it's been marked for termination
		 */
		if (dont_reenter) {
			if ( !(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM)) )
				vp->v_lflag |= VL_NEEDINACTIVE;
			vp->v_flag |= VAGE;
		}
		vnode_list_add(vp);
		if ( !locked)
			vnode_unlock(vp);
		return;
	}
	/*
	 * at this point both the iocount and usecount
	 * are zero
	 * pick up an iocount so that we can call
	 * VNOP_INACTIVE with the vnode lock unheld
	 */
	vp->v_iocount++;
#ifdef JOE_DEBUG
	record_vp(vp, 1);
#endif
	vp->v_lflag &= ~VL_NEEDINACTIVE;
	vnode_unlock(vp);

	VNOP_INACTIVE(vp, vfs_context_current());

	vnode_lock_spin(vp);
	/*
	 * because we dropped the vnode lock to call VNOP_INACTIVE
	 * the state of the vnode may have changed... we may have
	 * picked up an iocount, usecount or the MARKTERM may have
	 * been set... we need to reevaluate the reference counts
	 * to determine if we can call vnode_reclaim_internal at
	 * this point... if the reference counts are up, we'll pick
	 * up the MARKTERM state when they get subsequently dropped
	 */
	if ( (vp->v_iocount == 1) && (vp->v_usecount == 0) &&
	    ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
		struct uthread *ut;

		ut = get_bsdthread_info(current_thread());

		if (ut->uu_defer_reclaims) {
			vp->v_defer_reclaimlist = ut->uu_vreclaims;
			ut->uu_vreclaims = vp;
			goto defer_reclaim;
		}
		vnode_lock_convert(vp);
		vnode_reclaim_internal(vp, 1, 1, 0);
	}
	vnode_dropiocount(vp);
	vnode_list_add(vp);
defer_reclaim:
	if ( !locked)
		vnode_unlock(vp);
	return;
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#if DIAGNOSTIC
int busyprt = 0;	/* print out busy vnodes */
#if 0
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif /* 0 */
#endif

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vnode *vp;
	int busy = 0;
	int reclaimed = 0;
	int retval;
	unsigned int vid;

	mount_lock(mp);
	vnode_iterate_setup(mp);
	/*
	 * On regular unmounts (not forced) do a
	 * quick check for vnodes to be in use. This
	 * preserves the caching of vnodes. The automounter
	 * tries unmounting every so often to see whether
	 * it is still busy or not.
	 */
	if (((flags & FORCECLOSE) == 0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) {
		if (vnode_umount_preflight(mp, skipvp, flags)) {
			vnode_iterate_clear(mp);
			mount_unlock(mp);
			return (EBUSY);
		}
	}
loop:
	/* if it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		return (retval);
	}

	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {

		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);

		if ( (vp->v_mount != mp) || (vp == skipvp)) {
			continue;
		}
		vid = vp->v_id;
		mount_unlock(mp);

		vnode_lock_spin(vp);

		if ((vp->v_id != vid) || ((vp->v_lflag & (VL_DEAD | VL_TERMINATE)))) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}

		/*
		 * If requested, skip over vnodes marked VSYSTEM.
		 * Skip over all vnodes marked VNOFLUSH.
		 */
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
		    (vp->v_flag & VNOFLUSH))) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VSWAP.
		 */
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VROOT.
		 */
		if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If the real usecount is 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (((vp->v_usecount == 0) ||
		    ((vp->v_usecount - vp->v_kusecount) == 0))) {

			vnode_lock_convert(vp);
			vp->v_iocount++;	/* so that drain waits for other iocounts */
#ifdef JOE_DEBUG
			record_vp(vp, 1);
#endif
			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_dropiocount(vp);
			vnode_list_add(vp);
			vnode_unlock(vp);

			reclaimed++;
			mount_lock(mp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			vnode_lock_convert(vp);

			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vp->v_iocount++;	/* so that drain waits for other iocounts */
#ifdef JOE_DEBUG
				record_vp(vp, 1);
#endif
				vnode_reclaim_internal(vp, 1, 1, 0);
				vnode_dropiocount(vp);
				vnode_list_add(vp);
				vnode_unlock(vp);
			} else {
				vclean(vp, 0);
				vp->v_lflag &= ~VL_DEAD;
				vp->v_op = spec_vnodeop_p;
				vp->v_flag |= VDEVFLUSH;
				vnode_unlock(vp);
			}
			mount_lock(mp);
			continue;
		}
#if DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		vnode_unlock(vp);
		mount_lock(mp);
		busy++;
	}

	/* At this point the worker queue is completed */
	if (busy && ((flags & FORCECLOSE) == 0) && reclaimed) {
		busy = 0;
		reclaimed = 0;
		(void)vnode_iterate_reloadq(mp);
		/* returned with mount lock held */
		goto loop;
	}

	/* if new vnodes were created in between retry the reclaim */
	if ( vnode_iterate_reloadq(mp) != 0) {
		if (!(busy && ((flags & FORCECLOSE) == 0)))
			goto loop;
	}
	vnode_iterate_clear(mp);
	mount_unlock(mp);

	if (busy && ((flags & FORCECLOSE) == 0))
		return (EBUSY);
	return (0);
}
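
/*
 * Example (illustrative sketch, compiled out): a filesystem's unmount
 * path typically calls vflush() to reclaim its vnodes, skipping swap
 * and system vnodes and forcing only when MNT_FORCE was requested.
 */
#if 0
static int
example_fs_unmount(struct mount *mp, int mntflags)
{
	int flags = SKIPSWAP | SKIPSYSTEM;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	return (vflush(mp, NULLVP, flags));
}
#endif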
1948
1949 long num_recycledvnodes = 0;
1950 /*
1951 * Disassociate the underlying file system from a vnode.
1952 * The vnode lock is held on entry.
1953 */
1954 static void
1955 vclean(vnode_t vp, int flags)
1956 {
1957 vfs_context_t ctx = vfs_context_current();
1958 int active;
1959 int need_inactive;
1960 int already_terminating;
1961 int clflags = 0;
1962 #if NAMEDSTREAMS
1963 int is_namedstream;
1964 #endif
1965
1966 /*
1967 * Check to see if the vnode is in use.
1968 * If so we have to reference it before we clean it out
1969 * so that its count cannot fall to zero and generate a
1970 * race against ourselves to recycle it.
1971 */
1972 active = vp->v_usecount;
1973
1974 /*
1975 * just in case we missed sending a needed
1976 * VNOP_INACTIVE, we'll do it now
1977 */
1978 need_inactive = (vp->v_lflag & VL_NEEDINACTIVE);
1979
1980 vp->v_lflag &= ~VL_NEEDINACTIVE;
1981
1982 /*
1983 * Prevent the vnode from being recycled or
1984 * brought into use while we clean it out.
1985 */
1986 already_terminating = (vp->v_lflag & VL_TERMINATE);
1987
1988 vp->v_lflag |= VL_TERMINATE;
1989
1990 /*
1991 * remove the vnode from any mount list
1992 * it might be on...
1993 */
1994 insmntque(vp, (struct mount *)0);
1995
1996 #if NAMEDSTREAMS
1997 is_namedstream = vnode_isnamedstream(vp);
1998 #endif
1999
2000 vnode_unlock(vp);
2001
2002 OSAddAtomicLong(1, &num_recycledvnodes);
2003
2004 if (flags & DOCLOSE)
2005 clflags |= IO_NDELAY;
2006 if (flags & REVOKEALL)
2007 clflags |= IO_REVOKE;
2008
2009 if (active && (flags & DOCLOSE))
2010 VNOP_CLOSE(vp, clflags, ctx);
2011
2012 /*
2013 * Clean out any buffers associated with the vnode.
2014 */
2015 if (flags & DOCLOSE) {
2016 #if NFSCLIENT
2017 if (vp->v_tag == VT_NFS)
2018 nfs_vinvalbuf(vp, V_SAVE, ctx, 0);
2019 else
2020 #endif
2021 {
2022 VNOP_FSYNC(vp, MNT_WAIT, ctx);
2023 buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
2024 }
2025 if (UBCINFOEXISTS(vp))
2026 /*
2027 * Clean the pages in VM.
2028 */
2029 (void)ubc_sync_range(vp, (off_t)0, ubc_getsize(vp), UBC_PUSHALL);
2030 }
2031 if (active || need_inactive)
2032 VNOP_INACTIVE(vp, ctx);
2033
2034 #if NAMEDSTREAMS
2035 if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
2036 vnode_t pvp = vp->v_parent;
2037
2038 /* Delete the shadow stream file before we reclaim its vnode */
2039 if (vnode_isshadow(vp)) {
2040 vnode_relenamedstream(pvp, vp, ctx);
2041 }
2042
2043 /*
2044 * No more streams associated with the parent. We
2045 * have a ref on it, so its identity is stable.
2046 * If the parent is on an opaque volume, then we need to know
2047 * whether it has associated named streams.
2048 */
2049 if (vfs_authopaque(pvp->v_mount)) {
2050 vnode_lock_spin(pvp);
2051 pvp->v_lflag &= ~VL_HASSTREAMS;
2052 vnode_unlock(pvp);
2053 }
2054 }
2055 #endif
2056
2057 /*
2058 * Destroy ubc named reference
2059 * cluster_release is done on this path
2060 * along with dropping the reference on the ucred
2061 */
2062 ubc_destroy_named(vp);
2063
2064 /*
2065 * Reclaim the vnode.
2066 */
2067 if (VNOP_RECLAIM(vp, ctx))
2068 panic("vclean: cannot reclaim");
2069
2070 // make sure the name & parent ptrs get cleaned out!
2071 vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE);
2072
2073 vnode_lock(vp);
2074
2075 vp->v_mount = dead_mountp;
2076 vp->v_op = dead_vnodeop_p;
2077 vp->v_tag = VT_NON;
2078 vp->v_data = NULL;
2079
2080 vp->v_lflag |= VL_DEAD;
2081
2082 if (already_terminating == 0) {
2083 vp->v_lflag &= ~VL_TERMINATE;
2084 /*
2085 * Done with purge, notify sleepers of the grim news.
2086 */
2087 if (vp->v_lflag & VL_TERMWANT) {
2088 vp->v_lflag &= ~VL_TERMWANT;
2089 wakeup(&vp->v_lflag);
2090 }
2091 }
2092 }
2093
2094 /*
2095 * Eliminate all activity associated with the requested vnode
2096 * and with all vnodes aliased to the requested vnode.
2097 */
2098 int
2099 #if DIAGNOSTIC
2100 vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
2101 #else
2102 vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
2103 #endif
2104 {
2105 struct vnode *vq;
2106 int vid;
2107
2108 #if DIAGNOSTIC
2109 if ((flags & REVOKEALL) == 0)
2110 panic("vnop_revoke");
2111 #endif
2112
2113 if (vnode_isaliased(vp)) {
2114 /*
2115 * If a vgone (or vclean) is already in progress,
2116 * return an immediate error
2117 */
2118 if (vp->v_lflag & VL_TERMINATE)
2119 return(ENOENT);
2120
2121 /*
2122 * Ensure that vp will not be vgone'd while we
2123 * are eliminating its aliases.
2124 */
2125 SPECHASH_LOCK();
2126 while ((vp->v_specflags & SI_ALIASED)) {
2127 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2128 if (vq->v_rdev != vp->v_rdev ||
2129 vq->v_type != vp->v_type || vp == vq)
2130 continue;
2131 vid = vq->v_id;
2132 SPECHASH_UNLOCK();
2133 if (vnode_getwithvid(vq,vid)){
2134 SPECHASH_LOCK();
2135 break;
2136 }
2137 vnode_reclaim_internal(vq, 0, 1, 0);
2138 vnode_put(vq);
2139 SPECHASH_LOCK();
2140 break;
2141 }
2142 }
2143 SPECHASH_UNLOCK();
2144 }
2145 vnode_reclaim_internal(vp, 0, 0, REVOKEALL);
2146
2147 return (0);
2148 }
2149
2150 /*
2151 * Recycle an unused vnode: reclaim it immediately if it has no
2152 * references; otherwise mark it (VL_MARKTERM) for termination once they drop.
2153 */
2154 int
2155 vnode_recycle(struct vnode *vp)
2156 {
2157 vnode_lock_spin(vp);
2158
2159 if (vp->v_iocount || vp->v_usecount) {
2160 vp->v_lflag |= VL_MARKTERM;
2161 vnode_unlock(vp);
2162 return(0);
2163 }
2164 vnode_lock_convert(vp);
2165 vnode_reclaim_internal(vp, 1, 0, 0);
2166
2167 vnode_unlock(vp);
2168
2169 return (1);
2170 }
2171
2172 static int
2173 vnode_reload(vnode_t vp)
2174 {
2175 vnode_lock_spin(vp);
2176
2177 if ((vp->v_iocount > 1) || vp->v_usecount) {
2178 vnode_unlock(vp);
2179 return(0);
2180 }
2181 if (vp->v_iocount <= 0)
2182 panic("vnode_reload with no iocount %d", vp->v_iocount);
2183
2184 /* mark for release when iocount is dropped */
2185 vp->v_lflag |= VL_MARKTERM;
2186 vnode_unlock(vp);
2187
2188 return (1);
2189 }
2190
2191
2192 static void
2193 vgone(vnode_t vp, int flags)
2194 {
2195 struct vnode *vq;
2196 struct vnode *vx;
2197
2198 /*
2199 * Clean out the filesystem specific data.
2200 * vclean also takes care of removing the
2201 * vnode from any mount list it might be on
2202 */
2203 vclean(vp, flags | DOCLOSE);
2204
2205 /*
2206 * If special device, remove it from special device alias list
2207 * if it is on one.
2208 */
2209 if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
2210 SPECHASH_LOCK();
2211 if (*vp->v_hashchain == vp) {
2212 *vp->v_hashchain = vp->v_specnext;
2213 } else {
2214 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2215 if (vq->v_specnext != vp)
2216 continue;
2217 vq->v_specnext = vp->v_specnext;
2218 break;
2219 }
2220 if (vq == NULL)
2221 panic("missing bdev");
2222 }
2223 if (vp->v_specflags & SI_ALIASED) {
2224 vx = NULL;
2225 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2226 if (vq->v_rdev != vp->v_rdev ||
2227 vq->v_type != vp->v_type)
2228 continue;
2229 if (vx)
2230 break;
2231 vx = vq;
2232 }
2233 if (vx == NULL)
2234 panic("missing alias");
2235 if (vq == NULL)
2236 vx->v_specflags &= ~SI_ALIASED;
2237 vp->v_specflags &= ~SI_ALIASED;
2238 }
2239 SPECHASH_UNLOCK();
2240 {
2241 struct specinfo *tmp = vp->v_specinfo;
2242 vp->v_specinfo = NULL;
2243 FREE_ZONE((void *)tmp, sizeof(struct specinfo), M_SPECINFO);
2244 }
2245 }
2246 }
2247
2248 /*
2249 * Check whether the device identified by (dev, type) is busy because a filesystem is mounted on it.
2250 */
2251 int
2252 check_mountedon(dev_t dev, enum vtype type, int *errorp)
2253 {
2254 vnode_t vp;
2255 int rc = 0;
2256 int vid;
2257
2258 loop:
2259 SPECHASH_LOCK();
2260 for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
2261 if (dev != vp->v_rdev || type != vp->v_type)
2262 continue;
2263 vid = vp->v_id;
2264 SPECHASH_UNLOCK();
2265 if (vnode_getwithvid(vp,vid))
2266 goto loop;
2267 vnode_lock_spin(vp);
2268 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
2269 vnode_unlock(vp);
2270 if ((*errorp = vfs_mountedon(vp)) != 0)
2271 rc = 1;
2272 } else
2273 vnode_unlock(vp);
2274 vnode_put(vp);
2275 return(rc);
2276 }
2277 SPECHASH_UNLOCK();
2278 return (0);
2279 }
2280
2281 /*
2282 * Calculate the total number of references to a special device.
2283 */
2284 int
2285 vcount(vnode_t vp)
2286 {
2287 vnode_t vq, vnext;
2288 int count;
2289 int vid;
2290
2291 loop:
2292 if (!vnode_isaliased(vp))
2293 return (vp->v_usecount - vp->v_kusecount);
2294 count = 0;
2295
2296 SPECHASH_LOCK();
2297 /*
2298 * Grab first vnode and its vid.
2299 */
2300 vq = *vp->v_hashchain;
2301 vid = vq ? vq->v_id : 0;
2302
2303 SPECHASH_UNLOCK();
2304
2305 while (vq) {
2306 /*
2307 * Attempt to get the vnode outside the SPECHASH lock.
2308 */
2309 if (vnode_getwithvid(vq, vid)) {
2310 goto loop;
2311 }
2312 vnode_lock(vq);
2313
2314 if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) {
2315 if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) {
2316 /*
2317 * Alias, but not in use, so flush it out.
2318 */
2319 vnode_reclaim_internal(vq, 1, 1, 0);
2320 vnode_put_locked(vq);
2321 vnode_unlock(vq);
2322 goto loop;
2323 }
2324 count += (vq->v_usecount - vq->v_kusecount);
2325 }
2326 vnode_unlock(vq);
2327
2328 SPECHASH_LOCK();
2329 /*
2330 * must do this with the reference still held on 'vq'
2331 * so that it can't be destroyed while we're poking
2332 * through v_specnext
2333 */
2334 vnext = vq->v_specnext;
2335 vid = vnext ? vnext->v_id : 0;
2336
2337 SPECHASH_UNLOCK();
2338
2339 vnode_put(vq);
2340
2341 vq = vnext;
2342 }
2343
2344 return (count);
2345 }
2346
2347 int prtactive = 0; /* 1 => print out reclaim of active vnodes */
2348
2349 /*
2350 * Print out a description of a vnode.
2351 */
2352 static const char *typename[] =
2353 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
2354
2355 void
2356 vprint(const char *label, struct vnode *vp)
2357 {
2358 char sbuf[64];
2359
2360 if (label != NULL)
2361 printf("%s: ", label);
2362 printf("type %s, usecount %d, writecount %d",
2363 typename[vp->v_type], vp->v_usecount, vp->v_writecount);
2364 sbuf[0] = '\0';
2365 if (vp->v_flag & VROOT)
2366 strlcat(sbuf, "|VROOT", sizeof(sbuf));
2367 if (vp->v_flag & VTEXT)
2368 strlcat(sbuf, "|VTEXT", sizeof(sbuf));
2369 if (vp->v_flag & VSYSTEM)
2370 strlcat(sbuf, "|VSYSTEM", sizeof(sbuf));
2371 if (vp->v_flag & VNOFLUSH)
2372 strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf));
2373 if (vp->v_flag & VBWAIT)
2374 strlcat(sbuf, "|VBWAIT", sizeof(sbuf));
2375 if (vnode_isaliased(vp))
2376 strlcat(sbuf, "|VALIASED", sizeof(sbuf));
2377 if (sbuf[0] != '\0')
2378 printf(" flags (%s)", &sbuf[1]);
2379 }
2380
2381
2382 int
2383 vn_getpath(struct vnode *vp, char *pathbuf, int *len)
2384 {
2385 return build_path(vp, pathbuf, *len, len, BUILDPATH_NO_FS_ENTER, vfs_context_current());
2386 }
2387
2388 int
2389 vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
2390 {
2391 return build_path(vp, pathbuf, *len, len, 0, vfs_context_current());
2392 }
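/*
 * Illustrative sketch (not part of the original source): callers of
 * vn_getpath()/vn_getpath_fsenter() typically size the buffer to
 * MAXPATHLEN and pass the length by reference; on success *len is
 * updated to the number of bytes used:
 *
 *	char buf[MAXPATHLEN];
 *	int  buflen = MAXPATHLEN;
 *
 *	if (vn_getpath(vp, buf, &buflen) == 0)
 *		printf("path: %s\n", buf);
 */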
2393
2394 int
2395 vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
2396 {
2397 return ubc_cs_getcdhash(vp, offset, cdhash);
2398 }
2399
2400
2401 static char *extension_table=NULL;
2402 static int nexts;
2403 static int max_ext_width;
2404
2405 static int
2406 extension_cmp(const void *a, const void *b)
2407 {
2408 return (strlen((const char *)a) - strlen((const char *)b));
2409 }
2410
2411
2412 //
2413 // This is the API LaunchServices uses to inform the kernel of the
2414 // list of package extensions to ignore.
2415 //
2416 // Internally we keep the list sorted by the length of the
2417 // extension (from shortest to longest). We sort the
2418 // list of extensions so that we can speed up our searches
2419 // when comparing file names -- we only compare extensions
2420 // that could possibly fit into the file name, not all of
2421 // them (i.e. a short 8 character name can't have an 8
2422 // character extension).
2423 //
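// Illustrative layout (not from the original source): with nentries == 3
// and maxwidth == 10, userland passes three fixed-width rows which
// qsort()/extension_cmp() then orders by extension length:
//
//      "framework\0" "app\0......" "bundle\0..."   (as copied in)
//      "app\0......" "bundle\0..." "framework\0"   (after qsort)
//
// is_package_name() walks these rows max_ext_width bytes apart.
//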
2424 extern lck_mtx_t *pkg_extensions_lck;
2425
2426 __private_extern__ int
2427 set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
2428 {
2429 char *new_exts, *old_exts;
2430 int error;
2431
2432 if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
2433 return EINVAL;
2434 }
2435
2436
2437 // allocate one byte extra so we can guarantee null termination
2438 MALLOC(new_exts, char *, (nentries * maxwidth) + 1, M_TEMP, M_WAITOK);
2439 if (new_exts == NULL) {
2440 return ENOMEM;
2441 }
2442
2443 error = copyin(data, new_exts, nentries * maxwidth);
2444 if (error) {
2445 FREE(new_exts, M_TEMP);
2446 return error;
2447 }
2448
2449 new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block
2450
2451 qsort(new_exts, nentries, maxwidth, extension_cmp);
2452
2453 lck_mtx_lock(pkg_extensions_lck);
2454
2455 old_exts = extension_table;
2456 extension_table = new_exts;
2457 nexts = nentries;
2458 max_ext_width = maxwidth;
2459
2460 lck_mtx_unlock(pkg_extensions_lck);
2461
2462 if (old_exts) {
2463 FREE(old_exts, M_TEMP);
2464 }
2465
2466 return 0;
2467 }
2468
2469
2470 __private_extern__ int
2471 is_package_name(const char *name, int len)
2472 {
2473 int i, extlen;
2474 const char *ptr, *name_ext;
2475
2476 if (len <= 3) {
2477 return 0;
2478 }
2479
2480 name_ext = NULL;
2481 for(ptr=name; *ptr != '\0'; ptr++) {
2482 if (*ptr == '.') {
2483 name_ext = ptr;
2484 }
2485 }
2486
2487 // if there is no "." extension, it can't match
2488 if (name_ext == NULL) {
2489 return 0;
2490 }
2491
2492 // advance over the "."
2493 name_ext++;
2494
2495 lck_mtx_lock(pkg_extensions_lck);
2496
2497 // now iterate over all the extensions to see if any match
2498 ptr = &extension_table[0];
2499 for(i=0; i < nexts; i++, ptr+=max_ext_width) {
2500 extlen = strlen(ptr);
2501 if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
2502 // aha, a match!
2503 lck_mtx_unlock(pkg_extensions_lck);
2504 return 1;
2505 }
2506 }
2507
2508 lck_mtx_unlock(pkg_extensions_lck);
2509
2510 // if we get here, no extension matched
2511 return 0;
2512 }
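/*
 * For example (illustrative, assuming "app" is in the extension table):
 *
 *	is_package_name("Foo.app", 7)   -> 1   (extension matches in full)
 *	is_package_name("Foo.apple", 9) -> 0   (name_ext[extlen] != '\0')
 *	is_package_name("app", 3)       -> 0   (len <= 3, and no "." extension)
 */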
2513
2514 int
2515 vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
2516 {
2517 char *ptr, *end;
2518 int comp=0;
2519
2520 *component = -1;
2521 if (*path != '/') {
2522 return EINVAL;
2523 }
2524
2525 end = path + 1;
2526 while(end < path + pathlen && *end != '\0') {
2527 while(end < path + pathlen && *end == '/' && *end != '\0') {
2528 end++;
2529 }
2530
2531 ptr = end;
2532
2533 while(end < path + pathlen && *end != '/' && *end != '\0') {
2534 end++;
2535 }
2536
2537 if (end > path + pathlen) {
2538 // hmm, string wasn't null terminated
2539 return EINVAL;
2540 }
2541
2542 *end = '\0';
2543 if (is_package_name(ptr, end - ptr)) {
2544 *component = comp;
2545 break;
2546 }
2547
2548 end++;
2549 comp++;
2550 }
2551
2552 return 0;
2553 }
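/*
 * For example (illustrative, assuming "app" is in the extension table):
 * given "/Volumes/Foo.app/Contents", *component is set to 1, since
 * "Foo.app" is the second component (counting from 0).  Note that the
 * path buffer is modified in place: the character that terminates each
 * examined component is overwritten with a NUL as the scan proceeds.
 */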
2554
2555 /*
2556 * Determine if a name is inappropriate for a searchfs query.
2557 * This list consists of /System currently.
2558 */
2559
2560 int vn_searchfs_inappropriate_name(const char *name, int len) {
2561 const char *bad_names[] = { "System" };
2562 int bad_len[] = { 6 };
2563 int i;
2564
2565 for(i=0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) {
2566 if (len == bad_len[i] && strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) {
2567 return 1;
2568 }
2569 }
2570
2571 // if we get here, no name matched
2572 return 0;
2573 }
2574
2575 /*
2576 * Top level filesystem related information gathering.
2577 */
2578 extern unsigned int vfs_nummntops;
2579
2580 int
2581 vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
2582 user_addr_t newp, size_t newlen, proc_t p)
2583 {
2584 struct vfstable *vfsp;
2585 int *username;
2586 u_int usernamelen;
2587 int error;
2588 struct vfsconf vfsc;
2589
2590 /* Writes to all non-VFS_GENERIC nodes, and to the VFS_GENERIC
2591 * sub-nodes VFS_MAXTYPENUM, VFS_CONF and VFS_SET_PACKAGE_EXTS,
2592 * require root privilege.
2593 * The rest is covered by userland_sysctl(CTLFLAG_ANYBODY).
2594 */
2595 if ((newp != USER_ADDR_NULL) && ((name[0] != VFS_GENERIC) ||
2596 ((name[1] == VFS_MAXTYPENUM) ||
2597 (name[1] == VFS_CONF) ||
2598 (name[1] == VFS_SET_PACKAGE_EXTS)))
2599 && (error = suser(kauth_cred_get(), &p->p_acflag))) {
2600 return(error);
2601 }
2602 /*
2603 * VFS_NUMMNTOPS shouldn't be at name[0] since it
2604 * is a VFS-generic variable. So now we must check
2605 * namelen so we don't end up covering any UFS
2606 * variables (since UFS vfc_typenum is 1).
2607 *
2608 * It should have been:
2609 * name[0]: VFS_GENERIC
2610 * name[1]: VFS_NUMMNTOPS
2611 */
2612 if (namelen == 1 && name[0] == VFS_NUMMNTOPS) {
2613 return (sysctl_rdint(oldp, oldlenp, newp, vfs_nummntops));
2614 }
2615
2616 /* all sysctl names at this level are at least name and field */
2617 if (namelen < 2)
2618 return (EISDIR); /* overloaded */
2619 if (name[0] != VFS_GENERIC) {
2620
2621 mount_list_lock();
2622 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2623 if (vfsp->vfc_typenum == name[0]) {
2624 vfsp->vfc_refcount++;
2625 break;
2626 }
2627 mount_list_unlock();
2628
2629 if (vfsp == NULL)
2630 return (ENOTSUP);
2631
2632 /* XXX current context proxy for proc p? */
2633 error = ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
2634 oldp, oldlenp, newp, newlen,
2635 vfs_context_current()));
2636
2637 mount_list_lock();
2638 vfsp->vfc_refcount--;
2639 mount_list_unlock();
2640 return error;
2641 }
2642 switch (name[1]) {
2643 case VFS_MAXTYPENUM:
2644 return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
2645 case VFS_CONF:
2646 if (namelen < 3)
2647 return (ENOTDIR); /* overloaded */
2648
2649 mount_list_lock();
2650 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2651 if (vfsp->vfc_typenum == name[2])
2652 break;
2653
2654 if (vfsp == NULL) {
2655 mount_list_unlock();
2656 return (ENOTSUP);
2657 }
2658
2659 vfsc.vfc_reserved1 = 0;
2660 bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
2661 vfsc.vfc_typenum = vfsp->vfc_typenum;
2662 vfsc.vfc_refcount = vfsp->vfc_refcount;
2663 vfsc.vfc_flags = vfsp->vfc_flags;
2664 vfsc.vfc_reserved2 = 0;
2665 vfsc.vfc_reserved3 = 0;
2666
2667 mount_list_unlock();
2668 return (sysctl_rdstruct(oldp, oldlenp, newp, &vfsc,
2669 sizeof(struct vfsconf)));
2670
2671 case VFS_SET_PACKAGE_EXTS:
2672 /* the caller's arguments follow the VFS_SET_PACKAGE_EXTS token at name[1] */
 return set_package_extensions_table((user_addr_t)((unsigned)name[2]), name[3], name[4]);
2673 }
2674 /*
2675 * We need to get back into the general MIB, so we need to re-prepend
2676 * CTL_VFS to our name and try userland_sysctl().
2677 */
2678 usernamelen = namelen + 1;
2679 MALLOC(username, int *, usernamelen * sizeof(*username),
2680 M_TEMP, M_WAITOK);
2681 bcopy(name, username + 1, namelen * sizeof(*name));
2682 username[0] = CTL_VFS;
2683 error = userland_sysctl(p, username, usernamelen, oldp,
2684 oldlenp, newp, newlen, oldlenp);
2685 FREE(username, M_TEMP);
2686 return (error);
2687 }
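/*
 * Illustrative userland usage (not part of this file): reading a
 * VFS_GENERIC variable such as the highest filesystem type number:
 *
 *	int mib[3] = { CTL_VFS, VFS_GENERIC, VFS_MAXTYPENUM };
 *	int maxtype;
 *	size_t len = sizeof(maxtype);
 *
 *	if (sysctl(mib, 3, &maxtype, &len, NULL, 0) == 0)
 *		printf("max vfs typenum: %d\n", maxtype);
 */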
2688
2689 /*
2690 * Dump vnode list (via sysctl) - defunct
2691 * use "pstat" instead
2692 */
2693 /* ARGSUSED */
2694 int
2695 sysctl_vnode
2696 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
2697 {
2698 return(EINVAL);
2699 }
2700
2701 SYSCTL_PROC(_kern, KERN_VNODE, vnode,
2702 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MASKED,
2703 0, 0, sysctl_vnode, "S,", "");
2704
2705
2706 /*
2707 * Check to see if a filesystem is mounted on a block device.
2708 */
2709 int
2710 vfs_mountedon(struct vnode *vp)
2711 {
2712 struct vnode *vq;
2713 int error = 0;
2714
2715 SPECHASH_LOCK();
2716 if (vp->v_specflags & SI_MOUNTEDON) {
2717 error = EBUSY;
2718 goto out;
2719 }
2720 if (vp->v_specflags & SI_ALIASED) {
2721 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2722 if (vq->v_rdev != vp->v_rdev ||
2723 vq->v_type != vp->v_type)
2724 continue;
2725 if (vq->v_specflags & SI_MOUNTEDON) {
2726 error = EBUSY;
2727 break;
2728 }
2729 }
2730 }
2731 out:
2732 SPECHASH_UNLOCK();
2733 return (error);
2734 }
2735
2736 /*
2737 * Unmount all filesystems. The list is traversed in reverse order
2738 * of mounting to avoid dependencies.
2739 */
2740 __private_extern__ void
2741 vfs_unmountall(void)
2742 {
2743 struct mount *mp;
2744 int error;
2745
2746 /*
2747 * Since this only runs when rebooting, it is not interlocked.
2748 */
2749 mount_list_lock();
2750 while(!TAILQ_EMPTY(&mountlist)) {
2751 mp = TAILQ_LAST(&mountlist, mntlist);
2752 mount_list_unlock();
2753 error = dounmount(mp, MNT_FORCE, 0, vfs_context_current());
2754 if ((error != 0) && (error != EBUSY)) {
2755 printf("unmount of %s failed (", mp->mnt_vfsstat.f_mntonname);
2756 printf("%d)\n", error);
2757 mount_list_lock();
2758 TAILQ_REMOVE(&mountlist, mp, mnt_list);
2759 continue;
2760 } else if (error == EBUSY) {
2761 /* If EBUSY is returned, the unmount was already in progress */
2762 printf("unmount of %p failed (", mp);
2763 printf("BUSY)\n");
2764 }
2765 mount_list_lock();
2766 }
2767 mount_list_unlock();
2768 }
2769
2770
2771 /*
2772 * This routine is called from vnode_pager_deallocate out of the VM
2773 * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
2774 * on a vnode that has a UBCINFO
2775 */
2776 __private_extern__ void
2777 vnode_pager_vrele(vnode_t vp)
2778 {
2779 struct ubc_info *uip;
2780
2781 vnode_lock_spin(vp);
2782
2783 vp->v_lflag &= ~VNAMED_UBC;
2784
2785 uip = vp->v_ubcinfo;
2786 vp->v_ubcinfo = UBC_INFO_NULL;
2787
2788 vnode_unlock(vp);
2789
2790 ubc_info_deallocate(uip);
2791 }
2792
2793
2794 #include <sys/disk.h>
2795
2796 errno_t
2797 vfs_init_io_attributes(vnode_t devvp, mount_t mp)
2798 {
2799 int error;
2800 off_t readblockcnt = 0;
2801 off_t writeblockcnt = 0;
2802 off_t readmaxcnt = 0;
2803 off_t writemaxcnt = 0;
2804 off_t readsegcnt = 0;
2805 off_t writesegcnt = 0;
2806 off_t readsegsize = 0;
2807 off_t writesegsize = 0;
2808 off_t alignment = 0;
2809 off_t ioqueue_depth = 0;
2810 u_int32_t blksize;
2811 u_int64_t temp;
2812 u_int32_t features;
2813 vfs_context_t ctx = vfs_context_current();
2814 int isssd = 0;
2815 int isvirtual = 0;
2816 /*
2817 * determine if this mount point exists on the same device as the root
2818 * partition... if so, then it comes under the hard throttle control
2819 */
2820 int thisunit = -1;
2821 static int rootunit = -1;
2822
2823 if (rootunit == -1) {
2824 if (VNOP_IOCTL(rootvp, DKIOCGETBSDUNIT, (caddr_t)&rootunit, 0, ctx))
2825 rootunit = -1;
2826 else if (rootvp == devvp)
2827 mp->mnt_kern_flag |= MNTK_ROOTDEV;
2828 }
2829 if (devvp != rootvp && rootunit != -1) {
2830 if (VNOP_IOCTL(devvp, DKIOCGETBSDUNIT, (caddr_t)&thisunit, 0, ctx) == 0) {
2831 if (thisunit == rootunit)
2832 mp->mnt_kern_flag |= MNTK_ROOTDEV;
2833 }
2834 }
2835 /*
2836 * force the spec device to re-cache
2837 * the underlying block size in case
2838 * the filesystem overrode the initial value
2839 */
2840 set_fsblocksize(devvp);
2841
2842
2843 if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE,
2844 (caddr_t)&blksize, 0, ctx)))
2845 return (error);
2846
2847 mp->mnt_devblocksize = blksize;
2848
2849 /*
2850 * set the maximum possible I/O size
2851 * this may get clipped to a smaller value
2852 * based on which constraints are being advertised
2853 * and if those advertised constraints result in a smaller
2854 * limit for a given I/O
2855 */
2856 mp->mnt_maxreadcnt = MAX_UPL_SIZE * PAGE_SIZE;
2857 mp->mnt_maxwritecnt = MAX_UPL_SIZE * PAGE_SIZE;
2858
2859 if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
2860 if (isvirtual)
2861 mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
2862 }
2863 if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) {
2864 if (isssd)
2865 mp->mnt_kern_flag |= MNTK_SSD;
2866 }
2867
2868 if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
2869 (caddr_t)&features, 0, ctx)))
2870 return (error);
2871
2872 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD,
2873 (caddr_t)&readblockcnt, 0, ctx)))
2874 return (error);
2875
2876 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE,
2877 (caddr_t)&writeblockcnt, 0, ctx)))
2878 return (error);
2879
2880 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD,
2881 (caddr_t)&readmaxcnt, 0, ctx)))
2882 return (error);
2883
2884 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE,
2885 (caddr_t)&writemaxcnt, 0, ctx)))
2886 return (error);
2887
2888 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD,
2889 (caddr_t)&readsegcnt, 0, ctx)))
2890 return (error);
2891
2892 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE,
2893 (caddr_t)&writesegcnt, 0, ctx)))
2894 return (error);
2895
2896 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD,
2897 (caddr_t)&readsegsize, 0, ctx)))
2898 return (error);
2899
2900 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE,
2901 (caddr_t)&writesegsize, 0, ctx)))
2902 return (error);
2903
2904 if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT,
2905 (caddr_t)&alignment, 0, ctx)))
2906 return (error);
2907
2908 if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
2909 (caddr_t)&ioqueue_depth, 0, ctx)))
2910 return (error);
2911
2912 if (readmaxcnt)
2913 mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt;
2914
2915 if (readblockcnt) {
2916 temp = readblockcnt * blksize;
2917 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
2918
2919 if (temp < mp->mnt_maxreadcnt)
2920 mp->mnt_maxreadcnt = (u_int32_t)temp;
2921 }
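	/*
	 * Worked example (illustrative): with blksize == 512 and a driver
	 * reporting DKIOCGETMAXBLOCKCOUNTREAD == 2048, temp is 1 MB
	 * (2048 * 512); if that is below the MAX_UPL_SIZE * PAGE_SIZE
	 * default set above, mnt_maxreadcnt is clipped down to 1 MB.
	 */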
2922
2923 if (writemaxcnt)
2924 mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : writemaxcnt;
2925
2926 if (writeblockcnt) {
2927 temp = writeblockcnt * blksize;
2928 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
2929
2930 if (temp < mp->mnt_maxwritecnt)
2931 mp->mnt_maxwritecnt = (u_int32_t)temp;
2932 }
2933
2934 if (readsegcnt) {
2935 temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
2936 } else {
2937 temp = mp->mnt_maxreadcnt / PAGE_SIZE;
2938
2939 if (temp > UINT16_MAX)
2940 temp = UINT16_MAX;
2941 }
2942 mp->mnt_segreadcnt = (u_int16_t)temp;
2943
2944 if (writesegcnt) {
2945 temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
2946 } else {
2947 temp = mp->mnt_maxwritecnt / PAGE_SIZE;
2948
2949 if (temp > UINT16_MAX)
2950 temp = UINT16_MAX;
2951 }
2952 mp->mnt_segwritecnt = (u_int16_t)temp;
2953
2954 if (readsegsize)
2955 temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
2956 else
2957 temp = mp->mnt_maxreadcnt;
2958 mp->mnt_maxsegreadsize = (u_int32_t)temp;
2959
2960 if (writesegsize)
2961 temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize;
2962 else
2963 temp = mp->mnt_maxwritecnt;
2964 mp->mnt_maxsegwritesize = (u_int32_t)temp;
2965
2966 if (alignment)
2967 temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1;
2968 else
2969 temp = 0;
2970 mp->mnt_alignmentmask = temp;
2971
2972
2973 if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH)
2974 temp = ioqueue_depth;
2975 else
2976 temp = MNT_DEFAULT_IOQUEUE_DEPTH;
2977
2978 mp->mnt_ioqueue_depth = temp;
2979 mp->mnt_ioscale = (mp->mnt_ioqueue_depth + (MNT_DEFAULT_IOQUEUE_DEPTH - 1)) / MNT_DEFAULT_IOQUEUE_DEPTH;
2980
2981 if (mp->mnt_ioscale > 1)
2982 printf("ioqueue_depth = %d, ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
2983
2984 if (features & DK_FEATURE_FORCE_UNIT_ACCESS)
2985 mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
2986
2987 return (error);
2988 }
2989
2990 static struct klist fs_klist;
2991 lck_grp_t *fs_klist_lck_grp;
2992 lck_mtx_t *fs_klist_lock;
2993
2994 void
2995 vfs_event_init(void)
2996 {
2997
2998 klist_init(&fs_klist);
2999 fs_klist_lck_grp = lck_grp_alloc_init("fs_klist", NULL);
3000 fs_klist_lock = lck_mtx_alloc_init(fs_klist_lck_grp, NULL);
3001 }
3002
3003 void
3004 vfs_event_signal(__unused fsid_t *fsid, u_int32_t event, __unused intptr_t data)
3005 {
3006 lck_mtx_lock(fs_klist_lock);
3007 KNOTE(&fs_klist, event);
3008 lck_mtx_unlock(fs_klist_lock);
3009 }
3010
3011 /*
3012 * return the number of mounted filesystems.
3013 */
3014 static int
3015 sysctl_vfs_getvfscnt(void)
3016 {
3017 return(mount_getvfscnt());
3018 }
3019
3020
3021 static int
3022 mount_getvfscnt(void)
3023 {
3024 int ret;
3025
3026 mount_list_lock();
3027 ret = nummounts;
3028 mount_list_unlock();
3029 return (ret);
3030
3031 }
3032
3033
3034
3035 static int
3036 mount_fillfsids(fsid_t *fsidlst, int count)
3037 {
3038 struct mount *mp;
3039 int actual = 0;
3040
3042 mount_list_lock();
3043 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3044 if (actual < count) {	/* don't write past the caller's array */
3045 fsidlst[actual] = mp->mnt_vfsstat.f_fsid;
3046 actual++;
3047 }
3048 }
3049 mount_list_unlock();
3050 return (actual);
3051
3052 }
3053
3054 /*
3055 * Fill in the array of fsid_t's up to a max of 'count'; the actual
3056 * number filled in will be set in '*actual'. If there are more fsid_t's
3057 * than fit in fsidlst, ENOMEM is returned and '*actual' holds the
3058 * full count.
3059 * Callers depend on *actual being filled out even in the error case.
3060 */
3061 static int
3062 sysctl_vfs_getvfslist(fsid_t *fsidlst, int count, int *actual)
3063 {
3064 struct mount *mp;
3065
3066 *actual = 0;
3067 mount_list_lock();
3068 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3069 (*actual)++;
3070 if (*actual <= count)
3071 fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid;
3072 }
3073 mount_list_unlock();
3074 return (*actual <= count ? 0 : ENOMEM);
3075 }
3076
3077 static int
3078 sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1,
3079 __unused int arg2, struct sysctl_req *req)
3080 {
3081 int actual, error;
3082 size_t space;
3083 fsid_t *fsidlst;
3084
3085 /* This is a readonly node. */
3086 if (req->newptr != USER_ADDR_NULL)
3087 return (EPERM);
3088
3089 /* they are querying us so just return the space required. */
3090 if (req->oldptr == USER_ADDR_NULL) {
3091 req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
3092 return 0;
3093 }
3094 again:
3095 /*
3096 * Retrieve an accurate count of the amount of space required to copy
3097 * out all the fsids in the system.
3098 */
3099 space = req->oldlen;
3100 req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
3101
3102 /* they didn't give us enough space. */
3103 if (space < req->oldlen)
3104 return (ENOMEM);
3105
3106 MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK);
3107 if (fsidlst == NULL) {
3108 return (ENOMEM);
3109 }
3110
3111 error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
3112 &actual);
3113 /*
3114 * If we get back ENOMEM, then another mount has been added while we
3115 * slept in malloc above. If this is the case then try again.
3116 */
3117 if (error == ENOMEM) {
3118 FREE(fsidlst, M_TEMP);
3119 req->oldlen = space;
3120 goto again;
3121 }
3122 if (error == 0) {
3123 error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t));
3124 }
3125 FREE(fsidlst, M_TEMP);
3126 return (error);
3127 }
3128
3129 /*
3130 * Do a sysctl by fsid.
3131 */
3132 static int
3133 sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
3134 struct sysctl_req *req)
3135 {
3136 union union_vfsidctl vc;
3137 struct mount *mp;
3138 struct vfsstatfs *sp;
3139 int *name, flags, namelen;
3140 int error=0, gotref=0;
3141 vfs_context_t ctx = vfs_context_current();
3142 proc_t p = req->p; /* XXX req->p != current_proc()? */
3143 boolean_t is_64_bit;
3144
3145 name = arg1;
3146 namelen = arg2;
3147 is_64_bit = proc_is64bit(p);
3148
3149 error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
3150 if (error)
3151 goto out;
3152 if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
3153 error = EINVAL;
3154 goto out;
3155 }
3156 mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
3157 if (mp == NULL) {
3158 error = ENOENT;
3159 goto out;
3160 }
3161 gotref = 1;
3162 /* reset so that the fs specific code can fetch it. */
3163 req->newidx = 0;
3164 /*
3165 * Note if this is a VFS_CTL then we pass the actual sysctl req
3166 * in for "oldp" so that the lower layer can DTRT and use the
3167 * SYSCTL_IN/OUT routines.
3168 */
3169 if (mp->mnt_op->vfs_sysctl != NULL) {
3170 if (is_64_bit) {
3171 if (vfs_64bitready(mp)) {
3172 error = mp->mnt_op->vfs_sysctl(name, namelen,
3173 CAST_USER_ADDR_T(req),
3174 NULL, USER_ADDR_NULL, 0,
3175 ctx);
3176 }
3177 else {
3178 error = ENOTSUP;
3179 }
3180 }
3181 else {
3182 error = mp->mnt_op->vfs_sysctl(name, namelen,
3183 CAST_USER_ADDR_T(req),
3184 NULL, USER_ADDR_NULL, 0,
3185 ctx);
3186 }
3187 if (error != ENOTSUP) {
3188 goto out;
3189 }
3190 }
3191 switch (name[0]) {
3192 case VFS_CTL_UMOUNT:
3193 req->newidx = 0;
3194 if (is_64_bit) {
3195 req->newptr = vc.vc64.vc_ptr;
3196 req->newlen = (size_t)vc.vc64.vc_len;
3197 }
3198 else {
3199 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
3200 req->newlen = vc.vc32.vc_len;
3201 }
3202 error = SYSCTL_IN(req, &flags, sizeof(flags));
3203 if (error)
3204 break;
3205
3206 mount_ref(mp, 0);
3207 mount_iterdrop(mp);
3208 gotref = 0;
3209 /* safedounmount consumes a ref */
3210 error = safedounmount(mp, flags, ctx);
3211 break;
3212 case VFS_CTL_STATFS:
3213 req->newidx = 0;
3214 if (is_64_bit) {
3215 req->newptr = vc.vc64.vc_ptr;
3216 req->newlen = (size_t)vc.vc64.vc_len;
3217 }
3218 else {
3219 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
3220 req->newlen = vc.vc32.vc_len;
3221 }
3222 error = SYSCTL_IN(req, &flags, sizeof(flags));
3223 if (error)
3224 break;
3225 sp = &mp->mnt_vfsstat;
3226 if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
3227 (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT)))
3228 goto out;
3229 if (is_64_bit) {
3230 struct user64_statfs sfs;
3231 bzero(&sfs, sizeof(sfs));
3232 sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3233 sfs.f_type = mp->mnt_vtable->vfc_typenum;
3234 sfs.f_bsize = (user64_long_t)sp->f_bsize;
3235 sfs.f_iosize = (user64_long_t)sp->f_iosize;
3236 sfs.f_blocks = (user64_long_t)sp->f_blocks;
3237 sfs.f_bfree = (user64_long_t)sp->f_bfree;
3238 sfs.f_bavail = (user64_long_t)sp->f_bavail;
3239 sfs.f_files = (user64_long_t)sp->f_files;
3240 sfs.f_ffree = (user64_long_t)sp->f_ffree;
3241 sfs.f_fsid = sp->f_fsid;
3242 sfs.f_owner = sp->f_owner;
3243
3244 strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
3245 strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
3246 strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);
3247
3248 error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
3249 }
3250 else {
3251 struct user32_statfs sfs;
3252 bzero(&sfs, sizeof(sfs));
3253 sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3254 sfs.f_type = mp->mnt_vtable->vfc_typenum;
3255
3256 /*
3257 * It's possible for there to be more than 2^31 blocks in the filesystem, so we
3258 * have to fudge the numbers here in that case. We inflate the blocksize in order
3259 * to reflect the filesystem size as best we can.
3260 */
3261 if (sp->f_blocks > INT_MAX) {
3262 int shift;
3263
3264 /*
3265 * Work out how far we have to shift the block count down to make it fit.
3266 * Note that it's possible to have to shift so far that the resulting
3267 * blocksize would be unreportably large. At that point, we will clip
3268 * any values that don't fit.
3269 *
3270 * For safety's sake, we also ensure that f_iosize is never reported as
3271 * being smaller than f_bsize.
3272 */
3273 for (shift = 0; shift < 32; shift++) {
3274 if ((sp->f_blocks >> shift) <= INT_MAX)
3275 break;
3276 if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX)
3277 break;
3278 }
3279 #define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
3280 sfs.f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
3281 sfs.f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
3282 sfs.f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
3283 #undef __SHIFT_OR_CLIP
3284 sfs.f_bsize = (user32_long_t)(sp->f_bsize << shift);
3285 sfs.f_iosize = lmax(sp->f_iosize, sp->f_bsize);
3286 } else {
3287 sfs.f_bsize = (user32_long_t)sp->f_bsize;
3288 sfs.f_iosize = (user32_long_t)sp->f_iosize;
3289 sfs.f_blocks = (user32_long_t)sp->f_blocks;
3290 sfs.f_bfree = (user32_long_t)sp->f_bfree;
3291 sfs.f_bavail = (user32_long_t)sp->f_bavail;
3292 }
3293 sfs.f_files = (user32_long_t)sp->f_files;
3294 sfs.f_ffree = (user32_long_t)sp->f_ffree;
3295 sfs.f_fsid = sp->f_fsid;
3296 sfs.f_owner = sp->f_owner;
3297
3298 strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
3299 strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
3300 strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);
3301
3302 error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
3303 }
3304 break;
3305 default:
3306 error = ENOTSUP;
3307 goto out;
3308 }
3309 out:
3310 if(gotref != 0)
3311 mount_iterdrop(mp);
3312 return (error);
3313 }
3314
3315 static int filt_fsattach(struct knote *kn);
3316 static void filt_fsdetach(struct knote *kn);
3317 static int filt_fsevent(struct knote *kn, long hint);
3318 struct filterops fs_filtops = {
3319 .f_attach = filt_fsattach,
3320 .f_detach = filt_fsdetach,
3321 .f_event = filt_fsevent,
3322 };
3323
3324 static int
3325 filt_fsattach(struct knote *kn)
3326 {
3327
3328 lck_mtx_lock(fs_klist_lock);
3329 kn->kn_flags |= EV_CLEAR;
3330 KNOTE_ATTACH(&fs_klist, kn);
3331 lck_mtx_unlock(fs_klist_lock);
3332 return (0);
3333 }
3334
3335 static void
3336 filt_fsdetach(struct knote *kn)
3337 {
3338 lck_mtx_lock(fs_klist_lock);
3339 KNOTE_DETACH(&fs_klist, kn);
3340 lck_mtx_unlock(fs_klist_lock);
3341 }
3342
3343 static int
3344 filt_fsevent(struct knote *kn, long hint)
3345 {
3346 /*
3347 * Backwards compatibility:
3348 * Other filters would do nothing if kn->kn_sfflags == 0
3349 */
3350
3351 if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
3352 kn->kn_fflags |= hint;
3353 }
3354
3355 return (kn->kn_fflags != 0);
3356 }
3357
3358 static int
3359 sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp,
3360 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3361 {
3362 int out, error;
3363 pid_t pid;
3364 proc_t p;
3365
3366 /* We need a pid. */
3367 if (req->newptr == USER_ADDR_NULL)
3368 return (EINVAL);
3369
3370 error = SYSCTL_IN(req, &pid, sizeof(pid));
3371 if (error)
3372 return (error);
3373
3374 p = proc_find(pid < 0 ? -pid : pid);
3375 if (p == NULL)
3376 return (ESRCH);
3377
3378 /*
3379 * Fetching the value is ok, but we only fetch if the old
3380 * pointer is given.
3381 */
3382 if (req->oldptr != USER_ADDR_NULL) {
3383 out = !((p->p_flag & P_NOREMOTEHANG) == 0);
3384 proc_rele(p);
3385 error = SYSCTL_OUT(req, &out, sizeof(out));
3386 return (error);
3387 }
3388
3389 /* cansignal offers us enough security. */
3390 if (p != req->p && proc_suser(req->p) != 0) {
3391 proc_rele(p);
3392 return (EPERM);
3393 }
3394
3395 if (pid < 0)
3396 OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
3397 else
3398 OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
3399 proc_rele(p);
3400
3401 return (0);
3402 }
3403
3404 /* the vfs.generic. branch. */
3405 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RW|CTLFLAG_LOCKED, NULL, "vfs generic hinge");
3406 /* retrieve a list of mounted filesystem fsid_t */
3407 SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist, CTLFLAG_RD,
3408 NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
3409 /* perform operations on filesystem via fsid_t */
3410 SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW|CTLFLAG_LOCKED,
3411 sysctl_vfs_ctlbyfsid, "ctlbyfsid");
3412 SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW|CTLFLAG_ANYBODY,
3413 NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
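/*
 * Illustrative userland usage (not part of this file): the fsid list can
 * be fetched by name; passing a NULL buffer first returns the size needed:
 *
 *	size_t len = 0;
 *	fsid_t *ids;
 *
 *	sysctlbyname("vfs.generic.vfsidlist", NULL, &len, NULL, 0);
 *	ids = malloc(len);
 *	sysctlbyname("vfs.generic.vfsidlist", ids, &len, NULL, 0);
 */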
3414
3415
3416 long num_reusedvnodes = 0;
3417
3418 static int
3419 new_vnode(vnode_t *vpp)
3420 {
3421 vnode_t vp;
3422 int retries = 0; /* retry in case the table is full */
3423 int force_alloc = 0, walk_count = 0;
3424 unsigned int vpid;
3425 struct timespec ts;
3426 struct timeval current_tv;
3427 #ifndef __LP64__
3428 struct unsafe_fsnode *l_unsafefs = 0;
3429 #endif /* __LP64__ */
3430 proc_t curproc = current_proc();
3431
3432 retry:
3433 microuptime(&current_tv);
3434
3435 vp = NULLVP;
3436
3437 vnode_list_lock();
3438
3439 if ( !TAILQ_EMPTY(&vnode_dead_list)) {
3440 /*
3441 * Can always reuse a dead one
3442 */
3443 vp = TAILQ_FIRST(&vnode_dead_list);
3444 goto steal_this_vp;
3445 }
3446 /*
3447 * no dead vnodes available... if we're under
3448 * the limit, we'll create a new vnode
3449 */
3450 if (numvnodes < desiredvnodes || force_alloc) {
3451 numvnodes++;
3452 vnode_list_unlock();
3453
3454 MALLOC_ZONE(vp, struct vnode *, sizeof(*vp), M_VNODE, M_WAITOK);
3455 bzero((char *)vp, sizeof(*vp));
3456 VLISTNONE(vp); /* avoid double queue removal */
3457 lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr);
3458
3459 klist_init(&vp->v_knotes);
3460 nanouptime(&ts);
3461 vp->v_id = ts.tv_nsec;
3462 vp->v_flag = VSTANDARD;
3463
3464 #if CONFIG_MACF
3465 if (mac_vnode_label_init_needed(vp))
3466 mac_vnode_label_init(vp);
3467 #endif /* MAC */
3468
3469 vp->v_iocount = 1;
3470 goto done;
3471 }
3472
3473 #define MAX_WALK_COUNT 1000
3474
3475 if ( !TAILQ_EMPTY(&vnode_rage_list) &&
3476 (ragevnodes >= rage_limit ||
3477 (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
3478
3479 TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
3480 if ( !(vp->v_listflag & VLIST_RAGE))
3481 panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
3482
3483 // if we're a dependency-capable process, skip vnodes that can
3484 // cause recycling deadlocks. (i.e. this process is diskimages
3485 // helper and the vnode is in a disk image).
3486 //
3487 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
3488 break;
3489 }
3490
3491 // don't iterate more than MAX_WALK_COUNT vnodes to
3492 // avoid keeping the vnode list lock held for too long.
3493 if (walk_count++ > MAX_WALK_COUNT) {
3494 vp = NULL;
3495 break;
3496 }
3497 }
3498
3499 }
3500
3501 if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
3502 /*
3503 * Pick the first vp for possible reuse
3504 */
3505 walk_count = 0;
3506 TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
3507 // if we're a dependency-capable process, skip vnodes that can
3508 // cause recycling deadlocks. (i.e. this process is diskimages
3509 // helper and the vnode is in a disk image)
3510 //
3511 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
3512 break;
3513 }
3514
3515 // don't iterate more than MAX_WALK_COUNT vnodes to
3516 // avoid keeping the vnode list lock held for too long.
3517 if (walk_count++ > MAX_WALK_COUNT) {
3518 vp = NULL;
3519 break;
3520 }
3521 }
3522
3523 }
3524
3525 //
3526 // if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
3527 // then we're trying to create a vnode on behalf of a
3528 // process like diskimages-helper that has file systems
3529 // mounted on top of itself (and thus we can't reclaim
3530 // vnodes in the file systems on top of us). if we can't
3531 // find a vnode to reclaim then we'll just have to force
3532 // the allocation.
3533 //
3534 if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
3535 force_alloc = 1;
3536 vnode_list_unlock();
3537 goto retry;
3538 }
3539
3540 if (vp == NULL) {
3541 /*
3542 * we've reached the system imposed maximum number of vnodes
3543 * but there isn't a single one available
3544 * wait a bit and then retry... if we can't get a vnode
3545 * after 100 retries, then log a complaint
3546 */
3547 if (++retries <= 100) {
3548 vnode_list_unlock();
3549 delay_for_interval(1, 1000 * 1000);
3550 goto retry;
3551 }
3552
3553 vnode_list_unlock();
3554 tablefull("vnode");
3555 log(LOG_EMERG, "%d desired, %d numvnodes, "
3556 "%d free, %d dead, %d rage\n",
3557 desiredvnodes, numvnodes, freevnodes, deadvnodes, ragevnodes);
3558 #if CONFIG_EMBEDDED
3559 /*
3560 * Running out of vnodes tends to make a system unusable. Start killing
3561 * processes that jetsam knows are killable.
3562 */
3563 if (jetsam_kill_top_proc() < 0) {
3564 /*
3565 * If jetsam can't find any more processes to kill and there
3566 * still aren't any free vnodes, panic. Hopefully we'll get a
3567 * panic log to tell us why we ran out.
3568 */
3569 panic("vnode table is full\n");
3570 }
3571
3572 delay_for_interval(1, 1000 * 1000);
3573 goto retry;
3574 #endif
3575
3576 *vpp = NULL;
3577 return (ENFILE);
3578 }
3579 steal_this_vp:
3580 vpid = vp->v_id;
3581
3582 vnode_list_remove_locked(vp);
3583
3584 vnode_list_unlock();
3585
3586 vnode_lock_spin(vp);
3587
3588 /*
3589 * We may have had to wait for the vnode_lock after removing the vp from the
3590 * freelist, and the vid is bumped only at the very end of reclaim. So it is
3591 * possible that we are looking at a vnode that is being terminated. If so, skip it.
3592 */
3593 if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
3594 VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
3595 /*
3596 * we lost the race between dropping the list lock
3597 * and picking up the vnode_lock... someone else
3598 * used this vnode and it is now in a new state
3599 * so we need to go back and try again
3600 */
3601 vnode_unlock(vp);
3602 goto retry;
3603 }
3604 if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) {
3605 /*
3606 * we did a vnode_rele_ext that asked for
3607 * us not to reenter the filesystem during
3608 * the release even though VL_NEEDINACTIVE was
3609 * set... we'll do it here by doing a
3610 * vnode_get/vnode_put
3611 *
3612 * pick up an iocount so that we can call
3613 * vnode_put and drive the VNOP_INACTIVE...
3614 * vnode_put will either leave us off
3615 * the freelist if a new ref comes in,
3616 * or put us back on the end of the freelist
3617 * or recycle us if we were marked for termination...
3618 * so we'll just go grab a new candidate
3619 */
3620 vp->v_iocount++;
3621 #ifdef JOE_DEBUG
3622 record_vp(vp, 1);
3623 #endif
3624 vnode_put_locked(vp);
3625 vnode_unlock(vp);
3626 goto retry;
3627 }
3628 OSAddAtomicLong(1, &num_reusedvnodes);
3629
3630 /* Checks for anyone racing us for recycle */
3631 if (vp->v_type != VBAD) {
3632 if (vp->v_lflag & VL_DEAD)
3633 panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
3634 vnode_lock_convert(vp);
3635 (void)vnode_reclaim_internal(vp, 1, 1, 0);
3636
3637 if ((VONLIST(vp)))
3638 panic("new_vnode(%p): vp on list", vp);
3639 if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
3640 (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH)))
3641 panic("new_vnode(%p): free vnode still referenced", vp);
3642 if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0))
3643 panic("new_vnode(%p): vnode seems to be on mount list", vp);
3644 if ( !LIST_EMPTY(&vp->v_nclinks) || !LIST_EMPTY(&vp->v_ncchildren))
3645 panic("new_vnode(%p): vnode still hooked into the name cache", vp);
3646 }
3647
3648 #ifndef __LP64__
3649 if (vp->v_unsafefs) {
3650 l_unsafefs = vp->v_unsafefs;
3651 vp->v_unsafefs = (struct unsafe_fsnode *)NULL;
3652 }
3653 #endif /* __LP64__ */
3654
3655 #if CONFIG_MACF
3656 /*
3657 * We should never see VL_LABELWAIT or VL_LABEL here.
3658 * as those operations hold a reference.
3659 */
3660 assert ((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
3661 assert ((vp->v_lflag & VL_LABEL) != VL_LABEL);
3662 if (vp->v_lflag & VL_LABELED) {
3663 vnode_lock_convert(vp);
3664 mac_vnode_label_recycle(vp);
3665 } else if (mac_vnode_label_init_needed(vp)) {
3666 vnode_lock_convert(vp);
3667 mac_vnode_label_init(vp);
3668 }
3669
3670 #endif /* MAC */
3671
3672 vp->v_iocount = 1;
3673 vp->v_lflag = 0;
3674 vp->v_writecount = 0;
3675 vp->v_references = 0;
3676 vp->v_iterblkflags = 0;
3677 vp->v_flag = VSTANDARD;
3678 /* vbad vnodes can point to dead_mountp */
3679 vp->v_mount = NULL;
3680 vp->v_defer_reclaimlist = (vnode_t)0;
3681
3682 vnode_unlock(vp);
3683
3684 #ifndef __LP64__
3685 if (l_unsafefs) {
3686 lck_mtx_destroy(&l_unsafefs->fsnodelock, vnode_lck_grp);
3687 FREE_ZONE((void *)l_unsafefs, sizeof(struct unsafe_fsnode), M_UNSAFEFS);
3688 }
3689 #endif /* __LP64__ */
3690
3691 done:
3692 *vpp = vp;
3693
3694 return (0);
3695 }
3696
3697 void
3698 vnode_lock(vnode_t vp)
3699 {
3700 lck_mtx_lock(&vp->v_lock);
3701 }
3702
3703 void
3704 vnode_lock_spin(vnode_t vp)
3705 {
3706 lck_mtx_lock_spin(&vp->v_lock);
3707 }
3708
3709 void
3710 vnode_unlock(vnode_t vp)
3711 {
3712 lck_mtx_unlock(&vp->v_lock);
3713 }
3714
3715
3716
3717 int
3718 vnode_get(struct vnode *vp)
3719 {
3720 int retval;
3721
3722 vnode_lock_spin(vp);
3723 retval = vnode_get_locked(vp);
3724 vnode_unlock(vp);
3725
3726 return(retval);
3727 }
3728
3729 int
3730 vnode_get_locked(struct vnode *vp)
3731 {
3732 #if DIAGNOSTIC
3733 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
3734 #endif
3735 if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
3736 return(ENOENT);
3737 }
3738 vp->v_iocount++;
3739 #ifdef JOE_DEBUG
3740 record_vp(vp, 1);
3741 #endif
3742 return (0);
3743 }
3744
3745 int
3746 vnode_getwithvid(vnode_t vp, uint32_t vid)
3747 {
3748 return(vget_internal(vp, vid, ( VNODE_NODEAD| VNODE_WITHID)));
3749 }
3750
3751 int
3752 vnode_getwithref(vnode_t vp)
3753 {
3754 return(vget_internal(vp, 0, 0));
3755 }
3756
3757
3758 __private_extern__ int
3759 vnode_getalways(vnode_t vp)
3760 {
3761 return(vget_internal(vp, 0, VNODE_ALWAYS));
3762 }
3763
3764 int
3765 vnode_put(vnode_t vp)
3766 {
3767 int retval;
3768
3769 vnode_lock_spin(vp);
3770 retval = vnode_put_locked(vp);
3771 vnode_unlock(vp);
3772
3773 return(retval);
3774 }
3775
3776 int
3777 vnode_put_locked(vnode_t vp)
3778 {
3779 vfs_context_t ctx = vfs_context_current(); /* hoist outside loop */
3780
3781 #if DIAGNOSTIC
3782 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
3783 #endif
3784 retry:
3785 if (vp->v_iocount < 1)
3786 panic("vnode_put(%p): iocount < 1", vp);
3787
3788 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
3789 vnode_dropiocount(vp);
3790 return(0);
3791 }
3792 if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE) {
3793
3794 vp->v_lflag &= ~VL_NEEDINACTIVE;
3795 vnode_unlock(vp);
3796
3797 VNOP_INACTIVE(vp, ctx);
3798
3799 vnode_lock_spin(vp);
3800 /*
3801 * because we had to drop the vnode lock before calling
3802 * VNOP_INACTIVE, the state of this vnode may have changed...
3803 * we may pick up both VL_MARKTERM and either
3804 * an iocount or a usecount while in the VNOP_INACTIVE call
3805 * we don't want to call vnode_reclaim_internal on a vnode
3806 * that has active references on it... so loop back around
3807 * and reevaluate the state
3808 */
3809 goto retry;
3810 }
3811 vp->v_lflag &= ~VL_NEEDINACTIVE;
3812
3813 if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
3814 vnode_lock_convert(vp);
3815 vnode_reclaim_internal(vp, 1, 1, 0);
3816 }
3817 vnode_dropiocount(vp);
3818 vnode_list_add(vp);
3819
3820 return(0);
3821 }
3822
3823 /* is vnode_t in use by others? */
3824 int
3825 vnode_isinuse(vnode_t vp, int refcnt)
3826 {
3827 return(vnode_isinuse_locked(vp, refcnt, 0));
3828 }
3829
3830
3831 static int
3832 vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
3833 {
3834 int retval = 0;
3835
3836 if (!locked)
3837 vnode_lock_spin(vp);
3838 if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
3839 retval = 1;
3840 goto out;
3841 }
3842 if (vp->v_type == VREG) {
3843 retval = ubc_isinuse_locked(vp, refcnt, 1);
3844 }
3845
3846 out:
3847 if (!locked)
3848 vnode_unlock(vp);
3849 return(retval);
3850 }
3851
3852
3853 /* resume vnode_t */
3854 errno_t
3855 vnode_resume(vnode_t vp)
3856 {
3857 if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
3858
3859 vnode_lock_spin(vp);
3860 vp->v_lflag &= ~VL_SUSPENDED;
3861 vp->v_owner = NULL;
3862 vnode_unlock(vp);
3863
3864 wakeup(&vp->v_iocount);
3865 }
3866 return(0);
3867 }
3868
3869 /* suspend vnode_t
3870 * Please do not use on more than one vnode at a time as it may
3871 * cause deadlocks.
3872 * xxx should we explicitly prevent this from happening?
3873 */
3874
3875 errno_t
3876 vnode_suspend(vnode_t vp)
3877 {
3878 if (vp->v_lflag & VL_SUSPENDED) {
3879 return(EBUSY);
3880 }
3881
3882 vnode_lock_spin(vp);
3883
3884 /*
3885 * xxx is this sufficient to check if a vnode_drain is
3886 * in progress?
3887 */
3888
3889 if (vp->v_owner == NULL) {
3890 vp->v_lflag |= VL_SUSPENDED;
3891 vp->v_owner = current_thread();
3892 }
3893 vnode_unlock(vp);
3894
3895 return(0);
3896 }
3897
3898
3899
3900 static errno_t
3901 vnode_drain(vnode_t vp)
3902 {
3903
3904 if (vp->v_lflag & VL_DRAIN) {
3905 panic("vnode_drain: recursive drain");
3906 return(ENOENT);
3907 }
3908 vp->v_lflag |= VL_DRAIN;
3909 vp->v_owner = current_thread();
3910
3911 while (vp->v_iocount > 1)
3912 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);
3913 return(0);
3914 }
3915
3916
3917 /*
3918 * if the number of recent references via vnode_getwithvid or vnode_getwithref
3919 * exceeds this threshold, then 'UN-AGE' the vnode by removing it from
3920 * the LRU list if it's currently on it... once the iocount and usecount both drop
3921 * to 0, it will get put back on the end of the list, effectively making it younger
3922 * this allows us to keep actively referenced vnodes in the list without having
3923 * to constantly remove and add to the list each time a vnode w/o a usecount is
3924 * referenced which costs us taking and dropping a global lock twice.
3925 */
3926 #define UNAGE_THRESHHOLD 25
3927
3928 static errno_t
3929 vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
3930 {
3931 int nodead = vflags & VNODE_NODEAD;
3932 int nosusp = vflags & VNODE_NOSUSPEND;
3933 int always = vflags & VNODE_ALWAYS;
3934
3935 for (;;) {
3936 /*
3937 * if it is a dead vnode with deadfs
3938 */
3939 if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
3940 return(ENOENT);
3941 }
3942 /*
3943 * will return VL_DEAD ones
3944 */
3945 if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0 ) {
3946 break;
3947 }
3948 /*
3949 * if suspended vnodes are to be failed
3950 */
3951 if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
3952 return(ENOENT);
3953 }
3954 /*
3955 * if you are the owner of the drain/suspend/termination, you can acquire an iocount;
3956 * check for VL_TERMINATE; it does not set owner
3957 */
3958 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
3959 (vp->v_owner == current_thread())) {
3960 break;
3961 }
3962
3963 if (always != 0)
3964 break;
3965 vnode_lock_convert(vp);
3966
3967 if (vp->v_lflag & VL_TERMINATE) {
3968 vp->v_lflag |= VL_TERMWANT;
3969
3970 msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vnode getiocount", NULL);
3971 } else
3972 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
3973 }
3974 if (vid != vp->v_id) {
3975 return(ENOENT);
3976 }
3977 if (++vp->v_references >= UNAGE_THRESHHOLD) {
3978 vp->v_references = 0;
3979 vnode_list_remove(vp);
3980 }
3981 vp->v_iocount++;
3982 #ifdef JOE_DEBUG
3983 record_vp(vp, 1);
3984 #endif
3985 return(0);
3986 }
3987
3988 static void
3989 vnode_dropiocount (vnode_t vp)
3990 {
3991 if (vp->v_iocount < 1)
3992 panic("vnode_dropiocount(%p): v_iocount < 1", vp);
3993
3994 vp->v_iocount--;
3995 #ifdef JOE_DEBUG
3996 record_vp(vp, -1);
3997 #endif
3998 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1))
3999 wakeup(&vp->v_iocount);
4000 }
4001
4002
4003 void
4004 vnode_reclaim(struct vnode * vp)
4005 {
4006 vnode_reclaim_internal(vp, 0, 0, 0);
4007 }
4008
4009 __private_extern__
4010 void
4011 vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
4012 {
4013 int isfifo = 0;
4014
4015 if (!locked)
4016 vnode_lock(vp);
4017
4018 if (vp->v_lflag & VL_TERMINATE) {
4019 panic("vnode reclaim in progress");
4020 }
4021 vp->v_lflag |= VL_TERMINATE;
4022
4023 vn_clearunionwait(vp, 1);
4024
4025 vnode_drain(vp);
4026
4027 isfifo = (vp->v_type == VFIFO);
4028
4029 if (vp->v_type != VBAD)
4030 vgone(vp, flags); /* clean and reclaim the vnode */
4031
4032 /*
4033 * give the vnode a new identity so that vnode_getwithvid will fail
4034 * on any stale cache accesses...
4035 * grab the list_lock so that if we're in "new_vnode"
4036 * behind the list_lock trying to steal this vnode, the v_id is stable...
4037 * once new_vnode drops the list_lock, it will block trying to take
4038 * the vnode lock until we release it... at that point it will evaluate
4039 * whether the v_id has changed
4040 * also need to make sure that the vnode isn't on a list where "new_vnode"
4041 * can find it after the v_id has been bumped until we are completely done
4042 * with the vnode (i.e. putting it back on a list has to be the very last
4043 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
4044 * are holding an io_count on the vnode... they need to drop the io_count
4045 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
4046 * they are completely done with the vnode
4047 */
4048 vnode_list_lock();
4049
4050 vnode_list_remove_locked(vp);
4051 vp->v_id++;
4052
4053 vnode_list_unlock();
4054
4055 if (isfifo) {
4056 struct fifoinfo * fip;
4057
4058 fip = vp->v_fifoinfo;
4059 vp->v_fifoinfo = NULL;
4060 FREE(fip, M_TEMP);
4061 }
4062 vp->v_type = VBAD;
4063
4064 if (vp->v_data)
4065 panic("vnode_reclaim_internal: cleaned vnode isn't");
4066 if (vp->v_numoutput)
4067 panic("vnode_reclaim_internal: clean vnode has pending I/O's");
4068 if (UBCINFOEXISTS(vp))
4069 panic("vnode_reclaim_internal: ubcinfo not cleaned");
4070 if (vp->v_parent)
4071 panic("vnode_reclaim_internal: vparent not removed");
4072 if (vp->v_name)
4073 panic("vnode_reclaim_internal: vname not removed");
4074
4075 vp->v_socket = NULL;
4076
4077 vp->v_lflag &= ~VL_TERMINATE;
4078 vp->v_lflag &= ~VL_DRAIN;
4079 vp->v_owner = NULL;
4080
4081 KNOTE(&vp->v_knotes, NOTE_REVOKE);
4082
4083 /* Make sure that when we reuse the vnode, no knotes left over */
4084 klist_init(&vp->v_knotes);
4085
4086 if (vp->v_lflag & VL_TERMWANT) {
4087 vp->v_lflag &= ~VL_TERMWANT;
4088 wakeup(&vp->v_lflag);
4089 }
4090 if (!reuse) {
4091 /*
4092 * make sure we get on the
4093 * dead list if appropriate
4094 */
4095 vnode_list_add(vp);
4096 }
4097 if (!locked)
4098 vnode_unlock(vp);
4099 }
4100
4101 /* USAGE:
4102 * The following API creates a vnode and associates all the parameters specified in the
4103 * vnode_fsparam structure, returning a vnode handle with a reference. Device aliasing is
4104 * handled here, so checkalias is obsoleted by this.
4105 * vnode_create(int flavor, size_t size, void * param, vnode_t *vp)
4106 */
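/*
 * Illustrative sketch (not from this file; the "myfs" names and values are
 * hypothetical): a filesystem typically fills in a vnode_fsparam and calls
 * vnode_create() to get back a vnode with an iocount held:
 *
 *	struct vnode_fsparam vfsp;
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	bzero(&vfsp, sizeof(vfsp));
 *	vfsp.vnfs_mp = mp;                  // mount this vnode belongs to
 *	vfsp.vnfs_vtype = VREG;             // regular file
 *	vfsp.vnfs_str = "myfs";             // fs name, debug aid
 *	vfsp.vnfs_dvp = dvp;                // parent directory vnode
 *	vfsp.vnfs_fsnode = mynode;          // fs-private node
 *	vfsp.vnfs_vops = myfs_vnodeop_p;    // vnode operations vector
 *	vfsp.vnfs_cnp = cnp;                // name to enter in the namecache
 *	vfsp.vnfs_filesize = mynode->size;  // needed for VREG ubc setup
 *	vfsp.vnfs_flags = VNFS_ADDFSREF;    // take a named fs reference
 *
 *	error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vp);
 */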
4107 int
4108 vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
4109 {
4110 int error;
4111 int insert = 1;
4112 vnode_t vp;
4113 vnode_t nvp;
4114 vnode_t dvp;
4115 struct uthread *ut;
4116 struct componentname *cnp;
4117 struct vnode_fsparam *param = (struct vnode_fsparam *)data;
4118
4119 if (flavor == VNCREATE_FLAVOR && (size == VCREATESIZE) && param) {
4120 if ( (error = new_vnode(&vp)) ) {
4121 return(error);
4122 } else {
4123 dvp = param->vnfs_dvp;
4124 cnp = param->vnfs_cnp;
4125
4126 vp->v_op = param->vnfs_vops;
4127 vp->v_type = param->vnfs_vtype;
4128 vp->v_data = param->vnfs_fsnode;
4129
4130 if (param->vnfs_markroot)
4131 vp->v_flag |= VROOT;
4132 if (param->vnfs_marksystem)
4133 vp->v_flag |= VSYSTEM;
4134 if (vp->v_type == VREG) {
4135 error = ubc_info_init_withsize(vp, param->vnfs_filesize);
4136 if (error) {
4137 #ifdef JOE_DEBUG
4138 record_vp(vp, 1);
4139 #endif
4140 vp->v_mount = NULL;
4141 vp->v_op = dead_vnodeop_p;
4142 vp->v_tag = VT_NON;
4143 vp->v_data = NULL;
4144 vp->v_type = VBAD;
4145 vp->v_lflag |= VL_DEAD;
4146
4147 vnode_put(vp);
4148 return(error);
4149 }
4150 }
4151 #ifdef JOE_DEBUG
4152 record_vp(vp, 1);
4153 #endif
4154 if (vp->v_type == VCHR || vp->v_type == VBLK) {
4155
4156 vp->v_tag = VT_DEVFS; /* callers will reset if needed (bdevvp) */
4157
4158 if ( (nvp = checkalias(vp, param->vnfs_rdev)) ) {
4159 /*
4160 * if checkalias returns a vnode, it will be locked
4161 *
4162 * first get rid of the unneeded vnode we acquired
4163 */
4164 vp->v_data = NULL;
4165 vp->v_op = spec_vnodeop_p;
4166 vp->v_type = VBAD;
4167 vp->v_lflag = VL_DEAD;
4168 vp->v_data = NULL;
4169 vp->v_tag = VT_NON;
4170 vnode_put(vp);
4171
4172 /*
4173 * switch to aliased vnode and finish
4174 * preparing it
4175 */
4176 vp = nvp;
4177
4178 vclean(vp, 0);
4179 vp->v_op = param->vnfs_vops;
4180 vp->v_type = param->vnfs_vtype;
4181 vp->v_data = param->vnfs_fsnode;
4182 vp->v_lflag = 0;
4183 vp->v_mount = NULL;
4184 insmntque(vp, param->vnfs_mp);
4185 insert = 0;
4186 vnode_unlock(vp);
4187 }
4188 }
4189
4190 if (vp->v_type == VFIFO) {
4191 struct fifoinfo *fip;
4192
4193 MALLOC(fip, struct fifoinfo *,
4194 sizeof(*fip), M_TEMP, M_WAITOK);
4195 bzero(fip, sizeof(struct fifoinfo ));
4196 vp->v_fifoinfo = fip;
4197 }
4198 /* The file system must pass the address of the location where
4199 * it stores the vnode pointer. Once we add the vnode into the mount
4200 * list and name cache, it becomes discoverable. So the file system node
4201 * must have its connection to the vnode set up by then.
4202 */
4203 *vpp = vp;
4204
4205 /* Add fs named reference. */
4206 if (param->vnfs_flags & VNFS_ADDFSREF) {
4207 vp->v_lflag |= VNAMED_FSHASH;
4208 }
4209 if (param->vnfs_mp) {
4210 if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL)
4211 vp->v_flag |= VLOCKLOCAL;
4212 if (insert) {
4213 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4214 panic("insmntque: vp on the free list\n");
4215 /*
4216 * enter in mount vnode list
4217 */
4218 insmntque(vp, param->vnfs_mp);
4219 }
4220 #ifndef __LP64__
4221 if ((param->vnfs_mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE) == 0) {
4222 MALLOC_ZONE(vp->v_unsafefs, struct unsafe_fsnode *,
4223 sizeof(struct unsafe_fsnode), M_UNSAFEFS, M_WAITOK);
4224 vp->v_unsafefs->fsnode_count = 0;
4225 vp->v_unsafefs->fsnodeowner = (void *)NULL;
4226 lck_mtx_init(&vp->v_unsafefs->fsnodelock, vnode_lck_grp, vnode_lck_attr);
4227 }
4228 #endif /* __LP64__ */
4229 }
4230 if (dvp && vnode_ref(dvp) == 0) {
4231 vp->v_parent = dvp;
4232 }
4233 if (cnp) {
4234 if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
4235 /*
4236 * enter into name cache
4237 * we've got the info to enter it into the name cache now
4238 * cache_enter_create will pick up an extra reference on
4239 * the name entered into the string cache
4240 */
4241 vp->v_name = cache_enter_create(dvp, vp, cnp);
4242 } else
4243 vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
4244
4245 if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED)
4246 vp->v_flag |= VISUNION;
4247 }
4248 if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
4249 /*
4250 * this vnode is being created as cacheable in the name cache
4251 * this allows us to re-enter it in the cache
4252 */
4253 vp->v_flag |= VNCACHEABLE;
4254 }
4255 ut = get_bsdthread_info(current_thread());
4256
4257 if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
4258 (ut->uu_flag & UT_RAGE_VNODES)) {
4259 /*
4260 * process has indicated that it wants any
4261 * vnodes created on its behalf to be rapidly
4262 * aged to reduce the impact on the cached set
4263 * of vnodes
4264 */
4265 vp->v_flag |= VRAGE;
4266 }
4267 return(0);
4268 }
4269 }
4270 return (EINVAL);
4271 }
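/*
 * Illustrative sketch (comment only, not compiled): how a filesystem's
 * lookup path might call vnode_create().  The names my_fs_vnodeops,
 * my_node and my_node_size are hypothetical placeholders for the
 * filesystem's own state; the vnode_fsparam fields are the ones
 * consumed above.
 *
 *	struct vnode_fsparam vfsp;
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	bzero(&vfsp, sizeof(vfsp));
 *	vfsp.vnfs_mp = mp;			// mount the vnode lives on
 *	vfsp.vnfs_vtype = VREG;			// regular file
 *	vfsp.vnfs_dvp = dvp;			// parent directory
 *	vfsp.vnfs_cnp = cnp;			// name being looked up
 *	vfsp.vnfs_vops = my_fs_vnodeops;	// vnode operations vector
 *	vfsp.vnfs_fsnode = my_node;		// filesystem-private node
 *	vfsp.vnfs_filesize = my_node_size;	// needed for VREG (ubc init)
 *	vfsp.vnfs_flags = VNFS_ADDFSREF;	// take a named fs reference
 *
 *	error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vp);
 *	// on success, vp is returned with an iocount held
 */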
4272
4273 int
4274 vnode_addfsref(vnode_t vp)
4275 {
4276 vnode_lock_spin(vp);
4277 if (vp->v_lflag & VNAMED_FSHASH)
4278 panic("add_fsref: vp already has named reference");
4279 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4280 panic("addfsref: vp on the free list\n");
4281 vp->v_lflag |= VNAMED_FSHASH;
4282 vnode_unlock(vp);
4283 return(0);
4284
4285 }
4286 int
4287 vnode_removefsref(vnode_t vp)
4288 {
4289 vnode_lock_spin(vp);
4290 if ((vp->v_lflag & VNAMED_FSHASH) == 0)
4291 panic("remove_fsref: no named reference");
4292 vp->v_lflag &= ~VNAMED_FSHASH;
4293 vnode_unlock(vp);
4294 return(0);
4295
4296 }
4297
4298
4299 int
4300 vfs_iterate(__unused int flags, int (*callout)(mount_t, void *), void *arg)
4301 {
4302 mount_t mp;
4303 int ret = 0;
4304 fsid_t * fsid_list;
4305 int count, actualcount, i;
4306 void * allocmem;
4307
4308 count = mount_getvfscnt();
4309 count += 10;
4310
4311 fsid_list = (fsid_t *)kalloc(count * sizeof(fsid_t));
4312 allocmem = (void *)fsid_list;
4313
4314 actualcount = mount_fillfsids(fsid_list, count);
4315
4316 for (i=0; i< actualcount; i++) {
4317
4318 /* obtain the mount point with iteration reference */
4319 mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);
4320
4321 if(mp == (struct mount *)0)
4322 continue;
4323 mount_lock(mp);
4324 if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
4325 mount_unlock(mp);
4326 mount_iterdrop(mp);
4327 continue;
4328
4329 }
4330 mount_unlock(mp);
4331
4332 /* iterate over all the vnodes */
4333 ret = callout(mp, arg);
4334
4335 mount_iterdrop(mp);
4336
4337 switch (ret) {
4338 case VFS_RETURNED:
4339 case VFS_RETURNED_DONE:
4340 if (ret == VFS_RETURNED_DONE) {
4341 ret = 0;
4342 goto out;
4343 }
4344 break;
4345
4346 case VFS_CLAIMED_DONE:
4347 ret = 0;
4348 goto out;
4349 case VFS_CLAIMED:
4350 default:
4351 break;
4352 }
4353 ret = 0;
4354 }
4355
4356 out:
4357 kfree(allocmem, (count * sizeof(fsid_t)));
4358 return (ret);
4359 }
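/*
 * Illustrative sketch (comment only): a minimal vfs_iterate() callout
 * that counts the mounts it visits.  count_mounts_callout is a
 * hypothetical name; the return values follow the switch above.
 *
 *	static int
 *	count_mounts_callout(mount_t mp, void *arg)
 *	{
 *		int *countp = (int *)arg;
 *
 *		(*countp)++;
 *		return (VFS_RETURNED);		// keep iterating
 *	}
 *
 *	int nmounts = 0;
 *	vfs_iterate(0, count_mounts_callout, &nmounts);
 */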
4360
4361 /*
4362 * Update the vfsstatfs structure in the mountpoint.
4363 * MAC: Parameter eventtype added, indicating whether the event that
4364 * triggered this update came from user space, via a system call
4365 * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
4366 */
4367 int
4368 vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
4369 {
4370 struct vfs_attr va;
4371 int error;
4372
4373 /*
4374 * Request the attributes we want to propagate into
4375 * the per-mount vfsstat structure.
4376 */
4377 VFSATTR_INIT(&va);
4378 VFSATTR_WANTED(&va, f_iosize);
4379 VFSATTR_WANTED(&va, f_blocks);
4380 VFSATTR_WANTED(&va, f_bfree);
4381 VFSATTR_WANTED(&va, f_bavail);
4382 VFSATTR_WANTED(&va, f_bused);
4383 VFSATTR_WANTED(&va, f_files);
4384 VFSATTR_WANTED(&va, f_ffree);
4385 VFSATTR_WANTED(&va, f_bsize);
4386 VFSATTR_WANTED(&va, f_fssubtype);
4387 #if CONFIG_MACF
4388 if (eventtype == VFS_USER_EVENT) {
4389 error = mac_mount_check_getattr(ctx, mp, &va);
4390 if (error != 0)
4391 return (error);
4392 }
4393 #endif
4394
4395 if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
4396 KAUTH_DEBUG("STAT - filesystem returned error %d", error);
4397 return(error);
4398 }
4399
4400 /*
4401 * Unpack into the per-mount structure.
4402 *
4403 * We only overwrite these fields, which are likely to change:
4404 * f_blocks
4405 * f_bfree
4406 * f_bavail
4407 * f_bused
4408 * f_files
4409 * f_ffree
4410 *
4411 * And these which are not, but which the FS has no other way
4412 * of providing to us:
4413 * f_bsize
4414 * f_iosize
4415 * f_fssubtype
4416 *
4417 */
4418 if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
4419 /* 4822056 - protect against malformed server mount */
4420 mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
4421 } else {
4422 mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */
4423 }
4424 if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
4425 mp->mnt_vfsstat.f_iosize = va.f_iosize;
4426 } else {
4427 mp->mnt_vfsstat.f_iosize = 1024 * 1024; /* 1MB sensible I/O size */
4428 }
4429 if (VFSATTR_IS_SUPPORTED(&va, f_blocks))
4430 mp->mnt_vfsstat.f_blocks = va.f_blocks;
4431 if (VFSATTR_IS_SUPPORTED(&va, f_bfree))
4432 mp->mnt_vfsstat.f_bfree = va.f_bfree;
4433 if (VFSATTR_IS_SUPPORTED(&va, f_bavail))
4434 mp->mnt_vfsstat.f_bavail = va.f_bavail;
4435 if (VFSATTR_IS_SUPPORTED(&va, f_bused))
4436 mp->mnt_vfsstat.f_bused = va.f_bused;
4437 if (VFSATTR_IS_SUPPORTED(&va, f_files))
4438 mp->mnt_vfsstat.f_files = va.f_files;
4439 if (VFSATTR_IS_SUPPORTED(&va, f_ffree))
4440 mp->mnt_vfsstat.f_ffree = va.f_ffree;
4441
4442 /* this is unlikely to change, but has to be queried for */
4443 if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype))
4444 mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;
4445
4446 return(0);
4447 }
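/*
 * Illustrative sketch (comment only): refreshing the cached statistics
 * before consuming mnt_vfsstat, distinguishing the MAC-checked user
 * path from an internal kernel path:
 *
 *	error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT);	// user-triggered, MAC-checked
 *	error = vfs_update_vfsstat(mp, ctx, VFS_KERNEL_EVENT);	// internal kernel caller
 */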
4448
4449 int
4450 mount_list_add(mount_t mp)
4451 {
4452 int res;
4453
4454 mount_list_lock();
4455 if (system_inshutdown != 0) {
4456 res = -1;
4457 } else {
4458 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
4459 nummounts++;
4460 res = 0;
4461 }
4462 mount_list_unlock();
4463
4464 return res;
4465 }
4466
4467 void
4468 mount_list_remove(mount_t mp)
4469 {
4470 mount_list_lock();
4471 TAILQ_REMOVE(&mountlist, mp, mnt_list);
4472 nummounts--;
4473 mp->mnt_list.tqe_next = NULL;
4474 mp->mnt_list.tqe_prev = NULL;
4475 mount_list_unlock();
4476 }
4477
4478 mount_t
4479 mount_lookupby_volfsid(int volfs_id, int withref)
4480 {
4481 mount_t cur_mount = (mount_t)0;
4482 mount_t mp;
4483
4484 mount_list_lock();
4485 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4486 if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
4487 (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
4488 (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
4489 cur_mount = mp;
4490 if (withref) {
4491 if (mount_iterref(cur_mount, 1)) {
4492 cur_mount = (mount_t)0;
4493 mount_list_unlock();
4494 goto out;
4495 }
4496 }
4497 break;
4498 }
4499 }
4500 mount_list_unlock();
4501 if (withref && (cur_mount != (mount_t)0)) {
4502 mp = cur_mount;
4503 if (vfs_busy(mp, LK_NOWAIT) != 0) {
4504 cur_mount = (mount_t)0;
4505 }
4506 mount_iterdrop(mp);
4507 }
4508 out:
4509 return(cur_mount);
4510 }
4511
4512 mount_t
4513 mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
4514 {
4515 mount_t retmp = (mount_t)0;
4516 mount_t mp;
4517
4518 if (!locked)
4519 mount_list_lock();
4520 TAILQ_FOREACH(mp, &mountlist, mnt_list)
4521 if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
4522 mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
4523 retmp = mp;
4524 if (withref) {
4525 if (mount_iterref(retmp, 1))
4526 retmp = (mount_t)0;
4527 }
4528 goto out;
4529 }
4530 out:
4531 if (!locked)
4532 mount_list_unlock();
4533 return (retmp);
4534 }
4535
4536 errno_t
4537 vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
4538 {
4539 struct nameidata nd;
4540 int error;
4541 u_int32_t ndflags = 0;
4542
4543 if (ctx == NULL) { /* XXX technically an error */
4544 ctx = vfs_context_current();
4545 }
4546
4547 if (flags & VNODE_LOOKUP_NOFOLLOW)
4548 ndflags = NOFOLLOW;
4549 else
4550 ndflags = FOLLOW;
4551
4552 if (flags & VNODE_LOOKUP_NOCROSSMOUNT)
4553 ndflags |= NOCROSSMOUNT;
4554 if (flags & VNODE_LOOKUP_DOWHITEOUT)
4555 ndflags |= DOWHITEOUT;
4556
4557 /* XXX AUDITVNPATH1 needed ? */
4558 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
4559
4560 if ((error = namei(&nd)))
4561 return (error);
4562 *vpp = nd.ni_vp;
4563 nameidone(&nd);
4564
4565 return (0);
4566 }
4567
4568 errno_t
4569 vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
4570 {
4571 struct nameidata nd;
4572 int error;
4573 u_int32_t ndflags = 0;
4574 int lflags = flags;
4575
4576 if (ctx == NULL) { /* XXX technically an error */
4577 ctx = vfs_context_current();
4578 }
4579
4580 if (fmode & O_NOFOLLOW)
4581 lflags |= VNODE_LOOKUP_NOFOLLOW;
4582
4583 if (lflags & VNODE_LOOKUP_NOFOLLOW)
4584 ndflags = NOFOLLOW;
4585 else
4586 ndflags = FOLLOW;
4587
4588 if (lflags & VNODE_LOOKUP_NOCROSSMOUNT)
4589 ndflags |= NOCROSSMOUNT;
4590 if (lflags & VNODE_LOOKUP_DOWHITEOUT)
4591 ndflags |= DOWHITEOUT;
4592
4593 /* XXX AUDITVNPATH1 needed ? */
4594 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
4595
4596 if ((error = vn_open(&nd, fmode, cmode)))
4597 *vpp = NULL;
4598 else
4599 *vpp = nd.ni_vp;
4600
4601 return (error);
4602 }
4603
4604 errno_t
4605 vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
4606 {
4607 int error;
4608
4609 if (ctx == NULL) {
4610 ctx = vfs_context_current();
4611 }
4612
4613 error = vn_close(vp, flags, ctx);
4614 vnode_put(vp);
4615 return (error);
4616 }
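/*
 * Illustrative sketch (comment only): the vnode_open()/vnode_close()
 * pairing.  On success the vnode comes back opened with an iocount;
 * vnode_close() performs the vn_close() and then drops that iocount
 * via vnode_put(), as above.  The path is hypothetical.
 *
 *	vnode_t vp = NULLVP;
 *	vfs_context_t ctx = vfs_context_current();
 *	int error;
 *
 *	error = vnode_open("/tmp/example", FREAD, 0, 0, &vp, ctx);
 *	if (error == 0) {
 *		// ... read from vp ...
 *		error = vnode_close(vp, FREAD, ctx);
 *	}
 */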
4617
4618 /*
4619 * Returns: 0 Success
4620 * vnode_getattr:???
4621 */
4622 errno_t
4623 vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
4624 {
4625 struct vnode_attr va;
4626 int error;
4627
4628 VATTR_INIT(&va);
4629 VATTR_WANTED(&va, va_data_size);
4630 error = vnode_getattr(vp, &va, ctx);
4631 if (!error)
4632 *sizep = va.va_data_size;
4633 return(error);
4634 }
4635
4636 errno_t
4637 vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
4638 {
4639 struct vnode_attr va;
4640
4641 VATTR_INIT(&va);
4642 VATTR_SET(&va, va_data_size, size);
4643 va.va_vaflags = ioflag & 0xffff;
4644 return(vnode_setattr(vp, &va, ctx));
4645 }
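/*
 * Illustrative sketch (comment only): capping a file's length with the
 * attribute-based wrappers above; maxsize is a hypothetical limit.
 *
 *	off_t size;
 *
 *	if (vnode_size(vp, &size, ctx) == 0 && size > maxsize)
 *		error = vnode_setsize(vp, maxsize, 0, ctx);
 */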
4646
4647 /*
4648 * Create a filesystem object of arbitrary type with arbitrary attributes in
4649 * the specified directory with the specified name.
4650 *
4651 * Parameters: dvp Pointer to the vnode of the directory
4652 * in which to create the object.
4653 * vpp Pointer to the area into which to
4654 * return the vnode of the created object.
4655 * cnp Component name pointer from the namei
4656 * data structure, containing the name to
4657 * use for the create object.
4658 * vap Pointer to the vnode_attr structure
4659 * describing the object to be created,
4660 * including the type of object.
4661 * flags VN_* flags controlling ACL inheritance
4662 * and whether or not authorization is to
4663 * be required for the operation.
4664 *
4665 * Returns: 0 Success
4666 * !0 errno value
4667 *
4668 * Implicit: *vpp Contains the vnode of the object that
4669 * was created, if successful.
4670 * *cnp May be modified by the underlying VFS.
4671 * *vap May be modified by the underlying VFS,
4672 * whether by ACL inheritance or
4673 * the filesystem's own attribute
4674 * defaulting; the contents may
4675 * be modified, even if the operation is
4676 * unsuccessful, so they should not be
4677 * relied upon by the caller afterwards.
4678 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
4679 *
4680 * Modification of '*cnp' and '*vap' by the underlying VFS is
4681 * strongly discouraged.
4682 *
4683 * XXX: This function is a 'vn_*' function; it belongs in vfs_vnops.c
4684 *
4685 * XXX: We should enumerate the possible errno values here, and where
4686 * in the code they originated.
4687 */
4688 errno_t
4689 vn_create(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, struct vnode_attr *vap, int flags, vfs_context_t ctx)
4690 {
4691 kauth_acl_t oacl, nacl;
4692 int initial_acl;
4693 errno_t error;
4694 vnode_t vp = (vnode_t)0;
4695
4696 error = 0;
4697 oacl = nacl = NULL;
4698 initial_acl = 0;
4699
4700 KAUTH_DEBUG("%p CREATE - '%s'", dvp, cnp->cn_nameptr);
4701
4702 /*
4703 * Handle ACL inheritance.
4704 */
4705 if (!(flags & VN_CREATE_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
4706 /* save the original filesec */
4707 if (VATTR_IS_ACTIVE(vap, va_acl)) {
4708 initial_acl = 1;
4709 oacl = vap->va_acl;
4710 }
4711
4712 vap->va_acl = NULL;
4713 if ((error = kauth_acl_inherit(dvp,
4714 oacl,
4715 &nacl,
4716 vap->va_type == VDIR,
4717 ctx)) != 0) {
4718 KAUTH_DEBUG("%p CREATE - error %d processing inheritance", dvp, error);
4719 return(error);
4720 }
4721
4722 /*
4723 * If the generated ACL is NULL, then we can save ourselves some effort
4724 * by clearing the active bit.
4725 */
4726 if (nacl == NULL) {
4727 VATTR_CLEAR_ACTIVE(vap, va_acl);
4728 } else {
4729 VATTR_SET(vap, va_acl, nacl);
4730 }
4731 }
4732
4733 /*
4734 * Check and default new attributes.
4735 * This will set va_uid, va_gid, va_mode and va_create_time at least, if the caller
4736 * hasn't supplied them.
4737 */
4738 if ((error = vnode_authattr_new(dvp, vap, flags & VN_CREATE_NOAUTH, ctx)) != 0) {
4739 KAUTH_DEBUG("%p CREATE - error %d handing/defaulting attributes", dvp, error);
4740 goto out;
4741 }
4742
4743
4744 /*
4745 * Create the requested node.
4746 */
4747 switch(vap->va_type) {
4748 case VREG:
4749 error = VNOP_CREATE(dvp, vpp, cnp, vap, ctx);
4750 break;
4751 case VDIR:
4752 error = VNOP_MKDIR(dvp, vpp, cnp, vap, ctx);
4753 break;
4754 case VSOCK:
4755 case VFIFO:
4756 case VBLK:
4757 case VCHR:
4758 error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
4759 break;
4760 default:
4761 panic("vnode_create: unknown vtype %d", vap->va_type);
4762 }
4763 if (error != 0) {
4764 KAUTH_DEBUG("%p CREATE - error %d returned by filesystem", dvp, error);
4765 goto out;
4766 }
4767
4768 vp = *vpp;
4769 #if CONFIG_MACF
4770 if (!(flags & VN_CREATE_NOLABEL)) {
4771 error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
4772 if (error)
4773 goto error;
4774 }
4775 #endif
4776
4777 /*
4778 * If some of the requested attributes weren't handled by the VNOP,
4779 * use our fallback code.
4780 */
4781 if (!VATTR_ALL_SUPPORTED(vap) && *vpp) {
4782 KAUTH_DEBUG(" CREATE - doing fallback with ACL %p", vap->va_acl);
4783 error = vnode_setattr_fallback(*vpp, vap, ctx);
4784 }
4785 #if CONFIG_MACF
4786 error:
4787 #endif
4788 if ((error != 0 ) && (vp != (vnode_t)0)) {
4789 *vpp = (vnode_t) 0;
4790 vnode_put(vp);
4791 }
4792
4793 out:
4794 /*
4795 * If the caller supplied a filesec in vap, it has been replaced
4796 * now by the post-inheritance copy. We need to put the original back
4797 * and free the inherited product.
4798 */
4799 if (initial_acl) {
4800 VATTR_SET(vap, va_acl, oacl);
4801 } else {
4802 VATTR_CLEAR_ACTIVE(vap, va_acl);
4803 }
4804 if (nacl != NULL)
4805 kauth_acl_free(nacl);
4806
4807 return(error);
4808 }
4809
4810 static kauth_scope_t vnode_scope;
4811 static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
4812 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
4813 static int vnode_authorize_callback_int(__unused kauth_cred_t credential, __unused void *idata, kauth_action_t action,
4814 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
4815
4816 typedef struct _vnode_authorize_context {
4817 vnode_t vp;
4818 struct vnode_attr *vap;
4819 vnode_t dvp;
4820 struct vnode_attr *dvap;
4821 vfs_context_t ctx;
4822 int flags;
4823 int flags_valid;
4824 #define _VAC_IS_OWNER (1<<0)
4825 #define _VAC_IN_GROUP (1<<1)
4826 #define _VAC_IS_DIR_OWNER (1<<2)
4827 #define _VAC_IN_DIR_GROUP (1<<3)
4828 } *vauth_ctx;
4829
4830 void
4831 vnode_authorize_init(void)
4832 {
4833 vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
4834 }
4835
4836 /*
4837 * Authorize an operation on a vnode.
4838 *
4839 * This is KPI, but here because it needs vnode_scope.
4840 *
4841 * Returns: 0 Success
4842 * kauth_authorize_action:EPERM ...
4843 * xlate => EACCES Permission denied
4844 * kauth_authorize_action:0 Success
4845 * kauth_authorize_action: Depends on callback return; this is
4846 * usually only vnode_authorize_callback(),
4847 * but may include other listeners, if any
4848 * exist.
4849 * EROFS
4850 * EACCES
4851 * EPERM
4852 * ???
4853 */
4854 int
4855 vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
4856 {
4857 int error, result;
4858
4859 /*
4860 * We can't authorize against a dead vnode; allow all operations through so that
4861 * the correct error can be returned.
4862 */
4863 if (vp->v_type == VBAD)
4864 return(0);
4865
4866 error = 0;
4867 result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
4868 (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
4869 if (result == EPERM) /* traditional behaviour */
4870 result = EACCES;
4871 /* did the lower layers give a better error return? */
4872 if ((result != 0) && (error != 0))
4873 return(error);
4874 return(result);
4875 }
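/*
 * Illustrative sketch (comment only): asking whether the caller may
 * read a file.  A zero return means the action is authorized; note
 * the EPERM -> EACCES translation above.
 *
 *	error = vnode_authorize(vp, NULLVP, KAUTH_VNODE_READ_DATA, ctx);
 *	if (error == 0) {
 *		// read access granted
 *	}
 */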
4876
4877 /*
4878 * Test for vnode immutability.
4879 *
4880 * The 'append' flag is set when the authorization request is constrained
4881 * to operations which only request the right to append to a file.
4882 *
4883 * The 'ignore' flag is set when an operation modifying the immutability flags
4884 * is being authorized. We check the system securelevel to determine which
4885 * immutability flags we can ignore.
4886 */
4887 static int
4888 vnode_immutable(struct vnode_attr *vap, int append, int ignore)
4889 {
4890 int mask;
4891
4892 /* start with all bits precluding the operation */
4893 mask = IMMUTABLE | APPEND;
4894
4895 /* if appending only, remove the append-only bits */
4896 if (append)
4897 mask &= ~APPEND;
4898
4899 /* ignore only set when authorizing flags changes */
4900 if (ignore) {
4901 if (securelevel <= 0) {
4902 /* in insecure state, flags do not inhibit changes */
4903 mask = 0;
4904 } else {
4905 /* in secure state, user flags don't inhibit */
4906 mask &= ~(UF_IMMUTABLE | UF_APPEND);
4907 }
4908 }
4909 KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
4910 if ((vap->va_flags & mask) != 0)
4911 return(EPERM);
4912 return(0);
4913 }
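/*
 * Worked example for the mask logic above: an append-only write
 * (append = 1, ignore = 0) to a file with only UF_APPEND set.  The
 * mask starts as IMMUTABLE | APPEND, the append-only bits are then
 * removed, and since UF_APPEND is not in IMMUTABLE we get
 * (va_flags & mask) == 0 and the operation is permitted.  A full
 * write (append = 0) to the same file is denied with EPERM.
 */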
4914
4915 static int
4916 vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
4917 {
4918 int result;
4919
4920 /* default assumption is not-owner */
4921 result = 0;
4922
4923 /*
4924 * If the filesystem has given us a UID, we treat this as authoritative.
4925 */
4926 if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
4927 result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
4928 }
4929 /* we could test the owner UUID here if we had a policy for it */
4930
4931 return(result);
4932 }
4933
4934 /*
4935 * vauth_node_group
4936 *
4937 * Description: Ask if a cred is a member of the group owning the vnode object
4938 *
4939 * Parameters: vap vnode attribute
4940 * vap->va_gid group owner of vnode object
4941 * cred credential to check
4942 * ismember pointer to where to put the answer
4943 * idontknow Return this if we can't get an answer
4944 *
4945 * Returns: 0 Success
4946 * idontknow Can't get information
4947 * kauth_cred_ismember_gid:? Error from kauth subsystem
4948 * kauth_cred_ismember_gid:? Error from kauth subsystem
4949 */
4950 static int
4951 vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow)
4952 {
4953 int error;
4954 int result;
4955
4956 error = 0;
4957 result = 0;
4958
4959 /*
4960 * The caller is expected to have asked the filesystem for a group
4961 * at some point prior to calling this function. The answer may
4962 * have been that there is no group ownership supported for the
4963 * vnode object, in which case we return success with *ismember set to 0.
4964 */
4965 if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
4966 error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
4967 /*
4968 * Credentials which are opted into external group membership
4969 * resolution which are not known to the external resolver
4970 * will result in an ENOENT error. We translate this into
4971 * the appropriate 'idontknow' response for our caller.
4972 *
4973 * XXX We do not make a distinction here between an ENOENT
4974 * XXX arising from a response from the external resolver,
4975 * XXX and an ENOENT which is internally generated. This is
4976 * XXX a deficiency of the published kauth_cred_ismember_gid()
4977 * XXX KPI which can not be overcome without new KPI. For
4978 * XXX all currently known cases, however, this will result
4979 * XXX in correct behaviour.
4980 */
4981 if (error == ENOENT)
4982 error = idontknow;
4983 }
4984 /*
4985 * XXX We could test the group UUID here if we had a policy for it,
4986 * XXX but this is problematic from the perspective of synchronizing
4987 * XXX group UUID and POSIX GID ownership of a file and keeping the
4988 * XXX values coherent over time. The problem is that the local
4989 * XXX system will vend transient group UUIDs for unknown POSIX GID
4990 * XXX values, and these are not persistent, whereas storage of values
4991 * XXX is persistent. One potential solution to this is a local
4992 * XXX (persistent) replica of remote directory entries and vended
4993 * XXX local ids in a local directory server (think in terms of a
4994 * XXX caching DNS server).
4995 */
4996
4997 if (!error)
4998 *ismember = result;
4999 return(error);
5000 }
5001
5002 static int
5003 vauth_file_owner(vauth_ctx vcp)
5004 {
5005 int result;
5006
5007 if (vcp->flags_valid & _VAC_IS_OWNER) {
5008 result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
5009 } else {
5010 result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);
5011
5012 /* cache our result */
5013 vcp->flags_valid |= _VAC_IS_OWNER;
5014 if (result) {
5015 vcp->flags |= _VAC_IS_OWNER;
5016 } else {
5017 vcp->flags &= ~_VAC_IS_OWNER;
5018 }
5019 }
5020 return(result);
5021 }
5022
5023
5024 /*
5025 * vauth_file_ingroup
5026 *
5027 * Description: Ask if a user is a member of the group owning the file
5028 *
5029 * Parameters: vcp The vnode authorization context that
5030 * contains the user and file info
5031 * vcp->flags_valid Valid flags
5032 * vcp->flags Flags values
5033 * vcp->vap File vnode attributes
5034 * vcp->ctx VFS Context (for user)
5035 * ismember pointer to where to put the answer
5036 * idontknow Return this if we can't get an answer
5037 *
5038 * Returns: 0 Success
5039 * vauth_node_group:? Error from vauth_node_group()
5040 *
5041 * Implicit returns: *ismember 0 The user is not a group member
5042 * 1 The user is a group member
5043 */
5044 static int
5045 vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
5046 {
5047 int error;
5048
5049 /* Check for a cached answer first, to avoid the check if possible */
5050 if (vcp->flags_valid & _VAC_IN_GROUP) {
5051 *ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
5052 error = 0;
5053 } else {
5054 /* Otherwise, go look for it */
5055 error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember, idontknow);
5056
5057 if (!error) {
5058 /* cache our result */
5059 vcp->flags_valid |= _VAC_IN_GROUP;
5060 if (*ismember) {
5061 vcp->flags |= _VAC_IN_GROUP;
5062 } else {
5063 vcp->flags &= ~_VAC_IN_GROUP;
5064 }
5065 }
5066
5067 }
5068 return(error);
5069 }
5070
5071 static int
5072 vauth_dir_owner(vauth_ctx vcp)
5073 {
5074 int result;
5075
5076 if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
5077 result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
5078 } else {
5079 result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);
5080
5081 /* cache our result */
5082 vcp->flags_valid |= _VAC_IS_DIR_OWNER;
5083 if (result) {
5084 vcp->flags |= _VAC_IS_DIR_OWNER;
5085 } else {
5086 vcp->flags &= ~_VAC_IS_DIR_OWNER;
5087 }
5088 }
5089 return(result);
5090 }
5091
5092 /*
5093 * vauth_dir_ingroup
5094 *
5095 * Description: Ask if a user is a member of the group owning the directory
5096 *
5097 * Parameters: vcp The vnode authorization context that
5098 * contains the user and directory info
5099 * vcp->flags_valid Valid flags
5100 * vcp->flags Flags values
5101 * vcp->dvap Dir vnode attributes
5102 * vcp->ctx VFS Context (for user)
5103 * ismember pointer to where to put the answer
5104 * idontknow Return this if we can't get an answer
5105 *
5106 * Returns: 0 Success
5107 * vauth_node_group:? Error from vauth_node_group()
5108 *
5109 * Implicit returns: *ismember 0 The user is not a group member
5110 * 1 The user is a group member
5111 */
5112 static int
5113 vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
5114 {
5115 int error;
5116
5117 /* Check for a cached answer first, to avoid the check if possible */
5118 if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
5119 *ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
5120 error = 0;
5121 } else {
5122 /* Otherwise, go look for it */
5123 error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember, idontknow);
5124
5125 if (!error) {
5126 /* cache our result */
5127 vcp->flags_valid |= _VAC_IN_DIR_GROUP;
5128 if (*ismember) {
5129 vcp->flags |= _VAC_IN_DIR_GROUP;
5130 } else {
5131 vcp->flags &= ~_VAC_IN_DIR_GROUP;
5132 }
5133 }
5134 }
5135 return(error);
5136 }
5137
5138 /*
5139 * Test the posix permissions in (vap) to determine whether (credential)
5140 * may perform (action)
5141 */
5142 static int
5143 vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
5144 {
5145 struct vnode_attr *vap;
5146 int needed, error, owner_ok, group_ok, world_ok, ismember;
5147 #ifdef KAUTH_DEBUG_ENABLE
5148 const char *where = "uninitialized";
5149 # define _SETWHERE(c) where = c;
5150 #else
5151 # define _SETWHERE(c)
5152 #endif
5153
5154 /* checking file or directory? */
5155 if (on_dir) {
5156 vap = vcp->dvap;
5157 } else {
5158 vap = vcp->vap;
5159 }
5160
5161 error = 0;
5162
5163 /*
5164 * We want to do as little work here as possible. So first we check
5165 * which sets of permissions grant us the access we need, and avoid checking
5166 * whether specific permissions grant access when more generic ones would.
5167 */
5168
5169 /* owner permissions */
5170 needed = 0;
5171 if (action & VREAD)
5172 needed |= S_IRUSR;
5173 if (action & VWRITE)
5174 needed |= S_IWUSR;
5175 if (action & VEXEC)
5176 needed |= S_IXUSR;
5177 owner_ok = (needed & vap->va_mode) == needed;
5178
5179 /* group permissions */
5180 needed = 0;
5181 if (action & VREAD)
5182 needed |= S_IRGRP;
5183 if (action & VWRITE)
5184 needed |= S_IWGRP;
5185 if (action & VEXEC)
5186 needed |= S_IXGRP;
5187 group_ok = (needed & vap->va_mode) == needed;
5188
5189 /* world permissions */
5190 needed = 0;
5191 if (action & VREAD)
5192 needed |= S_IROTH;
5193 if (action & VWRITE)
5194 needed |= S_IWOTH;
5195 if (action & VEXEC)
5196 needed |= S_IXOTH;
5197 world_ok = (needed & vap->va_mode) == needed;
5198
5199 /* If granted/denied by all three, we're done */
5200 if (owner_ok && group_ok && world_ok) {
5201 _SETWHERE("all");
5202 goto out;
5203 }
5204 if (!owner_ok && !group_ok && !world_ok) {
5205 _SETWHERE("all");
5206 error = EACCES;
5207 goto out;
5208 }
5209
5210 /* Check ownership (relatively cheap) */
5211 if ((on_dir && vauth_dir_owner(vcp)) ||
5212 (!on_dir && vauth_file_owner(vcp))) {
5213 _SETWHERE("user");
5214 if (!owner_ok)
5215 error = EACCES;
5216 goto out;
5217 }
5218
5219 /* Not owner; if group and world both grant it we're done */
5220 if (group_ok && world_ok) {
5221 _SETWHERE("group/world");
5222 goto out;
5223 }
5224 if (!group_ok && !world_ok) {
5225 _SETWHERE("group/world");
5226 error = EACCES;
5227 goto out;
5228 }
5229
5230 /* Check group membership (most expensive) */
5231 ismember = 0; /* Default to allow, if the target has no group owner */
5232
5233 /*
5234 * In the case we can't get an answer about the user from the call to
5235 * vauth_dir_ingroup() or vauth_file_ingroup(), we want to fail on
5236 * the side of caution, rather than simply granting access, or we will
5237 * fail to correctly implement exclusion groups, so we set the third
5238 * parameter on the basis of the state of 'group_ok'.
5239 */
5240 if (on_dir) {
5241 error = vauth_dir_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
5242 } else {
5243 error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
5244 }
5245 if (error)
5246 goto out;
5247 if (ismember) {
5248 _SETWHERE("group");
5249 if (!group_ok)
5250 error = EACCES;
5251 goto out;
5252 }
5253
5254 /* Not owner, not in group, use world result */
5255 _SETWHERE("world");
5256 if (!world_ok)
5257 error = EACCES;
5258
5259 /* FALLTHROUGH */
5260
5261 out:
5262 KAUTH_DEBUG("%p %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
5263 vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
5264 (action & VREAD) ? "r" : "-",
5265 (action & VWRITE) ? "w" : "-",
5266 (action & VEXEC) ? "x" : "-",
5267 needed,
5268 (vap->va_mode & S_IRUSR) ? "r" : "-",
5269 (vap->va_mode & S_IWUSR) ? "w" : "-",
5270 (vap->va_mode & S_IXUSR) ? "x" : "-",
5271 (vap->va_mode & S_IRGRP) ? "r" : "-",
5272 (vap->va_mode & S_IWGRP) ? "w" : "-",
5273 (vap->va_mode & S_IXGRP) ? "x" : "-",
5274 (vap->va_mode & S_IROTH) ? "r" : "-",
5275 (vap->va_mode & S_IWOTH) ? "w" : "-",
5276 (vap->va_mode & S_IXOTH) ? "x" : "-",
5277 kauth_cred_getuid(vcp->ctx->vc_ucred),
5278 on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
5279 on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
5280 return(error);
5281 }
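/*
 * Worked example for the short-circuit logic above: action = VWRITE
 * against a file with mode 0640 (rw-r-----).  owner_ok = 1 (S_IWUSR
 * set), group_ok = 0, world_ok = 0, so the three classes disagree.
 * If the caller owns the file we stop at "user" and allow; otherwise,
 * since group and world both deny, we stop at "group/world" with
 * EACCES and never pay for the group-membership lookup.
 */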
5282
5283 /*
5284 * Authorize the deletion of the node vp from the directory dvp.
5285 *
5286 * We assume that:
5287 * - Neither the node nor the directory are immutable.
5288 * - The user is not the superuser.
5289 *
5290 * Deletion is not permitted if the directory is sticky and the caller is
5291 * not owner of the node or directory.
5292 *
5293 * If either the node grants DELETE, or the directory grants DELETE_CHILD,
5294 * the node may be deleted. If neither denies the permission, and the
5295 * caller has Posix write access to the directory, then the node may be
5296 * deleted.
5297 *
5298 * As an optimization, we cache whether or not delete child is permitted
5299 * on directories without the sticky bit set.
5300 */
5301 int
5302 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child);
5303 /*static*/ int
5304 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
5305 {
5306 struct vnode_attr *vap = vcp->vap;
5307 struct vnode_attr *dvap = vcp->dvap;
5308 kauth_cred_t cred = vcp->ctx->vc_ucred;
5309 struct kauth_acl_eval eval;
5310 int error, delete_denied, delete_child_denied, ismember;
5311
5312 /* check the ACL on the directory */
5313 delete_child_denied = 0;
5314 if (!cached_delete_child && VATTR_IS_NOT(dvap, va_acl, NULL)) {
5315 errno_t posix_error;
5316
5317 eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
5318 eval.ae_acl = &dvap->va_acl->acl_ace[0];
5319 eval.ae_count = dvap->va_acl->acl_entrycount;
5320 eval.ae_options = 0;
5321 if (vauth_dir_owner(vcp))
5322 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5323 /*
5324 * We use ENOENT as a marker to indicate we could not get
5325 * information in order to delay evaluation until after we
5326 * have the ACL evaluation answer. Previously, we would
5327 * always deny the operation at this point.
5328 */
5329 if ((posix_error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && posix_error != ENOENT)
5330 return(posix_error);
5331 if (ismember)
5332 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5333 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5334 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5335 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5336 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5337
5338 /*
5339 * If there is no entry, we are going to defer to other
5340 * authorization mechanisms.
5341 */
5342 error = kauth_acl_evaluate(cred, &eval);
5343
5344 if (error != 0) {
5345 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5346 return(error);
5347 }
5348 switch(eval.ae_result) {
5349 case KAUTH_RESULT_DENY:
5350 delete_child_denied = 1;
5351 break;
5352 case KAUTH_RESULT_ALLOW:
5353 KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp);
5354 return(0);
5355 case KAUTH_RESULT_DEFER:
5356 /*
5357 * If we don't have a POSIX answer of "yes", and we
5358 * can't get an ACL answer, then we deny it now.
5359 */
5360 if (posix_error == ENOENT) {
5361 delete_child_denied = 1;
5362 break;
5363 }
5364 default:
5365 /* Effectively the same as !delete_child_denied */
5366 KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
5367 break;
5368 }
5369 }
5370
5371 /* check the ACL on the node */
5372 delete_denied = 0;
5373 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
5374 errno_t posix_error;
5375
5376 eval.ae_requested = KAUTH_VNODE_DELETE;
5377 eval.ae_acl = &vap->va_acl->acl_ace[0];
5378 eval.ae_count = vap->va_acl->acl_entrycount;
5379 eval.ae_options = 0;
5380 if (vauth_file_owner(vcp))
5381 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5382 /*
5383 * We use ENOENT as a marker to indicate we could not get
5384 * information in order to delay evaluation until after we
5385 * have the ACL evaluation answer. Previously, we would
5386 * always deny the operation at this point.
5387 */
5388 if ((posix_error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && posix_error != ENOENT)
5389 return(posix_error);
5390 if (ismember)
5391 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5392 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5393 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5394 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5395 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5396
5397 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
5398 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5399 return(error);
5400 }
5401
5402 switch(eval.ae_result) {
5403 case KAUTH_RESULT_DENY:
5404 delete_denied = 1;
5405 break;
5406 case KAUTH_RESULT_ALLOW:
5407 KAUTH_DEBUG("%p ALLOWED - granted by file ACL", vcp->vp);
5408 return(0);
5409 case KAUTH_RESULT_DEFER:
5410 /*
5411 * If we don't have a POSIX answer of "yes", and we
5412 * can't get an ACL answer, then we deny it now.
5413 */
5414 if (posix_error == ENOENT) {
5415 delete_denied = 1;
5416 }
5417 default:
5418 /* Effectively the same as !delete_denied */
5419 KAUTH_DEBUG("%p DEFERRED%s - by file ACL", vcp->vp, delete_denied ? "(DENY)" : "");
5420 break;
5421 }
5422 }
5423
5424 /* if denied by ACL on directory or node, return denial */
5425 if (delete_denied || delete_child_denied) {
5426 KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp);
5427 return(EACCES);
5428 }
5429
5430 /*
5431 * enforce sticky bit behaviour; the cached_delete_child property will
5432 * be false and the dvap contents valid for sticky bit directories;
5433 * this makes us check the directory each time, but it's unavoidable,
5434 * as sticky bit is an exception to caching.
5435 */
5436 if (!cached_delete_child && (dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
5437 KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)",
5438 vcp->vp, cred->cr_uid, vap->va_uid, dvap->va_uid);
5439 return(EACCES);
5440 }
5441
5442 /* check the directory */
5443 if (!cached_delete_child && (error = vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */)) != 0) {
5444 KAUTH_DEBUG("%p ALLOWED - granted by posix permisssions", vcp->vp);
5445 return(error);
5446 }
5447
5448 /* not denied, must be OK */
5449 return(0);
5450 }
5451
5452
5453 /*
5454 * Authorize an operation based on the node's attributes.
5455 */
5456 static int
5457 vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
5458 {
5459 struct vnode_attr *vap = vcp->vap;
5460 kauth_cred_t cred = vcp->ctx->vc_ucred;
5461 struct kauth_acl_eval eval;
5462 int error, ismember;
5463 mode_t posix_action;
5464
5465 /*
5466 * If we are the file owner, we automatically have some rights.
5467 *
5468 * Do we need to expand this to support group ownership?
5469 */
5470 if (vauth_file_owner(vcp))
5471 acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);
5472
5473 /*
5474 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
5475 * mask the latter. If TAKE_OWNERSHIP is requested the caller is about to
5476 * change ownership to themselves, and WRITE_SECURITY is implicitly
5477 * granted to the owner. We need to do this because at this point
5478 * WRITE_SECURITY may not be granted as the caller is not currently
5479 * the owner.
5480 */
5481 if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
5482 (acl_rights & KAUTH_VNODE_WRITE_SECURITY))
5483 acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;
5484
5485 if (acl_rights == 0) {
5486 KAUTH_DEBUG("%p ALLOWED - implicit or no rights required", vcp->vp);
5487 return(0);
5488 }
5489
5490 /* if we have an ACL, evaluate it */
5491 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
5492 errno_t posix_error;
5493
5494 eval.ae_requested = acl_rights;
5495 eval.ae_acl = &vap->va_acl->acl_ace[0];
5496 eval.ae_count = vap->va_acl->acl_entrycount;
5497 eval.ae_options = 0;
5498 if (vauth_file_owner(vcp))
5499 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5500 /*
5501 * We use ENOENT as a marker to indicate we could not get
5502 * information in order to delay evaluation until after we
5503 * have the ACL evaluation answer. Previously, we would
5504 * always deny the operation at this point.
5505 */
5506 if ((posix_error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && posix_error != ENOENT)
5507 return(posix_error);
5508 if (ismember)
5509 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5510 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5511 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5512 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5513 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5514
5515 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
5516 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5517 return(error);
5518 }
5519
5520 switch(eval.ae_result) {
5521 case KAUTH_RESULT_DENY:
5522 KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp);
5523 return(EACCES); /* deny, deny, counter-allege */
5524 case KAUTH_RESULT_ALLOW:
5525 KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp);
5526 return(0);
5527 case KAUTH_RESULT_DEFER:
5528 /*
5529 * If we don't have a POSIX answer of "yes", and we
5530 * can't get an ACL answer, then we deny it now.
5531 */
5532 if (posix_error == ENOENT) {
5533 KAUTH_DEBUG("%p DENIED(DEFERRED) - by ACL", vcp->vp);
5534 return(EACCES); /* deny, deny, counter-allege */
5535 }
5536 default:
5537 /* Defer to the residual-rights evaluation below */
5538 KAUTH_DEBUG("%p DEFERRED - by file ACL", vcp->vp);
5539 break;
5540 }
5541
5542 *found_deny = eval.ae_found_deny;
5543
5544 /* fall through and evaluate residual rights */
5545 } else {
5546 /* no ACL, everything is residual */
5547 eval.ae_residual = acl_rights;
5548 }
5549
5550 /*
5551 * Grant residual rights that have been pre-authorized.
5552 */
5553 eval.ae_residual &= ~preauth_rights;
5554
5555 /*
5556 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
5557 */
5558 if (vauth_file_owner(vcp))
5559 eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;
5560
5561 if (eval.ae_residual == 0) {
5562 KAUTH_DEBUG("%p ALLOWED - rights already authorized", vcp->vp);
5563 return(0);
5564 }
5565
5566 /*
5567 * Bail if we have residual rights that can't be granted by posix permissions,
5568 * or aren't presumed granted at this point.
5569 *
5570 * XXX these can be collapsed for performance
5571 */
5572 if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
5573 KAUTH_DEBUG("%p DENIED - CHANGE_OWNER not permitted", vcp->vp);
5574 return(EACCES);
5575 }
5576 if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
5577 KAUTH_DEBUG("%p DENIED - WRITE_SECURITY not permitted", vcp->vp);
5578 return(EACCES);
5579 }
5580
5581 #if DIAGNOSTIC
5582 if (eval.ae_residual & KAUTH_VNODE_DELETE)
5583 panic("vnode_authorize: can't be checking delete permission here");
5584 #endif
5585
5586 /*
5587 * Compute the fallback posix permissions that will satisfy the remaining
5588 * rights.
5589 */
5590 posix_action = 0;
5591 if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
5592 KAUTH_VNODE_LIST_DIRECTORY |
5593 KAUTH_VNODE_READ_EXTATTRIBUTES))
5594 posix_action |= VREAD;
5595 if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
5596 KAUTH_VNODE_ADD_FILE |
5597 KAUTH_VNODE_ADD_SUBDIRECTORY |
5598 KAUTH_VNODE_DELETE_CHILD |
5599 KAUTH_VNODE_WRITE_ATTRIBUTES |
5600 KAUTH_VNODE_WRITE_EXTATTRIBUTES))
5601 posix_action |= VWRITE;
5602 if (eval.ae_residual & (KAUTH_VNODE_EXECUTE |
5603 KAUTH_VNODE_SEARCH))
5604 posix_action |= VEXEC;
5605
5606 if (posix_action != 0) {
5607 return(vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */));
5608 } else {
5609 KAUTH_DEBUG("%p ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
5610 vcp->vp,
5611 (eval.ae_residual & KAUTH_VNODE_READ_DATA)
5612 ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
5613 (eval.ae_residual & KAUTH_VNODE_WRITE_DATA)
5614 ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
5615 (eval.ae_residual & KAUTH_VNODE_EXECUTE)
5616 ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
5617 (eval.ae_residual & KAUTH_VNODE_DELETE)
5618 ? " DELETE" : "",
5619 (eval.ae_residual & KAUTH_VNODE_APPEND_DATA)
5620 ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
5621 (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD)
5622 ? " DELETE_CHILD" : "",
5623 (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES)
5624 ? " READ_ATTRIBUTES" : "",
5625 (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES)
5626 ? " WRITE_ATTRIBUTES" : "",
5627 (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES)
5628 ? " READ_EXTATTRIBUTES" : "",
5629 (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES)
5630 ? " WRITE_EXTATTRIBUTES" : "",
5631 (eval.ae_residual & KAUTH_VNODE_READ_SECURITY)
5632 ? " READ_SECURITY" : "",
5633 (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY)
5634 ? " WRITE_SECURITY" : "",
5635 (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE)
5636 ? " CHECKIMMUTABLE" : "",
5637 (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER)
5638 ? " CHANGE_OWNER" : "");
5639 }
5640
5641 /*
5642 * Lack of required Posix permissions implies no reason to deny access.
5643 */
5644 return(0);
5645 }
5646
5647 /*
5648 * Check for file immutability.
5649 */
5650 static int
5651 vnode_authorize_checkimmutable(vnode_t vp, struct vnode_attr *vap, int rights, int ignore)
5652 {
5653 mount_t mp;
5654 int error;
5655 int append;
5656
5657 /*
5658 * Perform immutability checks for operations that change data.
5659 *
5660 * Sockets, fifos and devices require special handling.
5661 */
5662 switch(vp->v_type) {
5663 case VSOCK:
5664 case VFIFO:
5665 case VBLK:
5666 case VCHR:
5667 /*
5668 * Writing to these nodes does not change the filesystem data,
5669 * so forget that it's being tried.
5670 */
5671 rights &= ~KAUTH_VNODE_WRITE_DATA;
5672 break;
5673 default:
5674 break;
5675 }
5676
5677 error = 0;
5678 if (rights & KAUTH_VNODE_WRITE_RIGHTS) {
5679
5680 /* check per-filesystem options if possible */
5681 mp = vp->v_mount;
5682 if (mp != NULL) {
5683
5684 /* check for no-EA filesystems */
5685 if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) &&
5686 (vfs_flags(mp) & MNT_NOUSERXATTR)) {
5687 KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vp);
5688 error = EACCES; /* User attributes disabled */
5689 goto out;
5690 }
5691 }
5692
5693 /*
5694 * check for file immutability. first, check if the requested rights are
5695 * allowable for a UF_APPEND file.
5696 */
5697 append = 0;
5698 if (vp->v_type == VDIR) {
5699 if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
5700 append = 1;
5701 } else {
5702 if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
5703 append = 1;
5704 }
5705 if ((error = vnode_immutable(vap, append, ignore)) != 0) {
5706 KAUTH_DEBUG("%p DENIED - file is immutable", vp);
5707 goto out;
5708 }
5709 }
5710 out:
5711 return(error);
5712 }
5713
5714 /*
5715 * Handle authorization actions for filesystems that advertise that the
5716 * server will be enforcing.
5717 *
5718 * Returns: 0 Authorization should be handled locally
5719 * 1 Authorization was handled by the FS
5720 *
5721 * Note: Imputed returns will only occur if the authorization request
5722 * was handled by the FS.
5723 *
5724 * Imputed: *resultp, modified Return code from FS when the request is
5725 * handled by the FS.
5726 * VNOP_ACCESS:???
5727 * VNOP_OPEN:???
5728 */
5729 static int
5730 vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
5731 {
5732 int error;
5733
5734 /*
5735 * If the vp is a device node, socket or FIFO it actually represents a local
5736 * endpoint, so we need to handle it locally.
5737 */
5738 switch(vp->v_type) {
5739 case VBLK:
5740 case VCHR:
5741 case VSOCK:
5742 case VFIFO:
5743 return(0);
5744 default:
5745 break;
5746 }
5747
5748 /*
5749 * In the advisory request case, if the filesystem doesn't think it's reliable
5750 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
5751 */
5752 if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount))
5753 return(0);
5754
5755 /*
5756 * Let the filesystem have a say in the matter. It's OK for it to not implement
5757 * VNOP_ACCESS, as most will authorise inline with the actual request.
5758 */
5759 if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
5760 *resultp = error;
5761 KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
5762 return(1);
5763 }
5764
5765 /*
5766 * Typically opaque filesystems do authorisation in-line, but exec is a special case. In
5767 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
5768 */
5769 if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
5770 /* try a VNOP_OPEN for readonly access */
5771 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
5772 *resultp = error;
5773 KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp);
5774 return(1);
5775 }
5776 VNOP_CLOSE(vp, FREAD, ctx);
5777 }
5778
5779 /*
5780 * We don't have any reason to believe that the request has to be denied at this point,
5781 * so go ahead and allow it.
5782 */
5783 *resultp = 0;
5784 KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp);
5785 return(1);
5786 }
5787
5788
5789
5790
5791 /*
5792 * Returns: KAUTH_RESULT_ALLOW
5793 * KAUTH_RESULT_DENY
5794 *
5795 * Imputed: *arg3, modified Error code in the deny case
5796 * EROFS Read-only file system
5797 * EACCES Permission denied
5798 * EPERM Operation not permitted [no execute]
5799 * vnode_getattr:ENOMEM Not enough space [only if has filesec]
5800 * vnode_getattr:???
5801 * vnode_authorize_opaque:*arg2 ???
5802 * vnode_authorize_checkimmutable:???
5803 * vnode_authorize_delete:???
5804 * vnode_authorize_simple:???
5805 */
5806
5807
5808 static int
5809 vnode_authorize_callback(kauth_cred_t cred, void *idata, kauth_action_t action,
5810 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
5811 {
5812 vfs_context_t ctx;
5813 vnode_t cvp = NULLVP;
5814 vnode_t vp, dvp;
5815 int result = KAUTH_RESULT_DENY;
5816 int parent_iocount = 0;
5817 int parent_action; /* In case we need to use namedstream's data fork for cached rights */
5818
5819 ctx = (vfs_context_t)arg0;
5820 vp = (vnode_t)arg1;
5821 dvp = (vnode_t)arg2;
5822
5823 /*
5824 * if there are 2 vnodes passed in, we don't know at
5825 * this point which rights to look at based on the
5826 * combined action being passed in... defer until later...
5827 * otherwise check the kauth 'rights' cache hung
5828 * off of the vnode we're interested in... if we've already
5829 * been granted the right we're currently interested in,
5830 * we can just return success... otherwise we'll go through
5831 * the process of authorizing the requested right(s)... if that
5832 * succeeds, we'll add the right(s) to the cache.
5833 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
5834 */
5835 if (dvp && vp)
5836 goto defer;
5837 if (dvp) {
5838 cvp = dvp;
5839 } else {
5840 /*
5841 * For named streams on local-authorization volumes, rights are cached on the parent;
5842 * authorization is determined by looking at the parent's properties anyway, so storing
5843 * on the parent means that we don't recompute for the named stream and that if
5844 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
5845 * stream to flush its cache separately. If we miss in the cache, then we authorize
5846 * as if there were no cached rights (passing the named stream vnode and desired rights to
5847 * vnode_authorize_callback_int()).
5848 *
5849 * On an opaquely authorized volume, we don't know the relationship between the
5850 * data fork's properties and the rights granted on a stream. Thus, named stream vnodes
5851 * on such a volume are authorized directly (rather than using the parent) and have their
5852 * own caches. When a named stream vnode is created, we mark the parent as having a named
5853 * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
5854 * find the stream and flush its cache.
5855 */
5856 if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
5857 cvp = vp->v_parent;
5858 if ((cvp != NULLVP) && (vnode_getwithref(cvp) == 0)) {
5859 parent_iocount = 1;
5860 } else {
5861 cvp = NULL;
5862 goto defer; /* If we can't use the parent, take the slow path */
5863 }
5864
5865 /* Have to translate some actions */
5866 parent_action = action;
5867 if (parent_action & KAUTH_VNODE_READ_DATA) {
5868 parent_action &= ~KAUTH_VNODE_READ_DATA;
5869 parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
5870 }
5871 if (parent_action & KAUTH_VNODE_WRITE_DATA) {
5872 parent_action &= ~KAUTH_VNODE_WRITE_DATA;
5873 parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
5874 }
5875
5876 } else {
5877 cvp = vp;
5878 }
5879 }
5880
5881 if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
5882 result = KAUTH_RESULT_ALLOW;
5883 goto out;
5884 }
5885 defer:
5886 result = vnode_authorize_callback_int(cred, idata, action, arg0, arg1, arg2, arg3);
5887
5888 if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP)
5889 vnode_cache_authorized_action(cvp, ctx, action);
5890
5891 out:
5892 if (parent_iocount) {
5893 vnode_put(cvp);
5894 }
5895
5896 return result;
5897 }
5898
5899
5900 static int
5901 vnode_authorize_callback_int(__unused kauth_cred_t unused_cred, __unused void *idata, kauth_action_t action,
5902 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
5903 {
5904 struct _vnode_authorize_context auth_context;
5905 vauth_ctx vcp;
5906 vfs_context_t ctx;
5907 vnode_t vp, dvp;
5908 kauth_cred_t cred;
5909 kauth_ace_rights_t rights;
5910 struct vnode_attr va, dva;
5911 int result;
5912 int *errorp;
5913 int noimmutable;
5914 boolean_t parent_authorized_for_delete_child = FALSE;
5915 boolean_t found_deny = FALSE;
5916 boolean_t parent_ref= FALSE;
5917
5918 vcp = &auth_context;
5919 ctx = vcp->ctx = (vfs_context_t)arg0;
5920 vp = vcp->vp = (vnode_t)arg1;
5921 dvp = vcp->dvp = (vnode_t)arg2;
5922 errorp = (int *)arg3;
5923 /*
5924 * Note that we authorize against the context, not the passed cred
5925 * (the same thing anyway)
5926 */
5927 cred = ctx->vc_ucred;
5928
5929 VATTR_INIT(&va);
5930 vcp->vap = &va;
5931 VATTR_INIT(&dva);
5932 vcp->dvap = &dva;
5933
5934 vcp->flags = vcp->flags_valid = 0;
5935
5936 #if DIAGNOSTIC
5937 if ((ctx == NULL) || (vp == NULL) || (cred == NULL))
5938 panic("vnode_authorize: bad arguments (context %p vp %p cred %p)", ctx, vp, cred);
5939 #endif
5940
5941 KAUTH_DEBUG("%p AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
5942 vp, vfs_context_proc(ctx)->p_comm,
5943 (action & KAUTH_VNODE_ACCESS) ? "access" : "auth",
5944 (action & KAUTH_VNODE_READ_DATA) ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
5945 (action & KAUTH_VNODE_WRITE_DATA) ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
5946 (action & KAUTH_VNODE_EXECUTE) ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
5947 (action & KAUTH_VNODE_DELETE) ? " DELETE" : "",
5948 (action & KAUTH_VNODE_APPEND_DATA) ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
5949 (action & KAUTH_VNODE_DELETE_CHILD) ? " DELETE_CHILD" : "",
5950 (action & KAUTH_VNODE_READ_ATTRIBUTES) ? " READ_ATTRIBUTES" : "",
5951 (action & KAUTH_VNODE_WRITE_ATTRIBUTES) ? " WRITE_ATTRIBUTES" : "",
5952 (action & KAUTH_VNODE_READ_EXTATTRIBUTES) ? " READ_EXTATTRIBUTES" : "",
5953 (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES) ? " WRITE_EXTATTRIBUTES" : "",
5954 (action & KAUTH_VNODE_READ_SECURITY) ? " READ_SECURITY" : "",
5955 (action & KAUTH_VNODE_WRITE_SECURITY) ? " WRITE_SECURITY" : "",
5956 (action & KAUTH_VNODE_CHANGE_OWNER) ? " CHANGE_OWNER" : "",
5957 (action & KAUTH_VNODE_NOIMMUTABLE) ? " (noimmutable)" : "",
5958 vnode_isdir(vp) ? "directory" : "file",
5959 vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);
5960
5961 /*
5962 * Extract the control bits from the action, everything else is
5963 * requested rights.
5964 */
5965 noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
5966 rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);
5967
5968 if (rights & KAUTH_VNODE_DELETE) {
5969 #if DIAGNOSTIC
5970 if (dvp == NULL)
5971 panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
5972 #endif
5973 /*
5974 * check to see if we've already authorized the parent
5975 * directory for deletion of its children... if so, we
5976 * can skip a whole bunch of work... we will still have to
5977 * authorize that this specific child can be removed
5978 */
5979 if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE)
5980 parent_authorized_for_delete_child = TRUE;
5981 } else {
5982 dvp = NULL;
5983 }
5984
5985 /*
5986 * Check for read-only filesystems.
5987 */
5988 if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
5989 (vp->v_mount->mnt_flag & MNT_RDONLY) &&
5990 ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
5991 (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
5992 (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) {
5993 result = EROFS;
5994 goto out;
5995 }
5996
5997 /*
5998 * Check for noexec filesystems.
5999 */
6000 if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
6001 result = EACCES;
6002 goto out;
6003 }
6004
6005 /*
6006 * Handle cases related to filesystems with non-local enforcement.
6007 * This call can return 0, in which case we will fall through to perform a
6008 * check based on VNOP_GETATTR data. Otherwise it returns 1 and sets
6009 * an appropriate result, at which point we can return immediately.
6010 */
6011 if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx))
6012 goto out;
6013
6014 /*
6015 * Get vnode attributes and extended security information for the vnode
6016 * and directory if required.
6017 */
6018 VATTR_WANTED(&va, va_mode);
6019 VATTR_WANTED(&va, va_uid);
6020 VATTR_WANTED(&va, va_gid);
6021 VATTR_WANTED(&va, va_flags);
6022 VATTR_WANTED(&va, va_acl);
6023 if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
6024 KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result);
6025 goto out;
6026 }
6027 if (dvp && parent_authorized_for_delete_child == FALSE) {
6028 VATTR_WANTED(&dva, va_mode);
6029 VATTR_WANTED(&dva, va_uid);
6030 VATTR_WANTED(&dva, va_gid);
6031 VATTR_WANTED(&dva, va_flags);
6032 VATTR_WANTED(&dva, va_acl);
6033 if ((result = vnode_getattr(dvp, &dva, ctx)) != 0) {
6034 KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp, result);
6035 goto out;
6036 }
6037 }
6038
6039 /*
6040 * If the vnode is an extended attribute data vnode (e.g. a resource fork), *_DATA becomes
6041 * *_EXTATTRIBUTES.
6042 */
6043 if (vnode_isnamedstream(vp)) {
6044 if (rights & KAUTH_VNODE_READ_DATA) {
6045 rights &= ~KAUTH_VNODE_READ_DATA;
6046 rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
6047 }
6048 if (rights & KAUTH_VNODE_WRITE_DATA) {
6049 rights &= ~KAUTH_VNODE_WRITE_DATA;
6050 rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
6051 }
6052 }
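/*
 * Worked example (illustrative): opening a resource fork through the
 * "file/..namedfork/rsrc" path form requests KAUTH_VNODE_READ_DATA on
 * the stream vnode; the remapping above authorizes it as
 * KAUTH_VNODE_READ_EXTATTRIBUTES instead, i.e. the same right needed
 * to read that data through the extended attribute interface.
 */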
6053
6054 /*
6055 * Point 'vp' to the resource fork's parent for ACL checking
6056 */
6057 if (vnode_isnamedstream(vp) &&
6058 (vp->v_parent != NULL) &&
6059 (vget_internal(vp->v_parent, 0, VNODE_NODEAD) == 0)) {
6060 parent_ref = TRUE;
6061 vcp->vp = vp = vp->v_parent;
6062 if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
6063 kauth_acl_free(va.va_acl);
6064 VATTR_INIT(&va);
6065 VATTR_WANTED(&va, va_mode);
6066 VATTR_WANTED(&va, va_uid);
6067 VATTR_WANTED(&va, va_gid);
6068 VATTR_WANTED(&va, va_flags);
6069 VATTR_WANTED(&va, va_acl);
6070 if ((result = vnode_getattr(vp, &va, ctx)) != 0)
6071 goto out;
6072 }
6073
6074 /*
6075 * Check for immutability.
6076 *
6077 * In the deletion case, parent directory immutability vetoes specific
6078 * file rights.
6079 */
6080 if ((result = vnode_authorize_checkimmutable(vp, &va, rights, noimmutable)) != 0)
6081 goto out;
6082 if ((rights & KAUTH_VNODE_DELETE) &&
6083 parent_authorized_for_delete_child == FALSE &&
6084 ((result = vnode_authorize_checkimmutable(dvp, &dva, KAUTH_VNODE_DELETE_CHILD, 0)) != 0))
6085 goto out;
6086
6087 /*
6088 * Clear rights that have been authorized by reaching this point, bail if nothing left to
6089 * check.
6090 */
6091 rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE);
6092 if (rights == 0)
6093 goto out;
6094
6095 /*
6096 * If we're not the superuser, authorize based on file properties;
6097 * note that even if parent_authorized_for_delete_child is TRUE, we
6098 * need to check on the node itself.
6099 */
6100 if (!vfs_context_issuser(ctx)) {
6101 /* process delete rights */
6102 if ((rights & KAUTH_VNODE_DELETE) &&
6103 ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0))
6104 goto out;
6105
6106 /* process remaining rights */
6107 if ((rights & ~KAUTH_VNODE_DELETE) &&
6108 (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, &found_deny)) != 0)
6109 goto out;
6110 } else {
6111
6112 /*
6113 * Execute is only granted to root if one of the x bits is set. This check only
6114 * makes sense if the posix mode bits are actually supported.
6115 */
6116 if ((rights & KAUTH_VNODE_EXECUTE) &&
6117 (vp->v_type == VREG) &&
6118 VATTR_IS_SUPPORTED(&va, va_mode) &&
6119 !(va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
6120 result = EPERM;
6121 KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vp, va.va_mode);
6122 goto out;
6123 }
6124
6125 KAUTH_DEBUG("%p ALLOWED - caller is superuser", vp);
6126 }
6127 out:
6128 if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
6129 kauth_acl_free(va.va_acl);
6130 if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL))
6131 kauth_acl_free(dva.va_acl);
6132
6133 if (result) {
6134 if (parent_ref)
6135 vnode_put(vp);
6136 *errorp = result;
6137 KAUTH_DEBUG("%p DENIED - auth denied", vp);
6138 return(KAUTH_RESULT_DENY);
6139 }
6140 if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
6141 /*
6142 * if we were successfully granted the right to search this directory
6143 * and there were NO ACL DENYs for search and the posix permissions also don't
6144 * deny execute, we can synthesize a global right that allows anyone to
6145 * traverse this directory during a pathname lookup without having to
6146 * match the credential associated with this cache of rights.
6147 */
6148 if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
6149 ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
6150 (S_IXUSR | S_IXGRP | S_IXOTH))) {
6151 vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
6152 }
6153 }
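/*
 * Illustrative consumer of the right cached above (a sketch; the real
 * fast path lives in the name cache code): a later lookup may skip
 * per-credential authorization while the world-searchable right is
 * still cached on the directory.
 */
#if 0
	if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_SEARCHBYANYONE) == TRUE) {
		/* traverse dvp without a full vnode_authorize() call */
	}
#endif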
6154 if ((rights & KAUTH_VNODE_DELETE) && parent_authorized_for_delete_child == FALSE) {
6155 /*
6156 * parent was successfully and newly authorized for content deletions;
6157 * add it to the cache, but only if it doesn't have the sticky
6158 * bit set on it. This same check is done earlier guarding
6159 * fetching of dva, and if we jumped to out without having done
6160 * this, we will have returned already because of a non-zero
6161 * 'result' value.
6162 */
6163 if (VATTR_IS_SUPPORTED(&dva, va_mode) &&
6164 !(dva.va_mode & (S_ISVTX))) {
6165 /* OK to cache delete rights */
6166 vnode_cache_authorized_action(dvp, ctx, KAUTH_VNODE_DELETE_CHILD);
6167 }
6168 }
6169 if (parent_ref)
6170 vnode_put(vp);
6171 /*
6172 * Note that this implies that we will allow requests for no rights, as well as
6173 * for rights that we do not recognise. There should be none of these.
6174 */
6175 KAUTH_DEBUG("%p ALLOWED - auth granted", vp);
6176 return(KAUTH_RESULT_ALLOW);
6177 }
6178
6179 /*
6180 * Check that the attribute information in vattr can be legally applied to
6181 * a new file by the context.
6182 */
6183 int
6184 vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
6185 {
6186 int error;
6187 int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode;
6188 kauth_cred_t cred;
6189 guid_t changer;
6190 mount_t dmp;
6191
6192 error = 0;
6193 defaulted_owner = defaulted_group = defaulted_mode = 0;
6194
6195 /*
6196 * Require that the filesystem support extended security to apply any.
6197 */
6198 if (!vfs_extendedsecurity(dvp->v_mount) &&
6199 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
6200 error = EINVAL;
6201 goto out;
6202 }
6203
6204 /*
6205 * Default some fields.
6206 */
6207 dmp = dvp->v_mount;
6208
6209 /*
6210 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
6211 * owner takes ownership of all new files.
6212 */
6213 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) {
6214 VATTR_SET(vap, va_uid, dmp->mnt_fsowner);
6215 defaulted_owner = 1;
6216 } else {
6217 if (!VATTR_IS_ACTIVE(vap, va_uid)) {
6218 /* default owner is current user */
6219 VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx)));
6220 defaulted_owner = 1;
6221 }
6222 }
6223
6224 /*
6225 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that
6226 * group takes ownership of all new files.
6227 */
6228 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) {
6229 VATTR_SET(vap, va_gid, dmp->mnt_fsgroup);
6230 defaulted_group = 1;
6231 } else {
6232 if (!VATTR_IS_ACTIVE(vap, va_gid)) {
6233 /* default group comes from parent object, fallback to current user */
6234 struct vnode_attr dva;
6235 VATTR_INIT(&dva);
6236 VATTR_WANTED(&dva, va_gid);
6237 if ((error = vnode_getattr(dvp, &dva, ctx)) != 0)
6238 goto out;
6239 if (VATTR_IS_SUPPORTED(&dva, va_gid)) {
6240 VATTR_SET(vap, va_gid, dva.va_gid);
6241 } else {
6242 VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx)));
6243 }
6244 defaulted_group = 1;
6245 }
6246 }
6247
6248 if (!VATTR_IS_ACTIVE(vap, va_flags))
6249 VATTR_SET(vap, va_flags, 0);
6250
6251 /* default mode is everything, masked with current umask */
6252 if (!VATTR_IS_ACTIVE(vap, va_mode)) {
6253 VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd->fd_cmask);
6254 KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o", vap->va_mode, vfs_context_proc(ctx)->p_fd->fd_cmask);
6255 defaulted_mode = 1;
6256 }
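/*
 * Worked example: ACCESSPERMS is 0777, so under the common umask of
 * 022 a create that does not specify va_mode defaults to
 *	0777 & ~022 == 0755
 * i.e. rwxr-xr-x, before any filesystem-specific adjustment.
 */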
6257 /* set timestamps to now */
6258 if (!VATTR_IS_ACTIVE(vap, va_create_time)) {
6259 nanotime(&vap->va_create_time);
6260 VATTR_SET_ACTIVE(vap, va_create_time);
6261 }
6262
6263 /*
6264 * Check for attempts to set nonsensical fields.
6265 */
6266 if (vap->va_active & ~VNODE_ATTR_NEWOBJ) {
6267 error = EINVAL;
6268 KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
6269 vap->va_active & ~VNODE_ATTR_NEWOBJ);
6270 goto out;
6271 }
6272
6273 /*
6274 * Quickly check for the applicability of any enforcement here.
6275 * Tests below maintain the integrity of the local security model.
6276 */
6277 if (vfs_authopaque(dvp->v_mount))
6278 goto out;
6279
6280 /*
6281 * We need to know if the caller is the superuser, or if the work is
6282 * otherwise already authorised.
6283 */
6284 cred = vfs_context_ucred(ctx);
6285 if (noauth) {
6286 /* doing work for the kernel */
6287 has_priv_suser = 1;
6288 } else {
6289 has_priv_suser = vfs_context_issuser(ctx);
6290 }
6291
6292
6293 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6294 if (has_priv_suser) {
6295 if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) {
6296 error = EPERM;
6297 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
6298 goto out;
6299 }
6300 } else {
6301 if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) {
6302 error = EPERM;
6303 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
6304 goto out;
6305 }
6306 }
6307 }
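/*
 * Worked example (illustrative): a non-superuser creating a file with
 *	vap->va_flags = UF_NODUMP | SF_IMMUTABLE;
 * fails the check above, because masking with UF_SETTABLE strips the
 * superuser-only SF_IMMUTABLE bit and the result no longer equals
 * va_flags; the same request from the superuser passes, since both
 * bits fall within UF_SETTABLE | SF_SETTABLE.
 */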
6308
6309 /* if not superuser, validate legality of new-item attributes */
6310 if (!has_priv_suser) {
6311 if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) {
6312 /* setgid? */
6313 if (vap->va_mode & S_ISGID) {
6314 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6315 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
6316 goto out;
6317 }
6318 if (!ismember) {
6319 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", vap->va_gid);
6320 error = EPERM;
6321 goto out;
6322 }
6323 }
6324
6325 /* setuid? */
6326 if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) {
6327 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
6328 error = EPERM;
6329 goto out;
6330 }
6331 }
6332 if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) {
6333 KAUTH_DEBUG(" DENIED - cannot create new item owned by %d", vap->va_uid);
6334 error = EPERM;
6335 goto out;
6336 }
6337 if (!defaulted_group) {
6338 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6339 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
6340 goto out;
6341 }
6342 if (!ismember) {
6343 KAUTH_DEBUG(" DENIED - cannot create new item with group %d - not a member", vap->va_gid);
6344 error = EPERM;
6345 goto out;
6346 }
6347 }
6348
6349 /* initialising owner/group UUID */
6350 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
6351 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
6352 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
6353 /* XXX ENOENT here - no GUID - should perhaps become EPERM */
6354 goto out;
6355 }
6356 if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
6357 KAUTH_DEBUG(" ERROR - cannot create item with supplied owner UUID - not us");
6358 error = EPERM;
6359 goto out;
6360 }
6361 }
6362 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
6363 if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
6364 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
6365 goto out;
6366 }
6367 if (!ismember) {
6368 KAUTH_DEBUG(" ERROR - cannot create item with supplied group UUID - not a member");
6369 error = EPERM;
6370 goto out;
6371 }
6372 }
6373 }
6374 out:
6375 return(error);
6376 }
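/*
 * Illustrative caller (a sketch loosely modeled on a create path;
 * 'cnp' and the surrounding error handling are elided/hypothetical):
 * the attribute set is defaulted and validated here before being
 * handed to the filesystem.
 */
#if 0
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, 0644);		/* caller-requested mode */
	if ((error = vnode_authattr_new(dvp, &va, 0, ctx)) == 0)
		error = VNOP_CREATE(dvp, &vp, cnp, &va, ctx);
#endif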
6377
6378 /*
6379 * Check that the attribute information in vap can be legally written by the
6380 * context.
6381 *
6382 * Call this when you're not sure about the vnode_attr; either its contents
6383 * have come from an unknown source, or they are variable.
6384 *
6385 * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
6386 * must be authorized to be permitted to write the vattr.
6387 */
6388 int
6389 vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
6390 {
6391 struct vnode_attr ova;
6392 kauth_action_t required_action;
6393 int error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid;
6394 guid_t changer;
6395 gid_t group;
6396 uid_t owner;
6397 mode_t newmode;
6398 kauth_cred_t cred;
6399 uint32_t fdelta;
6400
6401 VATTR_INIT(&ova);
6402 required_action = 0;
6403 error = 0;
6404
6405 /*
6406 * Quickly check for enforcement applicability.
6407 */
6408 if (vfs_authopaque(vp->v_mount))
6409 goto out;
6410
6411 /*
6412 * Check for attempts to set nonsensical fields.
6413 */
6414 if (vap->va_active & VNODE_ATTR_RDONLY) {
6415 KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
6416 error = EINVAL;
6417 goto out;
6418 }
6419
6420 /*
6421 * We need to know if the caller is the superuser.
6422 */
6423 cred = vfs_context_ucred(ctx);
6424 has_priv_suser = kauth_cred_issuser(cred);
6425
6426 /*
6427 * If any of the following are changing, we need information from the old file:
6428 * va_uid
6429 * va_gid
6430 * va_mode
6431 * va_uuuid
6432 * va_guuid
6433 */
6434 if (VATTR_IS_ACTIVE(vap, va_uid) ||
6435 VATTR_IS_ACTIVE(vap, va_gid) ||
6436 VATTR_IS_ACTIVE(vap, va_mode) ||
6437 VATTR_IS_ACTIVE(vap, va_uuuid) ||
6438 VATTR_IS_ACTIVE(vap, va_guuid)) {
6439 VATTR_WANTED(&ova, va_mode);
6440 VATTR_WANTED(&ova, va_uid);
6441 VATTR_WANTED(&ova, va_gid);
6442 VATTR_WANTED(&ova, va_uuuid);
6443 VATTR_WANTED(&ova, va_guuid);
6444 KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
6445 }
6446
6447 /*
6448 * If timestamps are being changed, we need to know who the file is owned
6449 * by.
6450 */
6451 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
6452 VATTR_IS_ACTIVE(vap, va_change_time) ||
6453 VATTR_IS_ACTIVE(vap, va_modify_time) ||
6454 VATTR_IS_ACTIVE(vap, va_access_time) ||
6455 VATTR_IS_ACTIVE(vap, va_backup_time)) {
6456
6457 VATTR_WANTED(&ova, va_uid);
6458 #if 0 /* enable this when we support UUIDs as official owners */
6459 VATTR_WANTED(&ova, va_uuuid);
6460 #endif
6461 KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
6462 }
6463
6464 /*
6465 * If flags are being changed, we need the old flags.
6466 */
6467 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6468 KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
6469 VATTR_WANTED(&ova, va_flags);
6470 }
6471
6472 /*
6473 * If the size is being set, make sure it's not a directory.
6474 */
6475 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
6476 /* size is meaningless on a directory, don't permit this */
6477 if (vnode_isdir(vp)) {
6478 KAUTH_DEBUG("ATTR - ERROR: size change requested on a directory");
6479 error = EISDIR;
6480 goto out;
6481 }
6482 }
6483
6484 /*
6485 * Get old data.
6486 */
6487 KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active);
6488 if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
6489 KAUTH_DEBUG(" ERROR - got %d trying to get attributes", error);
6490 goto out;
6491 }
6492
6493 /*
6494 * Size changes require write access to the file data.
6495 */
6496 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
6497 /* if we can't get the size, or it's different, we need write access */
6498 KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
6499 required_action |= KAUTH_VNODE_WRITE_DATA;
6500 }
6501
6502 /*
6503 * Changing timestamps?
6504 *
6505 * Note that we are only called to authorize user-requested time changes;
6506 * side-effect time changes are not authorised. Authorisation is only
6507 * required for existing files.
6508 *
6509 * Non-owners are not permitted to change the time on an existing
6510 * file to anything other than the current time.
6511 */
6512 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
6513 VATTR_IS_ACTIVE(vap, va_change_time) ||
6514 VATTR_IS_ACTIVE(vap, va_modify_time) ||
6515 VATTR_IS_ACTIVE(vap, va_access_time) ||
6516 VATTR_IS_ACTIVE(vap, va_backup_time)) {
6517 /*
6518 * The owner and root may set any timestamps they like,
6519 * provided that the file is not immutable. The owner still needs
6520 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
6521 */
6522 if (has_priv_suser || vauth_node_owner(&ova, cred)) {
6523 KAUTH_DEBUG("ATTR - root or owner changing timestamps");
6524 required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES;
6525 } else {
6526 /* just setting the current time? */
6527 if (vap->va_vaflags & VA_UTIMES_NULL) {
6528 KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
6529 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
6530 } else {
6531 KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
6532 error = EACCES;
6533 goto out;
6534 }
6535 }
6536 }
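/*
 * Worked example: a utimes(path, NULL)-style "touch" by a non-owner
 * sets VA_UTIMES_NULL and so needs just WRITE_ATTRIBUTES; the same
 * non-owner requesting an arbitrary historical timestamp takes the
 * EACCES branch above, while the owner or root may set any timestamp
 * subject to the immutability check.
 */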
6537
6538 /*
6539 * Changing file mode?
6540 */
6541 if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) {
6542 KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode);
6543
6544 /*
6545 * Mode changes always have the same basic auth requirements.
6546 */
6547 if (has_priv_suser) {
6548 KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
6549 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
6550 } else {
6551 /* need WRITE_SECURITY */
6552 KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
6553 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6554 }
6555
6556 /*
6557 * Can't set the setgid bit if you're not in the group and not root. Have to have
6558 * existing group information in the case we're not setting it right now.
6559 */
6560 if (vap->va_mode & S_ISGID) {
6561 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
6562 if (!has_priv_suser) {
6563 if (VATTR_IS_ACTIVE(vap, va_gid)) {
6564 group = vap->va_gid;
6565 } else if (VATTR_IS_SUPPORTED(&ova, va_gid)) {
6566 group = ova.va_gid;
6567 } else {
6568 KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
6569 error = EINVAL;
6570 goto out;
6571 }
6572 /*
6573 * This might be too restrictive; WRITE_SECURITY might be implied by
6574 * membership in this case, rather than being an additional requirement.
6575 */
6576 if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) {
6577 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
6578 goto out;
6579 }
6580 if (!ismember) {
6581 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", group);
6582 error = EPERM;
6583 goto out;
6584 }
6585 }
6586 }
6587
6588 /*
6589 * Can't set the setuid bit unless you're root or the file's owner.
6590 */
6591 if (vap->va_mode & S_ISUID) {
6592 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
6593 if (!has_priv_suser) {
6594 if (VATTR_IS_ACTIVE(vap, va_uid)) {
6595 owner = vap->va_uid;
6596 } else if (VATTR_IS_SUPPORTED(&ova, va_uid)) {
6597 owner = ova.va_uid;
6598 } else {
6599 KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
6600 error = EINVAL;
6601 goto out;
6602 }
6603 if (owner != kauth_cred_getuid(cred)) {
6604 /*
6605 * We could allow this if WRITE_SECURITY is permitted, perhaps.
6606 */
6607 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
6608 error = EPERM;
6609 goto out;
6610 }
6611 }
6612 }
6613 }
6614
6615 /*
6616 * Validate/mask flags changes. This checks that only the flags in
6617 * the UF_SETTABLE mask are being set, and preserves the flags in
6618 * the SF_SETTABLE case.
6619 *
6620 * Since flags changes may be made in conjunction with other changes,
6621 * we will ask the auth code to ignore immutability in the case that
6622 * the SF_* flags are not set and we are only manipulating the file flags.
6623 *
6624 */
6625 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6626 /* compute changing flags bits */
6627 if (VATTR_IS_SUPPORTED(&ova, va_flags)) {
6628 fdelta = vap->va_flags ^ ova.va_flags;
6629 } else {
6630 fdelta = vap->va_flags;
6631 }
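/*
 * Worked example: with existing flags UF_NODUMP and requested flags
 * UF_NODUMP | UF_IMMUTABLE,
 *	fdelta == (UF_NODUMP | UF_IMMUTABLE) ^ UF_NODUMP == UF_IMMUTABLE
 * so only the bit actually changing is validated below; re-asserting
 * flags that are already set costs nothing.
 */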
6632
6633 if (fdelta != 0) {
6634 KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
6635 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6636
6637 /* check that changing bits are legal */
6638 if (has_priv_suser) {
6639 /*
6640 * The immutability check will prevent us from clearing the SF_*
6641 * flags unless the system securelevel permits it, so just check
6642 * for legal flags here.
6643 */
6644 if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) {
6645 error = EPERM;
6646 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
6647 goto out;
6648 }
6649 } else {
6650 if (fdelta & ~UF_SETTABLE) {
6651 error = EPERM;
6652 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
6653 goto out;
6654 }
6655 }
6656 /*
6657 * If the caller has the ability to manipulate file flags,
6658 * security is not reduced by ignoring them for this operation.
6659 *
6660 * A more complete test here would consider the 'after' states of the flags
6661 * to determine whether it would permit the operation, but this becomes
6662 * very complex.
6663 *
6664 * Ignoring immutability is conditional on securelevel; this does not bypass
6665 * the SF_* flags if securelevel > 0.
6666 */
6667 required_action |= KAUTH_VNODE_NOIMMUTABLE;
6668 }
6669 }
6670
6671 /*
6672 * Validate ownership information.
6673 */
6674 chowner = 0;
6675 chgroup = 0;
6676 clear_suid = 0;
6677 clear_sgid = 0;
6678
6679 /*
6680 * uid changing
6681 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
6682 * support them in general, and will ignore it if/when we try to set it.
6683 * We might want to clear the uid out of vap completely here.
6684 */
6685 if (VATTR_IS_ACTIVE(vap, va_uid)) {
6686 if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) {
6687 if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) {
6688 KAUTH_DEBUG(" DENIED - non-superuser cannot change ownership to a third party");
6689 error = EPERM;
6690 goto out;
6691 }
6692 chowner = 1;
6693 }
6694 clear_suid = 1;
6695 }
6696
6697 /*
6698 * gid changing
6699 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
6700 * support them in general, and will ignore it if/when we try to set it.
6701 * We might want to clear the gid out of vap completely here.
6702 */
6703 if (VATTR_IS_ACTIVE(vap, va_gid)) {
6704 if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) {
6705 if (!has_priv_suser) {
6706 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6707 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
6708 goto out;
6709 }
6710 if (!ismember) {
6711 KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group",
6712 ova.va_gid, vap->va_gid);
6713 error = EPERM;
6714 goto out;
6715 }
6716 }
6717 chgroup = 1;
6718 }
6719 clear_sgid = 1;
6720 }
6721
6722 /*
6723 * Owner UUID being set or changed.
6724 */
6725 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
6726 /* if the owner UUID is not actually changing ... */
6727 if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
6728 if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid))
6729 goto no_uuuid_change;
6730
6731 /*
6732 * If the current owner UUID is a null GUID, check
6733 * it against the UUID corresponding to the owner UID.
6734 */
6735 if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
6736 VATTR_IS_SUPPORTED(&ova, va_uid)) {
6737 guid_t uid_guid;
6738
6739 if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
6740 kauth_guid_equal(&vap->va_uuuid, &uid_guid))
6741 goto no_uuuid_change;
6742 }
6743 }
6744
6745 /*
6746 * The owner UUID cannot be set by a non-superuser to anything other than
6747 * their own or a null GUID (to "unset" the owner UUID).
6748 * Note that file systems must be prepared to handle the
6749 * null UUID case in a manner appropriate for that file
6750 * system.
6751 */
6752 if (!has_priv_suser) {
6753 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
6754 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
6755 /* XXX ENOENT here - no UUID - should perhaps become EPERM */
6756 goto out;
6757 }
6758 if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
6759 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
6760 KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us / null");
6761 error = EPERM;
6762 goto out;
6763 }
6764 }
6765 chowner = 1;
6766 clear_suid = 1;
6767 }
6768 no_uuuid_change:
6769 /*
6770 * Group UUID being set or changed.
6771 */
6772 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
6773 /* if the group UUID is not actually changing ... */
6774 if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
6775 if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid))
6776 goto no_guuid_change;
6777
6778 /*
6779 * If the current group UUID is a null UUID, check
6780 * it against the UUID corresponding to the group GID.
6781 */
6782 if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
6783 VATTR_IS_SUPPORTED(&ova, va_gid)) {
6784 guid_t gid_guid;
6785
6786 if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
6787 kauth_guid_equal(&vap->va_guuid, &gid_guid))
6788 goto no_guuid_change;
6789 }
6790 }
6791
6792 /*
6793 * The group UUID cannot be set by a non-superuser to anything other than
6794 * one of which they are a member or a null GUID (to "unset"
6795 * the group UUID).
6796 * Note that file systems must be prepared to handle the
6797 * null UUID case in a manner appropriate for that file
6798 * system.
6799 */
6800 if (!has_priv_suser) {
6801 if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid))
6802 ismember = 1;
6803 else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
6804 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
6805 goto out;
6806 }
6807 if (!ismember) {
6808 KAUTH_DEBUG(" ERROR - cannot set supplied group UUID - not a member / null");
6809 error = EPERM;
6810 goto out;
6811 }
6812 }
6813 chgroup = 1;
6814 }
6815 no_guuid_change:
6816
6817 /*
6818 * Compute authorisation for group/ownership changes.
6819 */
6820 if (chowner || chgroup || clear_suid || clear_sgid) {
6821 if (has_priv_suser) {
6822 KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
6823 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
6824 } else {
6825 if (chowner) {
6826 KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
6827 required_action |= KAUTH_VNODE_TAKE_OWNERSHIP;
6828 }
6829 if (chgroup && !chowner) {
6830 KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
6831 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6832 }
6833
6834 /* clear set-uid and set-gid bits as required by Posix */
6835 if (VATTR_IS_ACTIVE(vap, va_mode)) {
6836 newmode = vap->va_mode;
6837 } else if (VATTR_IS_SUPPORTED(&ova, va_mode)) {
6838 newmode = ova.va_mode;
6839 } else {
6840 KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
6841 newmode = 0;
6842 }
6843 if (newmode & (S_ISUID | S_ISGID)) {
6844 VATTR_SET(vap, va_mode, newmode & ~(S_ISUID | S_ISGID));
6845 KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o", newmode, vap->va_mode);
6846 }
6847 }
6848 }
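/*
 * Worked example: when a non-superuser changes the group of a file
 * whose mode is 02755, the masking above rewrites va_mode to 0755,
 * so the set-gid bit does not survive the ownership change; this is
 * the Posix chown() rule for non-privileged callers.
 */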
6849
6850 /*
6851 * Authorise changes in the ACL.
6852 */
6853 if (VATTR_IS_ACTIVE(vap, va_acl)) {
6854
6855 /* no existing ACL */
6856 if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) {
6857
6858 /* adding an ACL */
6859 if (vap->va_acl != NULL) {
6860 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6861 KAUTH_DEBUG("CHMOD - adding ACL");
6862 }
6863
6864 /* removing an existing ACL */
6865 } else if (vap->va_acl == NULL) {
6866 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6867 KAUTH_DEBUG("CHMOD - removing ACL");
6868
6869 /* updating an existing ACL */
6870 } else {
6871 if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) {
6872 /* entry count changed, must be different */
6873 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6874 KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
6875 } else if (vap->va_acl->acl_entrycount > 0) {
6876 /* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
6877 if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0],
6878 sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount) != 0) {
6879 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6880 KAUTH_DEBUG("CHMOD - changing ACL entries");
6881 }
6882 }
6883 }
6884 }
6885
6886 /*
6887 * Other attributes that require authorisation.
6888 */
6889 if (VATTR_IS_ACTIVE(vap, va_encoding))
6890 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
6891
6892 out:
6893 if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL))
6894 kauth_acl_free(ova.va_acl);
6895 if (error == 0)
6896 *actionp = required_action;
6897 return(error);
6898 }
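/*
 * Illustrative caller (a sketch of the usual setattr pattern; the
 * surrounding error handling is elided): the computed action set is
 * authorized before the attributes are written back.
 */
#if 0
	kauth_action_t action;

	if ((error = vnode_authattr(vp, vap, &action, ctx)) != 0)
		return (error);
	if (action && ((error = vnode_authorize(vp, NULL, action, ctx)) != 0))
		return (error);
	error = vnode_setattr(vp, vap, ctx);
#endif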
6899
6900
6901 void
6902 vfs_setlocklocal(mount_t mp)
6903 {
6904 vnode_t vp;
6905
6906 mount_lock(mp);
6907 mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
6908
6909 /*
6910 * We do not expect anyone to be using any vnodes at the
6911 * time this routine is called, so there is no need for vnode locking.
6912 */
6913 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
6914 vp->v_flag |= VLOCKLOCAL;
6915 }
6916 TAILQ_FOREACH(vp, &mp->mnt_workerqueue, v_mntvnodes) {
6917 vp->v_flag |= VLOCKLOCAL;
6918 }
6919 TAILQ_FOREACH(vp, &mp->mnt_newvnodes, v_mntvnodes) {
6920 vp->v_flag |= VLOCKLOCAL;
6921 }
6922 mount_unlock(mp);
6923 }
6924
6925 void
6926 vfs_setunmountpreflight(mount_t mp)
6927 {
6928 mount_lock_spin(mp);
6929 mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
6930 mount_unlock(mp);
6931 }
6932
6933 void
6934 vn_setunionwait(vnode_t vp)
6935 {
6936 vnode_lock_spin(vp);
6937 vp->v_flag |= VISUNION;
6938 vnode_unlock(vp);
6939 }
6940
6941
6942 void
6943 vn_checkunionwait(vnode_t vp)
6944 {
6945 vnode_lock_spin(vp);
6946 while ((vp->v_flag & VISUNION) == VISUNION)
6947 msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
6948 vnode_unlock(vp);
6949 }
6950
6951 void
6952 vn_clearunionwait(vnode_t vp, int locked)
6953 {
6954 if (!locked)
6955 vnode_lock_spin(vp);
6956 if ((vp->v_flag & VISUNION) == VISUNION) {
6957 vp->v_flag &= ~VISUNION;
6958 wakeup((caddr_t)&vp->v_flag);
6959 }
6960 if (!locked)
6961 vnode_unlock(vp);
6962 }
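/*
 * Illustrative pairing (a sketch): one thread marks the vnode while a
 * union-mount copy-up is in flight, other threads block on the flag,
 * and the marker wakes everyone when the work is done.
 */
#if 0
	vn_setunionwait(vp);		/* flag the vnode busy */
	/* ... do the copy-up; concurrent callers sit in
	 *     vn_checkunionwait(vp) until the flag clears ... */
	vn_clearunionwait(vp, 0);	/* clear VISUNION and wake waiters */
#endif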
6963
6964 /*
6965 * XXX - get "don't trigger mounts" flag for thread; used by autofs.
6966 */
6967 extern int thread_notrigger(void);
6968
6969 int
6970 thread_notrigger(void)
6971 {
6972 struct uthread *uth = (struct uthread *)get_bsdthread_info(current_thread());
6973 return (uth->uu_notrigger);
6974 }
6975
6976 /*
6977 * Removes orphaned AppleDouble files during an rmdir
6978 * Works by:
6979 * 1. vnode_suspend().
6980 * 2. Call VNOP_READDIR() till the end of directory is reached.
6981 * 3. Check if the directory entries returned are regular files with name starting with "._". If not, return ENOTEMPTY.
6982 * 4. Continue (2) and (3) till end of directory is reached.
6983 * 5. If all the entries in the directory were files with "._" name, delete all the files.
6984 * 6. vnode_resume()
6985 * 7. If deletion of all files succeeded, call VNOP_RMDIR() again.
6986 */
6987
6988 errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int *restart_flag)
6989 {
6990
6991 #define UIO_BUFF_SIZE 2048
6992 uio_t auio = NULL;
6993 int eofflag, siz = UIO_BUFF_SIZE, nentries = 0;
6994 int open_flag = 0, full_erase_flag = 0;
6995 char uio_buf[ UIO_SIZEOF(1) ];
6996 char *rbuf = NULL, *cpos, *cend;
6997 struct nameidata nd_temp;
6998 struct dirent *dp;
6999 errno_t error;
7000
7001 error = vnode_suspend(vp);
7002
7003 /*
7004 * restart_flag is set so that the calling rmdir sleeps and retries
7005 */
7006 if (error == EBUSY)
7007 *restart_flag = 1;
7008 if (error != 0)
7009 goto outsc;
7010
7011 /*
7012 * set up UIO
7013 */
7014 MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK);
7015 if (rbuf)
7016 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
7017 &uio_buf[0], sizeof(uio_buf));
7018 if (!rbuf || !auio) {
7019 error = ENOMEM;
7020 goto outsc;
7021 }
7022
7023 uio_setoffset(auio, 0);
7024
7025 eofflag = 0;
7026
7027 if ((error = VNOP_OPEN(vp, FREAD, ctx)))
7028 goto outsc;
7029 else
7030 open_flag = 1;
7031
7032 /*
7033 * First pass checks if all files are appleDouble files.
7034 */
7035
7036 do {
7037 siz = UIO_BUFF_SIZE;
7038 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
7039 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
7040
7041 if ((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx)))
7042 goto outsc;
7043
7044 if (uio_resid(auio) != 0)
7045 siz -= uio_resid(auio);
7046
7047 /*
7048 * Iterate through directory
7049 */
7050 cpos = rbuf;
7051 cend = rbuf + siz;
7052 dp = (struct dirent*) cpos;
7053
7054 if (cpos == cend)
7055 eofflag = 1;
7056
7057 while ((cpos < cend)) {
7058 /*
7059 * Check for . and .. as well as directories
7060 */
7061 if (dp->d_ino != 0 &&
7062 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
7063 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) {
7064 /*
7065 * Check for irregular files and ._ files
7066 * If there is a ._._ file abort the op
7067 */
7068 if ( dp->d_namlen < 2 ||
7069 strncmp(dp->d_name,"._",2) ||
7070 (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._",2))) {
7071 error = ENOTEMPTY;
7072 goto outsc;
7073 }
7074 }
7075 cpos += dp->d_reclen;
7076 dp = (struct dirent*)cpos;
7077 }
7078
7079 /*
7080 * workaround for HFS/NFS setting eofflag before end of file
7081 */
7082 if (vp->v_tag == VT_HFS && nentries > 2)
7083 eofflag = 0;
7084
7085 if (vp->v_tag == VT_NFS) {
7086 if (eofflag && !full_erase_flag) {
7087 full_erase_flag = 1;
7088 eofflag = 0;
7089 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
7090 }
7091 else if (!eofflag && full_erase_flag)
7092 full_erase_flag = 0;
7093 }
7094
7095 } while (!eofflag);
7096 /*
7097 * If we've made it here, all the files in the dir are ._ files.
7098 * We can delete the files even though the node is suspended
7099 * because we are the owner of the file.
7100 */
7101
7102 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
7103 eofflag = 0;
7104 full_erase_flag = 0;
7105
7106 do {
7107 siz = UIO_BUFF_SIZE;
7108 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
7109 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
7110
7111 error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);
7112
7113 if (error != 0)
7114 goto outsc;
7115
7116 if (uio_resid(auio) != 0)
7117 siz -= uio_resid(auio);
7118
7119 /*
7120 * Iterate through directory
7121 */
7122 cpos = rbuf;
7123 cend = rbuf + siz;
7124 dp = (struct dirent*) cpos;
7125
7126 if (cpos == cend)
7127 eofflag = 1;
7128
7129 while ((cpos < cend)) {
7130 /*
7131 * Check for . and .. as well as directories
7132 */
7133 if (dp->d_ino != 0 &&
7134 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
7135 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
7136 ) {
7137
7138 NDINIT(&nd_temp, DELETE, USEDVP, UIO_SYSSPACE, CAST_USER_ADDR_T(dp->d_name), ctx);
7139 nd_temp.ni_dvp = vp;
7140 error = unlink1(ctx, &nd_temp, 0);
7141 if (error && error != ENOENT) {
7142 goto outsc;
7143 }
7144 }
7145 cpos += dp->d_reclen;
7146 dp = (struct dirent*)cpos;
7147 }
7148
7149 /*
7150 * workaround for HFS/NFS setting eofflag before end of file
7151 */
7152 if (vp->v_tag == VT_HFS && nentries > 2)
7153 eofflag = 0;
7154
7155 if (vp->v_tag == VT_NFS) {
7156 if (eofflag && !full_erase_flag) {
7157 full_erase_flag = 1;
7158 eofflag = 0;
7159 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
7160 }
7161 else if (!eofflag && full_erase_flag)
7162 full_erase_flag = 0;
7163 }
7164
7165 } while (!eofflag);
7166
7167
7168 error = 0;
7169
7170 outsc:
7171 if (open_flag)
7172 VNOP_CLOSE(vp, FREAD, ctx);
7173
7174 uio_free(auio);
7175 FREE(rbuf, M_TEMP);
7176
7177 vnode_resume(vp);
7178
7179
7180 return(error);
7181
7182 }
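/*
 * Illustrative caller (a sketch; the real caller is the rmdir path in
 * vfs_syscalls.c, and 'dvp'/'cnp' here stand in for its locals): per
 * step 7 of the comment above, a clean sweep is followed by a second
 * VNOP_RMDIR() attempt, while EBUSY asks the caller to back off and
 * restart.
 */
#if 0
	int restart_flag = 0;

	error = rmdir_remove_orphaned_appleDouble(vp, ctx, &restart_flag);
	if (restart_flag) {
		/* directory busy: drop references, nap, then retry the rmdir */
	} else if (error == 0) {
		error = VNOP_RMDIR(dvp, vp, cnp, ctx);
	}
#endif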
7183
7184
7185 void
7186 lock_vnode_and_post(vnode_t vp, int kevent_num)
7187 {
7188 /* Only take the lock if there's something there! */
7189 if (vp->v_knotes.slh_first != NULL) {
7190 vnode_lock(vp);
7191 KNOTE(&vp->v_knotes, kevent_num);
7192 vnode_unlock(vp);
7193 }
7194 }
7195
7196 #ifdef JOE_DEBUG
7197 static void record_vp(vnode_t vp, int count) {
7198 struct uthread *ut;
7199 int i;
7200
7201 if ((vp->v_flag & VSYSTEM))
7202 return;
7203
7204 ut = get_bsdthread_info(current_thread());
7205 ut->uu_iocount += count;
7206
7207 if (ut->uu_vpindex < 32) {
7208 for (i = 0; i < ut->uu_vpindex; i++) {
7209 if (ut->uu_vps[i] == vp)
7210 return;
7211 }
7212 ut->uu_vps[ut->uu_vpindex] = vp;
7213 ut->uu_vpindex++;
7214 }
7215 }
7216 #endif