/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

/*
 * External virtual filesystem routines
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/time.h>
#include <sys/lock.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf_internal.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/uio_internal.h>
#include <sys/uio.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/ubc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/event.h>
#include <sys/kdebug.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/kern_memorystatus.h>
#include <miscfs/fifofs/fifo.h>

#include <string.h>
#include <machine/spl.h>


#include <kern/assert.h>

#include <miscfs/specfs/specdev.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>

#include <kern/kalloc.h>	/* kalloc()/kfree() */
#include <kern/clock.h>		/* delay_for_interval() */
#include <libkern/OSAtomic.h>	/* OSAddAtomic() */


#include <vm/vm_protos.h>	/* vnode_pager_vrele() */

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

extern lck_grp_t *vnode_lck_grp;
extern lck_attr_t *vnode_lck_attr;


extern lck_mtx_t *mnt_list_mtx_lock;

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/* XXX next prototype should be from <nfs/nfs.h> */
extern int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int);

/* XXX next prototype should be from <libsa/stdlib.h> but conflicts with libkern */
__private_extern__ void qsort(
	void * array,
	size_t nmembers,
	size_t member_size,
	int (*)(const void *, const void *));

extern kern_return_t adjust_vm_object_cache(vm_size_t oval, vm_size_t nval);
__private_extern__ void vntblinit(void);
__private_extern__ kern_return_t reset_vmobjectcache(unsigned int val1,
		unsigned int val2);
__private_extern__ int unlink1(vfs_context_t, struct nameidata *, int);

extern int system_inshutdown;

static void vnode_list_add(vnode_t);
static void vnode_list_remove(vnode_t);
static void vnode_list_remove_locked(vnode_t);

static errno_t vnode_drain(vnode_t);
static void vgone(vnode_t, int flags);
static void vclean(vnode_t vp, int flag);
static void vnode_reclaim_internal(vnode_t, int, int, int);

static void vnode_dropiocount(vnode_t);
static errno_t vnode_getiocount(vnode_t vp, unsigned int vid, int vflags);

static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
static int vnode_reload(vnode_t);
static int vnode_isinuse_locked(vnode_t, int, int);

static void insmntque(vnode_t vp, mount_t mp);
static int mount_getvfscnt(void);
static int mount_fillfsids(fsid_t *, int);
static void vnode_iterate_setup(mount_t);
int vnode_umount_preflight(mount_t, vnode_t, int);
static int vnode_iterate_prepare(mount_t);
static int vnode_iterate_reloadq(mount_t);
static void vnode_iterate_clear(mount_t);
static mount_t vfs_getvfs_locked(fsid_t *);

errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);

#ifdef JOE_DEBUG
static void record_vp(vnode_t vp, int count);
#endif

TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
TAILQ_HEAD(deadlst, vnode) vnode_dead_list;	/* vnode dead list */

TAILQ_HEAD(ragelst, vnode) vnode_rage_list;	/* vnode rapid age list */
struct timeval rage_tv;
int rage_limit = 0;
int ragevnodes = 0;

#define RAGE_LIMIT_MIN	100
#define RAGE_TIME_LIMIT	5

struct mntlist mountlist;			/* mounted filesystem list */
static int nummounts = 0;

#if DIAGNOSTIC
#define VLISTCHECK(fun, vp, list)	\
	if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
		panic("%s: %s vnode not on %slist", (fun), (list), (list));
#else
#define VLISTCHECK(fun, vp, list)
#endif /* DIAGNOSTIC */

#define VLISTNONE(vp)	\
	do {	\
		(vp)->v_freelist.tqe_next = (struct vnode *)0;	\
		(vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb;	\
	} while(0)

#define VONLIST(vp)	\
	((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)

/* remove a vnode from free vnode list */
#define VREMFREE(fun, vp)	\
	do {	\
		VLISTCHECK((fun), (vp), "free");	\
		TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist);	\
		VLISTNONE((vp));	\
		freevnodes--;	\
	} while(0)



/* remove a vnode from dead vnode list */
#define VREMDEAD(fun, vp)	\
	do {	\
		VLISTCHECK((fun), (vp), "dead");	\
		TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist);	\
		VLISTNONE((vp));	\
		vp->v_listflag &= ~VLIST_DEAD;	\
		deadvnodes--;	\
	} while(0)


/* remove a vnode from rage vnode list */
#define VREMRAGE(fun, vp)	\
	do {	\
		if ( !(vp->v_listflag & VLIST_RAGE))	\
			panic("VREMRAGE: vp not on rage list");	\
		VLISTCHECK((fun), (vp), "rage");	\
		TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist);	\
		VLISTNONE((vp));	\
		vp->v_listflag &= ~VLIST_RAGE;	\
		ragevnodes--;	\
	} while(0)


/*
 * vnodetarget hasn't been used in a long time, but
 * it was exported for some reason... I'm leaving it in
 * place for now... it should be deprecated out of the
 * exports and removed eventually.
 */
u_int32_t vnodetarget;		/* target for vnreclaim() */
#define VNODE_FREE_TARGET	20	/* Default value for vnodetarget */

/*
 * We need quite a few vnodes on the free list to sustain the
 * rapid stat() calls the compilation process makes, and still benefit
 * from the name cache. Having too few vnodes on the free list causes
 * serious disk thrashing as we cycle through them.
 */
#define VNODE_FREE_MIN		CONFIG_VNODE_FREE_MIN	/* freelist should have at least this many */
/*
 * Initialize the vnode management data structures.
 */
__private_extern__ void
vntblinit(void)
{
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_rage_list);
	TAILQ_INIT(&vnode_dead_list);
	TAILQ_INIT(&mountlist);

	if (!vnodetarget)
		vnodetarget = VNODE_FREE_TARGET;

	microuptime(&rage_tv);
	rage_limit = desiredvnodes / 100;

	if (rage_limit < RAGE_LIMIT_MIN)
		rage_limit = RAGE_LIMIT_MIN;

	/*
	 * Scale the vm_object_cache to accommodate the vnodes
	 * we want to cache
	 */
	(void) adjust_vm_object_cache(0, desiredvnodes - VNODE_FREE_MIN);
}
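
/*
 * Worked example (editorial addition, not in the original source): with a
 * hypothetical desiredvnodes of 20000, rage_limit = 20000 / 100 = 200
 * rapidly-aged vnodes; with desiredvnodes of 5000 the 1% figure (50)
 * falls below RAGE_LIMIT_MIN, so rage_limit is clamped up to 100.
 */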

/* Reset the VM Object Cache with the values passed in */
__private_extern__ kern_return_t
reset_vmobjectcache(unsigned int val1, unsigned int val2)
{
	vm_size_t oval = val1 - VNODE_FREE_MIN;
	vm_size_t nval;

	if (val1 == val2) {
		return KERN_SUCCESS;
	}

	if (val2 < VNODE_FREE_MIN)
		nval = 0;
	else
		nval = val2 - VNODE_FREE_MIN;

	return (adjust_vm_object_cache(oval, nval));
}


/* the timeout is in 10 msecs */
int
vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg) {
	int error = 0;
	struct timespec ts;

	KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);

	if (vp->v_numoutput > output_target) {

		slpflag |= PDROP;

		vnode_lock_spin(vp);

		while ((vp->v_numoutput > output_target) && error == 0) {
			if (output_target)
				vp->v_flag |= VTHROTTLED;
			else
				vp->v_flag |= VBWAIT;

			ts.tv_sec = (slptimeout / 100);
			ts.tv_nsec = (slptimeout % 100) * 10 * NSEC_PER_USEC * 1000;	/* remaining 10 ms units */
			error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);

			vnode_lock_spin(vp);
		}
		vnode_unlock(vp);
	}
	KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);

	return error;
}
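
/*
 * Usage sketch (editorial addition, not in the original source): a writer
 * that wants to bound its in-flight writes can pair the counters below
 * with this routine; more_work() and issue_async_write() are hypothetical:
 *
 *	while (more_work(vp)) {
 *		vnode_startwrite(vp);
 *		issue_async_write(vp);	// completion calls vnode_writedone(vp)
 *		vnode_waitforwrites(vp, 64, 0, 0, "mywriter");
 *	}
 *
 * An output_target of 0 instead waits for all pending writes to drain
 * (the VBWAIT case above).
 */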


void
vnode_startwrite(vnode_t vp) {

	OSAddAtomic(1, &vp->v_numoutput);
}


void
vnode_writedone(vnode_t vp)
{
	if (vp) {
		OSAddAtomic(-1, &vp->v_numoutput);

		if (vp->v_numoutput <= 1) {
			int need_wakeup = 0;

			vnode_lock_spin(vp);

			if (vp->v_numoutput < 0)
				panic("vnode_writedone: numoutput < 0");

			if ((vp->v_flag & VTHROTTLED) && (vp->v_numoutput <= 1)) {
				vp->v_flag &= ~VTHROTTLED;
				need_wakeup = 1;
			}
			if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
				vp->v_flag &= ~VBWAIT;
				need_wakeup = 1;
			}
			vnode_unlock(vp);

			if (need_wakeup)
				wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}
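
/*
 * Note: vnode_startwrite() and vnode_writedone() must be strictly paired
 * around each write issued against the vnode; v_numoutput counts writes
 * in flight and vnode_waitforwrites() above sleeps on it.  An unmatched
 * vnode_startwrite() leaves waiters blocked forever, while an extra
 * vnode_writedone() trips the "numoutput < 0" panic.
 */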



int
vnode_hasdirtyblks(vnode_t vp)
{
	struct cl_writebehind *wbp;

	/*
	 * Not taking the buf_mtxp as there is little
	 * point doing it. Even if the lock is taken the
	 * state can change right after that. If there
	 * needs to be a synchronization, it must be driven
	 * by the caller
	 */
	if (vp->v_dirtyblkhd.lh_first)
		return (1);

	if (!UBCINFOEXISTS(vp))
		return (0);

	wbp = vp->v_ubcinfo->cl_wbehind;

	if (wbp && (wbp->cl_number || wbp->cl_scmap))
		return (1);

	return (0);
}

int
vnode_hascleanblks(vnode_t vp)
{
	/*
	 * Not taking the buf_mtxp as there is little
	 * point doing it. Even if the lock is taken the
	 * state can change right after that. If there
	 * needs to be a synchronization, it must be driven
	 * by the caller
	 */
	if (vp->v_cleanblkhd.lh_first)
		return (1);
	return (0);
}

void
vnode_iterate_setup(mount_t mp)
{
	while (mp->mnt_lflag & MNT_LITER) {
		mp->mnt_lflag |= MNT_LITERWAIT;
		msleep((caddr_t)mp, &mp->mnt_mlock, PVFS, "vnode_iterate_setup", NULL);
	}

	mp->mnt_lflag |= MNT_LITER;
}

int
vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
{
	vnode_t vp;

	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		/* disable preflight only for udf, a hack to be removed after 4073176 is fixed */
		if (vp->v_tag == VT_UDF)
			return 0;
		if (vp->v_type == VDIR)
			continue;
		if (vp == skipvp)
			continue;
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
		    (vp->v_flag & VNOFLUSH)))
			continue;
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP))
			continue;
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG))
			continue;
		/* Look for busy vnode */
		if (((vp->v_usecount != 0) &&
		    ((vp->v_usecount - vp->v_kusecount) != 0)))
			return (1);
	}

	return (0);
}
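
/*
 * Note: a vnode counts as busy above only if it holds usecount references
 * beyond its kernel/O_EVTONLY ones, i.e. (v_usecount - v_kusecount) != 0;
 * kernel-only references alone do not cause the preflight to fail.
 */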

/*
 * This routine prepares iteration by moving all the vnodes to the worker
 * queue; called with the mount lock held.
 */
int
vnode_iterate_prepare(mount_t mp)
{
	vnode_t vp;

	if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
		/* nothing to do */
		return (0);
	}

	vp = TAILQ_FIRST(&mp->mnt_vnodelist);
	vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
	mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
	mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;

	TAILQ_INIT(&mp->mnt_vnodelist);
	if (mp->mnt_newvnodes.tqh_first != NULL)
		panic("vnode_iterate_prepare: newvnode when entering vnode");
	TAILQ_INIT(&mp->mnt_newvnodes);

	return (1);
}


/* called with mount lock held */
int
vnode_iterate_reloadq(mount_t mp)
{
	int moved = 0;

	/* add the remaining entries in workerq to the end of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		struct vnode * mvp;
		mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);

		/* Joining the workerqueue entries to the mount vnode list */
		if (mvp)
			mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
		else
			mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
		mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
		mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
		TAILQ_INIT(&mp->mnt_workerqueue);
	}

	/* add the newvnodes to the head of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
		struct vnode * nlvp;
		nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);

		mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
		nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
		if (mp->mnt_vnodelist.tqh_first)
			mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
		else
			mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
		mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
		TAILQ_INIT(&mp->mnt_newvnodes);
		moved = 1;
	}

	return (moved);
}


void
vnode_iterate_clear(mount_t mp)
{
	mp->mnt_lflag &= ~MNT_LITER;
	if (mp->mnt_lflag & MNT_LITERWAIT) {
		mp->mnt_lflag &= ~MNT_LITERWAIT;
		wakeup(mp);
	}
}


int
vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
	      void *arg)
{
	struct vnode *vp;
	int vid, retval;
	int ret = 0;

	mount_lock(mp);

	vnode_iterate_setup(mp);

	/* if it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		return (ret);
	}

	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
		vid = vp->v_id;
		if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
			continue;
		}
		mount_unlock(mp);

		if (vget_internal(vp, vid, (flags | VNODE_NODEAD | VNODE_WITHID | VNODE_NOSUSPEND))) {
			mount_lock(mp);
			continue;
		}
		if (flags & VNODE_RELOAD) {
			/*
			 * we're reloading the filesystem
			 * cast out any inactive vnodes...
			 */
			if (vnode_reload(vp)) {
				/* vnode will be recycled on the refcount drop */
				vnode_put(vp);
				mount_lock(mp);
				continue;
			}
		}

		retval = callout(vp, arg);

		switch (retval) {
		case VNODE_RETURNED:
		case VNODE_RETURNED_DONE:
			vnode_put(vp);
			if (retval == VNODE_RETURNED_DONE) {
				mount_lock(mp);
				ret = 0;
				goto out;
			}
			break;

		case VNODE_CLAIMED_DONE:
			mount_lock(mp);
			ret = 0;
			goto out;
		case VNODE_CLAIMED:
		default:
			break;
		}
		mount_lock(mp);
	}

out:
	(void)vnode_iterate_reloadq(mp);
	vnode_iterate_clear(mp);
	mount_unlock(mp);
	return (ret);
}
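
/*
 * Usage sketch (editorial addition, not in the original source): a callout
 * runs with the iocount that vnode_iterate() took on its behalf and
 * returns VNODE_RETURNED so that iocount is dropped, or
 * VNODE_RETURNED_DONE to stop the walk early:
 *
 *	static int
 *	count_regular(struct vnode *vp, void *arg)
 *	{
 *		if (vnode_isreg(vp))
 *			(*(int *)arg)++;
 *		return (VNODE_RETURNED);
 *	}
 *
 *	int nregular = 0;
 *	vnode_iterate(mp, 0, count_regular, &nregular);
 *
 * A callout that consumes the iocount itself returns VNODE_CLAIMED or
 * VNODE_CLAIMED_DONE instead.
 */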

void
mount_lock_renames(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_renamelock);
}

void
mount_unlock_renames(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_renamelock);
}

void
mount_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_mlock);
}

void
mount_lock_spin(mount_t mp)
{
	lck_mtx_lock_spin(&mp->mnt_mlock);
}

void
mount_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_mlock);
}


void
mount_ref(mount_t mp, int locked)
{
	if ( !locked)
		mount_lock_spin(mp);

	mp->mnt_count++;

	if ( !locked)
		mount_unlock(mp);
}


void
mount_drop(mount_t mp, int locked)
{
	if ( !locked)
		mount_lock_spin(mp);

	mp->mnt_count--;

	if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN))
		wakeup(&mp->mnt_lflag);

	if ( !locked)
		mount_unlock(mp);
}


int
mount_iterref(mount_t mp, int locked)
{
	int retval = 0;

	if (!locked)
		mount_list_lock();
	if (mp->mnt_iterref < 0) {
		retval = 1;
	} else {
		mp->mnt_iterref++;
	}
	if (!locked)
		mount_list_unlock();
	return (retval);
}

int
mount_isdrained(mount_t mp, int locked)
{
	int retval;

	if (!locked)
		mount_list_lock();
	if (mp->mnt_iterref < 0)
		retval = 1;
	else
		retval = 0;
	if (!locked)
		mount_list_unlock();
	return (retval);
}

void
mount_iterdrop(mount_t mp)
{
	mount_list_lock();
	mp->mnt_iterref--;
	wakeup(&mp->mnt_iterref);
	mount_list_unlock();
}

void
mount_iterdrain(mount_t mp)
{
	mount_list_lock();
	while (mp->mnt_iterref)
		msleep((caddr_t)&mp->mnt_iterref, mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL);
	/* mount iterations drained */
	mp->mnt_iterref = -1;
	mount_list_unlock();
}

void
mount_iterreset(mount_t mp)
{
	mount_list_lock();
	if (mp->mnt_iterref == -1)
		mp->mnt_iterref = 0;
	mount_list_unlock();
}

/* always called with mount lock held */
int
mount_refdrain(mount_t mp)
{
	if (mp->mnt_lflag & MNT_LDRAIN)
		panic("already in drain");
	mp->mnt_lflag |= MNT_LDRAIN;

	while (mp->mnt_count)
		msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL);

	if (mp->mnt_vnodelist.tqh_first != NULL)
		panic("mount_refdrain: dangling vnode");

	mp->mnt_lflag &= ~MNT_LDRAIN;

	return (0);
}


/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 */
int
vfs_busy(mount_t mp, int flags)
{

restart:
	if (mp->mnt_lflag & MNT_LDEAD)
		return (ENOENT);

	if (mp->mnt_lflag & MNT_LUNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);

		mount_lock(mp);

		if (mp->mnt_lflag & MNT_LDEAD) {
			mount_unlock(mp);
			return (ENOENT);
		}
		if (mp->mnt_lflag & MNT_LUNMOUNT) {
			mp->mnt_lflag |= MNT_LWAIT;
			/*
			 * Since all busy locks are shared except the exclusive
			 * lock granted when unmounting, the only place that a
			 * wakeup needs to be done is at the release of the
			 * exclusive lock at the end of dounmount.
			 */
			msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
			return (ENOENT);
		}
		mount_unlock(mp);
	}

	lck_rw_lock_shared(&mp->mnt_rwlock);

	/*
	 * until we are granted the rwlock, it's possible for the mount point to
	 * change state, so reevaluate before granting the vfs_busy
	 */
	if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
		lck_rw_done(&mp->mnt_rwlock);
		goto restart;
	}
	return (0);
}

/*
 * Free a busy filesystem.
 */

void
vfs_unbusy(mount_t mp)
{
	lck_rw_done(&mp->mnt_rwlock);
}
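
/*
 * Usage sketch (editorial addition, not in the original source):
 * vfs_busy()/vfs_unbusy() bracket work that must not race an unmount:
 *
 *	if (vfs_busy(mp, LK_NOWAIT) == 0) {
 *		// mp cannot finish unmounting while the shared
 *		// mnt_rwlock is held; do mount-relative work here
 *		vfs_unbusy(mp);
 *	}
 *
 * Without LK_NOWAIT the caller sleeps through a concurrent unmount and
 * then gets ENOENT.
 */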



static void
vfs_rootmountfailed(mount_t mp) {

	mount_list_lock();
	mp->mnt_vtable->vfc_refcount--;
	mount_list_unlock();

	vfs_unbusy(mp);

	mount_lock_destroy(mp);

#if CONFIG_MACF
	mac_mount_label_destroy(mp);
#endif

	FREE_ZONE(mp, sizeof(struct mount), M_MOUNT);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
static mount_t
vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname)
{
	mount_t mp;

	mp = _MALLOC_ZONE(sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, sizeof(struct mount));

	/* Initialize the default IO constraints */
	mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
	mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
	mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
	mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
	mp->mnt_devblocksize = DEV_BSIZE;
	mp->mnt_alignmentmask = PAGE_MASK;
	mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
	mp->mnt_ioscale = 1;
	mp->mnt_ioflags = 0;
	mp->mnt_realrootvp = NULLVP;
	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;

	mount_lock_init(mp);
	(void)vfs_busy(mp, LK_NOWAIT);

	TAILQ_INIT(&mp->mnt_vnodelist);
	TAILQ_INIT(&mp->mnt_workerqueue);
	TAILQ_INIT(&mp->mnt_newvnodes);

	mp->mnt_vtable = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS;
	mp->mnt_vnodecovered = NULLVP;
	//mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;

	mount_list_lock();
	vfsp->vfc_refcount++;
	mount_list_unlock();

	strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
	mp->mnt_vfsstat.f_mntonname[0] = '/';
	/* XXX const poisoning layering violation */
	(void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);

#if CONFIG_MACF
	mac_mount_label_init(mp);
	mac_mount_label_associate(vfs_context_kernel(), mp);
#endif
	return (mp);
}

errno_t
vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp)
{
	struct vfstable *vfsp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strncmp(vfsp->vfc_name, fstypename,
			     sizeof(vfsp->vfc_name)))
			break;
	if (vfsp == NULL)
		return (ENODEV);

	*mpp = vfs_rootmountalloc_internal(vfsp, devname);

	if (*mpp)
		return (0);

	return (ENOMEM);
}


/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
extern int (*mountroot)(void);

int
vfs_mountroot(void)
{
#if CONFIG_MACF
	struct vnode *vp;
#endif
	struct vfstable *vfsp;
	vfs_context_t ctx = vfs_context_kernel();
	struct vfs_attr vfsattr;
	int error;
	mount_t mp;
	vnode_t bdevvp_rootvp;

	if (mountroot != NULL) {
		/*
		 * used for netboot which follows a different set of rules
		 */
		error = (*mountroot)();
		return (error);
	}
	if ((error = bdevvp(rootdev, &rootvp))) {
		printf("vfs_mountroot: can't setup bdevvp\n");
		return (error);
	}
	/*
	 * 4951998 - code we call in vfc_mountroot may replace rootvp
	 * so keep a local copy for some housekeeping.
	 */
	bdevvp_rootvp = rootvp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL)
			continue;

		mp = vfs_rootmountalloc_internal(vfsp, "root_device");
		mp->mnt_devvp = rootvp;

		if ((error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx)) == 0) {
			if (bdevvp_rootvp != rootvp) {
				/*
				 * rootvp changed...
				 * bump the iocount and fix up mnt_devvp for the
				 * new rootvp (it will already have a usecount taken)...
				 * drop the iocount and the usecount on the original
				 * since we are no longer going to use it...
				 */
				vnode_getwithref(rootvp);
				mp->mnt_devvp = rootvp;

				vnode_rele(bdevvp_rootvp);
				vnode_put(bdevvp_rootvp);
			}
			mp->mnt_devvp->v_specflags |= SI_MOUNTEDON;

			vfs_unbusy(mp);

			mount_list_add(mp);

			/*
			 * cache the IO attributes for the underlying physical media...
			 * an error return indicates the underlying driver doesn't
			 * support all the queries necessary... however, reasonable
			 * defaults will have been set, so no reason to bail or care
			 */
			vfs_init_io_attributes(rootvp, mp);

			/*
			 * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
			 */
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
				mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
			}
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) {
				mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
			}

			/*
			 * Probe root file system for additional features.
			 */
			(void)VFS_START(mp, 0, ctx);

			VFSATTR_INIT(&vfsattr);
			VFSATTR_WANTED(&vfsattr, f_capabilities);
			if (vfs_getattr(mp, &vfsattr, ctx) == 0 &&
			    VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
					mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
				}
#if NAMEDSTREAMS
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) {
					mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
				}
#endif
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) {
					mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
				}
			}

			/*
			 * get rid of iocount reference returned
			 * by bdevvp (or picked up by us on the substituted
			 * rootvp)... it (or we) will have also taken
			 * a usecount reference which we want to keep
			 */
			vnode_put(rootvp);

#if CONFIG_MACF
			if ((vfs_flags(mp) & MNT_MULTILABEL) == 0)
				return (0);

			error = VFS_ROOT(mp, &vp, ctx);
			if (error) {
				printf("%s() VFS_ROOT() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}
			error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
			/*
			 * get rid of reference provided by VFS_ROOT
			 */
			vnode_put(vp);

			if (error) {
				printf("%s() vnode_label() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}
#endif
			return (0);
		}
#if CONFIG_MACF
fail:
#endif
		vfs_rootmountfailed(mp);

		if (error != EINVAL)
			printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
	}
	return (ENODEV);
}

/*
 * Lookup a mount point by filesystem identifier.
 */

struct mount *
vfs_getvfs(fsid_t *fsid)
{
	return (mount_list_lookupby_fsid(fsid, 0, 0));
}

static struct mount *
vfs_getvfs_locked(fsid_t *fsid)
{
	return (mount_list_lookupby_fsid(fsid, 1, 0));
}

struct mount *
vfs_getvfs_by_mntonname(char *path)
{
	mount_t retmp = (mount_t)0;
	mount_t mp;

	mount_list_lock();
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!strncmp(mp->mnt_vfsstat.f_mntonname, path,
			     sizeof(mp->mnt_vfsstat.f_mntonname))) {
			retmp = mp;
			goto out;
		}
	}
out:
	mount_list_unlock();
	return (retmp);
}

/* generation number for creation of new fsids */
u_short mntid_gen = 0;
/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	fsid_t tfsid;
	int mtype;
	mount_t nmp;

	mount_list_lock();

	/* generate a new fsid */
	mtype = mp->mnt_vtable->vfc_typenum;
	if (++mntid_gen == 0)
		mntid_gen++;
	tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
	tfsid.val[1] = mtype;

	TAILQ_FOREACH(nmp, &mountlist, mnt_list) {
		while (vfs_getvfs_locked(&tfsid)) {
			if (++mntid_gen == 0)
				mntid_gen++;
			tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
		}
	}
	mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1];
	mount_list_unlock();
}
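
/*
 * Note: the fsid synthesized above is
 * { makedev(nblkdev + vfc_typenum, mntid_gen), vfc_typenum }: major
 * numbers past the real block devices encode the filesystem type, and
 * the minor number is a generation counter bumped until no existing
 * mount owns the candidate fsid.
 */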

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)(void *);
long numvnodes, freevnodes, deadvnodes;


/*
 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vnode_t vp, mount_t mp)
{
	mount_t lmp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
		if ((vp->v_lflag & VNAMED_MOUNT) == 0)
			panic("insmntque: vp not in mount vnode list");
		vp->v_lflag &= ~VNAMED_MOUNT;

		mount_lock_spin(lmp);

		mount_drop(lmp, 1);

		if (vp->v_mntvnodes.tqe_next == NULL) {
			if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp)
				TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
			else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp)
				TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
			else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp)
				TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
		} else {
			vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
			*vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
		}
		vp->v_mntvnodes.tqe_next = NULL;
		vp->v_mntvnodes.tqe_prev = NULL;
		mount_unlock(lmp);
		return;
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL) {
		mount_lock_spin(mp);
		if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0))
			panic("vp already in mount list");
		if (mp->mnt_lflag & MNT_LITER)
			TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
		else
			TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
		if (vp->v_lflag & VNAMED_MOUNT)
			panic("insmntque: vp already in mount vnode list");
		vp->v_lflag |= VNAMED_MOUNT;
		mount_ref(mp, 1);
		mount_unlock(mp);
	}
}


/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, vnode_t *vpp)
{
	vnode_t nvp;
	int error;
	struct vnode_fsparam vfsp;
	struct vfs_context context;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (ENODEV);
	}

	context.vc_thread = current_thread();
	context.vc_ucred = FSCRED;

	vfsp.vnfs_mp = (struct mount *)0;
	vfsp.vnfs_vtype = VBLK;
	vfsp.vnfs_str = "bdevvp";
	vfsp.vnfs_dvp = NULL;
	vfsp.vnfs_fsnode = NULL;
	vfsp.vnfs_cnp = NULL;
	vfsp.vnfs_vops = spec_vnodeop_p;
	vfsp.vnfs_rdev = dev;
	vfsp.vnfs_filesize = 0;

	vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;

	vfsp.vnfs_marksystem = 0;
	vfsp.vnfs_markroot = 0;

	if ((error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp))) {
		*vpp = NULLVP;
		return (error);
	}
	vnode_lock_spin(nvp);
	nvp->v_flag |= VBDEVVP;
	nvp->v_tag = VT_NON;	/* set this to VT_NON so during aliasing it can be replaced */
	vnode_unlock(nvp);
	if ((error = vnode_ref(nvp))) {
		panic("bdevvp failed: vnode_ref");
		return (error);
	}
	if ((error = VNOP_FSYNC(nvp, MNT_WAIT, &context))) {
		panic("bdevvp failed: fsync");
		return (error);
	}
	if ((error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0))) {
		panic("bdevvp failed: invalidateblks");
		return (error);
	}

#if CONFIG_MACF
	/*
	 * XXXMAC: We can't put a MAC check here, the system will
	 * panic without this vnode.
	 */
#endif /* MAC */

	if ((error = VNOP_OPEN(nvp, FREAD, &context))) {
		panic("bdevvp failed: open");
		return (error);
	}
	*vpp = nvp;

	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
static vnode_t
checkalias(struct vnode *nvp, dev_t nvp_rdev)
{
	struct vnode *vp;
	struct vnode **vpp;
	struct specinfo *sin = NULL;
	int vid = 0;

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	SPECHASH_LOCK();

	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
			vid = vp->v_id;
			break;
		}
	}
	SPECHASH_UNLOCK();

	if (vp) {
found_alias:
		if (vnode_getwithvid(vp, vid)) {
			goto loop;
		}
		/*
		 * Termination state is checked in vnode_getwithvid
		 */
		vnode_lock(vp);

		/*
		 * Alias, but not in use, so flush it out.
		 */
		if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_put_locked(vp);
			vnode_unlock(vp);
			goto loop;
		}
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		if (sin == NULL) {
			MALLOC_ZONE(sin, struct specinfo *, sizeof(struct specinfo),
			    M_SPECINFO, M_WAITOK);
		}

		nvp->v_specinfo = sin;
		bzero(nvp->v_specinfo, sizeof(struct specinfo));
		nvp->v_rdev = nvp_rdev;
		nvp->v_specflags = 0;
		nvp->v_speclastr = -1;

		SPECHASH_LOCK();

		/* We dropped the lock, someone could have added */
		if (vp == NULLVP) {
			for (vp = *vpp; vp; vp = vp->v_specnext) {
				if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
					vid = vp->v_id;
					SPECHASH_UNLOCK();
					goto found_alias;
				}
			}
		}

		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		*vpp = nvp;

		if (vp != NULLVP) {
			nvp->v_specflags |= SI_ALIASED;
			vp->v_specflags |= SI_ALIASED;
			SPECHASH_UNLOCK();
			vnode_put_locked(vp);
			vnode_unlock(vp);
		} else {
			SPECHASH_UNLOCK();
		}

		return (NULLVP);
	}

	if (sin) {
		FREE_ZONE(sin, sizeof(struct specinfo), M_SPECINFO);
	}

	if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0)
		return (vp);

	panic("checkalias with VT_NON vp that shouldn't: %p", vp);

	return (vp);
}


/*
 * Get a reference on a particular vnode and lock it if requested.
 * If the vnode was on the inactive list, remove it from the list.
 * If the vnode was on the free list, remove it from the list and
 * move it to inactive list as needed.
 * The vnode lock bit is set if the vnode is being eliminated in
 * vgone. The process is awakened when the transition is completed,
 * and an error returned to indicate that the vnode is no longer
 * usable (possibly having been changed to a new file system type).
 */
int
vget_internal(vnode_t vp, int vid, int vflags)
{
	int error = 0;
	int vpid;

	vnode_lock_spin(vp);

	if (vflags & VNODE_WITHID)
		vpid = vid;
	else
		vpid = vp->v_id;	// save off the original v_id

	if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0))
		/*
		 * vnode to be returned only if it has writers opened
		 */
		error = EINVAL;
	else
		error = vnode_getiocount(vp, vpid, vflags);

	vnode_unlock(vp);

	return (error);
}

/*
 * Returns:	0			Success
 *		ENOENT			No such file or directory [terminating]
 */
int
vnode_ref(vnode_t vp)
{
	return (vnode_ref_ext(vp, 0));
}

/*
 * Returns:	0			Success
 *		ENOENT			No such file or directory [terminating]
 */
int
vnode_ref_ext(vnode_t vp, int fmode)
{
	int error = 0;

	vnode_lock_spin(vp);

	/*
	 * once all the current call sites have been fixed to ensure they have
	 * taken an iocount, we can toughen this assert up and insist that the
	 * iocount is non-zero... a non-zero usecount doesn't ensure correctness
	 */
	if (vp->v_iocount <= 0 && vp->v_usecount <= 0)
		panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);

	/*
	 * if you are the owner of drain/termination, you can acquire the usecount
	 */
	if ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) {
		if (vp->v_owner != current_thread()) {
			error = ENOENT;
			goto out;
		}
	}
	vp->v_usecount++;

	if (fmode & FWRITE) {
		if (++vp->v_writecount <= 0)
			panic("vnode_ref_ext: v_writecount");
	}
	if (fmode & O_EVTONLY) {
		if (++vp->v_kusecount <= 0)
			panic("vnode_ref_ext: v_kusecount");
	}
	if (vp->v_flag & VRAGE) {
		struct uthread *ut;

		ut = get_bsdthread_info(current_thread());

		if ( !(current_proc()->p_lflag & P_LRAGE_VNODES) &&
		     !(ut->uu_flag & UT_RAGE_VNODES)) {
			/*
			 * a 'normal' process accessed this vnode
			 * so make sure it's no longer marked
			 * for rapid aging... also, make sure
			 * it gets removed from the rage list...
			 * when v_usecount drops back to 0, it
			 * will be put back on the real free list
			 */
			vp->v_flag &= ~VRAGE;
			vp->v_references = 0;
			vnode_list_remove(vp);
		}
	}
out:
	vnode_unlock(vp);

	return (error);
}
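
/*
 * Usage sketch (editorial addition, not in the original source): a
 * usecount is the long-term reference and is taken while an iocount is
 * already held, then dropped with vnode_rele():
 *
 *	if (vnode_get(vp) == 0) {		// short-term iocount
 *		if (vnode_ref(vp) == 0) {	// long-term usecount
 *			// ... keep vp cached across operations ...
 *			vnode_rele(vp);
 *		}
 *		vnode_put(vp);			// drop the iocount
 *	}
 */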


/*
 * put the vnode on appropriate free list.
 * called with vnode LOCKED
 */
static void
vnode_list_add(vnode_t vp)
{
#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
	/*
	 * if it is already on a list or has non-zero references, return
	 */
	if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE))
		return;

	vnode_list_lock();

	if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
		/*
		 * add the new guy to the appropriate end of the RAGE list
		 */
		if ((vp->v_flag & VAGE))
			TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
		else
			TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);

		vp->v_listflag |= VLIST_RAGE;
		ragevnodes++;

		/*
		 * reset the timestamp for the last inserted vp on the RAGE
		 * queue to let new_vnode know that it's not ok to start stealing
		 * from this list... as long as we're actively adding to this list
		 * we'll push out the vnodes we want to donate to the real free list
		 * once we stop pushing, we'll let some time elapse before we start
		 * stealing them in the new_vnode routine
		 */
		microuptime(&rage_tv);
	} else {
		/*
		 * if VL_DEAD, insert it at head of the dead list
		 * else insert at tail of LRU list or at head if VAGE is set
		 */
		if ((vp->v_lflag & VL_DEAD)) {
			TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
			vp->v_listflag |= VLIST_DEAD;
			deadvnodes++;
		} else if ((vp->v_flag & VAGE)) {
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
			vp->v_flag &= ~VAGE;
			freevnodes++;
		} else {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			freevnodes++;
		}
	}
	vnode_list_unlock();
}


/*
 * remove the vnode from appropriate free list.
 * called with vnode LOCKED and
 * the list lock held
 */
static void
vnode_list_remove_locked(vnode_t vp)
{
	if (VONLIST(vp)) {
		/*
		 * the v_listflag field is
		 * protected by the vnode_list_lock
		 */
		if (vp->v_listflag & VLIST_RAGE)
			VREMRAGE("vnode_list_remove", vp);
		else if (vp->v_listflag & VLIST_DEAD)
			VREMDEAD("vnode_list_remove", vp);
		else
			VREMFREE("vnode_list_remove", vp);
	}
}


/*
 * remove the vnode from appropriate free list.
 * called with vnode LOCKED
 */
static void
vnode_list_remove(vnode_t vp)
{
#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
	/*
	 * we want to avoid taking the list lock
	 * in the case where we're not on the free
	 * list... this will be true for most
	 * directories and any currently in use files
	 *
	 * we're guaranteed that we can't go from
	 * the not-on-list state to the on-list
	 * state since we hold the vnode lock...
	 * all calls to vnode_list_add are done
	 * under the vnode lock... so we can
	 * check for that condition (the prevalent one)
	 * without taking the list lock
	 */
	if (VONLIST(vp)) {
		vnode_list_lock();
		/*
		 * however, we're not guaranteed that
		 * we won't go from the on-list state
		 * to the not-on-list state until we
		 * hold the vnode_list_lock... this
		 * is due to "new_vnode" removing vnodes
		 * from the free list under the list_lock
		 * w/o the vnode lock... so we need to
		 * check again whether we're currently
		 * on the free list
		 */
		vnode_list_remove_locked(vp);

		vnode_list_unlock();
	}
}


void
vnode_rele(vnode_t vp)
{
	vnode_rele_internal(vp, 0, 0, 0);
}


void
vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
{
	vnode_rele_internal(vp, fmode, dont_reenter, 0);
}


void
vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
{
	if ( !locked)
		vnode_lock_spin(vp);
#if DIAGNOSTIC
	else
		lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
	if (--vp->v_usecount < 0)
		panic("vnode_rele_ext: vp %p usecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);

	if (fmode & FWRITE) {
		if (--vp->v_writecount < 0)
			panic("vnode_rele_ext: vp %p writecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
	}
	if (fmode & O_EVTONLY) {
		if (--vp->v_kusecount < 0)
			panic("vnode_rele_ext: vp %p kusecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
	}
	if (vp->v_kusecount > vp->v_usecount)
		panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d).  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);

	if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
		/*
		 * vnode is still busy... if we're the last
		 * usecount, mark for a future call to VNOP_INACTIVE
		 * when the iocount finally drops to 0
		 */
		if (vp->v_usecount == 0) {
			vp->v_lflag |= VL_NEEDINACTIVE;
			vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
		}
		if ( !locked)
			vnode_unlock(vp);
		return;
	}
	vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);

	if ((vp->v_lflag & (VL_TERMINATE | VL_DEAD)) || dont_reenter) {
		/*
		 * vnode is being cleaned, or
		 * we've requested that we don't reenter
		 * the filesystem on this release... in
		 * this case, we'll mark the vnode aged
		 * if it's been marked for termination
		 */
		if (dont_reenter) {
			if ( !(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM)))
				vp->v_lflag |= VL_NEEDINACTIVE;
			vp->v_flag |= VAGE;
		}
		vnode_list_add(vp);
		if ( !locked)
			vnode_unlock(vp);
		return;
	}
	/*
	 * at this point both the iocount and usecount
	 * are zero
	 * pick up an iocount so that we can call
	 * VNOP_INACTIVE with the vnode lock unheld
	 */
	vp->v_iocount++;
#ifdef JOE_DEBUG
	record_vp(vp, 1);
#endif
	vp->v_lflag &= ~VL_NEEDINACTIVE;
	vnode_unlock(vp);

	VNOP_INACTIVE(vp, vfs_context_current());

	vnode_lock_spin(vp);
	/*
	 * because we dropped the vnode lock to call VNOP_INACTIVE
	 * the state of the vnode may have changed... we may have
	 * picked up an iocount, usecount or the MARKTERM may have
	 * been set... we need to reevaluate the reference counts
	 * to determine if we can call vnode_reclaim_internal at
	 * this point... if the reference counts are up, we'll pick
	 * up the MARKTERM state when they get subsequently dropped
	 */
	if ((vp->v_iocount == 1) && (vp->v_usecount == 0) &&
	    ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
		struct uthread *ut;

		ut = get_bsdthread_info(current_thread());

		if (ut->uu_defer_reclaims) {
			vp->v_defer_reclaimlist = ut->uu_vreclaims;
			ut->uu_vreclaims = vp;
			goto defer_reclaim;
		}
		vnode_lock_convert(vp);
		vnode_reclaim_internal(vp, 1, 1, 0);
	}
	vnode_dropiocount(vp);
	vnode_list_add(vp);
defer_reclaim:
	if ( !locked)
		vnode_unlock(vp);
	return;
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#if DIAGNOSTIC
int busyprt = 0;	/* print out busy vnodes */
#if 0
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif /* 0 */
#endif

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vnode *vp;
	int busy = 0;
	int reclaimed = 0;
	int retval;
	unsigned int vid;

	mount_lock(mp);
	vnode_iterate_setup(mp);
	/*
	 * On regular unmounts (not forced) do a
	 * quick check for vnodes to be in use. This
	 * preserves the caching of vnodes. The automounter
	 * tries unmounting every so often to see whether
	 * it is still busy or not.
	 */
	if (((flags & FORCECLOSE) == 0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) {
		if (vnode_umount_preflight(mp, skipvp, flags)) {
			vnode_iterate_clear(mp);
			mount_unlock(mp);
			return (EBUSY);
		}
	}
loop:
	/* if it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		return (retval);
	}

	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {

		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);

		if ((vp->v_mount != mp) || (vp == skipvp)) {
			continue;
		}
		vid = vp->v_id;
		mount_unlock(mp);

		vnode_lock_spin(vp);

		if ((vp->v_id != vid) || ((vp->v_lflag & (VL_DEAD | VL_TERMINATE)))) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}

		/*
		 * If requested, skip over vnodes marked VSYSTEM.
		 * Skip over all vnodes marked VNOFLUSH.
		 */
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
		    (vp->v_flag & VNOFLUSH))) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VSWAP.
		 */
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VROOT.
		 */
		if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If the real usecount is 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (((vp->v_usecount == 0) ||
		    ((vp->v_usecount - vp->v_kusecount) == 0))) {

			vnode_lock_convert(vp);
			vp->v_iocount++;	/* so that drain waits for other iocounts */
#ifdef JOE_DEBUG
			record_vp(vp, 1);
#endif
			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_dropiocount(vp);
			vnode_list_add(vp);
			vnode_unlock(vp);

			reclaimed++;
			mount_lock(mp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			vnode_lock_convert(vp);

			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vp->v_iocount++;	/* so that drain waits for other iocounts */
#ifdef JOE_DEBUG
				record_vp(vp, 1);
#endif
				vnode_reclaim_internal(vp, 1, 1, 0);
				vnode_dropiocount(vp);
				vnode_list_add(vp);
				vnode_unlock(vp);
			} else {
				vclean(vp, 0);
				vp->v_lflag &= ~VL_DEAD;
				vp->v_op = spec_vnodeop_p;
				vp->v_flag |= VDEVFLUSH;
				vnode_unlock(vp);
			}
			mount_lock(mp);
			continue;
		}
#if DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		vnode_unlock(vp);
		mount_lock(mp);
		busy++;
	}

	/* At this point the worker queue is completed */
	if (busy && ((flags & FORCECLOSE) == 0) && reclaimed) {
		busy = 0;
		reclaimed = 0;
		(void)vnode_iterate_reloadq(mp);
		/* returned with mount lock held */
		goto loop;
	}

	/* if new vnodes were created in between, retry the reclaim */
	if (vnode_iterate_reloadq(mp) != 0) {
		if (!(busy && ((flags & FORCECLOSE) == 0)))
			goto loop;
	}
	vnode_iterate_clear(mp);
	mount_unlock(mp);

	if (busy && ((flags & FORCECLOSE) == 0))
		return (EBUSY);
	return (0);
}

long num_recycledvnodes = 0;
/*
 * Disassociate the underlying file system from a vnode.
 * The vnode lock is held on entry.
 */
static void
vclean(vnode_t vp, int flags)
{
	vfs_context_t ctx = vfs_context_current();
	int active;
	int need_inactive;
	int already_terminating;
	int clflags = 0;
#if NAMEDSTREAMS
	int is_namedstream;
#endif

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	active = vp->v_usecount;

	/*
	 * just in case we missed sending a needed
	 * VNOP_INACTIVE, we'll do it now
	 */
	need_inactive = (vp->v_lflag & VL_NEEDINACTIVE);

	vp->v_lflag &= ~VL_NEEDINACTIVE;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	already_terminating = (vp->v_lflag & VL_TERMINATE);

	vp->v_lflag |= VL_TERMINATE;

	/*
	 * remove the vnode from any mount list
	 * it might be on...
	 */
	insmntque(vp, (struct mount *)0);

#if NAMEDSTREAMS
	is_namedstream = vnode_isnamedstream(vp);
#endif

	vnode_unlock(vp);

	OSAddAtomicLong(1, &num_recycledvnodes);

	if (flags & DOCLOSE)
		clflags |= IO_NDELAY;
	if (flags & REVOKEALL)
		clflags |= IO_REVOKE;

	if (active && (flags & DOCLOSE))
		VNOP_CLOSE(vp, clflags, ctx);

	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE) {
#if NFSCLIENT
		if (vp->v_tag == VT_NFS)
			nfs_vinvalbuf(vp, V_SAVE, ctx, 0);
		else
#endif
		{
			VNOP_FSYNC(vp, MNT_WAIT, ctx);
			buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
		}
		if (UBCINFOEXISTS(vp))
			/*
			 * Clean the pages in VM.
			 */
			(void)ubc_sync_range(vp, (off_t)0, ubc_getsize(vp), UBC_PUSHALL);
	}
	if (active || need_inactive)
		VNOP_INACTIVE(vp, ctx);

#if NAMEDSTREAMS
	if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
		vnode_t pvp = vp->v_parent;

		/* Delete the shadow stream file before we reclaim its vnode */
		if (vnode_isshadow(vp)) {
			vnode_relenamedstream(pvp, vp, ctx);
		}

		/*
		 * Because vclean calls VNOP_INACTIVE prior to calling vnode_relenamedstream, we may not have
		 * torn down and/or deleted the shadow file yet.  On HFS, if the shadow file is sufficiently large
		 * and occupies a large number of extents, the deletion will be deferred until VNOP_INACTIVE
		 * and the file treated like an open-unlinked.  To rectify this, call VNOP_INACTIVE again
		 * explicitly to force its removal.
		 */
		if (vnode_isshadow(vp)) {
			VNOP_INACTIVE(vp, ctx);
		}

		/*
		 * No more streams associated with the parent.  We
		 * have a ref on it, so its identity is stable.
		 * If the parent is on an opaque volume, then we need to know
		 * whether it has associated named streams.
		 */
		if (vfs_authopaque(pvp->v_mount)) {
			vnode_lock_spin(pvp);
			pvp->v_lflag &= ~VL_HASSTREAMS;
			vnode_unlock(pvp);
		}
	}
#endif

	/*
	 * Destroy ubc named reference
	 * cluster_release is done on this path
	 * along with dropping the reference on the ucred
	 */
	ubc_destroy_named(vp);

	/*
	 * Reclaim the vnode.
	 */
	if (VNOP_RECLAIM(vp, ctx))
		panic("vclean: cannot reclaim");

	// make sure the name & parent ptrs get cleaned out!
	vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE);

	vnode_lock(vp);

	vp->v_mount = dead_mountp;
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_data = NULL;

	vp->v_lflag |= VL_DEAD;

	if (already_terminating == 0) {
		vp->v_lflag &= ~VL_TERMINATE;
		/*
		 * Done with purge, notify sleepers of the grim news.
		 */
		if (vp->v_lflag & VL_TERMWANT) {
			vp->v_lflag &= ~VL_TERMWANT;
			wakeup(&vp->v_lflag);
		}
	}
}
2104
2105 /*
2106 * Eliminate all activity associated with the requested vnode
2107 * and with all vnodes aliased to the requested vnode.
2108 */
2109 int
2110 #if DIAGNOSTIC
2111 vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
2112 #else
2113 vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
2114 #endif
2115 {
2116 struct vnode *vq;
2117 int vid;
2118
2119 #if DIAGNOSTIC
2120 if ((flags & REVOKEALL) == 0)
2121 panic("vnop_revoke");
2122 #endif
2123
2124 if (vnode_isaliased(vp)) {
2125 /*
2126 * If a vgone (or vclean) is already in progress,
2127 * return an immediate error
2128 */
2129 if (vp->v_lflag & VL_TERMINATE)
2130 return(ENOENT);
2131
2132 /*
2133 * Ensure that vp will not be vgone'd while we
2134 * are eliminating its aliases.
2135 */
2136 SPECHASH_LOCK();
2137 while ((vp->v_specflags & SI_ALIASED)) {
2138 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2139 if (vq->v_rdev != vp->v_rdev ||
2140 vq->v_type != vp->v_type || vp == vq)
2141 continue;
2142 vid = vq->v_id;
2143 SPECHASH_UNLOCK();
2144 if (vnode_getwithvid(vq,vid)){
2145 SPECHASH_LOCK();
2146 break;
2147 }
2148 vnode_reclaim_internal(vq, 0, 1, 0);
2149 vnode_put(vq);
2150 SPECHASH_LOCK();
2151 break;
2152 }
2153 }
2154 SPECHASH_UNLOCK();
2155 }
2156 vnode_reclaim_internal(vp, 0, 0, REVOKEALL);
2157
2158 return (0);
2159 }
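/*
 * Example: vn_revoke() is the kernel side of revoke(2). A minimal
 * user-space sketch (not part of this file; the tty path is
 * illustrative) of the classic consumer:
 */
#if 0
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * Forcibly invalidate every descriptor open on the terminal;
	 * subsequent I/O on the old descriptors fails, which is how a
	 * login process reclaims a tty.
	 */
	if (revoke("/dev/ttys000") == -1) {
		perror("revoke");
		return 1;
	}
	return 0;
}
#endif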
2160
2161 /*
2162 * Recycle an unused vnode to the front of the free list.
2163 * If the vnode is still in use, it is merely marked for termination.
2164 */
2165 int
2166 vnode_recycle(struct vnode *vp)
2167 {
2168 vnode_lock_spin(vp);
2169
2170 if (vp->v_iocount || vp->v_usecount) {
2171 vp->v_lflag |= VL_MARKTERM;
2172 vnode_unlock(vp);
2173 return(0);
2174 }
2175 vnode_lock_convert(vp);
2176 vnode_reclaim_internal(vp, 1, 0, 0);
2177
2178 vnode_unlock(vp);
2179
2180 return (1);
2181 }
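/*
 * Example: a hedged in-kernel sketch of using the exported
 * vnode_recycle() interface; the helper name is hypothetical.
 * If the vnode is busy, vnode_recycle() marks it VL_MARKTERM and
 * returns 0 (reclaim happens when the last reference drains);
 * if idle, it is reclaimed immediately and 1 is returned.
 */
#if 0
static int
my_fs_discard_vnode(vnode_t vp)	/* hypothetical helper */
{
	if (vnode_recycle(vp) == 1) {
		/* reclaimed on the spot */
		return 0;
	}
	/* deferred: the vnode will be reclaimed at its last vnode_put() */
	return 0;
}
#endif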
2182
2183 static int
2184 vnode_reload(vnode_t vp)
2185 {
2186 vnode_lock_spin(vp);
2187
2188 if ((vp->v_iocount > 1) || vp->v_usecount) {
2189 vnode_unlock(vp);
2190 return(0);
2191 }
2192 if (vp->v_iocount <= 0)
2193 panic("vnode_reload with no iocount %d", vp->v_iocount);
2194
2195 /* mark for release when the iocount is dropped */
2196 vp->v_lflag |= VL_MARKTERM;
2197 vnode_unlock(vp);
2198
2199 return (1);
2200 }
2201
2202
2203 static void
2204 vgone(vnode_t vp, int flags)
2205 {
2206 struct vnode *vq;
2207 struct vnode *vx;
2208
2209 /*
2210 * Clean out the filesystem specific data.
2211 * vclean also takes care of removing the
2212 * vnode from any mount list it might be on
2213 */
2214 vclean(vp, flags | DOCLOSE);
2215
2216 /*
2217 * If special device, remove it from special device alias list
2218 * if it is on one.
2219 */
2220 if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
2221 SPECHASH_LOCK();
2222 if (*vp->v_hashchain == vp) {
2223 *vp->v_hashchain = vp->v_specnext;
2224 } else {
2225 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2226 if (vq->v_specnext != vp)
2227 continue;
2228 vq->v_specnext = vp->v_specnext;
2229 break;
2230 }
2231 if (vq == NULL)
2232 panic("missing bdev");
2233 }
2234 if (vp->v_specflags & SI_ALIASED) {
2235 vx = NULL;
2236 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2237 if (vq->v_rdev != vp->v_rdev ||
2238 vq->v_type != vp->v_type)
2239 continue;
2240 if (vx)
2241 break;
2242 vx = vq;
2243 }
2244 if (vx == NULL)
2245 panic("missing alias");
2246 if (vq == NULL)
2247 vx->v_specflags &= ~SI_ALIASED;
2248 vp->v_specflags &= ~SI_ALIASED;
2249 }
2250 SPECHASH_UNLOCK();
2251 {
2252 struct specinfo *tmp = vp->v_specinfo;
2253 vp->v_specinfo = NULL;
2254 FREE_ZONE((void *)tmp, sizeof(struct specinfo), M_SPECINFO);
2255 }
2256 }
2257 }
2258
2259 /*
2260 * Check if the device identified by (dev, type) has a filesystem mounted on it.
2261 */
2262 int
2263 check_mountedon(dev_t dev, enum vtype type, int *errorp)
2264 {
2265 vnode_t vp;
2266 int rc = 0;
2267 int vid;
2268
2269 loop:
2270 SPECHASH_LOCK();
2271 for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
2272 if (dev != vp->v_rdev || type != vp->v_type)
2273 continue;
2274 vid = vp->v_id;
2275 SPECHASH_UNLOCK();
2276 if (vnode_getwithvid(vp,vid))
2277 goto loop;
2278 vnode_lock_spin(vp);
2279 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
2280 vnode_unlock(vp);
2281 if ((*errorp = vfs_mountedon(vp)) != 0)
2282 rc = 1;
2283 } else
2284 vnode_unlock(vp);
2285 vnode_put(vp);
2286 return(rc);
2287 }
2288 SPECHASH_UNLOCK();
2289 return (0);
2290 }
2291
2292 /*
2293 * Calculate the total number of references to a special device.
2294 */
2295 int
2296 vcount(vnode_t vp)
2297 {
2298 vnode_t vq, vnext;
2299 int count;
2300 int vid;
2301
2302 loop:
2303 if (!vnode_isaliased(vp))
2304 return (vp->v_usecount - vp->v_kusecount);
2305 count = 0;
2306
2307 SPECHASH_LOCK();
2308 /*
2309 * Grab first vnode and its vid.
2310 */
2311 vq = *vp->v_hashchain;
2312 vid = vq ? vq->v_id : 0;
2313
2314 SPECHASH_UNLOCK();
2315
2316 while (vq) {
2317 /*
2318 * Attempt to get the vnode outside the SPECHASH lock.
2319 */
2320 if (vnode_getwithvid(vq, vid)) {
2321 goto loop;
2322 }
2323 vnode_lock(vq);
2324
2325 if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) {
2326 if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) {
2327 /*
2328 * Alias, but not in use, so flush it out.
2329 */
2330 vnode_reclaim_internal(vq, 1, 1, 0);
2331 vnode_put_locked(vq);
2332 vnode_unlock(vq);
2333 goto loop;
2334 }
2335 count += (vq->v_usecount - vq->v_kusecount);
2336 }
2337 vnode_unlock(vq);
2338
2339 SPECHASH_LOCK();
2340 /*
2341 * must do this with the reference still held on 'vq'
2342 * so that it can't be destroyed while we're poking
2343 * through v_specnext
2344 */
2345 vnext = vq->v_specnext;
2346 vid = vnext ? vnext->v_id : 0;
2347
2348 SPECHASH_UNLOCK();
2349
2350 vnode_put(vq);
2351
2352 vq = vnext;
2353 }
2354
2355 return (count);
2356 }
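/*
 * Example: device close routines typically consult vcount() to decide
 * whether this is the "last close" across every alias vnode of the
 * same dev_t. A hedged sketch (the helper name is hypothetical):
 */
#if 0
static int
mydev_is_last_close(vnode_t vp)	/* hypothetical helper */
{
	if (vcount(vp) > 0)
		return 0;	/* other aliases are still open */
	return 1;		/* safe to tear down per-device state */
}
#endif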
2357
2358 int prtactive = 0; /* 1 => print out reclaim of active vnodes */
2359
2360 /*
2361 * Print out a description of a vnode.
2362 */
2363 static const char *typename[] =
2364 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
2365
2366 void
2367 vprint(const char *label, struct vnode *vp)
2368 {
2369 char sbuf[64];
2370
2371 if (label != NULL)
2372 printf("%s: ", label);
2373 printf("type %s, usecount %d, writecount %d",
2374 typename[vp->v_type], vp->v_usecount, vp->v_writecount);
2375 sbuf[0] = '\0';
2376 if (vp->v_flag & VROOT)
2377 strlcat(sbuf, "|VROOT", sizeof(sbuf));
2378 if (vp->v_flag & VTEXT)
2379 strlcat(sbuf, "|VTEXT", sizeof(sbuf));
2380 if (vp->v_flag & VSYSTEM)
2381 strlcat(sbuf, "|VSYSTEM", sizeof(sbuf));
2382 if (vp->v_flag & VNOFLUSH)
2383 strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf));
2384 if (vp->v_flag & VBWAIT)
2385 strlcat(sbuf, "|VBWAIT", sizeof(sbuf));
2386 if (vnode_isaliased(vp))
2387 strlcat(sbuf, "|VALIASED", sizeof(sbuf));
2388 if (sbuf[0] != '\0')
2389 printf(" flags (%s)", &sbuf[1]);
2390 }
2391
2392
2393 int
2394 vn_getpath(struct vnode *vp, char *pathbuf, int *len)
2395 {
2396 return build_path(vp, pathbuf, *len, len, BUILDPATH_NO_FS_ENTER, vfs_context_current());
2397 }
2398
2399 int
2400 vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
2401 {
2402 return build_path(vp, pathbuf, *len, len, 0, vfs_context_current());
2403 }
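/*
 * Example: a minimal in-kernel sketch of calling vn_getpath(). The
 * caller supplies the buffer and passes its size in *len; on success
 * *len is updated to the length actually used.
 */
#if 0
static void
log_vnode_path(vnode_t vp)	/* hypothetical helper */
{
	char path[MAXPATHLEN];
	int  len = sizeof(path);

	if (vn_getpath(vp, path, &len) == 0)
		printf("vnode %p -> %s\n", vp, path);
}
#endif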
2404
2405 int
2406 vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
2407 {
2408 return ubc_cs_getcdhash(vp, offset, cdhash);
2409 }
2410
2411
2412 static char *extension_table=NULL;
2413 static int nexts;
2414 static int max_ext_width;
2415
2416 static int
2417 extension_cmp(const void *a, const void *b)
2418 {
2419 return (strlen((const char *)a) - strlen((const char *)b));
2420 }
2421
2422
2423 //
2424 // This is the api LaunchServices uses to inform the kernel
2425 // of the list of package extensions to ignore.
2426 //
2427 // Internally we keep the list sorted by the length of
2428 // the extension (extension_cmp orders shortest to longest). We sort the
2429 // list of extensions so that we can speed up our searches
2430 // when comparing file names -- we only compare extensions
2431 // that could possibly fit into the file name, not all of
2432 // them (i.e. a short 8 character name can't have an 8
2433 // character extension).
2434 //
2435 extern lck_mtx_t *pkg_extensions_lck;
2436
2437 __private_extern__ int
2438 set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
2439 {
2440 char *new_exts, *old_exts;
2441 int error;
2442
2443 if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
2444 return EINVAL;
2445 }
2446
2447
2448 // allocate one byte extra so we can guarantee null termination
2449 MALLOC(new_exts, char *, (nentries * maxwidth) + 1, M_TEMP, M_WAITOK);
2450 if (new_exts == NULL) {
2451 return ENOMEM;
2452 }
2453
2454 error = copyin(data, new_exts, nentries * maxwidth);
2455 if (error) {
2456 FREE(new_exts, M_TEMP);
2457 return error;
2458 }
2459
2460 new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block
2461
2462 qsort(new_exts, nentries, maxwidth, extension_cmp);
2463
2464 lck_mtx_lock(pkg_extensions_lck);
2465
2466 old_exts = extension_table;
2467 extension_table = new_exts;
2468 nexts = nentries;
2469 max_ext_width = maxwidth;
2470
2471 lck_mtx_unlock(pkg_extensions_lck);
2472
2473 if (old_exts) {
2474 FREE(old_exts, M_TEMP);
2475 }
2476
2477 return 0;
2478 }
2479
2480
2481 __private_extern__ int
2482 is_package_name(const char *name, int len)
2483 {
2484 int i, extlen;
2485 const char *ptr, *name_ext;
2486
2487 if (len <= 3) {
2488 return 0;
2489 }
2490
2491 name_ext = NULL;
2492 for(ptr=name; *ptr != '\0'; ptr++) {
2493 if (*ptr == '.') {
2494 name_ext = ptr;
2495 }
2496 }
2497
2498 // if there is no "." extension, it can't match
2499 if (name_ext == NULL) {
2500 return 0;
2501 }
2502
2503 // advance over the "."
2504 name_ext++;
2505
2506 lck_mtx_lock(pkg_extensions_lck);
2507
2508 // now iterate over all the extensions to see if any match
2509 ptr = &extension_table[0];
2510 for(i=0; i < nexts; i++, ptr+=max_ext_width) {
2511 extlen = strlen(ptr);
2512 if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
2513 // aha, a match!
2514 lck_mtx_unlock(pkg_extensions_lck);
2515 return 1;
2516 }
2517 }
2518
2519 lck_mtx_unlock(pkg_extensions_lck);
2520
2521 // if we get here, no extension matched
2522 return 0;
2523 }
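/*
 * Example: a self-contained user-space sketch of the packed,
 * fixed-width extension table and the match logic above. Every name
 * here is local to the example; it only mirrors the layout that
 * set_package_extensions_table() builds and is_package_name() scans.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

#define MAXWIDTH 16	/* stand-in for max_ext_width */

static int
ext_cmp(const void *a, const void *b)	/* same idea as extension_cmp */
{
	return (int)(strlen((const char *)a) - strlen((const char *)b));
}

int
main(void)
{
	/* three entries padded to a fixed width, like the kernel table */
	char table[3][MAXWIDTH] = { "bundle", "app", "framework" };
	const char *name = "Foo.app";
	const char *name_ext = strrchr(name, '.') + 1;
	int i;

	qsort(table, 3, MAXWIDTH, ext_cmp);

	for (i = 0; i < 3; i++) {
		size_t extlen = strlen(table[i]);
		if (strncasecmp(name_ext, table[i], extlen) == 0 &&
		    name_ext[extlen] == '\0') {
			printf("%s is a package name\n", name);
			break;
		}
	}
	return 0;
}
#endif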
2524
2525 int
2526 vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
2527 {
2528 char *ptr, *end;
2529 int comp=0;
2530
2531 *component = -1;
2532 if (*path != '/') {
2533 return EINVAL;
2534 }
2535
2536 end = path + 1;
2537 while(end < path + pathlen && *end != '\0') {
2538 while(end < path + pathlen && *end == '/' && *end != '\0') {
2539 end++;
2540 }
2541
2542 ptr = end;
2543
2544 while(end < path + pathlen && *end != '/' && *end != '\0') {
2545 end++;
2546 }
2547
2548 if (end > path + pathlen) {
2549 // hmm, string wasn't null terminated
2550 return EINVAL;
2551 }
2552
2553 *end = '\0';
2554 if (is_package_name(ptr, end - ptr)) {
2555 *component = comp;
2556 break;
2557 }
2558
2559 end++;
2560 comp++;
2561 }
2562
2563 return 0;
2564 }
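/*
 * Example: a hedged sketch of calling vn_path_package_check(). Note
 * that the routine NUL-terminates components in place, so the caller
 * should hand it a scratch copy of the path. The helper name is
 * hypothetical.
 */
#if 0
static int
path_contains_package(char *scratch_path, int pathlen)
{
	int comp = -1;

	if (vn_path_package_check(NULLVP, scratch_path, pathlen, &comp) == 0 &&
	    comp >= 0)
		return 1;	/* component 'comp' is a package directory */
	return 0;
}
#endif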
2565
2566 /*
2567 * Determine if a name is inappropriate for a searchfs query.
2568 * Currently the list consists only of "System".
2569 */
2570
2571 int vn_searchfs_inappropriate_name(const char *name, int len) {
2572 const char *bad_names[] = { "System" };
2573 int bad_len[] = { 6 };
2574 int i;
2575
2576 for(i=0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) {
2577 if (len == bad_len[i] && strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) {
2578 return 1;
2579 }
2580 }
2581
2582 // if we get here, no name matched
2583 return 0;
2584 }
2585
2586 /*
2587 * Top level filesystem related information gathering.
2588 */
2589 extern unsigned int vfs_nummntops;
2590
2591 int
2592 vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
2593 user_addr_t newp, size_t newlen, proc_t p)
2594 {
2595 struct vfstable *vfsp;
2596 int *username;
2597 u_int usernamelen;
2598 int error;
2599 struct vfsconf vfsc;
2600
2601 /* All non-VFS_GENERIC names, and within VFS_GENERIC the
2602 * VFS_MAXTYPENUM, VFS_CONF and VFS_SET_PACKAGE_EXTS selectors,
2603 * require root privilege in order to be modified.
2604 * For the rest, userland_sysctl(CTLFLAG_ANYBODY) covers the check.
2605 */
2606 if ((newp != USER_ADDR_NULL) && ((name[0] != VFS_GENERIC) ||
2607 ((name[1] == VFS_MAXTYPENUM) ||
2608 (name[1] == VFS_CONF) ||
2609 (name[1] == VFS_SET_PACKAGE_EXTS)))
2610 && (error = suser(kauth_cred_get(), &p->p_acflag))) {
2611 return(error);
2612 }
2613 /*
2614 * The VFS_NUMMNTOPS shouldn't be at name[0] since it
2615 * is a VFS generic variable. So now we must check
2616 * namelen so we don't end up covering any UFS
2617 * variables (since UFS vfc_typenum is 1).
2618 *
2619 * It should have been:
2620 * name[0]: VFS_GENERIC
2621 * name[1]: VFS_NUMMNTOPS
2622 */
2623 if (namelen == 1 && name[0] == VFS_NUMMNTOPS) {
2624 return (sysctl_rdint(oldp, oldlenp, newp, vfs_nummntops));
2625 }
2626
2627 /* all sysctl names at this level are at least name and field */
2628 if (namelen < 2)
2629 return (EISDIR); /* overloaded */
2630 if (name[0] != VFS_GENERIC) {
2631
2632 mount_list_lock();
2633 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2634 if (vfsp->vfc_typenum == name[0]) {
2635 vfsp->vfc_refcount++;
2636 break;
2637 }
2638 mount_list_unlock();
2639
2640 if (vfsp == NULL)
2641 return (ENOTSUP);
2642
2643 /* XXX current context proxy for proc p? */
2644 error = ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
2645 oldp, oldlenp, newp, newlen,
2646 vfs_context_current()));
2647
2648 mount_list_lock();
2649 vfsp->vfc_refcount--;
2650 mount_list_unlock();
2651 return error;
2652 }
2653 switch (name[1]) {
2654 case VFS_MAXTYPENUM:
2655 return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
2656 case VFS_CONF:
2657 if (namelen < 3)
2658 return (ENOTDIR); /* overloaded */
2659
2660 mount_list_lock();
2661 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2662 if (vfsp->vfc_typenum == name[2])
2663 break;
2664
2665 if (vfsp == NULL) {
2666 mount_list_unlock();
2667 return (ENOTSUP);
2668 }
2669
2670 vfsc.vfc_reserved1 = 0;
2671 bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
2672 vfsc.vfc_typenum = vfsp->vfc_typenum;
2673 vfsc.vfc_refcount = vfsp->vfc_refcount;
2674 vfsc.vfc_flags = vfsp->vfc_flags;
2675 vfsc.vfc_reserved2 = 0;
2676 vfsc.vfc_reserved3 = 0;
2677
2678 mount_list_unlock();
2679 return (sysctl_rdstruct(oldp, oldlenp, newp, &vfsc,
2680 sizeof(struct vfsconf)));
2681
2682 case VFS_SET_PACKAGE_EXTS:
2683 return set_package_extensions_table((user_addr_t)((unsigned)name[1]), name[2], name[3]);
2684 }
2685 /*
2686 * We need to get back into the general MIB, so we need to re-prepend
2687 * CTL_VFS to our name and try userland_sysctl().
2688 */
2689 usernamelen = namelen + 1;
2690 MALLOC(username, int *, usernamelen * sizeof(*username),
2691 M_TEMP, M_WAITOK);
2692 bcopy(name, username + 1, namelen * sizeof(*name));
2693 username[0] = CTL_VFS;
2694 error = userland_sysctl(p, username, usernamelen, oldp,
2695 oldlenp, newp, newlen, oldlenp);
2696 FREE(username, M_TEMP);
2697 return (error);
2698 }
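/*
 * Example: the generic selectors above are reachable from user space
 * with a { CTL_VFS, VFS_GENERIC, ... } name vector. A minimal sketch
 * (user-space, not part of this file) reading VFS_MAXTYPENUM:
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <stdio.h>

int
main(void)
{
	int mib[3] = { CTL_VFS, VFS_GENERIC, VFS_MAXTYPENUM };
	int maxtype = 0;
	size_t len = sizeof(maxtype);

	if (sysctl(mib, 3, &maxtype, &len, NULL, 0) == -1) {
		perror("sysctl");
		return 1;
	}
	printf("highest filesystem type number: %d\n", maxtype);
	return 0;
}
#endif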
2699
2700 /*
2701 * Dump vnode list (via sysctl) - defunct
2702 * use "pstat" instead
2703 */
2704 /* ARGSUSED */
2705 int
2706 sysctl_vnode
2707 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
2708 {
2709 return(EINVAL);
2710 }
2711
2712 SYSCTL_PROC(_kern, KERN_VNODE, vnode,
2713 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MASKED,
2714 0, 0, sysctl_vnode, "S,", "");
2715
2716
2717 /*
2718 * Check to see if a filesystem is mounted on a block device.
2719 */
2720 int
2721 vfs_mountedon(struct vnode *vp)
2722 {
2723 struct vnode *vq;
2724 int error = 0;
2725
2726 SPECHASH_LOCK();
2727 if (vp->v_specflags & SI_MOUNTEDON) {
2728 error = EBUSY;
2729 goto out;
2730 }
2731 if (vp->v_specflags & SI_ALIASED) {
2732 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2733 if (vq->v_rdev != vp->v_rdev ||
2734 vq->v_type != vp->v_type)
2735 continue;
2736 if (vq->v_specflags & SI_MOUNTEDON) {
2737 error = EBUSY;
2738 break;
2739 }
2740 }
2741 }
2742 out:
2743 SPECHASH_UNLOCK();
2744 return (error);
2745 }
2746
2747 /*
2748 * Unmount all filesystems. The list is traversed in reverse order
2749 * of mounting to avoid dependencies.
2750 */
2751 __private_extern__ void
2752 vfs_unmountall(void)
2753 {
2754 struct mount *mp;
2755 int error;
2756
2757 /*
2758 * Since this only runs when rebooting, it is not interlocked.
2759 */
2760 mount_list_lock();
2761 while(!TAILQ_EMPTY(&mountlist)) {
2762 mp = TAILQ_LAST(&mountlist, mntlist);
2763 mount_list_unlock();
2764 error = dounmount(mp, MNT_FORCE, 0, vfs_context_current());
2765 if ((error != 0) && (error != EBUSY)) {
2766 printf("unmount of %s failed (", mp->mnt_vfsstat.f_mntonname);
2767 printf("%d)\n", error);
2768 mount_list_lock();
2769 TAILQ_REMOVE(&mountlist, mp, mnt_list);
2770 continue;
2771 } else if (error == EBUSY) {
2772 /* If EBUSY is returned, the unmount was already in progress */
2773 printf("unmount of %p failed (", mp);
2774 printf("BUSY)\n");
2775 }
2776 mount_list_lock();
2777 }
2778 mount_list_unlock();
2779 }
2780
2781
2782 /*
2783 * This routine is called from vnode_pager_deallocate out of the VM
2784 * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
2785 * on a vnode that has a UBCINFO
2786 */
2787 __private_extern__ void
2788 vnode_pager_vrele(vnode_t vp)
2789 {
2790 struct ubc_info *uip;
2791
2792 vnode_lock_spin(vp);
2793
2794 vp->v_lflag &= ~VNAMED_UBC;
2795
2796 uip = vp->v_ubcinfo;
2797 vp->v_ubcinfo = UBC_INFO_NULL;
2798
2799 vnode_unlock(vp);
2800
2801 ubc_info_deallocate(uip);
2802 }
2803
2804
2805 #include <sys/disk.h>
2806
2807 errno_t
2808 vfs_init_io_attributes(vnode_t devvp, mount_t mp)
2809 {
2810 int error;
2811 off_t readblockcnt = 0;
2812 off_t writeblockcnt = 0;
2813 off_t readmaxcnt = 0;
2814 off_t writemaxcnt = 0;
2815 off_t readsegcnt = 0;
2816 off_t writesegcnt = 0;
2817 off_t readsegsize = 0;
2818 off_t writesegsize = 0;
2819 off_t alignment = 0;
2820 off_t ioqueue_depth = 0;
2821 u_int32_t blksize;
2822 u_int64_t temp;
2823 u_int32_t features;
2824 vfs_context_t ctx = vfs_context_current();
2825 int isssd = 0;
2826 int isvirtual = 0;
2827 /*
2828 * determine if this mount point exists on the same device as the root
2829 * partition... if so, then it comes under the hard throttle control
2830 */
2831 int thisunit = -1;
2832 static int rootunit = -1;
2833
2834 if (rootunit == -1) {
2835 if (VNOP_IOCTL(rootvp, DKIOCGETBSDUNIT, (caddr_t)&rootunit, 0, ctx))
2836 rootunit = -1;
2837 else if (rootvp == devvp)
2838 mp->mnt_kern_flag |= MNTK_ROOTDEV;
2839 }
2840 if (devvp != rootvp && rootunit != -1) {
2841 if (VNOP_IOCTL(devvp, DKIOCGETBSDUNIT, (caddr_t)&thisunit, 0, ctx) == 0) {
2842 if (thisunit == rootunit)
2843 mp->mnt_kern_flag |= MNTK_ROOTDEV;
2844 }
2845 }
2846 /*
2847 * force the spec device to re-cache
2848 * the underlying block size in case
2849 * the filesystem overrode the initial value
2850 */
2851 set_fsblocksize(devvp);
2852
2853
2854 if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE,
2855 (caddr_t)&blksize, 0, ctx)))
2856 return (error);
2857
2858 mp->mnt_devblocksize = blksize;
2859
2860 /*
2861 * set the maximum possible I/O size
2862 * this may get clipped to a smaller value
2863 * based on which constraints are being advertised
2864 * and if those advertised constraints result in a smaller
2865 * limit for a given I/O
2866 */
2867 mp->mnt_maxreadcnt = MAX_UPL_SIZE * PAGE_SIZE;
2868 mp->mnt_maxwritecnt = MAX_UPL_SIZE * PAGE_SIZE;
2869
2870 if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
2871 if (isvirtual)
2872 mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
2873 }
2874 if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) {
2875 if (isssd)
2876 mp->mnt_kern_flag |= MNTK_SSD;
2877 }
2878
2879 if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
2880 (caddr_t)&features, 0, ctx)))
2881 return (error);
2882
2883 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD,
2884 (caddr_t)&readblockcnt, 0, ctx)))
2885 return (error);
2886
2887 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE,
2888 (caddr_t)&writeblockcnt, 0, ctx)))
2889 return (error);
2890
2891 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD,
2892 (caddr_t)&readmaxcnt, 0, ctx)))
2893 return (error);
2894
2895 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE,
2896 (caddr_t)&writemaxcnt, 0, ctx)))
2897 return (error);
2898
2899 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD,
2900 (caddr_t)&readsegcnt, 0, ctx)))
2901 return (error);
2902
2903 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE,
2904 (caddr_t)&writesegcnt, 0, ctx)))
2905 return (error);
2906
2907 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD,
2908 (caddr_t)&readsegsize, 0, ctx)))
2909 return (error);
2910
2911 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE,
2912 (caddr_t)&writesegsize, 0, ctx)))
2913 return (error);
2914
2915 if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT,
2916 (caddr_t)&alignment, 0, ctx)))
2917 return (error);
2918
2919 if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
2920 (caddr_t)&ioqueue_depth, 0, ctx)))
2921 return (error);
2922
2923 if (readmaxcnt)
2924 mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt;
2925
2926 if (readblockcnt) {
2927 temp = readblockcnt * blksize;
2928 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
2929
2930 if (temp < mp->mnt_maxreadcnt)
2931 mp->mnt_maxreadcnt = (u_int32_t)temp;
2932 }
2933
2934 if (writemaxcnt)
2935 mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : writemaxcnt;
2936
2937 if (writeblockcnt) {
2938 temp = writeblockcnt * blksize;
2939 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
2940
2941 if (temp < mp->mnt_maxwritecnt)
2942 mp->mnt_maxwritecnt = (u_int32_t)temp;
2943 }
2944
2945 if (readsegcnt) {
2946 temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
2947 } else {
2948 temp = mp->mnt_maxreadcnt / PAGE_SIZE;
2949
2950 if (temp > UINT16_MAX)
2951 temp = UINT16_MAX;
2952 }
2953 mp->mnt_segreadcnt = (u_int16_t)temp;
2954
2955 if (writesegcnt) {
2956 temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
2957 } else {
2958 temp = mp->mnt_maxwritecnt / PAGE_SIZE;
2959
2960 if (temp > UINT16_MAX)
2961 temp = UINT16_MAX;
2962 }
2963 mp->mnt_segwritecnt = (u_int16_t)temp;
2964
2965 if (readsegsize)
2966 temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
2967 else
2968 temp = mp->mnt_maxreadcnt;
2969 mp->mnt_maxsegreadsize = (u_int32_t)temp;
2970
2971 if (writesegsize)
2972 temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize;
2973 else
2974 temp = mp->mnt_maxwritecnt;
2975 mp->mnt_maxsegwritesize = (u_int32_t)temp;
2976
2977 if (alignment)
2978 temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1;
2979 else
2980 temp = 0;
2981 mp->mnt_alignmentmask = temp;
2982
2983
2984 if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH)
2985 temp = ioqueue_depth;
2986 else
2987 temp = MNT_DEFAULT_IOQUEUE_DEPTH;
2988
2989 mp->mnt_ioqueue_depth = temp;
2990 mp->mnt_ioscale = (mp->mnt_ioqueue_depth + (MNT_DEFAULT_IOQUEUE_DEPTH - 1)) / MNT_DEFAULT_IOQUEUE_DEPTH;
2991
2992 if (mp->mnt_ioscale > 1)
2993 printf("ioqueue_depth = %d, ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
2994
2995 if (features & DK_FEATURE_FORCE_UNIT_ACCESS)
2996 mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
2997
2998 return (error);
2999 }
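/*
 * Example: the same DKIOC* queries issued above can be made from user
 * space with ioctl(2) on a raw disk node. A minimal sketch (the device
 * path is illustrative):
 */
#if 0
#include <sys/disk.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	uint32_t blksize = 0;
	uint64_t blkcount = 0;
	int fd = open("/dev/rdisk0", O_RDONLY);

	if (fd == -1) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, DKIOCGETBLOCKSIZE, &blksize) == 0 &&
	    ioctl(fd, DKIOCGETBLOCKCOUNT, &blkcount) == 0)
		printf("%u-byte blocks, %llu blocks\n",
		    blksize, (unsigned long long)blkcount);
	close(fd);
	return 0;
}
#endif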
3000
3001 static struct klist fs_klist;
3002 lck_grp_t *fs_klist_lck_grp;
3003 lck_mtx_t *fs_klist_lock;
3004
3005 void
3006 vfs_event_init(void)
3007 {
3008
3009 klist_init(&fs_klist);
3010 fs_klist_lck_grp = lck_grp_alloc_init("fs_klist", NULL);
3011 fs_klist_lock = lck_mtx_alloc_init(fs_klist_lck_grp, NULL);
3012 }
3013
3014 void
3015 vfs_event_signal(__unused fsid_t *fsid, u_int32_t event, __unused intptr_t data)
3016 {
3017 lck_mtx_lock(fs_klist_lock);
3018 KNOTE(&fs_klist, event);
3019 lck_mtx_unlock(fs_klist_lock);
3020 }
3021
3022 /*
3023 * return the number of mounted filesystems.
3024 */
3025 static int
3026 sysctl_vfs_getvfscnt(void)
3027 {
3028 return(mount_getvfscnt());
3029 }
3030
3031
3032 static int
3033 mount_getvfscnt(void)
3034 {
3035 int ret;
3036
3037 mount_list_lock();
3038 ret = nummounts;
3039 mount_list_unlock();
3040 return (ret);
3041
3042 }
3043
3044
3045
3046 static int
3047 mount_fillfsids(fsid_t *fsidlst, int count)
3048 {
3049 struct mount *mp;
3050 int actual = 0;
3051
3053 mount_list_lock();
3054 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3055 if (actual < count) {	/* don't write past the caller's array */
3056 fsidlst[actual] = mp->mnt_vfsstat.f_fsid;
3057 actual++;
3058 }
3059 }
3060 mount_list_unlock();
3061 return (actual);
3062
3063 }
3064
3065 /*
3066 * Fill in the array of fsid_t's up to a max of 'count'; the actual
3067 * number filled in will be set in '*actual'. If there are more fsid_t's
3068 * than will fit in fsidlst, ENOMEM is returned and '*actual' holds
3069 * the full count.
3070 * Callers depend on *actual being filled out even in the error case.
3071 */
3072 static int
3073 sysctl_vfs_getvfslist(fsid_t *fsidlst, int count, int *actual)
3074 {
3075 struct mount *mp;
3076
3077 *actual = 0;
3078 mount_list_lock();
3079 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3080 (*actual)++;
3081 if (*actual <= count)
3082 fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid;
3083 }
3084 mount_list_unlock();
3085 return (*actual <= count ? 0 : ENOMEM);
3086 }
3087
3088 static int
3089 sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1,
3090 __unused int arg2, struct sysctl_req *req)
3091 {
3092 int actual, error;
3093 size_t space;
3094 fsid_t *fsidlst;
3095
3096 /* This is a readonly node. */
3097 if (req->newptr != USER_ADDR_NULL)
3098 return (EPERM);
3099
3100 /* they are querying us so just return the space required. */
3101 if (req->oldptr == USER_ADDR_NULL) {
3102 req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
3103 return 0;
3104 }
3105 again:
3106 /*
3107 * Retrieve an accurate count of the amount of space required to copy
3108 * out all the fsids in the system.
3109 */
3110 space = req->oldlen;
3111 req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
3112
3113 /* they didn't give us enough space. */
3114 if (space < req->oldlen)
3115 return (ENOMEM);
3116
3117 MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK);
3118 if (fsidlst == NULL) {
3119 return (ENOMEM);
3120 }
3121
3122 error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
3123 &actual);
3124 /*
3125 * If we get back ENOMEM, then another mount has been added while we
3126 * slept in malloc above. If this is the case then try again.
3127 */
3128 if (error == ENOMEM) {
3129 FREE(fsidlst, M_TEMP);
3130 req->oldlen = space;
3131 goto again;
3132 }
3133 if (error == 0) {
3134 error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t));
3135 }
3136 FREE(fsidlst, M_TEMP);
3137 return (error);
3138 }
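/*
 * Example: a user-space sketch of the probe-then-fetch protocol this
 * handler implements, assuming the node is reachable by name as
 * "vfs.generic.vfsidlist". A NULL old pointer yields the required
 * size; the fetch is retried on ENOMEM (a mount raced in).
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	fsid_t *list = NULL;
	size_t len = 0;

	for (;;) {
		if (sysctlbyname("vfs.generic.vfsidlist", NULL, &len,
		    NULL, 0) == -1)
			return 1;
		if ((list = realloc(list, len)) == NULL)
			return 1;
		if (sysctlbyname("vfs.generic.vfsidlist", list, &len,
		    NULL, 0) == 0)
			break;	/* consistent snapshot */
		/* ENOMEM: a filesystem was mounted in between; retry */
	}
	printf("%zu mounted filesystems\n", len / sizeof(fsid_t));
	free(list);
	return 0;
}
#endif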
3139
3140 /*
3141 * Do a sysctl by fsid.
3142 */
3143 static int
3144 sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
3145 struct sysctl_req *req)
3146 {
3147 union union_vfsidctl vc;
3148 struct mount *mp;
3149 struct vfsstatfs *sp;
3150 int *name, flags, namelen;
3151 int error=0, gotref=0;
3152 vfs_context_t ctx = vfs_context_current();
3153 proc_t p = req->p; /* XXX req->p != current_proc()? */
3154 boolean_t is_64_bit;
3155
3156 name = arg1;
3157 namelen = arg2;
3158 is_64_bit = proc_is64bit(p);
3159
3160 error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
3161 if (error)
3162 goto out;
3163 if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
3164 error = EINVAL;
3165 goto out;
3166 }
3167 mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
3168 if (mp == NULL) {
3169 error = ENOENT;
3170 goto out;
3171 }
3172 gotref = 1;
3173 /* reset so that the fs specific code can fetch it. */
3174 req->newidx = 0;
3175 /*
3176 * Note if this is a VFS_CTL then we pass the actual sysctl req
3177 * in for "oldp" so that the lower layer can DTRT and use the
3178 * SYSCTL_IN/OUT routines.
3179 */
3180 if (mp->mnt_op->vfs_sysctl != NULL) {
3181 if (is_64_bit) {
3182 if (vfs_64bitready(mp)) {
3183 error = mp->mnt_op->vfs_sysctl(name, namelen,
3184 CAST_USER_ADDR_T(req),
3185 NULL, USER_ADDR_NULL, 0,
3186 ctx);
3187 }
3188 else {
3189 error = ENOTSUP;
3190 }
3191 }
3192 else {
3193 error = mp->mnt_op->vfs_sysctl(name, namelen,
3194 CAST_USER_ADDR_T(req),
3195 NULL, USER_ADDR_NULL, 0,
3196 ctx);
3197 }
3198 if (error != ENOTSUP) {
3199 goto out;
3200 }
3201 }
3202 switch (name[0]) {
3203 case VFS_CTL_UMOUNT:
3204 req->newidx = 0;
3205 if (is_64_bit) {
3206 req->newptr = vc.vc64.vc_ptr;
3207 req->newlen = (size_t)vc.vc64.vc_len;
3208 }
3209 else {
3210 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
3211 req->newlen = vc.vc32.vc_len;
3212 }
3213 error = SYSCTL_IN(req, &flags, sizeof(flags));
3214 if (error)
3215 break;
3216
3217 mount_ref(mp, 0);
3218 mount_iterdrop(mp);
3219 gotref = 0;
3220 /* safedounmount consumes a ref */
3221 error = safedounmount(mp, flags, ctx);
3222 break;
3223 case VFS_CTL_STATFS:
3224 req->newidx = 0;
3225 if (is_64_bit) {
3226 req->newptr = vc.vc64.vc_ptr;
3227 req->newlen = (size_t)vc.vc64.vc_len;
3228 }
3229 else {
3230 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
3231 req->newlen = vc.vc32.vc_len;
3232 }
3233 error = SYSCTL_IN(req, &flags, sizeof(flags));
3234 if (error)
3235 break;
3236 sp = &mp->mnt_vfsstat;
3237 if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
3238 (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT)))
3239 goto out;
3240 if (is_64_bit) {
3241 struct user64_statfs sfs;
3242 bzero(&sfs, sizeof(sfs));
3243 sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3244 sfs.f_type = mp->mnt_vtable->vfc_typenum;
3245 sfs.f_bsize = (user64_long_t)sp->f_bsize;
3246 sfs.f_iosize = (user64_long_t)sp->f_iosize;
3247 sfs.f_blocks = (user64_long_t)sp->f_blocks;
3248 sfs.f_bfree = (user64_long_t)sp->f_bfree;
3249 sfs.f_bavail = (user64_long_t)sp->f_bavail;
3250 sfs.f_files = (user64_long_t)sp->f_files;
3251 sfs.f_ffree = (user64_long_t)sp->f_ffree;
3252 sfs.f_fsid = sp->f_fsid;
3253 sfs.f_owner = sp->f_owner;
3254
3255 strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
3256 strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
3257 strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);
3258
3259 error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
3260 }
3261 else {
3262 struct user32_statfs sfs;
3263 bzero(&sfs, sizeof(sfs));
3264 sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3265 sfs.f_type = mp->mnt_vtable->vfc_typenum;
3266
3267 /*
3268 * It's possible for there to be more than 2^31 blocks in the filesystem, so we
3269 * have to fudge the numbers here in that case. We inflate the blocksize in order
3270 * to reflect the filesystem size as best we can.
3271 */
3272 if (sp->f_blocks > INT_MAX) {
3273 int shift;
3274
3275 /*
3276 * Work out how far we have to shift the block count down to make it fit.
3277 * Note that it's possible to have to shift so far that the resulting
3278 * blocksize would be unreportably large. At that point, we will clip
3279 * any values that don't fit.
3280 *
3281 * For safety's sake, we also ensure that f_iosize is never reported as
3282 * being smaller than f_bsize.
3283 */
3284 for (shift = 0; shift < 32; shift++) {
3285 if ((sp->f_blocks >> shift) <= INT_MAX)
3286 break;
3287 if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX)
3288 break;
3289 }
3290 #define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
3291 sfs.f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
3292 sfs.f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
3293 sfs.f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
3294 #undef __SHIFT_OR_CLIP
3295 sfs.f_bsize = (user32_long_t)(sp->f_bsize << shift);
3296 sfs.f_iosize = lmax(sp->f_iosize, sp->f_bsize);
3297 } else {
3298 sfs.f_bsize = (user32_long_t)sp->f_bsize;
3299 sfs.f_iosize = (user32_long_t)sp->f_iosize;
3300 sfs.f_blocks = (user32_long_t)sp->f_blocks;
3301 sfs.f_bfree = (user32_long_t)sp->f_bfree;
3302 sfs.f_bavail = (user32_long_t)sp->f_bavail;
3303 }
3304 sfs.f_files = (user32_long_t)sp->f_files;
3305 sfs.f_ffree = (user32_long_t)sp->f_ffree;
3306 sfs.f_fsid = sp->f_fsid;
3307 sfs.f_owner = sp->f_owner;
3308
3309 strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
3310 strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
3311 strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);
3312
3313 error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
3314 }
3315 break;
3316 default:
3317 error = ENOTSUP;
3318 goto out;
3319 }
3320 out:
3321 if(gotref != 0)
3322 mount_iterdrop(mp);
3323 return (error);
3324 }
3325
3326 static int filt_fsattach(struct knote *kn);
3327 static void filt_fsdetach(struct knote *kn);
3328 static int filt_fsevent(struct knote *kn, long hint);
3329 struct filterops fs_filtops = {
3330 .f_attach = filt_fsattach,
3331 .f_detach = filt_fsdetach,
3332 .f_event = filt_fsevent,
3333 };
3334
3335 static int
3336 filt_fsattach(struct knote *kn)
3337 {
3338
3339 lck_mtx_lock(fs_klist_lock);
3340 kn->kn_flags |= EV_CLEAR;
3341 KNOTE_ATTACH(&fs_klist, kn);
3342 lck_mtx_unlock(fs_klist_lock);
3343 return (0);
3344 }
3345
3346 static void
3347 filt_fsdetach(struct knote *kn)
3348 {
3349 lck_mtx_lock(fs_klist_lock);
3350 KNOTE_DETACH(&fs_klist, kn);
3351 lck_mtx_unlock(fs_klist_lock);
3352 }
3353
3354 static int
3355 filt_fsevent(struct knote *kn, long hint)
3356 {
3357 /*
3358 * Backwards compatibility:
3359 * Other filters would do nothing if kn->kn_sfflags == 0
3360 */
3361
3362 if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
3363 kn->kn_fflags |= hint;
3364 }
3365
3366 return (kn->kn_fflags != 0);
3367 }
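/*
 * Example: fs_filtops backs the EVFILT_FS kqueue filter. A minimal
 * user-space consumer that blocks until a mount-related event is
 * posted via vfs_event_signal() might look like this:
 */
#if 0
#include <sys/event.h>
#include <sys/mount.h>
#include <stdio.h>

int
main(void)
{
	struct kevent kev;
	int kq = kqueue();

	if (kq == -1)
		return 1;

	/* EVFILT_FS is system-wide; the ident is unused */
	EV_SET(&kev, 0, EVFILT_FS, EV_ADD | EV_CLEAR, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		return 1;

	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
		printf("fs event, fflags=0x%x (VQ_* bits)\n",
		    (unsigned)kev.fflags);
	return 0;
}
#endif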
3368
3369 static int
3370 sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp,
3371 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3372 {
3373 int out, error;
3374 pid_t pid;
3375 proc_t p;
3376
3377 /* We need a pid. */
3378 if (req->newptr == USER_ADDR_NULL)
3379 return (EINVAL);
3380
3381 error = SYSCTL_IN(req, &pid, sizeof(pid));
3382 if (error)
3383 return (error);
3384
3385 p = proc_find(pid < 0 ? -pid : pid);
3386 if (p == NULL)
3387 return (ESRCH);
3388
3389 /*
3390 * Fetching the value is ok, but we only fetch if the old
3391 * pointer is given.
3392 */
3393 if (req->oldptr != USER_ADDR_NULL) {
3394 out = !((p->p_flag & P_NOREMOTEHANG) == 0);
3395 proc_rele(p);
3396 error = SYSCTL_OUT(req, &out, sizeof(out));
3397 return (error);
3398 }
3399
3400 /* cansignal offers us enough security. */
3401 if (p != req->p && proc_suser(req->p) != 0) {
3402 proc_rele(p);
3403 return (EPERM);
3404 }
3405
3406 if (pid < 0)
3407 OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
3408 else
3409 OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
3410 proc_rele(p);
3411
3412 return (0);
3413 }
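/*
 * Example: a user-space sketch of toggling the flag, assuming the node
 * is reachable by name as "vfs.generic.noremotehang". Writing a pid
 * sets P_NOREMOTEHANG for that process; a negative pid clears it.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid = getpid();

	/* fail rather than hang on unresponsive remote filesystems */
	if (sysctlbyname("vfs.generic.noremotehang", NULL, NULL,
	    &pid, sizeof(pid)) == -1) {
		perror("sysctlbyname");
		return 1;
	}
	return 0;
}
#endif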
3414
3415 /* the vfs.generic. branch. */
3416 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RW|CTLFLAG_LOCKED, NULL, "vfs generic hinge");
3417 /* retrieve a list of mounted filesystem fsid_t */
3418 SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist, CTLFLAG_RD,
3419 NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
3420 /* perform operations on filesystem via fsid_t */
3421 SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW|CTLFLAG_LOCKED,
3422 sysctl_vfs_ctlbyfsid, "ctlbyfsid");
3423 SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW|CTLFLAG_ANYBODY,
3424 NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
3425
3426
3427 long num_reusedvnodes = 0;
3428
3429 static int
3430 new_vnode(vnode_t *vpp)
3431 {
3432 vnode_t vp;
3433 int retries = 0;	/* retry in case of tablefull */
3434 int force_alloc = 0, walk_count = 0;
3435 unsigned int vpid;
3436 struct timespec ts;
3437 struct timeval current_tv;
3438 #ifndef __LP64__
3439 struct unsafe_fsnode *l_unsafefs = 0;
3440 #endif /* __LP64__ */
3441 proc_t curproc = current_proc();
3442
3443 retry:
3444 microuptime(&current_tv);
3445
3446 vp = NULLVP;
3447
3448 vnode_list_lock();
3449
3450 if ( !TAILQ_EMPTY(&vnode_dead_list)) {
3451 /*
3452 * Can always reuse a dead one
3453 */
3454 vp = TAILQ_FIRST(&vnode_dead_list);
3455 goto steal_this_vp;
3456 }
3457 /*
3458 * no dead vnodes available... if we're under
3459 * the limit, we'll create a new vnode
3460 */
3461 if (numvnodes < desiredvnodes || force_alloc) {
3462 numvnodes++;
3463 vnode_list_unlock();
3464
3465 MALLOC_ZONE(vp, struct vnode *, sizeof(*vp), M_VNODE, M_WAITOK);
3466 bzero((char *)vp, sizeof(*vp));
3467 VLISTNONE(vp); /* avoid double queue removal */
3468 lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr);
3469
3470 klist_init(&vp->v_knotes);
3471 nanouptime(&ts);
3472 vp->v_id = ts.tv_nsec;
3473 vp->v_flag = VSTANDARD;
3474
3475 #if CONFIG_MACF
3476 if (mac_vnode_label_init_needed(vp))
3477 mac_vnode_label_init(vp);
3478 #endif /* MAC */
3479
3480 vp->v_iocount = 1;
3481 goto done;
3482 }
3483
3484 #define MAX_WALK_COUNT 1000
3485
3486 if ( !TAILQ_EMPTY(&vnode_rage_list) &&
3487 (ragevnodes >= rage_limit ||
3488 (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
3489
3490 TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
3491 if ( !(vp->v_listflag & VLIST_RAGE))
3492 panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
3493
3494 // if we're a dependency-capable process, skip vnodes that can
3495 // cause recycling deadlocks. (i.e. this process is diskimages
3496 // helper and the vnode is in a disk image).
3497 //
3498 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
3499 break;
3500 }
3501
3502 // don't iterate more than MAX_WALK_COUNT vnodes to
3503 // avoid keeping the vnode list lock held for too long.
3504 if (walk_count++ > MAX_WALK_COUNT) {
3505 vp = NULL;
3506 break;
3507 }
3508 }
3509
3510 }
3511
3512 if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
3513 /*
3514 * Pick the first vp for possible reuse
3515 */
3516 walk_count = 0;
3517 TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
3518 // if we're a dependency-capable process, skip vnodes that can
3519 // cause recycling deadlocks. (i.e. this process is diskimages
3520 // helper and the vnode is in a disk image)
3521 //
3522 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
3523 break;
3524 }
3525
3526 // don't iterate more than MAX_WALK_COUNT vnodes to
3527 // avoid keeping the vnode list lock held for too long.
3528 if (walk_count++ > MAX_WALK_COUNT) {
3529 vp = NULL;
3530 break;
3531 }
3532 }
3533
3534 }
3535
3536 //
3537 // if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
3538 // then we're trying to create a vnode on behalf of a
3539 // process like diskimages-helper that has file systems
3540 // mounted on top of itself (and thus we can't reclaim
3541 // vnodes in the file systems on top of us). if we can't
3542 // find a vnode to reclaim then we'll just have to force
3543 // the allocation.
3544 //
3545 if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
3546 force_alloc = 1;
3547 vnode_list_unlock();
3548 goto retry;
3549 }
3550
3551 if (vp == NULL) {
3552 /*
3553 * we've reached the system-imposed maximum number of vnodes
3554 * but there isn't a single one available...
3555 * wait a bit and then retry; if we can't get a vnode
3556 * after 100 retries, then log a complaint
3557 */
3558 if (++retries <= 100) {
3559 vnode_list_unlock();
3560 delay_for_interval(1, 1000 * 1000);
3561 goto retry;
3562 }
3563
3564 vnode_list_unlock();
3565 tablefull("vnode");
3566 log(LOG_EMERG, "%d desired, %d numvnodes, "
3567 "%d free, %d dead, %d rage\n",
3568 desiredvnodes, numvnodes, freevnodes, deadvnodes, ragevnodes);
3569 #if CONFIG_EMBEDDED
3570 /*
3571 * Running out of vnodes tends to make a system unusable. Start killing
3572 * processes that jetsam knows are killable.
3573 */
3574 if (jetsam_kill_top_proc() < 0) {
3575 /*
3576 * If jetsam can't find any more processes to kill and there
3577 * still aren't any free vnodes, panic. Hopefully we'll get a
3578 * panic log to tell us why we ran out.
3579 */
3580 panic("vnode table is full\n");
3581 }
3582
3583 delay_for_interval(1, 1000 * 1000);
3584 goto retry;
3585 #endif
3586
3587 *vpp = NULL;
3588 return (ENFILE);
3589 }
3590 steal_this_vp:
3591 vpid = vp->v_id;
3592
3593 vnode_list_remove_locked(vp);
3594
3595 vnode_list_unlock();
3596
3597 vnode_lock_spin(vp);
3598
3599 /*
3600 * We may block waiting for the vnode_lock after removing the vp from the freelist,
3601 * and the vid is bumped only at the very end of reclaim. So it is possible
3602 * that we are looking at a vnode that is being terminated. If so, skip it.
3603 */
3604 if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
3605 VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
3606 /*
3607 * we lost the race between dropping the list lock
3608 * and picking up the vnode_lock... someone else
3609 * used this vnode and it is now in a new state
3610 * so we need to go back and try again
3611 */
3612 vnode_unlock(vp);
3613 goto retry;
3614 }
3615 if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) {
3616 /*
3617 * we did a vnode_rele_ext that asked for
3618 * us not to reenter the filesystem during
3619 * the release even though VL_NEEDINACTIVE was
3620 * set... we'll do it here by doing a
3621 * vnode_get/vnode_put
3622 *
3623 * pick up an iocount so that we can call
3624 * vnode_put and drive the VNOP_INACTIVE...
3625 * vnode_put will either leave us off
3626 * the freelist if a new ref comes in,
3627 * or put us back on the end of the freelist
3628 * or recycle us if we were marked for termination...
3629 * so we'll just go grab a new candidate
3630 */
3631 vp->v_iocount++;
3632 #ifdef JOE_DEBUG
3633 record_vp(vp, 1);
3634 #endif
3635 vnode_put_locked(vp);
3636 vnode_unlock(vp);
3637 goto retry;
3638 }
3639 OSAddAtomicLong(1, &num_reusedvnodes);
3640
3641 /* Checks for anyone racing us for recycle */
3642 if (vp->v_type != VBAD) {
3643 if (vp->v_lflag & VL_DEAD)
3644 panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
3645 vnode_lock_convert(vp);
3646 (void)vnode_reclaim_internal(vp, 1, 1, 0);
3647
3648 if ((VONLIST(vp)))
3649 panic("new_vnode(%p): vp on list", vp);
3650 if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
3651 (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH)))
3652 panic("new_vnode(%p): free vnode still referenced", vp);
3653 if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0))
3654 panic("new_vnode(%p): vnode seems to be on mount list", vp);
3655 if ( !LIST_EMPTY(&vp->v_nclinks) || !LIST_EMPTY(&vp->v_ncchildren))
3656 panic("new_vnode(%p): vnode still hooked into the name cache", vp);
3657 }
3658
3659 #ifndef __LP64__
3660 if (vp->v_unsafefs) {
3661 l_unsafefs = vp->v_unsafefs;
3662 vp->v_unsafefs = (struct unsafe_fsnode *)NULL;
3663 }
3664 #endif /* __LP64__ */
3665
3666 #if CONFIG_MACF
3667 /*
3668 * We should never see VL_LABELWAIT or VL_LABEL here.
3669 * as those operations hold a reference.
3670 */
3671 assert ((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
3672 assert ((vp->v_lflag & VL_LABEL) != VL_LABEL);
3673 if (vp->v_lflag & VL_LABELED) {
3674 vnode_lock_convert(vp);
3675 mac_vnode_label_recycle(vp);
3676 } else if (mac_vnode_label_init_needed(vp)) {
3677 vnode_lock_convert(vp);
3678 mac_vnode_label_init(vp);
3679 }
3680
3681 #endif /* MAC */
3682
3683 vp->v_iocount = 1;
3684 vp->v_lflag = 0;
3685 vp->v_writecount = 0;
3686 vp->v_references = 0;
3687 vp->v_iterblkflags = 0;
3688 vp->v_flag = VSTANDARD;
3689 /* vbad vnodes can point to dead_mountp */
3690 vp->v_mount = NULL;
3691 vp->v_defer_reclaimlist = (vnode_t)0;
3692
3693 vnode_unlock(vp);
3694
3695 #ifndef __LP64__
3696 if (l_unsafefs) {
3697 lck_mtx_destroy(&l_unsafefs->fsnodelock, vnode_lck_grp);
3698 FREE_ZONE((void *)l_unsafefs, sizeof(struct unsafe_fsnode), M_UNSAFEFS);
3699 }
3700 #endif /* __LP64__ */
3701
3702 done:
3703 *vpp = vp;
3704
3705 return (0);
3706 }
3707
3708 void
3709 vnode_lock(vnode_t vp)
3710 {
3711 lck_mtx_lock(&vp->v_lock);
3712 }
3713
3714 void
3715 vnode_lock_spin(vnode_t vp)
3716 {
3717 lck_mtx_lock_spin(&vp->v_lock);
3718 }
3719
3720 void
3721 vnode_unlock(vnode_t vp)
3722 {
3723 lck_mtx_unlock(&vp->v_lock);
3724 }
3725
3726
3727
3728 int
3729 vnode_get(struct vnode *vp)
3730 {
3731 int retval;
3732
3733 vnode_lock_spin(vp);
3734 retval = vnode_get_locked(vp);
3735 vnode_unlock(vp);
3736
3737 return(retval);
3738 }
3739
3740 int
3741 vnode_get_locked(struct vnode *vp)
3742 {
3743 #if DIAGNOSTIC
3744 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
3745 #endif
3746 if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
3747 return(ENOENT);
3748 }
3749 vp->v_iocount++;
3750 #ifdef JOE_DEBUG
3751 record_vp(vp, 1);
3752 #endif
3753 return (0);
3754 }
3755
3756 int
3757 vnode_getwithvid(vnode_t vp, uint32_t vid)
3758 {
3759 return(vget_internal(vp, vid, ( VNODE_NODEAD| VNODE_WITHID)));
3760 }
3761
3762 int
3763 vnode_getwithref(vnode_t vp)
3764 {
3765 return(vget_internal(vp, 0, 0));
3766 }
3767
3768
3769 __private_extern__ int
3770 vnode_getalways(vnode_t vp)
3771 {
3772 return(vget_internal(vp, 0, VNODE_ALWAYS));
3773 }
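/*
 * Example: the canonical use of vnode_getwithvid() is to revalidate a
 * vnode across a window where no iocount was held: capture the vid,
 * drop your locks, then reacquire. A hedged in-kernel sketch (the
 * function name is hypothetical):
 */
#if 0
static int
use_vnode_later(vnode_t vp)
{
	uint32_t vid = vnode_vid(vp);	/* capture identity, no ref held */

	/* ... drop locks, possibly block ... */

	if (vnode_getwithvid(vp, vid) != 0)
		return ENOENT;	/* recycled in the interim */

	/* we now hold an iocount on the same identity */
	vnode_put(vp);
	return 0;
}
#endif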
3774
3775 int
3776 vnode_put(vnode_t vp)
3777 {
3778 int retval;
3779
3780 vnode_lock_spin(vp);
3781 retval = vnode_put_locked(vp);
3782 vnode_unlock(vp);
3783
3784 return(retval);
3785 }
3786
3787 int
3788 vnode_put_locked(vnode_t vp)
3789 {
3790 vfs_context_t ctx = vfs_context_current(); /* hoist outside loop */
3791
3792 #if DIAGNOSTIC
3793 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
3794 #endif
3795 retry:
3796 if (vp->v_iocount < 1)
3797 panic("vnode_put(%p): iocount < 1", vp);
3798
3799 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
3800 vnode_dropiocount(vp);
3801 return(0);
3802 }
3803 if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE) {
3804
3805 vp->v_lflag &= ~VL_NEEDINACTIVE;
3806 vnode_unlock(vp);
3807
3808 VNOP_INACTIVE(vp, ctx);
3809
3810 vnode_lock_spin(vp);
3811 /*
3812 * because we had to drop the vnode lock before calling
3813 * VNOP_INACTIVE, the state of this vnode may have changed...
3814 * we may pick up both VL_MARKTERM and either
3815 * an iocount or a usecount while in the VNOP_INACTIVE call...
3816 * we don't want to call vnode_reclaim_internal on a vnode
3817 * that has active references on it... so loop back around
3818 * and reevaluate the state
3819 */
3820 goto retry;
3821 }
3822 vp->v_lflag &= ~VL_NEEDINACTIVE;
3823
3824 if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
3825 vnode_lock_convert(vp);
3826 vnode_reclaim_internal(vp, 1, 1, 0);
3827 }
3828 vnode_dropiocount(vp);
3829 vnode_list_add(vp);
3830
3831 return(0);
3832 }
3833
3834 /* is vnode_t in use by others? */
3835 int
3836 vnode_isinuse(vnode_t vp, int refcnt)
3837 {
3838 return(vnode_isinuse_locked(vp, refcnt, 0));
3839 }
3840
3841
3842 static int
3843 vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
3844 {
3845 int retval = 0;
3846
3847 if (!locked)
3848 vnode_lock_spin(vp);
3849 if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
3850 retval = 1;
3851 goto out;
3852 }
3853 if (vp->v_type == VREG) {
3854 retval = ubc_isinuse_locked(vp, refcnt, 1);
3855 }
3856
3857 out:
3858 if (!locked)
3859 vnode_unlock(vp);
3860 return(retval);
3861 }
3862
3863
3864 /* resume vnode_t */
3865 errno_t
3866 vnode_resume(vnode_t vp)
3867 {
3868 if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
3869
3870 vnode_lock_spin(vp);
3871 vp->v_lflag &= ~VL_SUSPENDED;
3872 vp->v_owner = NULL;
3873 vnode_unlock(vp);
3874
3875 wakeup(&vp->v_iocount);
3876 }
3877 return(0);
3878 }
3879
3880 /* suspend vnode_t
3881 * Please do not use on more than one vnode at a time as it may
3882 * cause deadlocks.
3883 * xxx should we explicitly prevent this from happening?
3884 */
3885
3886 errno_t
3887 vnode_suspend(vnode_t vp)
3888 {
3889 if (vp->v_lflag & VL_SUSPENDED) {
3890 return(EBUSY);
3891 }
3892
3893 vnode_lock_spin(vp);
3894
3895 /*
3896 * xxx is this sufficient to check if a vnode_drain is in
3897 * progress?
3898 */
3899
3900 if (vp->v_owner == NULL) {
3901 vp->v_lflag |= VL_SUSPENDED;
3902 vp->v_owner = current_thread();
3903 }
3904 vnode_unlock(vp);
3905
3906 return(0);
3907 }
3908
3909
3910
3911 static errno_t
3912 vnode_drain(vnode_t vp)
3913 {
3914
3915 if (vp->v_lflag & VL_DRAIN) {
3916 panic("vnode_drain: recursuve drain");
3917 return(ENOENT);
3918 }
3919 vp->v_lflag |= VL_DRAIN;
3920 vp->v_owner = current_thread();
3921
3922 while (vp->v_iocount > 1)
3923 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);
3924 return(0);
3925 }
3926
3927
3928 /*
3929 * if the number of recent references via vnode_getwithvid or vnode_getwithref
3930 * exceeds this threshold, then 'UN-AGE' the vnode by removing it from
3931 * the LRU list if it's currently on it... once the iocount and usecount both drop
3932 * to 0, it will get put back on the end of the list, effectively making it younger.
3933 * this allows us to keep actively referenced vnodes in the list without having
3934 * to constantly remove and add to the list each time a vnode w/o a usecount is
3935 * referenced, which costs us taking and dropping a global lock twice.
3936 */
3937 #define UNAGE_THRESHHOLD 25
3938
3939 static errno_t
3940 vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
3941 {
3942 int nodead = vflags & VNODE_NODEAD;
3943 int nosusp = vflags & VNODE_NOSUSPEND;
3944 int always = vflags & VNODE_ALWAYS;
3945
3946 for (;;) {
3947 /*
3948 * if it is a dead vnode with deadfs
3949 */
3950 if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
3951 return(ENOENT);
3952 }
3953 /*
3954 * will return VL_DEAD ones
3955 */
3956 if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0 ) {
3957 break;
3958 }
3959 /*
3960 * if suspended vnodes are to be failed
3961 */
3962 if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
3963 return(ENOENT);
3964 }
3965 /*
3966 * if you are the owner of the drain/suspend/termination, you can acquire the iocount.
3967 * we check for VL_TERMINATE as well, even though it does not set an owner
3968 */
3969 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
3970 (vp->v_owner == current_thread())) {
3971 break;
3972 }
3973
3974 if (always != 0)
3975 break;
3976 vnode_lock_convert(vp);
3977
3978 if (vp->v_lflag & VL_TERMINATE) {
3979 vp->v_lflag |= VL_TERMWANT;
3980
3981 msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vnode getiocount", NULL);
3982 } else
3983 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
3984 }
3985 if (vid != vp->v_id) {
3986 return(ENOENT);
3987 }
3988 if (++vp->v_references >= UNAGE_THRESHHOLD) {
3989 vp->v_references = 0;
3990 vnode_list_remove(vp);
3991 }
3992 vp->v_iocount++;
3993 #ifdef JOE_DEBUG
3994 record_vp(vp, 1);
3995 #endif
3996 return(0);
3997 }
3998
3999 static void
4000 vnode_dropiocount (vnode_t vp)
4001 {
4002 if (vp->v_iocount < 1)
4003 panic("vnode_dropiocount(%p): v_iocount < 1", vp);
4004
4005 vp->v_iocount--;
4006 #ifdef JOE_DEBUG
4007 record_vp(vp, -1);
4008 #endif
4009 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1))
4010 wakeup(&vp->v_iocount);
4011 }
4012
4013
4014 void
4015 vnode_reclaim(struct vnode * vp)
4016 {
4017 vnode_reclaim_internal(vp, 0, 0, 0);
4018 }
4019
4020 __private_extern__
4021 void
4022 vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
4023 {
4024 int isfifo = 0;
4025
4026 if (!locked)
4027 vnode_lock(vp);
4028
4029 if (vp->v_lflag & VL_TERMINATE) {
4030 panic("vnode reclaim in progress");
4031 }
4032 vp->v_lflag |= VL_TERMINATE;
4033
4034 vn_clearunionwait(vp, 1);
4035
4036 vnode_drain(vp);
4037
4038 isfifo = (vp->v_type == VFIFO);
4039
4040 if (vp->v_type != VBAD)
4041 vgone(vp, flags); /* clean and reclaim the vnode */
4042
4043 /*
4044 * give the vnode a new identity so that vnode_getwithvid will fail
4045 * on any stale cache accesses...
4046 * grab the list_lock so that if we're in "new_vnode"
4047 * behind the list_lock trying to steal this vnode, the v_id is stable...
4048 * once new_vnode drops the list_lock, it will block trying to take
4049 * the vnode lock until we release it... at that point it will evaluate
4050 * whether the v_id has changed
4051 * also need to make sure that the vnode isn't on a list where "new_vnode"
4052 * can find it after the v_id has been bumped until we are completely done
4053 * with the vnode (i.e. putting it back on a list has to be the very last
4054 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
4055 * are holding an io_count on the vnode... they need to drop the io_count
4056 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
4057 * they are completely done with the vnode
4058 */
4059 vnode_list_lock();
4060
4061 vnode_list_remove_locked(vp);
4062 vp->v_id++;
4063
4064 vnode_list_unlock();
4065
4066 if (isfifo) {
4067 struct fifoinfo * fip;
4068
4069 fip = vp->v_fifoinfo;
4070 vp->v_fifoinfo = NULL;
4071 FREE(fip, M_TEMP);
4072 }
4073 vp->v_type = VBAD;
4074
4075 if (vp->v_data)
4076 panic("vnode_reclaim_internal: cleaned vnode isn't");
4077 if (vp->v_numoutput)
4078 panic("vnode_reclaim_internal: clean vnode has pending I/O's");
4079 if (UBCINFOEXISTS(vp))
4080 panic("vnode_reclaim_internal: ubcinfo not cleaned");
4081 if (vp->v_parent)
4082 panic("vnode_reclaim_internal: vparent not removed");
4083 if (vp->v_name)
4084 panic("vnode_reclaim_internal: vname not removed");
4085
4086 vp->v_socket = NULL;
4087
4088 vp->v_lflag &= ~VL_TERMINATE;
4089 vp->v_lflag &= ~VL_DRAIN;
4090 vp->v_owner = NULL;
4091
4092 KNOTE(&vp->v_knotes, NOTE_REVOKE);
4093
4094 /* Make sure that when we reuse the vnode, no knotes left over */
4095 klist_init(&vp->v_knotes);
4096
4097 if (vp->v_lflag & VL_TERMWANT) {
4098 vp->v_lflag &= ~VL_TERMWANT;
4099 wakeup(&vp->v_lflag);
4100 }
4101 if (!reuse) {
4102 /*
4103 * make sure we get on the
4104 * dead list if appropriate
4105 */
4106 vnode_list_add(vp);
4107 }
4108 if (!locked)
4109 vnode_unlock(vp);
4110 }
4111
4112 /* USAGE:
4113 * The following api creates a vnode and associates all the parameters specified in the vnode_fsparam
4114 * structure, then returns a vnode handle with a reference. Device aliasing is handled here so checkalias
4115 * is obsoleted by this.
4116 * vnode_create(int flavor, size_t size, void * param, vnode_t *vp)
4117 */
4118 int
4119 vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
4120 {
4121 int error;
4122 int insert = 1;
4123 vnode_t vp;
4124 vnode_t nvp;
4125 vnode_t dvp;
4126 struct uthread *ut;
4127 struct componentname *cnp;
4128 struct vnode_fsparam *param = (struct vnode_fsparam *)data;
4129
4130 if (flavor == VNCREATE_FLAVOR && (size == VCREATESIZE) && param) {
4131 if ( (error = new_vnode(&vp)) ) {
4132 return(error);
4133 } else {
4134 dvp = param->vnfs_dvp;
4135 cnp = param->vnfs_cnp;
4136
4137 vp->v_op = param->vnfs_vops;
4138 vp->v_type = param->vnfs_vtype;
4139 vp->v_data = param->vnfs_fsnode;
4140
4141 if (param->vnfs_markroot)
4142 vp->v_flag |= VROOT;
4143 if (param->vnfs_marksystem)
4144 vp->v_flag |= VSYSTEM;
4145 if (vp->v_type == VREG) {
4146 error = ubc_info_init_withsize(vp, param->vnfs_filesize);
4147 if (error) {
4148 #ifdef JOE_DEBUG
4149 record_vp(vp, 1);
4150 #endif
4151 vp->v_mount = NULL;
4152 vp->v_op = dead_vnodeop_p;
4153 vp->v_tag = VT_NON;
4154 vp->v_data = NULL;
4155 vp->v_type = VBAD;
4156 vp->v_lflag |= VL_DEAD;
4157
4158 vnode_put(vp);
4159 return(error);
4160 }
4161 }
4162 #ifdef JOE_DEBUG
4163 record_vp(vp, 1);
4164 #endif
4165 if (vp->v_type == VCHR || vp->v_type == VBLK) {
4166
4167 vp->v_tag = VT_DEVFS; /* callers will reset if needed (bdevvp) */
4168
4169 if ( (nvp = checkalias(vp, param->vnfs_rdev)) ) {
4170 /*
4171 * if checkalias returns a vnode, it will be locked
4172 *
4173 * first get rid of the unneeded vnode we acquired
4174 */
4175 vp->v_data = NULL;
4176 vp->v_op = spec_vnodeop_p;
4177 vp->v_type = VBAD;
4178 vp->v_lflag = VL_DEAD;
4179 vp->v_data = NULL;
4180 vp->v_tag = VT_NON;
4181 vnode_put(vp);
4182
4183 /*
4184 * switch to aliased vnode and finish
4185 * preparing it
4186 */
4187 vp = nvp;
4188
4189 vclean(vp, 0);
4190 vp->v_op = param->vnfs_vops;
4191 vp->v_type = param->vnfs_vtype;
4192 vp->v_data = param->vnfs_fsnode;
4193 vp->v_lflag = 0;
4194 vp->v_mount = NULL;
4195 insmntque(vp, param->vnfs_mp);
4196 insert = 0;
4197 vnode_unlock(vp);
4198 }
4199 }
4200
4201 if (vp->v_type == VFIFO) {
4202 struct fifoinfo *fip;
4203
4204 MALLOC(fip, struct fifoinfo *,
4205 sizeof(*fip), M_TEMP, M_WAITOK);
4206 bzero(fip, sizeof(struct fifoinfo ));
4207 vp->v_fifoinfo = fip;
4208 }
4209 /* The file system must pass the address of the location where
4210 * it stores the vnode pointer. When we add the vnode to the mount
4211 * list and name cache, it becomes discoverable, so the file system
4212 * node must have its connection to the vnode set up by then.
4213 */
4214 *vpp = vp;
4215
4216 /* Add fs named reference. */
4217 if (param->vnfs_flags & VNFS_ADDFSREF) {
4218 vp->v_lflag |= VNAMED_FSHASH;
4219 }
4220 if (param->vnfs_mp) {
4221 if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL)
4222 vp->v_flag |= VLOCKLOCAL;
4223 if (insert) {
4224 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4225 panic("insmntque: vp on the free list\n");
4226 /*
4227 * enter in mount vnode list
4228 */
4229 insmntque(vp, param->vnfs_mp);
4230 }
4231 #ifndef __LP64__
4232 if ((param->vnfs_mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE) == 0) {
4233 MALLOC_ZONE(vp->v_unsafefs, struct unsafe_fsnode *,
4234 sizeof(struct unsafe_fsnode), M_UNSAFEFS, M_WAITOK);
4235 vp->v_unsafefs->fsnode_count = 0;
4236 vp->v_unsafefs->fsnodeowner = (void *)NULL;
4237 lck_mtx_init(&vp->v_unsafefs->fsnodelock, vnode_lck_grp, vnode_lck_attr);
4238 }
4239 #endif /* __LP64__ */
4240 }
4241 if (dvp && vnode_ref(dvp) == 0) {
4242 vp->v_parent = dvp;
4243 }
4244 if (cnp) {
4245 if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
4246 /*
4247 * enter into name cache
4248 * we've got the info to enter it into the name cache now
4249 * cache_enter_create will pick up an extra reference on
4250 * the name entered into the string cache
4251 */
4252 vp->v_name = cache_enter_create(dvp, vp, cnp);
4253 } else
4254 vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
4255
4256 if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED)
4257 vp->v_flag |= VISUNION;
4258 }
4259 if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
4260 /*
4261 * this vnode is being created as cacheable in the name cache
4262 * this allows us to re-enter it in the cache
4263 */
4264 vp->v_flag |= VNCACHEABLE;
4265 }
4266 ut = get_bsdthread_info(current_thread());
4267
4268 if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
4269 (ut->uu_flag & UT_RAGE_VNODES)) {
4270 /*
4271 * process has indicated that it wants any
4272 * vnodes created on its behalf to be rapidly
4273 * aged to reduce the impact on the cached set
4274 * of vnodes
4275 */
4276 vp->v_flag |= VRAGE;
4277 }
4278 return(0);
4279 }
4280 }
4281 return (EINVAL);
4282 }
4283
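/*
 * Illustrative sketch, not part of the original source: how a filesystem
 * might call vnode_create() from its node-creation path.  "myfs_make_vnode",
 * "struct myfs_node" and "myfs_vnodeop_p" are hypothetical names; the
 * vnode_fsparam fields are the ones consumed above.
 */
#if 0	/* usage sketch only */
static int
myfs_make_vnode(mount_t mp, vnode_t dvp, struct componentname *cnp,
    struct myfs_node *np, vnode_t *vpp)
{
	struct vnode_fsparam vfsp;

	bzero(&vfsp, sizeof(vfsp));
	vfsp.vnfs_mp = mp;			/* mount to enter the vnode on */
	vfsp.vnfs_vtype = VREG;			/* becomes v_type */
	vfsp.vnfs_str = "myfs";
	vfsp.vnfs_dvp = dvp;			/* parent, for v_parent and the name cache */
	vfsp.vnfs_fsnode = np;			/* becomes v_data */
	vfsp.vnfs_vops = myfs_vnodeop_p;	/* becomes v_op */
	vfsp.vnfs_cnp = cnp;			/* name, for the name cache */
	vfsp.vnfs_filesize = np->n_size;	/* sizes the ubc info for VREG */
	vfsp.vnfs_flags = VNFS_ADDFSREF;	/* take a named fs reference */

	/* on success, *vpp is returned with an iocount held */
	return (vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, vpp));
}
#endif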
4284 int
4285 vnode_addfsref(vnode_t vp)
4286 {
4287 vnode_lock_spin(vp);
4288 if (vp->v_lflag & VNAMED_FSHASH)
4289 panic("add_fsref: vp already has named reference");
4290 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4291 panic("addfsref: vp on the free list\n");
4292 vp->v_lflag |= VNAMED_FSHASH;
4293 vnode_unlock(vp);
4294 return(0);
4295
4296 }
4297 int
4298 vnode_removefsref(vnode_t vp)
4299 {
4300 vnode_lock_spin(vp);
4301 if ((vp->v_lflag & VNAMED_FSHASH) == 0)
4302 panic("remove_fsref: no named reference");
4303 vp->v_lflag &= ~VNAMED_FSHASH;
4304 vnode_unlock(vp);
4305 return(0);
4306
4307 }
4308
4309
4310 int
4311 vfs_iterate(__unused int flags, int (*callout)(mount_t, void *), void *arg)
4312 {
4313 mount_t mp;
4314 int ret = 0;
4315 fsid_t * fsid_list;
4316 int count, actualcount, i;
4317 void * allocmem;
4318
4319 count = mount_getvfscnt();
4320 count += 10;
4321
4322 fsid_list = (fsid_t *)kalloc(count * sizeof(fsid_t));
4323 allocmem = (void *)fsid_list;
4324
4325 actualcount = mount_fillfsids(fsid_list, count);
4326
4327 for (i=0; i< actualcount; i++) {
4328
4329 /* obtain the mount point with iteration reference */
4330 mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);
4331
4332 if(mp == (struct mount *)0)
4333 continue;
4334 mount_lock(mp);
4335 if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
4336 mount_unlock(mp);
4337 mount_iterdrop(mp);
4338 continue;
4339
4340 }
4341 mount_unlock(mp);
4342
4343 /* iterate over all the vnodes */
4344 ret = callout(mp, arg);
4345
4346 mount_iterdrop(mp);
4347
4348 switch (ret) {
4349 case VFS_RETURNED:
4350 case VFS_RETURNED_DONE:
4351 if (ret == VFS_RETURNED_DONE) {
4352 ret = 0;
4353 goto out;
4354 }
4355 break;
4356
4357 case VFS_CLAIMED_DONE:
4358 ret = 0;
4359 goto out;
4360 case VFS_CLAIMED:
4361 default:
4362 break;
4363 }
4364 ret = 0;
4365 }
4366
4367 out:
4368 kfree(allocmem, (count * sizeof(fsid_t)));
4369 return (ret);
4370 }
4371
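/*
 * Illustrative sketch, not part of the original source: a minimal
 * vfs_iterate() callout.  VFS_RETURNED continues the iteration,
 * VFS_RETURNED_DONE and VFS_CLAIMED_DONE stop it early, per the
 * switch above.
 */
#if 0	/* usage sketch only */
static int
example_count_callout(mount_t mp, void *arg)
{
	(*(int *)arg)++;		/* count each live, mounted filesystem */
	return (VFS_RETURNED);		/* keep iterating */
}

static int
example_count_mounts(void)
{
	int count = 0;

	(void) vfs_iterate(0, example_count_callout, &count);
	return (count);
}
#endif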
4372 /*
4373 * Update the vfsstatfs structure in the mountpoint.
4374 * MAC: Parameter eventtype added, indicating whether the event that
4375 * triggered this update came from user space, via a system call
4376 * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
4377 */
4378 int
4379 vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
4380 {
4381 struct vfs_attr va;
4382 int error;
4383
4384 /*
4385 * Request the attributes we want to propagate into
4386 * the per-mount vfsstat structure.
4387 */
4388 VFSATTR_INIT(&va);
4389 VFSATTR_WANTED(&va, f_iosize);
4390 VFSATTR_WANTED(&va, f_blocks);
4391 VFSATTR_WANTED(&va, f_bfree);
4392 VFSATTR_WANTED(&va, f_bavail);
4393 VFSATTR_WANTED(&va, f_bused);
4394 VFSATTR_WANTED(&va, f_files);
4395 VFSATTR_WANTED(&va, f_ffree);
4396 VFSATTR_WANTED(&va, f_bsize);
4397 VFSATTR_WANTED(&va, f_fssubtype);
4398 #if CONFIG_MACF
4399 if (eventtype == VFS_USER_EVENT) {
4400 error = mac_mount_check_getattr(ctx, mp, &va);
4401 if (error != 0)
4402 return (error);
4403 }
4404 #endif
4405
4406 if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
4407 KAUTH_DEBUG("STAT - filesystem returned error %d", error);
4408 return(error);
4409 }
4410
4411 /*
4412 * Unpack into the per-mount structure.
4413 *
4414 * We only overwrite these fields, which are likely to change:
4415 * f_blocks
4416 * f_bfree
4417 * f_bavail
4418 * f_bused
4419 * f_files
4420 * f_ffree
4421 *
4422 * And these which are not, but which the FS has no other way
4423 * of providing to us:
4424 * f_bsize
4425 * f_iosize
4426 * f_fssubtype
4427 *
4428 */
4429 if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
4430 /* 4822056 - protect against malformed server mount */
4431 mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
4432 } else {
4433 mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */
4434 }
4435 if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
4436 mp->mnt_vfsstat.f_iosize = va.f_iosize;
4437 } else {
4438 mp->mnt_vfsstat.f_iosize = 1024 * 1024; /* 1MB sensible I/O size */
4439 }
4440 if (VFSATTR_IS_SUPPORTED(&va, f_blocks))
4441 mp->mnt_vfsstat.f_blocks = va.f_blocks;
4442 if (VFSATTR_IS_SUPPORTED(&va, f_bfree))
4443 mp->mnt_vfsstat.f_bfree = va.f_bfree;
4444 if (VFSATTR_IS_SUPPORTED(&va, f_bavail))
4445 mp->mnt_vfsstat.f_bavail = va.f_bavail;
4446 if (VFSATTR_IS_SUPPORTED(&va, f_bused))
4447 mp->mnt_vfsstat.f_bused = va.f_bused;
4448 if (VFSATTR_IS_SUPPORTED(&va, f_files))
4449 mp->mnt_vfsstat.f_files = va.f_files;
4450 if (VFSATTR_IS_SUPPORTED(&va, f_ffree))
4451 mp->mnt_vfsstat.f_ffree = va.f_ffree;
4452
4453 /* this is unlikely to change, but has to be queried for */
4454 if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype))
4455 mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;
4456
4457 return(0);
4458 }
4459
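/*
 * Illustrative sketch, not part of the original source: the filesystem
 * side of the exchange above.  A VFS_GETATTR implementation fills in the
 * attributes it supports with VFSATTR_RETURN(), which stores the value
 * and marks it supported so that VFSATTR_IS_SUPPORTED() succeeds here.
 * "myfs_vfs_getattr" and the sizes are hypothetical.
 */
#if 0	/* usage sketch only */
static int
myfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, __unused vfs_context_t ctx)
{
	VFSATTR_RETURN(fsap, f_bsize, 4096);
	VFSATTR_RETURN(fsap, f_iosize, 128 * 1024);
	VFSATTR_RETURN(fsap, f_blocks, 1024 * 1024);
	VFSATTR_RETURN(fsap, f_bfree, 512 * 1024);
	VFSATTR_RETURN(fsap, f_bavail, 512 * 1024);
	/* attributes not returned stay unsupported; the caller keeps defaults */
	return (0);
}
#endif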
4460 int
4461 mount_list_add(mount_t mp)
4462 {
4463 int res;
4464
4465 mount_list_lock();
4466 if (system_inshutdown != 0) {
4467 res = -1;
4468 } else {
4469 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
4470 nummounts++;
4471 res = 0;
4472 }
4473 mount_list_unlock();
4474
4475 return res;
4476 }
4477
4478 void
4479 mount_list_remove(mount_t mp)
4480 {
4481 mount_list_lock();
4482 TAILQ_REMOVE(&mountlist, mp, mnt_list);
4483 nummounts--;
4484 mp->mnt_list.tqe_next = NULL;
4485 mp->mnt_list.tqe_prev = NULL;
4486 mount_list_unlock();
4487 }
4488
4489 mount_t
4490 mount_lookupby_volfsid(int volfs_id, int withref)
4491 {
4492 mount_t cur_mount = (mount_t)0;
4493 mount_t mp;
4494
4495 mount_list_lock();
4496 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4497 if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
4498 (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
4499 (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
4500 cur_mount = mp;
4501 if (withref) {
4502 if (mount_iterref(cur_mount, 1)) {
4503 cur_mount = (mount_t)0;
4504 mount_list_unlock();
4505 goto out;
4506 }
4507 }
4508 break;
4509 }
4510 }
4511 mount_list_unlock();
4512 if (withref && (cur_mount != (mount_t)0)) {
4513 mp = cur_mount;
4514 if (vfs_busy(mp, LK_NOWAIT) != 0) {
4515 cur_mount = (mount_t)0;
4516 }
4517 mount_iterdrop(mp);
4518 }
4519 out:
4520 return(cur_mount);
4521 }
4522
4523 mount_t
4524 mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
4525 {
4526 mount_t retmp = (mount_t)0;
4527 mount_t mp;
4528
4529 if (!locked)
4530 mount_list_lock();
4531 TAILQ_FOREACH(mp, &mountlist, mnt_list)
4532 if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
4533 mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
4534 retmp = mp;
4535 if (withref) {
4536 if (mount_iterref(retmp, 1))
4537 retmp = (mount_t)0;
4538 }
4539 goto out;
4540 }
4541 out:
4542 if (!locked)
4543 mount_list_unlock();
4544 return (retmp);
4545 }
4546
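/*
 * Illustrative sketch, not part of the original source: looking a mount
 * up by fsid with an iteration reference, as vfs_iterate() does above.
 * The reference pins the mount across the critical section and must be
 * dropped with mount_iterdrop().
 */
#if 0	/* usage sketch only */
static int
example_with_mount(fsid_t *fsid)
{
	mount_t mp;

	mp = mount_list_lookupby_fsid(fsid, 0 /* !locked */, 1 /* withref */);
	if (mp == NULL)
		return (ENOENT);
	/* ... operate on mp while the iteration ref is held ... */
	mount_iterdrop(mp);
	return (0);
}
#endif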
4547 errno_t
4548 vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
4549 {
4550 struct nameidata nd;
4551 int error;
4552 u_int32_t ndflags = 0;
4553
4554 if (ctx == NULL) { /* XXX technically an error */
4555 ctx = vfs_context_current();
4556 }
4557
4558 if (flags & VNODE_LOOKUP_NOFOLLOW)
4559 ndflags = NOFOLLOW;
4560 else
4561 ndflags = FOLLOW;
4562
4563 if (flags & VNODE_LOOKUP_NOCROSSMOUNT)
4564 ndflags |= NOCROSSMOUNT;
4565 if (flags & VNODE_LOOKUP_DOWHITEOUT)
4566 ndflags |= DOWHITEOUT;
4567
4568 /* XXX AUDITVNPATH1 needed ? */
4569 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
4570
4571 if ((error = namei(&nd)))
4572 return (error);
4573 *vpp = nd.ni_vp;
4574 nameidone(&nd);
4575
4576 return (0);
4577 }
4578
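/*
 * Illustrative sketch, not part of the original source: vnode_lookup()
 * returns the vnode with an iocount held, which the caller must release
 * with vnode_put() when done.
 */
#if 0	/* usage sketch only */
static int
example_path_isdir(const char *path, int *isdirp, vfs_context_t ctx)
{
	vnode_t vp;
	int error;

	if ((error = vnode_lookup(path, 0, &vp, ctx)))
		return (error);
	*isdirp = vnode_isdir(vp);
	vnode_put(vp);			/* drop the iocount taken by the lookup */
	return (0);
}
#endif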
4579 errno_t
4580 vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
4581 {
4582 struct nameidata nd;
4583 int error;
4584 u_int32_t ndflags = 0;
4585 int lflags = flags;
4586
4587 if (ctx == NULL) { /* XXX technically an error */
4588 ctx = vfs_context_current();
4589 }
4590
4591 if (fmode & O_NOFOLLOW)
4592 lflags |= VNODE_LOOKUP_NOFOLLOW;
4593
4594 if (lflags & VNODE_LOOKUP_NOFOLLOW)
4595 ndflags = NOFOLLOW;
4596 else
4597 ndflags = FOLLOW;
4598
4599 if (lflags & VNODE_LOOKUP_NOCROSSMOUNT)
4600 ndflags |= NOCROSSMOUNT;
4601 if (lflags & VNODE_LOOKUP_DOWHITEOUT)
4602 ndflags |= DOWHITEOUT;
4603
4604 /* XXX AUDITVNPATH1 needed ? */
4605 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
4606
4607 if ((error = vn_open(&nd, fmode, cmode)))
4608 *vpp = NULL;
4609 else
4610 *vpp = nd.ni_vp;
4611
4612 return (error);
4613 }
4614
4615 errno_t
4616 vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
4617 {
4618 int error;
4619
4620 if (ctx == NULL) {
4621 ctx = vfs_context_current();
4622 }
4623
4624 error = vn_close(vp, flags, ctx);
4625 vnode_put(vp);
4626 return (error);
4627 }
4628
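/*
 * Illustrative sketch, not part of the original source: vnode_open()
 * returns a vnode with an open reference and an iocount; vnode_close()
 * above drops both (it calls vnode_put() itself), so the pair is used
 * like this.
 */
#if 0	/* usage sketch only */
static int
example_create_file(const char *path, vfs_context_t ctx)
{
	vnode_t vp = NULL;
	int error;

	error = vnode_open(path, O_CREAT | FWRITE, 0644, 0, &vp, ctx);
	if (error)
		return (error);
	/* ... read from / write to the vnode here ... */
	return (vnode_close(vp, FWRITE, ctx));
}
#endif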
4629 /*
4630 * Returns: 0 Success
4631 * vnode_getattr:???
4632 */
4633 errno_t
4634 vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
4635 {
4636 struct vnode_attr va;
4637 int error;
4638
4639 VATTR_INIT(&va);
4640 VATTR_WANTED(&va, va_data_size);
4641 error = vnode_getattr(vp, &va, ctx);
4642 if (!error)
4643 *sizep = va.va_data_size;
4644 return(error);
4645 }
4646
4647 errno_t
4648 vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
4649 {
4650 struct vnode_attr va;
4651
4652 VATTR_INIT(&va);
4653 VATTR_SET(&va, va_data_size, size);
4654 va.va_vaflags = ioflag & 0xffff;
4655 return(vnode_setattr(vp, &va, ctx));
4656 }
4657
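/*
 * Illustrative sketch, not part of the original source: truncating a file
 * by setting va_data_size through the helper above; ioflag bits such as
 * IO_NOZEROFILL pass through va_vaflags to the filesystem.
 */
#if 0	/* usage sketch only */
static int
example_truncate(vnode_t vp, vfs_context_t ctx)
{
	return (vnode_setsize(vp, (off_t)0, IO_NOZEROFILL, ctx));
}
#endif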
4658 /*
4659 * Create a filesystem object of arbitrary type with arbitrary attributes in
4660 * the specified directory with the specified name.
4661 *
4662 * Parameters: dvp Pointer to the vnode of the directory
4663 * in which to create the object.
4664 * vpp Pointer to the area into which to
4665 * return the vnode of the created object.
4666 * cnp Component name pointer from the namei
4667 * data structure, containing the name to
4668 * use for the create object.
4669 * vap Pointer to the vnode_attr structure
4670 * describing the object to be created,
4671 * including the type of object.
4672 * flags VN_* flags controlling ACL inheritance
4673 * and whether or not authorization is to
4674 * be required for the operation.
4675 *
4676 * Returns: 0 Success
4677 * !0 errno value
4678 *
4679 * Implicit: *vpp Contains the vnode of the object that
4680 * was created, if successful.
4681 * *cnp May be modified by the underlying VFS.
4682 * *vap May be modified by the underlying VFS;
4683 * in particular, the ACL fields may be
4684 * modified by either ACL inheritance or
4685 * the underlying filesystem, and may
4686 * be modified, even if the operation is
4687 * unsuccessful.
4688 *
4689 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
4690 *
4691 * Modification of '*cnp' and '*vap' by the underlying VFS is
4692 * strongly discouraged.
4693 *
4694 * XXX: This function is a 'vn_*' function; it belongs in vfs_vnops.c
4695 *
4696 * XXX: We should enumerate the possible errno values here, and where
4697 * in the code they originated.
4698 */
4699 errno_t
4700 vn_create(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, struct vnode_attr *vap, int flags, vfs_context_t ctx)
4701 {
4702 kauth_acl_t oacl, nacl;
4703 int initial_acl;
4704 errno_t error;
4705 vnode_t vp = (vnode_t)0;
4706
4707 error = 0;
4708 oacl = nacl = NULL;
4709 initial_acl = 0;
4710
4711 KAUTH_DEBUG("%p CREATE - '%s'", dvp, cnp->cn_nameptr);
4712
4713 /*
4714 * Handle ACL inheritance.
4715 */
4716 if (!(flags & VN_CREATE_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
4717 /* save the original filesec */
4718 if (VATTR_IS_ACTIVE(vap, va_acl)) {
4719 initial_acl = 1;
4720 oacl = vap->va_acl;
4721 }
4722
4723 vap->va_acl = NULL;
4724 if ((error = kauth_acl_inherit(dvp,
4725 oacl,
4726 &nacl,
4727 vap->va_type == VDIR,
4728 ctx)) != 0) {
4729 KAUTH_DEBUG("%p CREATE - error %d processing inheritance", dvp, error);
4730 return(error);
4731 }
4732
4733 /*
4734 * If the generated ACL is NULL, then we can save ourselves some effort
4735 * by clearing the active bit.
4736 */
4737 if (nacl == NULL) {
4738 VATTR_CLEAR_ACTIVE(vap, va_acl);
4739 } else {
4740 VATTR_SET(vap, va_acl, nacl);
4741 }
4742 }
4743
4744 /*
4745 * Check and default new attributes.
4746 * This will set va_uid, va_gid, va_mode and va_create_time at least, if the caller
4747 * hasn't supplied them.
4748 */
4749 if ((error = vnode_authattr_new(dvp, vap, flags & VN_CREATE_NOAUTH, ctx)) != 0) {
4750 KAUTH_DEBUG("%p CREATE - error %d handing/defaulting attributes", dvp, error);
4751 goto out;
4752 }
4753
4754
4755 /*
4756 * Create the requested node.
4757 */
4758 switch(vap->va_type) {
4759 case VREG:
4760 error = VNOP_CREATE(dvp, vpp, cnp, vap, ctx);
4761 break;
4762 case VDIR:
4763 error = VNOP_MKDIR(dvp, vpp, cnp, vap, ctx);
4764 break;
4765 case VSOCK:
4766 case VFIFO:
4767 case VBLK:
4768 case VCHR:
4769 error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
4770 break;
4771 default:
4772 panic("vnode_create: unknown vtype %d", vap->va_type);
4773 }
4774 if (error != 0) {
4775 KAUTH_DEBUG("%p CREATE - error %d returned by filesystem", dvp, error);
4776 goto out;
4777 }
4778
4779 vp = *vpp;
4780 #if CONFIG_MACF
4781 if (!(flags & VN_CREATE_NOLABEL)) {
4782 error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
4783 if (error)
4784 goto error;
4785 }
4786 #endif
4787
4788 /*
4789 * If some of the requested attributes weren't handled by the VNOP,
4790 * use our fallback code.
4791 */
4792 if (!VATTR_ALL_SUPPORTED(vap) && *vpp) {
4793 KAUTH_DEBUG(" CREATE - doing fallback with ACL %p", vap->va_acl);
4794 error = vnode_setattr_fallback(*vpp, vap, ctx);
4795 }
4796 #if CONFIG_MACF
4797 error:
4798 #endif
4799 if ((error != 0 ) && (vp != (vnode_t)0)) {
4800 *vpp = (vnode_t) 0;
4801 vnode_put(vp);
4802 }
4803
4804 out:
4805 /*
4806 * If the caller supplied a filesec in vap, it has been replaced
4807 * now by the post-inheritance copy. We need to put the original back
4808 * and free the inherited product.
4809 */
4810 if (initial_acl) {
4811 VATTR_SET(vap, va_acl, oacl);
4812 } else {
4813 VATTR_CLEAR_ACTIVE(vap, va_acl);
4814 }
4815 if (nacl != NULL)
4816 kauth_acl_free(nacl);
4817
4818 return(error);
4819 }
4820
4821 static kauth_scope_t vnode_scope;
4822 static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
4823 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
4824 static int vnode_authorize_callback_int(__unused kauth_cred_t credential, __unused void *idata, kauth_action_t action,
4825 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
4826
4827 typedef struct _vnode_authorize_context {
4828 vnode_t vp;
4829 struct vnode_attr *vap;
4830 vnode_t dvp;
4831 struct vnode_attr *dvap;
4832 vfs_context_t ctx;
4833 int flags;
4834 int flags_valid;
4835 #define _VAC_IS_OWNER (1<<0)
4836 #define _VAC_IN_GROUP (1<<1)
4837 #define _VAC_IS_DIR_OWNER (1<<2)
4838 #define _VAC_IN_DIR_GROUP (1<<3)
4839 } *vauth_ctx;
4840
4841 void
4842 vnode_authorize_init(void)
4843 {
4844 vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
4845 }
4846
4847 /*
4848 * Authorize an operation on a vnode.
4849 *
4850 * This is KPI, but here because it needs vnode_scope.
4851 *
4852 * Returns: 0 Success
4853 * kauth_authorize_action:EPERM ...
4854 * xlate => EACCES Permission denied
4855 * kauth_authorize_action:0 Success
4856 * kauth_authorize_action: Depends on callback return; this is
4857 * usually only vnode_authorize_callback(),
4858 * but may include other listeners, if any
4859 * exist.
4860 * EROFS
4861 * EACCES
4862 * EPERM
4863 * ???
4864 */
4865 int
4866 vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
4867 {
4868 int error, result;
4869
4870 /*
4871 * We can't authorize against a dead vnode; allow all operations through so that
4872 * the correct error can be returned.
4873 */
4874 if (vp->v_type == VBAD)
4875 return(0);
4876
4877 error = 0;
4878 result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
4879 (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
4880 if (result == EPERM) /* traditional behaviour */
4881 result = EACCES;
4882 /* did the lower layers give a better error return? */
4883 if ((result != 0) && (error != 0))
4884 return(error);
4885 return(result);
4886 }
4887
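/*
 * Illustrative sketch, not part of the original source: a typical caller
 * asking whether the current context may read a file's data.  The
 * KAUTH_VNODE_ACCESS modifier marks the request advisory.
 */
#if 0	/* usage sketch only */
static int
example_can_read(vnode_t vp, vfs_context_t ctx)
{
	/* returns 0 if allowed, EACCES/EROFS/... if denied */
	return (vnode_authorize(vp, NULLVP,
	    KAUTH_VNODE_ACCESS | KAUTH_VNODE_READ_DATA, ctx));
}
#endif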
4888 /*
4889 * Test for vnode immutability.
4890 *
4891 * The 'append' flag is set when the authorization request is constrained
4892 * to operations which only request the right to append to a file.
4893 *
4894 * The 'ignore' flag is set when an operation modifying the immutability flags
4895 * is being authorized. We check the system securelevel to determine which
4896 * immutability flags we can ignore.
4897 */
4898 static int
4899 vnode_immutable(struct vnode_attr *vap, int append, int ignore)
4900 {
4901 int mask;
4902
4903 /* start with all bits precluding the operation */
4904 mask = IMMUTABLE | APPEND;
4905
4906 /* if appending only, remove the append-only bits */
4907 if (append)
4908 mask &= ~APPEND;
4909
4910 /* ignore only set when authorizing flags changes */
4911 if (ignore) {
4912 if (securelevel <= 0) {
4913 /* in insecure state, flags do not inhibit changes */
4914 mask = 0;
4915 } else {
4916 /* in secure state, user flags don't inhibit */
4917 mask &= ~(UF_IMMUTABLE | UF_APPEND);
4918 }
4919 }
4920 KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
4921 if ((vap->va_flags & mask) != 0)
4922 return(EPERM);
4923 return(0);
4924 }
4925
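/*
 * Worked example (illustrative, not part of the original source): for a
 * file with UF_APPEND set, vnode_immutable(vap, 0, 0) returns EPERM
 * because APPEND is in the mask, while vnode_immutable(vap, 1, 0)
 * succeeds because the append-only bits were removed from the mask.
 * With 'ignore' set and securelevel <= 0, the mask collapses to 0 and
 * any flag change is permitted.
 */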
4926 static int
4927 vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
4928 {
4929 int result;
4930
4931 /* default assumption is not-owner */
4932 result = 0;
4933
4934 /*
4935 * If the filesystem has given us a UID, we treat this as authoritative.
4936 */
4937 if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
4938 result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
4939 }
4940 /* we could test the owner UUID here if we had a policy for it */
4941
4942 return(result);
4943 }
4944
4945 /*
4946 * vauth_node_group
4947 *
4948 * Description: Ask if a cred is a member of the group owning the vnode object
4949 *
4950 * Parameters: vap vnode attribute
4951 * vap->va_gid group owner of vnode object
4952 * cred credential to check
4953 * ismember pointer to where to put the answer
4954 * idontknow Return this if we can't get an answer
4955 *
4956 * Returns: 0 Success
4957 * idontknow Can't get information
4958 * kauth_cred_ismember_gid:? Error from kauth subsystem
4959 * kauth_cred_ismember_gid:? Error from kauth subsystem
4960 */
4961 static int
4962 vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow)
4963 {
4964 int error;
4965 int result;
4966
4967 error = 0;
4968 result = 0;
4969
4970 /*
4971 * The caller is expected to have asked the filesystem for a group
4972 * at some point prior to calling this function. The answer may
4973 * have been that there is no group ownership supported for the
4974 * vnode object, in which case we simply report non-membership.
4975 */
4976 if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
4977 error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
4978 /*
4979 * Credentials which are opted into external group membership
4980 * resolution which are not known to the external resolver
4981 * will result in an ENOENT error. We translate this into
4982 * the appropriate 'idontknow' response for our caller.
4983 *
4984 * XXX We do not make a distinction here between an ENOENT
4985 * XXX arising from a response from the external resolver,
4986 * XXX and an ENOENT which is internally generated. This is
4987 * XXX a deficiency of the published kauth_cred_ismember_gid()
4988 * XXX KPI which can not be overcome without new KPI. For
4989 * XXX all currently known cases, however, this will result
4990 * XXX in correct behaviour.
4991 */
4992 if (error == ENOENT)
4993 error = idontknow;
4994 }
4995 /*
4996 * XXX We could test the group UUID here if we had a policy for it,
4997 * XXX but this is problematic from the perspective of synchronizing
4998 * XXX group UUID and POSIX GID ownership of a file and keeping the
4999 * XXX values coherent over time. The problem is that the local
5000 * XXX system will vend transient group UUIDs for unknown POSIX GID
5001 * XXX values, and these are not persistent, whereas storage of values
5002 * XXX is persistent. One potential solution to this is a local
5003 * XXX (persistent) replica of remote directory entries and vended
5004 * XXX local ids in a local directory server (think in terms of a
5005 * XXX caching DNS server).
5006 */
5007
5008 if (!error)
5009 *ismember = result;
5010 return(error);
5011 }
5012
5013 static int
5014 vauth_file_owner(vauth_ctx vcp)
5015 {
5016 int result;
5017
5018 if (vcp->flags_valid & _VAC_IS_OWNER) {
5019 result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
5020 } else {
5021 result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);
5022
5023 /* cache our result */
5024 vcp->flags_valid |= _VAC_IS_OWNER;
5025 if (result) {
5026 vcp->flags |= _VAC_IS_OWNER;
5027 } else {
5028 vcp->flags &= ~_VAC_IS_OWNER;
5029 }
5030 }
5031 return(result);
5032 }
5033
5034
5035 /*
5036 * vauth_file_ingroup
5037 *
5038 * Description: Ask if a user is a member of the group owning the file
5039 *
5040 * Parameters: vcp The vnode authorization context that
5041 * contains the user and directory info
5042 * vcp->flags_valid Valid flags
5043 * vcp->flags Flags values
5044 * vcp->vap File vnode attributes
5045 * vcp->ctx VFS Context (for user)
5046 * ismember pointer to where to put the answer
5047 * idontknow Return this if we can't get an answer
5048 *
5049 * Returns: 0 Success
5050 * vauth_node_group:? Error from vauth_node_group()
5051 *
5052 * Implicit returns: *ismember 0 The user is not a group member
5053 * 1 The user is a group member
5054 */
5055 static int
5056 vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
5057 {
5058 int error;
5059
5060 /* Check for a cached answer first, to avoid the check if possible */
5061 if (vcp->flags_valid & _VAC_IN_GROUP) {
5062 *ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
5063 error = 0;
5064 } else {
5065 /* Otherwise, go look for it */
5066 error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember, idontknow);
5067
5068 if (!error) {
5069 /* cache our result */
5070 vcp->flags_valid |= _VAC_IN_GROUP;
5071 if (*ismember) {
5072 vcp->flags |= _VAC_IN_GROUP;
5073 } else {
5074 vcp->flags &= ~_VAC_IN_GROUP;
5075 }
5076 }
5077
5078 }
5079 return(error);
5080 }
5081
5082 static int
5083 vauth_dir_owner(vauth_ctx vcp)
5084 {
5085 int result;
5086
5087 if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
5088 result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
5089 } else {
5090 result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);
5091
5092 /* cache our result */
5093 vcp->flags_valid |= _VAC_IS_DIR_OWNER;
5094 if (result) {
5095 vcp->flags |= _VAC_IS_DIR_OWNER;
5096 } else {
5097 vcp->flags &= ~_VAC_IS_DIR_OWNER;
5098 }
5099 }
5100 return(result);
5101 }
5102
5103 /*
5104 * vauth_dir_ingroup
5105 *
5106 * Description: Ask if a user is a member of the group owning the directory
5107 *
5108 * Parameters: vcp The vnode authorization context that
5109 * contains the user and directory info
5110 * vcp->flags_valid Valid flags
5111 * vcp->flags Flags values
5112 * vcp->dvap Dir vnode attributes
5113 * vcp->ctx VFS Context (for user)
5114 * ismember pointer to where to put the answer
5115 * idontknow Return this if we can't get an answer
5116 *
5117 * Returns: 0 Success
5118 * vauth_node_group:? Error from vauth_node_group()
5119 *
5120 * Implicit returns: *ismember 0 The user is not a group member
5121 * 1 The user is a group member
5122 */
5123 static int
5124 vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
5125 {
5126 int error;
5127
5128 /* Check for a cached answer first, to avoid the check if possible */
5129 if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
5130 *ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
5131 error = 0;
5132 } else {
5133 /* Otherwise, go look for it */
5134 error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember, idontknow);
5135
5136 if (!error) {
5137 /* cache our result */
5138 vcp->flags_valid |= _VAC_IN_DIR_GROUP;
5139 if (*ismember) {
5140 vcp->flags |= _VAC_IN_DIR_GROUP;
5141 } else {
5142 vcp->flags &= ~_VAC_IN_DIR_GROUP;
5143 }
5144 }
5145 }
5146 return(error);
5147 }
5148
5149 /*
5150 * Test the posix permissions in (vap) to determine whether (credential)
5151 * may perform (action)
5152 */
5153 static int
5154 vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
5155 {
5156 struct vnode_attr *vap;
5157 int needed, error, owner_ok, group_ok, world_ok, ismember;
5158 #ifdef KAUTH_DEBUG_ENABLE
5159 const char *where = "uninitialized";
5160 # define _SETWHERE(c) where = c;
5161 #else
5162 # define _SETWHERE(c)
5163 #endif
5164
5165 /* checking file or directory? */
5166 if (on_dir) {
5167 vap = vcp->dvap;
5168 } else {
5169 vap = vcp->vap;
5170 }
5171
5172 error = 0;
5173
5174 /*
5175 * We want to do as little work here as possible. So first we check
5176 * which sets of permissions grant us the access we need, and avoid checking
5177 * whether specific permissions grant access when more generic ones would.
5178 */
5179
5180 /* owner permissions */
5181 needed = 0;
5182 if (action & VREAD)
5183 needed |= S_IRUSR;
5184 if (action & VWRITE)
5185 needed |= S_IWUSR;
5186 if (action & VEXEC)
5187 needed |= S_IXUSR;
5188 owner_ok = (needed & vap->va_mode) == needed;
5189
5190 /* group permissions */
5191 needed = 0;
5192 if (action & VREAD)
5193 needed |= S_IRGRP;
5194 if (action & VWRITE)
5195 needed |= S_IWGRP;
5196 if (action & VEXEC)
5197 needed |= S_IXGRP;
5198 group_ok = (needed & vap->va_mode) == needed;
5199
5200 /* world permissions */
5201 needed = 0;
5202 if (action & VREAD)
5203 needed |= S_IROTH;
5204 if (action & VWRITE)
5205 needed |= S_IWOTH;
5206 if (action & VEXEC)
5207 needed |= S_IXOTH;
5208 world_ok = (needed & vap->va_mode) == needed;
5209
5210 /* If granted/denied by all three, we're done */
5211 if (owner_ok && group_ok && world_ok) {
5212 _SETWHERE("all");
5213 goto out;
5214 }
5215 if (!owner_ok && !group_ok && !world_ok) {
5216 _SETWHERE("all");
5217 error = EACCES;
5218 goto out;
5219 }
5220
5221 /* Check ownership (relatively cheap) */
5222 if ((on_dir && vauth_dir_owner(vcp)) ||
5223 (!on_dir && vauth_file_owner(vcp))) {
5224 _SETWHERE("user");
5225 if (!owner_ok)
5226 error = EACCES;
5227 goto out;
5228 }
5229
5230 /* Not owner; if group and world both grant it we're done */
5231 if (group_ok && world_ok) {
5232 _SETWHERE("group/world");
5233 goto out;
5234 }
5235 if (!group_ok && !world_ok) {
5236 _SETWHERE("group/world");
5237 error = EACCES;
5238 goto out;
5239 }
5240
5241 /* Check group membership (most expensive) */
5242 ismember = 0; /* Default to allow, if the target has no group owner */
5243
5244 /*
5245 * In the case we can't get an answer about the user from the call to
5246 * vauth_dir_ingroup() or vauth_file_ingroup(), we want to fail on
5247 * the side of caution, rather than simply granting access, or we will
5248 * fail to correctly implement exclusion groups, so we set the third
5249 * parameter on the basis of the state of 'group_ok'.
5250 */
5251 if (on_dir) {
5252 error = vauth_dir_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
5253 } else {
5254 error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
5255 }
5256 if (error)
5257 goto out;
5258 if (ismember) {
5259 _SETWHERE("group");
5260 if (!group_ok)
5261 error = EACCES;
5262 goto out;
5263 }
5264
5265 /* Not owner, not in group, use world result */
5266 _SETWHERE("world");
5267 if (!world_ok)
5268 error = EACCES;
5269
5270 /* FALLTHROUGH */
5271
5272 out:
5273 KAUTH_DEBUG("%p %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
5274 vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
5275 (action & VREAD) ? "r" : "-",
5276 (action & VWRITE) ? "w" : "-",
5277 (action & VEXEC) ? "x" : "-",
5278 needed,
5279 (vap->va_mode & S_IRUSR) ? "r" : "-",
5280 (vap->va_mode & S_IWUSR) ? "w" : "-",
5281 (vap->va_mode & S_IXUSR) ? "x" : "-",
5282 (vap->va_mode & S_IRGRP) ? "r" : "-",
5283 (vap->va_mode & S_IWGRP) ? "w" : "-",
5284 (vap->va_mode & S_IXGRP) ? "x" : "-",
5285 (vap->va_mode & S_IROTH) ? "r" : "-",
5286 (vap->va_mode & S_IWOTH) ? "w" : "-",
5287 (vap->va_mode & S_IXOTH) ? "x" : "-",
5288 kauth_cred_getuid(vcp->ctx->vc_ucred),
5289 on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
5290 on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
5291 return(error);
5292 }
5293
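/*
 * Worked example (illustrative, not part of the original source): for
 * action = VREAD|VWRITE against mode 0764 (rwxrw-r--), the owner needs
 * S_IRUSR|S_IWUSR (granted), the group needs S_IRGRP|S_IWGRP (granted),
 * and the world needs S_IROTH|S_IWOTH (denied), so a non-owner's access
 * hinges on group membership.  Since group_ok is set, 'idontknow' is 0:
 * an unresolvable membership query reports non-membership and the check
 * falls through to the (denying) world bits, erring on the side of
 * caution either way.
 */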
5294 /*
5295 * Authorize the deletion of the node vp from the directory dvp.
5296 *
5297 * We assume that:
5298 * - Neither the node nor the directory is immutable.
5299 * - The user is not the superuser.
5300 *
5301 * Deletion is not permitted if the directory is sticky and the caller is
5302 * not owner of the node or directory.
5303 *
5304 * If either the node grants DELETE, or the directory grants DELETE_CHILD,
5305 * the node may be deleted. If neither denies the permission, and the
5306 * caller has Posix write access to the directory, then the node may be
5307 * deleted.
5308 *
5309 * As an optimization, we cache whether or not delete child is permitted
5310 * on directories without the sticky bit set.
5311 */
5312 int
5313 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child);
5314 /*static*/ int
5315 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
5316 {
5317 struct vnode_attr *vap = vcp->vap;
5318 struct vnode_attr *dvap = vcp->dvap;
5319 kauth_cred_t cred = vcp->ctx->vc_ucred;
5320 struct kauth_acl_eval eval;
5321 int error, delete_denied, delete_child_denied, ismember;
5322
5323 /* check the ACL on the directory */
5324 delete_child_denied = 0;
5325 if (!cached_delete_child && VATTR_IS_NOT(dvap, va_acl, NULL)) {
5326 errno_t posix_error;
5327
5328 eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
5329 eval.ae_acl = &dvap->va_acl->acl_ace[0];
5330 eval.ae_count = dvap->va_acl->acl_entrycount;
5331 eval.ae_options = 0;
5332 if (vauth_dir_owner(vcp))
5333 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5334 /*
5335 * We use ENOENT as a marker to indicate we could not get
5336 * information in order to delay evaluation until after we
5337 * have the ACL evaluation answer. Previously, we would
5338 * always deny the operation at this point.
5339 */
5340 if ((posix_error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && posix_error != ENOENT)
5341 return(posix_error);
5342 if (ismember)
5343 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5344 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5345 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5346 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5347 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5348
5349 /*
5350 * If there is no entry, we are going to defer to other
5351 * authorization mechanisms.
5352 */
5353 error = kauth_acl_evaluate(cred, &eval);
5354
5355 if (error != 0) {
5356 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5357 return(error);
5358 }
5359 switch(eval.ae_result) {
5360 case KAUTH_RESULT_DENY:
5361 delete_child_denied = 1;
5362 break;
5363 case KAUTH_RESULT_ALLOW:
5364 KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp);
5365 return(0);
5366 case KAUTH_RESULT_DEFER:
5367 /*
5368 * If we don't have a POSIX answer of "yes", and we
5369 * can't get an ACL answer, then we deny it now.
5370 */
5371 if (posix_error == ENOENT) {
5372 delete_child_denied = 1;
5373 break;
5374 }
5375 default:
5376 /* Effectively the same as !delete_child_denied */
5377 KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
5378 break;
5379 }
5380 }
5381
5382 /* check the ACL on the node */
5383 delete_denied = 0;
5384 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
5385 errno_t posix_error;
5386
5387 eval.ae_requested = KAUTH_VNODE_DELETE;
5388 eval.ae_acl = &vap->va_acl->acl_ace[0];
5389 eval.ae_count = vap->va_acl->acl_entrycount;
5390 eval.ae_options = 0;
5391 if (vauth_file_owner(vcp))
5392 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5393 /*
5394 * We use ENOENT as a marker to indicate we could not get
5395 * information in order to delay evaluation until after we
5396 * have the ACL evaluation answer. Previously, we would
5397 * always deny the operation at this point.
5398 */
5399 if ((posix_error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && posix_error != ENOENT)
5400 return(posix_error);
5401 if (ismember)
5402 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5403 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5404 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5405 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5406 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5407
5408 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
5409 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5410 return(error);
5411 }
5412
5413 switch(eval.ae_result) {
5414 case KAUTH_RESULT_DENY:
5415 delete_denied = 1;
5416 break;
5417 case KAUTH_RESULT_ALLOW:
5418 KAUTH_DEBUG("%p ALLOWED - granted by file ACL", vcp->vp);
5419 return(0);
5420 case KAUTH_RESULT_DEFER:
5421 /*
5422 * If we don't have a POSIX answer of "yes", and we
5423 * can't get an ACL answer, then we deny it now.
5424 */
5425 if (posix_error == ENOENT) {
5426 delete_denied = 1;
5427 }
5428 default:
5429 /* Effectively the same as !delete_denied */
5430 KAUTH_DEBUG("%p DEFERRED%s - by file ACL", vcp->vp, delete_denied ? "(DENY)" : "");
5431 break;
5432 }
5433 }
5434
5435 /* if denied by ACL on directory or node, return denial */
5436 if (delete_denied || delete_child_denied) {
5437 KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp);
5438 return(EACCES);
5439 }
5440
5441 /*
5442 * enforce sticky bit behaviour; the cached_delete_child property will
5443 * be false and the dvap contents valid for sticky bit directories;
5444 * this makes us check the directory each time, but it's unavoidable,
5445 * as sticky bit is an exception to caching.
5446 */
5447 if (!cached_delete_child && (dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
5448 KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)",
5449 vcp->vp, cred->cr_uid, vap->va_uid, dvap->va_uid);
5450 return(EACCES);
5451 }
5452
5453 /* check the directory */
5454 if (!cached_delete_child && (error = vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */)) != 0) {
5455 KAUTH_DEBUG("%p ALLOWED - granted by posix permisssions", vcp->vp);
5456 return(error);
5457 }
5458
5459 /* not denied, must be OK */
5460 return(0);
5461 }
5462
5463
5464 /*
5465 * Authorize an operation based on the node's attributes.
5466 */
5467 static int
5468 vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
5469 {
5470 struct vnode_attr *vap = vcp->vap;
5471 kauth_cred_t cred = vcp->ctx->vc_ucred;
5472 struct kauth_acl_eval eval;
5473 int error, ismember;
5474 mode_t posix_action;
5475
5476 /*
5477 * If we are the file owner, we automatically have some rights.
5478 *
5479 * Do we need to expand this to support group ownership?
5480 */
5481 if (vauth_file_owner(vcp))
5482 acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);
5483
5484 /*
5485 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
5486 * mask the latter. If TAKE_OWNERSHIP is requested the caller is about to
5487 * change ownership to themselves, and WRITE_SECURITY is implicitly
5488 * granted to the owner. We need to do this because at this point
5489 * WRITE_SECURITY may not be granted as the caller is not currently
5490 * the owner.
5491 */
5492 if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
5493 (acl_rights & KAUTH_VNODE_WRITE_SECURITY))
5494 acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;
5495
5496 if (acl_rights == 0) {
5497 KAUTH_DEBUG("%p ALLOWED - implicit or no rights required", vcp->vp);
5498 return(0);
5499 }
5500
5501 /* if we have an ACL, evaluate it */
5502 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
5503 errno_t posix_error;
5504
5505 eval.ae_requested = acl_rights;
5506 eval.ae_acl = &vap->va_acl->acl_ace[0];
5507 eval.ae_count = vap->va_acl->acl_entrycount;
5508 eval.ae_options = 0;
5509 if (vauth_file_owner(vcp))
5510 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5511 /*
5512 * We use ENOENT as a marker to indicate we could not get
5513 * information in order to delay evaluation until after we
5514 * have the ACL evaluation answer. Previously, we would
5515 * always deny the operation at this point.
5516 */
5517 if ((posix_error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && posix_error != ENOENT)
5518 return(posix_error);
5519 if (ismember)
5520 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5521 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5522 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5523 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5524 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5525
5526 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
5527 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5528 return(error);
5529 }
5530
5531 switch(eval.ae_result) {
5532 case KAUTH_RESULT_DENY:
5533 KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp);
5534 return(EACCES); /* deny, deny, counter-allege */
5535 case KAUTH_RESULT_ALLOW:
5536 KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp);
5537 return(0);
5538 case KAUTH_RESULT_DEFER:
5539 /*
5540 * If we don't have a POSIX answer of "yes", and we
5541 * can't get an ACL answer, then we deny it now.
5542 */
5543 if (posix_error == ENOENT) {
5544 KAUTH_DEBUG("%p DENIED(DEFERRED) - by ACL", vcp->vp);
5545 return(EACCES); /* deny, deny, counter-allege */
5546 }
5547 default:
5548 /* A defer here falls through to the residual-rights evaluation below */
5549 KAUTH_DEBUG("%p DEFERRED - by file ACL", vcp->vp);
5550 break;
5551 }
5552
5553 *found_deny = eval.ae_found_deny;
5554
5555 /* fall through and evaluate residual rights */
5556 } else {
5557 /* no ACL, everything is residual */
5558 eval.ae_residual = acl_rights;
5559 }
5560
5561 /*
5562 * Grant residual rights that have been pre-authorized.
5563 */
5564 eval.ae_residual &= ~preauth_rights;
5565
5566 /*
5567 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
5568 */
5569 if (vauth_file_owner(vcp))
5570 eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;
5571
5572 if (eval.ae_residual == 0) {
5573 KAUTH_DEBUG("%p ALLOWED - rights already authorized", vcp->vp);
5574 return(0);
5575 }
5576
5577 /*
5578 * Bail if we have residual rights that can't be granted by posix permissions,
5579 * or aren't presumed granted at this point.
5580 *
5581 * XXX these can be collapsed for performance
5582 */
5583 if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
5584 KAUTH_DEBUG("%p DENIED - CHANGE_OWNER not permitted", vcp->vp);
5585 return(EACCES);
5586 }
5587 if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
5588 KAUTH_DEBUG("%p DENIED - WRITE_SECURITY not permitted", vcp->vp);
5589 return(EACCES);
5590 }
5591
5592 #if DIAGNOSTIC
5593 if (eval.ae_residual & KAUTH_VNODE_DELETE)
5594 panic("vnode_authorize: can't be checking delete permission here");
5595 #endif
5596
5597 /*
5598 * Compute the fallback posix permissions that will satisfy the remaining
5599 * rights.
5600 */
5601 posix_action = 0;
5602 if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
5603 KAUTH_VNODE_LIST_DIRECTORY |
5604 KAUTH_VNODE_READ_EXTATTRIBUTES))
5605 posix_action |= VREAD;
5606 if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
5607 KAUTH_VNODE_ADD_FILE |
5608 KAUTH_VNODE_ADD_SUBDIRECTORY |
5609 KAUTH_VNODE_DELETE_CHILD |
5610 KAUTH_VNODE_WRITE_ATTRIBUTES |
5611 KAUTH_VNODE_WRITE_EXTATTRIBUTES))
5612 posix_action |= VWRITE;
5613 if (eval.ae_residual & (KAUTH_VNODE_EXECUTE |
5614 KAUTH_VNODE_SEARCH))
5615 posix_action |= VEXEC;
5616
5617 if (posix_action != 0) {
5618 return(vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */));
5619 } else {
5620 KAUTH_DEBUG("%p ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
5621 vcp->vp,
5622 (eval.ae_residual & KAUTH_VNODE_READ_DATA)
5623 ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
5624 (eval.ae_residual & KAUTH_VNODE_WRITE_DATA)
5625 ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
5626 (eval.ae_residual & KAUTH_VNODE_EXECUTE)
5627 ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
5628 (eval.ae_residual & KAUTH_VNODE_DELETE)
5629 ? " DELETE" : "",
5630 (eval.ae_residual & KAUTH_VNODE_APPEND_DATA)
5631 ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
5632 (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD)
5633 ? " DELETE_CHILD" : "",
5634 (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES)
5635 ? " READ_ATTRIBUTES" : "",
5636 (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES)
5637 ? " WRITE_ATTRIBUTES" : "",
5638 (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES)
5639 ? " READ_EXTATTRIBUTES" : "",
5640 (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES)
5641 ? " WRITE_EXTATTRIBUTES" : "",
5642 (eval.ae_residual & KAUTH_VNODE_READ_SECURITY)
5643 ? " READ_SECURITY" : "",
5644 (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY)
5645 ? " WRITE_SECURITY" : "",
5646 (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE)
5647 ? " CHECKIMMUTABLE" : "",
5648 (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER)
5649 ? " CHANGE_OWNER" : "");
5650 }
5651
5652 /*
5653 * Lack of required Posix permissions implies no reason to deny access.
5654 */
5655 return(0);
5656 }
5657
5658 /*
5659 * Check for file immutability.
5660 */
5661 static int
5662 vnode_authorize_checkimmutable(vnode_t vp, struct vnode_attr *vap, int rights, int ignore)
5663 {
5664 mount_t mp;
5665 int error;
5666 int append;
5667
5668 /*
5669 * Perform immutability checks for operations that change data.
5670 *
5671 * Sockets, fifos and devices require special handling.
5672 */
5673 switch(vp->v_type) {
5674 case VSOCK:
5675 case VFIFO:
5676 case VBLK:
5677 case VCHR:
5678 /*
5679 * Writing to these nodes does not change the filesystem data,
5680 * so forget that it's being tried.
5681 */
5682 rights &= ~KAUTH_VNODE_WRITE_DATA;
5683 break;
5684 default:
5685 break;
5686 }
5687
5688 error = 0;
5689 if (rights & KAUTH_VNODE_WRITE_RIGHTS) {
5690
5691 /* check per-filesystem options if possible */
5692 mp = vp->v_mount;
5693 if (mp != NULL) {
5694
5695 /* check for no-EA filesystems */
5696 if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) &&
5697 (vfs_flags(mp) & MNT_NOUSERXATTR)) {
5698 KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vp);
5699 error = EACCES; /* User attributes disabled */
5700 goto out;
5701 }
5702 }
5703
5704 /*
5705 * check for file immutability. first, check if the requested rights are
5706 * allowable for a UF_APPEND file.
5707 */
5708 append = 0;
5709 if (vp->v_type == VDIR) {
5710 if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
5711 append = 1;
5712 } else {
5713 if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
5714 append = 1;
5715 }
5716 if ((error = vnode_immutable(vap, append, ignore)) != 0) {
5717 KAUTH_DEBUG("%p DENIED - file is immutable", vp);
5718 goto out;
5719 }
5720 }
5721 out:
5722 return(error);
5723 }
5724
5725 /*
5726 * Handle authorization actions for filesystems that advertise that the
5727 * server will be enforcing.
5728 *
5729 * Returns: 0 Authorization should be handled locally
5730 * 1 Authorization was handled by the FS
5731 *
5732 * Note: Imputed returns will only occur if the authorization request
5733 * was handled by the FS.
5734 *
5735 * Imputed: *resultp, modified Return code from FS when the request is
5736 * handled by the FS.
5737 * VNOP_ACCESS:???
5738 * VNOP_OPEN:???
5739 */
5740 static int
5741 vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
5742 {
5743 int error;
5744
5745 /*
5746 * If the vp is a device node, socket or FIFO it actually represents a local
5747 * endpoint, so we need to handle it locally.
5748 */
5749 switch(vp->v_type) {
5750 case VBLK:
5751 case VCHR:
5752 case VSOCK:
5753 case VFIFO:
5754 return(0);
5755 default:
5756 break;
5757 }
5758
5759 /*
5760 * In the advisory request case, if the filesystem doesn't think it's reliable
5761 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
5762 */
5763 if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount))
5764 return(0);
5765
5766 /*
5767 * Let the filesystem have a say in the matter. It's OK for it to not implement
5768 * VNOP_ACCESS, as most will authorise inline with the actual request.
5769 */
5770 if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
5771 *resultp = error;
5772 KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
5773 return(1);
5774 }
5775
5776 /*
5777 * Typically opaque filesystems do authorisation in-line, but exec is a special case. In
5778 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
5779 */
5780 if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
5781 /* try a VNOP_OPEN for readonly access */
5782 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
5783 *resultp = error;
5784 KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp);
5785 return(1);
5786 }
5787 VNOP_CLOSE(vp, FREAD, ctx);
5788 }
5789
5790 /*
5791 * We don't have any reason to believe that the request has to be denied at this point,
5792 * so go ahead and allow it.
5793 */
5794 *resultp = 0;
5795 KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp);
5796 return(1);
5797 }
5798
5799
5800
5801
5802 /*
5803 * Returns: KAUTH_RESULT_ALLOW
5804 * KAUTH_RESULT_DENY
5805 *
5806 * Imputed: *arg3, modified Error code in the deny case
5807 * EROFS Read-only file system
5808 * EACCES Permission denied
5809 * EPERM Operation not permitted [no execute]
5810 * vnode_getattr:ENOMEM Not enough space [only if has filesec]
5811 * vnode_getattr:???
5812 * vnode_authorize_opaque:*arg2 ???
5813 * vnode_authorize_checkimmutable:???
5814 * vnode_authorize_delete:???
5815 * vnode_authorize_simple:???
5816 */
5817
5818
5819 static int
5820 vnode_authorize_callback(kauth_cred_t cred, void *idata, kauth_action_t action,
5821 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
5822 {
5823 vfs_context_t ctx;
5824 vnode_t cvp = NULLVP;
5825 vnode_t vp, dvp;
5826 int result = KAUTH_RESULT_DENY;
5827 int parent_iocount = 0;
5828 int parent_action; /* In case we need to use namedstream's data fork for cached rights*/
5829
5830 ctx = (vfs_context_t)arg0;
5831 vp = (vnode_t)arg1;
5832 dvp = (vnode_t)arg2;
5833
5834 /*
5835 * if there are 2 vnodes passed in, we don't know at
5836 * this point which rights to look at based on the
5837 * combined action being passed in... defer until later...
5838 * otherwise check the kauth 'rights' cache hung
5839 * off of the vnode we're interested in... if we've already
5840 * been granted the right we're currently interested in,
5841 * we can just return success... otherwise we'll go through
5842 * the process of authorizing the requested right(s)... if that
5843 * succeeds, we'll add the right(s) to the cache.
5844 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
5845 */
5846 if (dvp && vp)
5847 goto defer;
5848 if (dvp) {
5849 cvp = dvp;
5850 } else {
5851 /*
5852 * For named streams on local-authorization volumes, rights are cached on the parent;
5853 * authorization is determined by looking at the parent's properties anyway, so storing
5854 * on the parent means that we don't recompute for the named stream and that if
5855 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
5856 * stream to flush its cache separately. If we miss in the cache, then we authorize
5857 * as if there were no cached rights (passing the named stream vnode and desired rights to
5858 * vnode_authorize_callback_int()).
5859 *
5860 * On an opaquely authorized volume, we don't know the relationship between the
5861 * data fork's properties and the rights granted on a stream. Thus, named stream vnodes
5862 * on such a volume are authorized directly (rather than using the parent) and have their
5863 * own caches. When a named stream vnode is created, we mark the parent as having a named
5864 * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
5865 * find the stream and flush its cache.
5866 */
5867 if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
5868 cvp = vp->v_parent;
5869 if ((cvp != NULLVP) && (vnode_getwithref(cvp) == 0)) {
5870 parent_iocount = 1;
5871 } else {
5872 cvp = NULL;
5873 goto defer; /* If we can't use the parent, take the slow path */
5874 }
5875
5876 /* Have to translate some actions */
5877 parent_action = action;
5878 if (parent_action & KAUTH_VNODE_READ_DATA) {
5879 parent_action &= ~KAUTH_VNODE_READ_DATA;
5880 parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
5881 }
5882 if (parent_action & KAUTH_VNODE_WRITE_DATA) {
5883 parent_action &= ~KAUTH_VNODE_WRITE_DATA;
5884 parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
5885 }
5886
5887 } else {
5888 cvp = vp;
5889 }
5890 }
5891
5892 if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
5893 result = KAUTH_RESULT_ALLOW;
5894 goto out;
5895 }
5896 defer:
5897 result = vnode_authorize_callback_int(cred, idata, action, arg0, arg1, arg2, arg3);
5898
5899 if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP)
5900 vnode_cache_authorized_action(cvp, ctx, action);
5901
5902 out:
5903 if (parent_iocount) {
5904 vnode_put(cvp);
5905 }
5906
5907 return result;
5908 }
5909
5910
5911 static int
5912 vnode_authorize_callback_int(__unused kauth_cred_t unused_cred, __unused void *idata, kauth_action_t action,
5913 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
5914 {
5915 struct _vnode_authorize_context auth_context;
5916 vauth_ctx vcp;
5917 vfs_context_t ctx;
5918 vnode_t vp, dvp;
5919 kauth_cred_t cred;
5920 kauth_ace_rights_t rights;
5921 struct vnode_attr va, dva;
5922 int result;
5923 int *errorp;
5924 int noimmutable;
5925 boolean_t parent_authorized_for_delete_child = FALSE;
5926 boolean_t found_deny = FALSE;
5927 boolean_t parent_ref= FALSE;
5928
5929 vcp = &auth_context;
5930 ctx = vcp->ctx = (vfs_context_t)arg0;
5931 vp = vcp->vp = (vnode_t)arg1;
5932 dvp = vcp->dvp = (vnode_t)arg2;
5933 errorp = (int *)arg3;
5934 /*
5935 * Note that we authorize against the context, not the passed cred
5936 * (the same thing anyway)
5937 */
5938 cred = ctx->vc_ucred;
5939
5940 VATTR_INIT(&va);
5941 vcp->vap = &va;
5942 VATTR_INIT(&dva);
5943 vcp->dvap = &dva;
5944
5945 vcp->flags = vcp->flags_valid = 0;
5946
5947 #if DIAGNOSTIC
5948 if ((ctx == NULL) || (vp == NULL) || (cred == NULL))
5949 panic("vnode_authorize: bad arguments (context %p vp %p cred %p)", ctx, vp, cred);
5950 #endif
5951
5952 KAUTH_DEBUG("%p AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
5953 vp, vfs_context_proc(ctx)->p_comm,
5954 (action & KAUTH_VNODE_ACCESS) ? "access" : "auth",
5955 (action & KAUTH_VNODE_READ_DATA) ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
5956 (action & KAUTH_VNODE_WRITE_DATA) ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
5957 (action & KAUTH_VNODE_EXECUTE) ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
5958 (action & KAUTH_VNODE_DELETE) ? " DELETE" : "",
5959 (action & KAUTH_VNODE_APPEND_DATA) ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
5960 (action & KAUTH_VNODE_DELETE_CHILD) ? " DELETE_CHILD" : "",
5961 (action & KAUTH_VNODE_READ_ATTRIBUTES) ? " READ_ATTRIBUTES" : "",
5962 (action & KAUTH_VNODE_WRITE_ATTRIBUTES) ? " WRITE_ATTRIBUTES" : "",
5963 (action & KAUTH_VNODE_READ_EXTATTRIBUTES) ? " READ_EXTATTRIBUTES" : "",
5964 (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES) ? " WRITE_EXTATTRIBUTES" : "",
5965 (action & KAUTH_VNODE_READ_SECURITY) ? " READ_SECURITY" : "",
5966 (action & KAUTH_VNODE_WRITE_SECURITY) ? " WRITE_SECURITY" : "",
5967 (action & KAUTH_VNODE_CHANGE_OWNER) ? " CHANGE_OWNER" : "",
5968 (action & KAUTH_VNODE_NOIMMUTABLE) ? " (noimmutable)" : "",
5969 vnode_isdir(vp) ? "directory" : "file",
5970 vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);
5971
5972 /*
5973 * Extract the control bits from the action, everything else is
5974 * requested rights.
5975 */
5976 noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
5977 rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);
5978
5979 if (rights & KAUTH_VNODE_DELETE) {
5980 #if DIAGNOSTIC
5981 if (dvp == NULL)
5982 panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
5983 #endif
5984 /*
5985 * check to see if we've already authorized the parent
5986 * directory for deletion of its children... if so, we
5987 * can skip a whole bunch of work... we will still have to
5988 * authorize that this specific child can be removed
5989 */
5990 if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE)
5991 parent_authorized_for_delete_child = TRUE;
5992 } else {
5993 dvp = NULL;
5994 }
5995
5996 /*
5997 * Check for read-only filesystems.
5998 */
5999 if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
6000 (vp->v_mount->mnt_flag & MNT_RDONLY) &&
6001 ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
6002 (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
6003 (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) {
6004 result = EROFS;
6005 goto out;
6006 }
6007
6008 /*
6009 * Check for noexec filesystems.
6010 */
6011 if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
6012 result = EACCES;
6013 goto out;
6014 }
6015
6016 /*
6017 * Handle cases related to filesystems with non-local enforcement.
6018 * This call can return 0, in which case we will fall through to perform a
6019 * check based on VNOP_GETATTR data. Otherwise it returns 1 and sets
6020 * an appropriate result, at which point we can return immediately.
6021 */
6022 if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx))
6023 goto out;
6024
6025 /*
6026 * Get vnode attributes and extended security information for the vnode
6027 * and directory if required.
6028 */
6029 VATTR_WANTED(&va, va_mode);
6030 VATTR_WANTED(&va, va_uid);
6031 VATTR_WANTED(&va, va_gid);
6032 VATTR_WANTED(&va, va_flags);
6033 VATTR_WANTED(&va, va_acl);
6034 if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
6035 KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result);
6036 goto out;
6037 }
6038 if (dvp && parent_authorized_for_delete_child == FALSE) {
6039 VATTR_WANTED(&dva, va_mode);
6040 VATTR_WANTED(&dva, va_uid);
6041 VATTR_WANTED(&dva, va_gid);
6042 VATTR_WANTED(&dva, va_flags);
6043 VATTR_WANTED(&dva, va_acl);
6044 if ((result = vnode_getattr(dvp, &dva, ctx)) != 0) {
6045 KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp, result);
6046 goto out;
6047 }
6048 }
6049
6050 /*
6051 * If the vnode is an extended attribute data vnode (e.g. a resource fork), *_DATA becomes
6052 * *_EXTATTRIBUTES.
6053 */
6054 if (vnode_isnamedstream(vp)) {
6055 if (rights & KAUTH_VNODE_READ_DATA) {
6056 rights &= ~KAUTH_VNODE_READ_DATA;
6057 rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
6058 }
6059 if (rights & KAUTH_VNODE_WRITE_DATA) {
6060 rights &= ~KAUTH_VNODE_WRITE_DATA;
6061 rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
6062 }
6063 }
6064
6065 /*
6066 * Point 'vp' to the resource fork's parent for ACL checking
6067 */
6068 if (vnode_isnamedstream(vp) &&
6069 (vp->v_parent != NULL) &&
6070 (vget_internal(vp->v_parent, 0, VNODE_NODEAD) == 0)) {
6071 parent_ref = TRUE;
6072 vcp->vp = vp = vp->v_parent;
6073 if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
6074 kauth_acl_free(va.va_acl);
6075 VATTR_INIT(&va);
6076 VATTR_WANTED(&va, va_mode);
6077 VATTR_WANTED(&va, va_uid);
6078 VATTR_WANTED(&va, va_gid);
6079 VATTR_WANTED(&va, va_flags);
6080 VATTR_WANTED(&va, va_acl);
6081 if ((result = vnode_getattr(vp, &va, ctx)) != 0)
6082 goto out;
6083 }
6084
6085 /*
6086 * Check for immutability.
6087 *
6088 * In the deletion case, parent directory immutability vetoes specific
6089 * file rights.
6090 */
6091 if ((result = vnode_authorize_checkimmutable(vp, &va, rights, noimmutable)) != 0)
6092 goto out;
6093 if ((rights & KAUTH_VNODE_DELETE) &&
6094 parent_authorized_for_delete_child == FALSE &&
6095 ((result = vnode_authorize_checkimmutable(dvp, &dva, KAUTH_VNODE_DELETE_CHILD, 0)) != 0))
6096 goto out;
6097
6098 /*
6099 * Clear rights that have been authorized by reaching this point, bail if nothing left to
6100 * check.
6101 */
6102 rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE);
6103 if (rights == 0)
6104 goto out;
6105
6106 /*
6107 * If we're not the superuser, authorize based on file properties;
6108 * note that even if parent_authorized_for_delete_child is TRUE, we
6109 * need to check on the node itself.
6110 */
6111 if (!vfs_context_issuser(ctx)) {
6112 /* process delete rights */
6113 if ((rights & KAUTH_VNODE_DELETE) &&
6114 ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0))
6115 goto out;
6116
6117 /* process remaining rights */
6118 if ((rights & ~KAUTH_VNODE_DELETE) &&
6119 (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, &found_deny)) != 0)
6120 goto out;
6121 } else {
6122
6123 /*
6124 * Execute is only granted to root if one of the x bits is set. This check only
6125 * makes sense if the posix mode bits are actually supported.
6126 */
6127 if ((rights & KAUTH_VNODE_EXECUTE) &&
6128 (vp->v_type == VREG) &&
6129 VATTR_IS_SUPPORTED(&va, va_mode) &&
6130 !(va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
6131 result = EPERM;
6132 KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vp, va.va_mode);
6133 goto out;
6134 }
6135
6136 KAUTH_DEBUG("%p ALLOWED - caller is superuser", vp);
6137 }
6138 out:
6139 if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
6140 kauth_acl_free(va.va_acl);
6141 if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL))
6142 kauth_acl_free(dva.va_acl);
6143
6144 if (result) {
6145 if (parent_ref)
6146 vnode_put(vp);
6147 *errorp = result;
6148 KAUTH_DEBUG("%p DENIED - auth denied", vp);
6149 return(KAUTH_RESULT_DENY);
6150 }
6151 if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
6152 /*
6153 * if we were successfully granted the right to search this directory
6154 * and there were NO ACL DENYs for search and the posix permissions also don't
6155 * deny execute, we can synthesize a global right that allows anyone to
6156 * traverse this directory during a pathname lookup without having to
6157 * match the credential associated with this cache of rights.
6158 */
6159 if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
6160 ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
6161 (S_IXUSR | S_IXGRP | S_IXOTH))) {
6162 vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
6163 }
6164 }
6165 if ((rights & KAUTH_VNODE_DELETE) && parent_authorized_for_delete_child == FALSE) {
6166 /*
6167 * parent was successfully and newly authorized for content deletions;
6168 * add it to the cache, but only if it doesn't have the sticky
6169 * bit set on it. This same check is done earlier guarding
6170 * fetching of dva, and if we jumped to out without having done
6171 * this, we will have returned already because of a non-zero
6172 * 'result' value.
6173 */
6174 if (VATTR_IS_SUPPORTED(&dva, va_mode) &&
6175 !(dva.va_mode & (S_ISVTX))) {
6176 /* OK to cache delete rights */
6177 vnode_cache_authorized_action(dvp, ctx, KAUTH_VNODE_DELETE_CHILD);
6178 }
6179 }
6180 if (parent_ref)
6181 vnode_put(vp);
6182 /*
6183 * Note that this implies that we will allow requests for no rights, as well as
6184 * for rights that we do not recognise. There should be none of these.
6185 */
6186 KAUTH_DEBUG("%p ALLOWED - auth granted", vp);
6187 return(KAUTH_RESULT_ALLOW);
6188 }
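/*
 * Illustrative sketch (not part of this file): callers do not invoke the
 * callback above directly; they go through the vnode_authorize() KPI, which
 * dispatches through the kauth vnode scope and lands here. A minimal
 * open-for-read check might look like the following; the helper name is
 * hypothetical.
 */
#if 0
static int
example_can_read(vnode_t vp, vfs_context_t ctx)
{
	/* returns 0 if the caller may read vp's data, else an errno */
	return (vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, ctx));
}
#endif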
6189
6190 /*
6191 * Check that the attribute information in vattr can be legally applied to
6192 * a new file by the context.
6193 */
6194 int
6195 vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
6196 {
6197 int error;
6198 int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode;
6199 kauth_cred_t cred;
6200 guid_t changer;
6201 mount_t dmp;
6202
6203 error = 0;
6204 defaulted_owner = defaulted_group = defaulted_mode = 0;
6205
6206 /*
6207 * Require that the filesystem support extended security to apply any.
6208 */
6209 if (!vfs_extendedsecurity(dvp->v_mount) &&
6210 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
6211 error = EINVAL;
6212 goto out;
6213 }
6214
6215 /*
6216 * Default some fields.
6217 */
6218 dmp = dvp->v_mount;
6219
6220 /*
6221 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
6222 * owner takes ownership of all new files.
6223 */
6224 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) {
6225 VATTR_SET(vap, va_uid, dmp->mnt_fsowner);
6226 defaulted_owner = 1;
6227 } else {
6228 if (!VATTR_IS_ACTIVE(vap, va_uid)) {
6229 /* default owner is current user */
6230 VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx)));
6231 defaulted_owner = 1;
6232 }
6233 }
6234
6235 /*
6236 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that
6237 * group takes ownership of all new files.
6238 */
6239 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) {
6240 VATTR_SET(vap, va_gid, dmp->mnt_fsgroup);
6241 defaulted_group = 1;
6242 } else {
6243 if (!VATTR_IS_ACTIVE(vap, va_gid)) {
6244 /* default group comes from parent object, fallback to current user */
6245 struct vnode_attr dva;
6246 VATTR_INIT(&dva);
6247 VATTR_WANTED(&dva, va_gid);
6248 if ((error = vnode_getattr(dvp, &dva, ctx)) != 0)
6249 goto out;
6250 if (VATTR_IS_SUPPORTED(&dva, va_gid)) {
6251 VATTR_SET(vap, va_gid, dva.va_gid);
6252 } else {
6253 VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx)));
6254 }
6255 defaulted_group = 1;
6256 }
6257 }
6258
6259 if (!VATTR_IS_ACTIVE(vap, va_flags))
6260 VATTR_SET(vap, va_flags, 0);
6261
6262 /* default mode is everything, masked with current umask */
6263 if (!VATTR_IS_ACTIVE(vap, va_mode)) {
6264 VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd->fd_cmask);
6265 KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o", vap->va_mode, vfs_context_proc(ctx)->p_fd->fd_cmask);
6266 defaulted_mode = 1;
6267 }
6268 /* set timestamps to now */
6269 if (!VATTR_IS_ACTIVE(vap, va_create_time)) {
6270 nanotime(&vap->va_create_time);
6271 VATTR_SET_ACTIVE(vap, va_create_time);
6272 }
6273
6274 /*
6275 * Check for attempts to set nonsensical fields.
6276 */
6277 if (vap->va_active & ~VNODE_ATTR_NEWOBJ) {
6278 error = EINVAL;
6279 KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
6280 vap->va_active & ~VNODE_ATTR_NEWOBJ);
6281 goto out;
6282 }
6283
6284 /*
6285 * Quickly check for the applicability of any enforcement here.
6286 * Tests below maintain the integrity of the local security model.
6287 */
6288 if (vfs_authopaque(dvp->v_mount))
6289 goto out;
6290
6291 /*
6292 * We need to know if the caller is the superuser, or if the work is
6293 * otherwise already authorised.
6294 */
6295 cred = vfs_context_ucred(ctx);
6296 if (noauth) {
6297 /* doing work for the kernel */
6298 has_priv_suser = 1;
6299 } else {
6300 has_priv_suser = vfs_context_issuser(ctx);
6301 }
6302
6303
6304 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6305 if (has_priv_suser) {
6306 if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) {
6307 error = EPERM;
6308 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
6309 goto out;
6310 }
6311 } else {
6312 if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) {
6313 error = EPERM;
6314 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
6315 goto out;
6316 }
6317 }
6318 }
6319
6320 /* if not superuser, validate legality of new-item attributes */
6321 if (!has_priv_suser) {
6322 if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) {
6323 /* setgid? */
6324 if (vap->va_mode & S_ISGID) {
6325 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6326 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
6327 goto out;
6328 }
6329 if (!ismember) {
6330 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", vap->va_gid);
6331 error = EPERM;
6332 goto out;
6333 }
6334 }
6335
6336 /* setuid? */
6337 if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) {
6338 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
6339 error = EPERM;
6340 goto out;
6341 }
6342 }
6343 if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) {
6344 KAUTH_DEBUG(" DENIED - cannot create new item owned by %d", vap->va_uid);
6345 error = EPERM;
6346 goto out;
6347 }
6348 if (!defaulted_group) {
6349 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6350 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
6351 goto out;
6352 }
6353 if (!ismember) {
6354 KAUTH_DEBUG(" DENIED - cannot create new item with group %d - not a member", vap->va_gid);
6355 error = EPERM;
6356 goto out;
6357 }
6358 }
6359
6360 /* initialising owner/group UUID */
6361 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
6362 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
6363 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
6364 /* XXX ENOENT here - no GUID - should perhaps become EPERM */
6365 goto out;
6366 }
6367 if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
6368 KAUTH_DEBUG(" ERROR - cannot create item with supplied owner UUID - not us");
6369 error = EPERM;
6370 goto out;
6371 }
6372 }
6373 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
6374 if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
6375 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
6376 goto out;
6377 }
6378 if (!ismember) {
6379 KAUTH_DEBUG(" ERROR - cannot create item with supplied group UUID - not a member");
6380 error = EPERM;
6381 goto out;
6382 }
6383 }
6384 }
6385 out:
6386 return(error);
6387 }
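/*
 * Illustrative sketch (hypothetical helper, not from this file): a create
 * path typically initializes a vnode_attr with the caller-requested fields
 * and lets vnode_authattr_new() default and validate the rest before
 * calling down to the filesystem:
 */
#if 0
static int
example_new_file_attrs(vnode_t dvp, struct vnode_attr *vap, vfs_context_t ctx)
{
	VATTR_INIT(vap);
	VATTR_SET(vap, va_type, VREG);
	VATTR_SET(vap, va_mode, 0644);
	/* defaults owner/group/timestamps and checks legality for this context */
	return (vnode_authattr_new(dvp, vap, 0 /* noauth */, ctx));
}
#endif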
6388
6389 /*
6390 * Check that the attribute information in vap can be legally written by the
6391 * context.
6392 *
6393 * Call this when you're not sure about the vnode_attr; either its contents
6394 * have come from an unknown source, or when they are variable.
6395 *
6396 * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
6397 * must be authorized to be permitted to write the vattr.
6398 */
6399 int
6400 vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
6401 {
6402 struct vnode_attr ova;
6403 kauth_action_t required_action;
6404 int error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid;
6405 guid_t changer;
6406 gid_t group;
6407 uid_t owner;
6408 mode_t newmode;
6409 kauth_cred_t cred;
6410 uint32_t fdelta;
6411
6412 VATTR_INIT(&ova);
6413 required_action = 0;
6414 error = 0;
6415
6416 /*
6417 * Quickly check for enforcement applicability.
6418 */
6419 if (vfs_authopaque(vp->v_mount))
6420 goto out;
6421
6422 /*
6423 * Check for attempts to set nonsensical fields.
6424 */
6425 if (vap->va_active & VNODE_ATTR_RDONLY) {
6426 KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
6427 error = EINVAL;
6428 goto out;
6429 }
6430
6431 /*
6432 * We need to know if the caller is the superuser.
6433 */
6434 cred = vfs_context_ucred(ctx);
6435 has_priv_suser = kauth_cred_issuser(cred);
6436
6437 /*
6438 * If any of the following are changing, we need information from the old file:
6439 * va_uid
6440 * va_gid
6441 * va_mode
6442 * va_uuuid
6443 * va_guuid
6444 */
6445 if (VATTR_IS_ACTIVE(vap, va_uid) ||
6446 VATTR_IS_ACTIVE(vap, va_gid) ||
6447 VATTR_IS_ACTIVE(vap, va_mode) ||
6448 VATTR_IS_ACTIVE(vap, va_uuuid) ||
6449 VATTR_IS_ACTIVE(vap, va_guuid)) {
6450 VATTR_WANTED(&ova, va_mode);
6451 VATTR_WANTED(&ova, va_uid);
6452 VATTR_WANTED(&ova, va_gid);
6453 VATTR_WANTED(&ova, va_uuuid);
6454 VATTR_WANTED(&ova, va_guuid);
6455 KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
6456 }
6457
6458 /*
6459 * If timestamps are being changed, we need to know who the file is owned
6460 * by.
6461 */
6462 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
6463 VATTR_IS_ACTIVE(vap, va_change_time) ||
6464 VATTR_IS_ACTIVE(vap, va_modify_time) ||
6465 VATTR_IS_ACTIVE(vap, va_access_time) ||
6466 VATTR_IS_ACTIVE(vap, va_backup_time)) {
6467
6468 VATTR_WANTED(&ova, va_uid);
6469 #if 0 /* enable this when we support UUIDs as official owners */
6470 VATTR_WANTED(&ova, va_uuuid);
6471 #endif
6472 KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
6473 }
6474
6475 /*
6476 * If flags are being changed, we need the old flags.
6477 */
6478 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6479 KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
6480 VATTR_WANTED(&ova, va_flags);
6481 }
6482
6483 /*
6484 * If the size is being set, make sure it's not a directory.
6485 */
6486 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
6487 /* size is meaningless on a directory, don't permit this */
6488 if (vnode_isdir(vp)) {
6489 KAUTH_DEBUG("ATTR - ERROR: size change requested on a directory");
6490 error = EISDIR;
6491 goto out;
6492 }
6493 }
6494
6495 /*
6496 * Get old data.
6497 */
6498 KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active);
6499 if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
6500 KAUTH_DEBUG(" ERROR - got %d trying to get attributes", error);
6501 goto out;
6502 }
6503
6504 /*
6505 * Size changes require write access to the file data.
6506 */
6507 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
6508 /* if we can't get the size, or it's different, we need write access */
6509 KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
6510 required_action |= KAUTH_VNODE_WRITE_DATA;
6511 }
6512
6513 /*
6514 * Changing timestamps?
6515 *
6516 * Note that we are only called to authorize user-requested time changes;
6517 * side-effect time changes are not authorized. Authorisation is only
6518 * required for existing files.
6519 *
6520 * Non-owners are not permitted to change the time on an existing
6521 * file to anything other than the current time.
6522 */
6523 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
6524 VATTR_IS_ACTIVE(vap, va_change_time) ||
6525 VATTR_IS_ACTIVE(vap, va_modify_time) ||
6526 VATTR_IS_ACTIVE(vap, va_access_time) ||
6527 VATTR_IS_ACTIVE(vap, va_backup_time)) {
6528 /*
6529 * The owner and root may set any timestamps they like,
6530 * provided that the file is not immutable. The owner still needs
6531 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
6532 */
6533 if (has_priv_suser || vauth_node_owner(&ova, cred)) {
6534 KAUTH_DEBUG("ATTR - root or owner changing timestamps");
6535 required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES;
6536 } else {
6537 /* just setting the current time? */
6538 if (vap->va_vaflags & VA_UTIMES_NULL) {
6539 KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
6540 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
6541 } else {
6542 KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
6543 error = EACCES;
6544 goto out;
6545 }
6546 }
6547 }
6548
6549 /*
6550 * Changing file mode?
6551 */
6552 if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) {
6553 KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode);
6554
6555 /*
6556 * Mode changes always have the same basic auth requirements.
6557 */
6558 if (has_priv_suser) {
6559 KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
6560 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
6561 } else {
6562 /* need WRITE_SECURITY */
6563 KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
6564 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6565 }
6566
6567 /*
6568 * Can't set the setgid bit if you're not in the group and not root. Have to have
6569 * existing group information in the case we're not setting it right now.
6570 */
6571 if (vap->va_mode & S_ISGID) {
6572 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
6573 if (!has_priv_suser) {
6574 if (VATTR_IS_ACTIVE(vap, va_gid)) {
6575 group = vap->va_gid;
6576 } else if (VATTR_IS_SUPPORTED(&ova, va_gid)) {
6577 group = ova.va_gid;
6578 } else {
6579 KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
6580 error = EINVAL;
6581 goto out;
6582 }
6583 /*
6584 * This might be too restrictive; WRITE_SECURITY might be implied by
6585 * membership in this case, rather than being an additional requirement.
6586 */
6587 if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) {
6588 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
6589 goto out;
6590 }
6591 if (!ismember) {
6592 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", group);
6593 error = EPERM;
6594 goto out;
6595 }
6596 }
6597 }
6598
6599 /*
6600 * Can't set the setuid bit unless you're root or the file's owner.
6601 */
6602 if (vap->va_mode & S_ISUID) {
6603 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
6604 if (!has_priv_suser) {
6605 if (VATTR_IS_ACTIVE(vap, va_uid)) {
6606 owner = vap->va_uid;
6607 } else if (VATTR_IS_SUPPORTED(&ova, va_uid)) {
6608 owner = ova.va_uid;
6609 } else {
6610 KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
6611 error = EINVAL;
6612 goto out;
6613 }
6614 if (owner != kauth_cred_getuid(cred)) {
6615 /*
6616 * We could allow this if WRITE_SECURITY is permitted, perhaps.
6617 */
6618 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
6619 error = EPERM;
6620 goto out;
6621 }
6622 }
6623 }
6624 }
6625
6626 /*
6627 * Validate/mask flags changes. This checks that only the flags in
6628 * the UF_SETTABLE mask are being set, and preserves the flags in
6629 * the SF_SETTABLE case.
6630 *
6631 * Since flags changes may be made in conjunction with other changes,
6632 * we will ask the auth code to ignore immutability in the case that
6633 * the SF_* flags are not set and we are only manipulating the file flags.
6634 *
6635 */
6636 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6637 /* compute changing flags bits */
6638 if (VATTR_IS_SUPPORTED(&ova, va_flags)) {
6639 fdelta = vap->va_flags ^ ova.va_flags;
6640 } else {
6641 fdelta = vap->va_flags;
6642 }
6643
6644 if (fdelta != 0) {
6645 KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
6646 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6647
6648 /* check that changing bits are legal */
6649 if (has_priv_suser) {
6650 /*
6651 * The immutability check will prevent us from clearing the SF_*
6652 * flags unless the system securelevel permits it, so just check
6653 * for legal flags here.
6654 */
6655 if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) {
6656 error = EPERM;
6657 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
6658 goto out;
6659 }
6660 } else {
6661 if (fdelta & ~UF_SETTABLE) {
6662 error = EPERM;
6663 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
6664 goto out;
6665 }
6666 }
6667 /*
6668 * If the caller has the ability to manipulate file flags,
6669 * security is not reduced by ignoring them for this operation.
6670 *
6671 * A more complete test here would consider the 'after' states of the flags
6672 * to determine whether it would permit the operation, but this becomes
6673 * very complex.
6674 *
6675 * Ignoring immutability is conditional on securelevel; this does not bypass
6676 * the SF_* flags if securelevel > 0.
6677 */
6678 required_action |= KAUTH_VNODE_NOIMMUTABLE;
6679 }
6680 }
6681
6682 /*
6683 * Validate ownership information.
6684 */
6685 chowner = 0;
6686 chgroup = 0;
6687 clear_suid = 0;
6688 clear_sgid = 0;
6689
6690 /*
6691 * uid changing
6692 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
6693 * support them in general, and will ignore it if/when we try to set it.
6694 * We might want to clear the uid out of vap completely here.
6695 */
6696 if (VATTR_IS_ACTIVE(vap, va_uid)) {
6697 if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) {
6698 if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) {
6699 KAUTH_DEBUG(" DENIED - non-superuser cannot change ownership to a third party");
6700 error = EPERM;
6701 goto out;
6702 }
6703 chowner = 1;
6704 }
6705 clear_suid = 1;
6706 }
6707
6708 /*
6709 * gid changing
6710 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
6711 * support them in general, and will ignore it if/when we try to set it.
6712 * We might want to clear the gid out of vap completely here.
6713 */
6714 if (VATTR_IS_ACTIVE(vap, va_gid)) {
6715 if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) {
6716 if (!has_priv_suser) {
6717 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6718 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
6719 goto out;
6720 }
6721 if (!ismember) {
6722 KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group",
6723 ova.va_gid, vap->va_gid);
6724 error = EPERM;
6725 goto out;
6726 }
6727 }
6728 chgroup = 1;
6729 }
6730 clear_sgid = 1;
6731 }
6732
6733 /*
6734 * Owner UUID being set or changed.
6735 */
6736 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
6737 /* if the owner UUID is not actually changing ... */
6738 if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
6739 if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid))
6740 goto no_uuuid_change;
6741
6742 /*
6743 * If the current owner UUID is a null GUID, check
6744 * it against the UUID corresponding to the owner UID.
6745 */
6746 if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
6747 VATTR_IS_SUPPORTED(&ova, va_uid)) {
6748 guid_t uid_guid;
6749
6750 if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
6751 kauth_guid_equal(&vap->va_uuuid, &uid_guid))
6752 goto no_uuuid_change;
6753 }
6754 }
6755
6756 /*
6757 * The owner UUID cannot be set by a non-superuser to anything other than
6758 * their own or a null GUID (to "unset" the owner UUID).
6759 * Note that file systems must be prepared to handle the
6760 * null UUID case in a manner appropriate for that file
6761 * system.
6762 */
6763 if (!has_priv_suser) {
6764 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
6765 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
6766 /* XXX ENOENT here - no UUID - should perhaps become EPERM */
6767 goto out;
6768 }
6769 if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
6770 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
6771 KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us / null");
6772 error = EPERM;
6773 goto out;
6774 }
6775 }
6776 chowner = 1;
6777 clear_suid = 1;
6778 }
6779 no_uuuid_change:
6780 /*
6781 * Group UUID being set or changed.
6782 */
6783 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
6784 /* if the group UUID is not actually changing ... */
6785 if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
6786 if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid))
6787 goto no_guuid_change;
6788
6789 /*
6790 * If the current group UUID is a null UUID, check
6791 * it against the UUID corresponding to the group GID.
6792 */
6793 if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
6794 VATTR_IS_SUPPORTED(&ova, va_gid)) {
6795 guid_t gid_guid;
6796
6797 if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
6798 kauth_guid_equal(&vap->va_guuid, &gid_guid))
6799 goto no_guuid_change;
6800 }
6801 }
6802
6803 /*
6804 * The group UUID cannot be set by a non-superuser to anything other than
6805 * one of which they are a member or a null GUID (to "unset"
6806 * the group UUID).
6807 * Note that file systems must be prepared to handle the
6808 * null UUID case in a manner appropriate for that file
6809 * system.
6810 */
6811 if (!has_priv_suser) {
6812 if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid))
6813 ismember = 1;
6814 else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
6815 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
6816 goto out;
6817 }
6818 if (!ismember) {
6819 KAUTH_DEBUG(" ERROR - cannot set supplied group UUID - not a member / null");
6820 error = EPERM;
6821 goto out;
6822 }
6823 }
6824 chgroup = 1;
6825 }
6826 no_guuid_change:
6827
6828 /*
6829 * Compute authorisation for group/ownership changes.
6830 */
6831 if (chowner || chgroup || clear_suid || clear_sgid) {
6832 if (has_priv_suser) {
6833 KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
6834 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
6835 } else {
6836 if (chowner) {
6837 KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
6838 required_action |= KAUTH_VNODE_TAKE_OWNERSHIP;
6839 }
6840 if (chgroup && !chowner) {
6841 KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
6842 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6843 }
6844
6845 /* clear set-uid and set-gid bits as required by Posix */
6846 if (VATTR_IS_ACTIVE(vap, va_mode)) {
6847 newmode = vap->va_mode;
6848 } else if (VATTR_IS_SUPPORTED(&ova, va_mode)) {
6849 newmode = ova.va_mode;
6850 } else {
6851 KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
6852 newmode = 0;
6853 }
6854 if (newmode & (S_ISUID | S_ISGID)) {
6855 VATTR_SET(vap, va_mode, newmode & ~(S_ISUID | S_ISGID));
6856 KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o", newmode, vap->va_mode);
6857 }
6858 }
6859 }
6860
6861 /*
6862 * Authorise changes in the ACL.
6863 */
6864 if (VATTR_IS_ACTIVE(vap, va_acl)) {
6865
6866 /* no existing ACL */
6867 if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) {
6868
6869 /* adding an ACL */
6870 if (vap->va_acl != NULL) {
6871 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6872 KAUTH_DEBUG("CHMOD - adding ACL");
6873 }
6874
6875 /* removing an existing ACL */
6876 } else if (vap->va_acl == NULL) {
6877 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6878 KAUTH_DEBUG("CHMOD - removing ACL");
6879
6880 /* updating an existing ACL */
6881 } else {
6882 if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) {
6883 /* entry count changed, must be different */
6884 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6885 KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
6886 } else if (vap->va_acl->acl_entrycount > 0) {
6887 /* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
6888 if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0],
6889 sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount) != 0) {
6890 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6891 KAUTH_DEBUG("CHMOD - changing ACL entries");
6892 }
6893 }
6894 }
6895 }
6896
6897 /*
6898 * Other attributes that require authorisation.
6899 */
6900 if (VATTR_IS_ACTIVE(vap, va_encoding))
6901 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
6902
6903 out:
6904 if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL))
6905 kauth_acl_free(ova.va_acl);
6906 if (error == 0)
6907 *actionp = required_action;
6908 return(error);
6909 }
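/*
 * Illustrative sketch (hypothetical helper, not from this file): setattr
 * paths pair this routine with vnode_authorize(); vnode_authattr() computes
 * the rights the requested change needs, and vnode_authorize() tests them
 * before the attributes are actually written:
 */
#if 0
static int
example_chmod(vnode_t vp, mode_t mode, vfs_context_t ctx)
{
	struct vnode_attr va;
	kauth_action_t action;
	int error;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, mode & ALLPERMS);
	if ((error = vnode_authattr(vp, &va, &action, ctx)) != 0)
		return (error);
	if (action && ((error = vnode_authorize(vp, NULL, action, ctx)) != 0))
		return (error);
	return (vnode_setattr(vp, &va, ctx));
}
#endif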
6910
6911
6912 void
6913 vfs_setlocklocal(mount_t mp)
6914 {
6915 vnode_t vp;
6916
6917 mount_lock(mp);
6918 mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
6919
6920 /*
6921 * We do not expect anyone to be using any vnodes at the
6922 * time this routine is called, so there is no need for vnode locking.
6923 */
6924 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
6925 vp->v_flag |= VLOCKLOCAL;
6926 }
6927 TAILQ_FOREACH(vp, &mp->mnt_workerqueue, v_mntvnodes) {
6928 vp->v_flag |= VLOCKLOCAL;
6929 }
6930 TAILQ_FOREACH(vp, &mp->mnt_newvnodes, v_mntvnodes) {
6931 vp->v_flag |= VLOCKLOCAL;
6932 }
6933 mount_unlock(mp);
6934 }
6935
6936 void
6937 vfs_setunmountpreflight(mount_t mp)
6938 {
6939 mount_lock_spin(mp);
6940 mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
6941 mount_unlock(mp);
6942 }
6943
6944 void
6945 vn_setunionwait(vnode_t vp)
6946 {
6947 vnode_lock_spin(vp);
6948 vp->v_flag |= VISUNION;
6949 vnode_unlock(vp);
6950 }
6951
6952
6953 void
6954 vn_checkunionwait(vnode_t vp)
6955 {
6956 vnode_lock_spin(vp);
6957 while ((vp->v_flag & VISUNION) == VISUNION)
6958 msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
6959 vnode_unlock(vp);
6960 }
6961
6962 void
6963 vn_clearunionwait(vnode_t vp, int locked)
6964 {
6965 if (!locked)
6966 vnode_lock_spin(vp);
6967 if ((vp->v_flag & VISUNION) == VISUNION) {
6968 vp->v_flag &= ~VISUNION;
6969 wakeup((caddr_t)&vp->v_flag);
6970 }
6971 if (!locked)
6972 vnode_unlock(vp);
6973 }
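/*
 * Illustrative sketch (hypothetical sequence, not from this file): the three
 * routines above form a simple sleep/wakeup protocol around the VISUNION
 * flag; one thread marks the vnode busy and later clears the flag, while any
 * other thread that calls vn_checkunionwait() in between sleeps until the
 * wakeup:
 */
#if 0
static void
example_union_section(vnode_t vp)
{
	vn_setunionwait(vp);		/* mark: union work in progress */
	/* ... perform the union mount work on vp here ... */
	vn_clearunionwait(vp, 0);	/* clear the flag and wake any waiters */
}
#endif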
6974
6975 /*
6976 * XXX - get "don't trigger mounts" flag for thread; used by autofs.
6977 */
6978 extern int thread_notrigger(void);
6979
6980 int
6981 thread_notrigger(void)
6982 {
6983 struct uthread *uth = (struct uthread *)get_bsdthread_info(current_thread());
6984 return (uth->uu_notrigger);
6985 }
6986
6987 /*
6988 * Removes orphaned apple double files during a rmdir
6989 * Works by:
6990 * 1. vnode_suspend().
6991 * 2. Call VNOP_READDIR() till the end of directory is reached.
6992 * 3. Check if the directory entries returned are regular files with name starting with "._". If not, return ENOTEMPTY.
6993 * 4. Continue (2) and (3) till end of directory is reached.
6994 * 5. If all the entries in the directory were files with "._" name, delete all the files.
6995 * 6. vnode_resume()
6996 * 7. If deletion of all files succeeded, call VNOP_RMDIR() again.
6997 */
6998
6999 errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int *restart_flag)
7000 {
7001
7002 #define UIO_BUFF_SIZE 2048
7003 uio_t auio = NULL;
7004 int eofflag, siz = UIO_BUFF_SIZE, nentries = 0;
7005 int open_flag = 0, full_erase_flag = 0;
7006 char uio_buf[ UIO_SIZEOF(1) ];
7007 char *rbuf = NULL, *cpos, *cend;
7008 struct nameidata nd_temp;
7009 struct dirent *dp;
7010 errno_t error;
7011
7012 error = vnode_suspend(vp);
7013
7014 /*
7015 * restart_flag is set so that the calling rmdir can sleep and retry
7016 */
7017 if (error == EBUSY)
7018 *restart_flag = 1;
7019 if (error != 0)
7020 goto outsc;
7021
7022 /*
7023 * set up UIO
7024 */
7025 MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK);
7026 if (rbuf)
7027 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
7028 &uio_buf[0], sizeof(uio_buf));
7029 if (!rbuf || !auio) {
7030 error = ENOMEM;
7031 goto outsc;
7032 }
7033
7034 uio_setoffset(auio, 0);
7035
7036 eofflag = 0;
7037
7038 if ((error = VNOP_OPEN(vp, FREAD, ctx)))
7039 goto outsc;
7040 else
7041 open_flag = 1;
7042
7043 /*
7044 * First pass checks if all files are appleDouble files.
7045 */
7046
7047 do {
7048 siz = UIO_BUFF_SIZE;
7049 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
7050 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
7051
7052 if ((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx)))
7053 goto outsc;
7054
7055 if (uio_resid(auio) != 0)
7056 siz -= uio_resid(auio);
7057
7058 /*
7059 * Iterate through directory
7060 */
7061 cpos = rbuf;
7062 cend = rbuf + siz;
7063 dp = (struct dirent*) cpos;
7064
7065 if (cpos == cend)
7066 eofflag = 1;
7067
7068 while ((cpos < cend)) {
7069 /*
7070 * Check for . and .. as well as directories
7071 */
7072 if (dp->d_ino != 0 &&
7073 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
7074 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) {
7075 /*
7076 * Check for irregular files and ._ files
7077 * If there is a ._._ file, abort the op
7078 */
7079 if (dp->d_namlen < 2 ||
7080 strncmp(dp->d_name, "._", 2) ||
7081 (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._", 2))) {
7082 error = ENOTEMPTY;
7083 goto outsc;
7084 }
7085 }
7086 cpos += dp->d_reclen;
7087 dp = (struct dirent*)cpos;
7088 }
7089
7090 /*
7091 * workaround for HFS/NFS setting eofflag before end of file
7092 */
7093 if (vp->v_tag == VT_HFS && nentries > 2)
7094 eofflag = 0;
7095
7096 if (vp->v_tag == VT_NFS) {
7097 if (eofflag && !full_erase_flag) {
7098 full_erase_flag = 1;
7099 eofflag = 0;
7100 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
7101 }
7102 else if (!eofflag && full_erase_flag)
7103 full_erase_flag = 0;
7104 }
7105
7106 } while (!eofflag);
7107 /*
7108 * If we've made it here all the files in the dir are ._ files.
7109 * We can delete the files even though the node is suspended
7110 * because we are the owner of the file.
7111 */
7112
7113 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
7114 eofflag = 0;
7115 full_erase_flag = 0;
7116
7117 do {
7118 siz = UIO_BUFF_SIZE;
7119 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
7120 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
7121
7122 error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);
7123
7124 if (error != 0)
7125 goto outsc;
7126
7127 if (uio_resid(auio) != 0)
7128 siz -= uio_resid(auio);
7129
7130 /*
7131 * Iterate through directory
7132 */
7133 cpos = rbuf;
7134 cend = rbuf + siz;
7135 dp = (struct dirent*) cpos;
7136
7137 if (cpos == cend)
7138 eofflag = 1;
7139
7140 while ((cpos < cend)) {
7141 /*
7142 * Check for . and .. as well as directories
7143 */
7144 if (dp->d_ino != 0 &&
7145 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
7146 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
7147 ) {
7148
7149 NDINIT(&nd_temp, DELETE, USEDVP, UIO_SYSSPACE, CAST_USER_ADDR_T(dp->d_name), ctx);
7150 nd_temp.ni_dvp = vp;
7151 error = unlink1(ctx, &nd_temp, 0);
7152 if (error && error != ENOENT) {
7153 goto outsc;
7154 }
7155 }
7156 cpos += dp->d_reclen;
7157 dp = (struct dirent*)cpos;
7158 }
7159
7160 /*
7161 * workaround for HFS/NFS setting eofflag before end of file
7162 */
7163 if (vp->v_tag == VT_HFS && nentries > 2)
7164 eofflag = 0;
7165
7166 if (vp->v_tag == VT_NFS) {
7167 if (eofflag && !full_erase_flag) {
7168 full_erase_flag = 1;
7169 eofflag = 0;
7170 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
7171 }
7172 else if (!eofflag && full_erase_flag)
7173 full_erase_flag = 0;
7174 }
7175
7176 } while (!eofflag);
7177
7178
7179 error = 0;
7180
7181 outsc:
7182 if (open_flag)
7183 VNOP_CLOSE(vp, FREAD, ctx);
7184
7185 uio_free(auio);
7186 FREE(rbuf, M_TEMP);
7187
7188 vnode_resume(vp);
7189
7190
7191 return(error);
7192
7193 }
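/*
 * Illustrative sketch (hypothetical caller, not from this file): the rmdir
 * path would call this helper after VNOP_RMDIR() fails with ENOTEMPTY; if
 * *restart_flag comes back set, the directory was busy and the caller is
 * expected to back off briefly and retry the rmdir:
 */
#if 0
static errno_t
example_rmdir_retry(vnode_t vp, vfs_context_t ctx)
{
	int restart = 0;
	errno_t error;

	error = rmdir_remove_orphaned_appleDouble(vp, ctx, &restart);
	if (restart)
		return (EBUSY);		/* caller sleeps and restarts the rmdir */
	return (error);
}
#endif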
7194
7195
7196 void
7197 lock_vnode_and_post(vnode_t vp, int kevent_num)
7198 {
7199 /* Only take the lock if there's something there! */
7200 if (vp->v_knotes.slh_first != NULL) {
7201 vnode_lock(vp);
7202 KNOTE(&vp->v_knotes, kevent_num);
7203 vnode_unlock(vp);
7204 }
7205 }
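/*
 * Illustrative sketch (hypothetical call site, not from this file): callers
 * pass one of the NOTE_* vnode events from <sys/event.h>, e.g. to tell
 * kqueue EVFILT_VNODE watchers that a file's data changed:
 */
#if 0
static void
example_post_write_event(vnode_t vp)
{
	lock_vnode_and_post(vp, NOTE_WRITE);
}
#endif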
7206
7207 #ifdef JOE_DEBUG
7208 static void record_vp(vnode_t vp, int count) {
7209 struct uthread *ut;
7210 int i;
7211
7212 if ((vp->v_flag & VSYSTEM))
7213 return;
7214
7215 ut = get_bsdthread_info(current_thread());
7216 ut->uu_iocount += count;
7217
7218 if (ut->uu_vpindex < 32) {
7219 for (i = 0; i < ut->uu_vpindex; i++) {
7220 if (ut->uu_vps[i] == vp)
7221 return;
7222 }
7223 ut->uu_vps[ut->uu_vpindex] = vp;
7224 ut->uu_vpindex++;
7225 }
7226 }
7227 #endif