1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount_internal.h>
85 #include <sys/time.h>
86 #include <sys/lock.h>
87 #include <sys/vnode.h>
88 #include <sys/vnode_internal.h>
89 #include <sys/stat.h>
90 #include <sys/namei.h>
91 #include <sys/ucred.h>
92 #include <sys/buf_internal.h>
93 #include <sys/errno.h>
94 #include <sys/malloc.h>
95 #include <sys/uio_internal.h>
96 #include <sys/uio.h>
97 #include <sys/domain.h>
98 #include <sys/mbuf.h>
99 #include <sys/syslog.h>
100 #include <sys/ubc_internal.h>
101 #include <sys/vm.h>
102 #include <sys/sysctl.h>
103 #include <sys/filedesc.h>
104 #include <sys/event.h>
105 #include <sys/kdebug.h>
106 #include <sys/kauth.h>
107 #include <sys/user.h>
108 #include <sys/kern_memorystatus.h>
109 #include <miscfs/fifofs/fifo.h>
110
111 #include <string.h>
112 #include <machine/spl.h>
113
114
115 #include <kern/assert.h>
116
117 #include <miscfs/specfs/specdev.h>
118
119 #include <mach/mach_types.h>
120 #include <mach/memory_object_types.h>
121
122 #include <kern/kalloc.h> /* kalloc()/kfree() */
123 #include <kern/clock.h> /* delay_for_interval() */
124 #include <libkern/OSAtomic.h> /* OSAddAtomic() */
125
126
127 #include <vm/vm_protos.h> /* vnode_pager_vrele() */
128
129 #if CONFIG_MACF
130 #include <security/mac_framework.h>
131 #endif
132
133 extern lck_grp_t *vnode_lck_grp;
134 extern lck_attr_t *vnode_lck_attr;
135
136
137 extern lck_mtx_t * mnt_list_mtx_lock;
138
139 enum vtype iftovt_tab[16] = {
140 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
141 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
142 };
143 int vttoif_tab[9] = {
144 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
145 S_IFSOCK, S_IFIFO, S_IFMT,
146 };
147
 148 /* XXX next prototype should be from <nfs/nfs.h> */
149 extern int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int);
150
 151 /* XXX next prototype should be from <libsa/stdlib.h> but conflicts with libkern */
152 __private_extern__ void qsort(
153 void * array,
154 size_t nmembers,
155 size_t member_size,
156 int (*)(const void *, const void *));
157
158 extern kern_return_t adjust_vm_object_cache(vm_size_t oval, vm_size_t nval);
159 __private_extern__ void vntblinit(void);
160 __private_extern__ kern_return_t reset_vmobjectcache(unsigned int val1,
161 unsigned int val2);
162 __private_extern__ int unlink1(vfs_context_t, struct nameidata *, int);
163
164 extern int system_inshutdown;
165
166 static void vnode_list_add(vnode_t);
167 static void vnode_list_remove(vnode_t);
168 static void vnode_list_remove_locked(vnode_t);
169
170 static errno_t vnode_drain(vnode_t);
171 static void vgone(vnode_t, int flags);
172 static void vclean(vnode_t vp, int flag);
173 static void vnode_reclaim_internal(vnode_t, int, int, int);
174
175 static void vnode_dropiocount (vnode_t);
176 static errno_t vnode_getiocount(vnode_t vp, unsigned int vid, int vflags);
177
178 static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
179 static int vnode_reload(vnode_t);
180 static int vnode_isinuse_locked(vnode_t, int, int);
181
182 static void insmntque(vnode_t vp, mount_t mp);
183 static int mount_getvfscnt(void);
184 static int mount_fillfsids(fsid_t *, int );
185 static void vnode_iterate_setup(mount_t);
186 int vnode_umount_preflight(mount_t, vnode_t, int);
187 static int vnode_iterate_prepare(mount_t);
188 static int vnode_iterate_reloadq(mount_t);
189 static void vnode_iterate_clear(mount_t);
190 static mount_t vfs_getvfs_locked(fsid_t *);
191
192 errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);
193
194 #ifdef JOE_DEBUG
195 static void record_vp(vnode_t vp, int count);
196 #endif
197
198 TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
199 TAILQ_HEAD(deadlst, vnode) vnode_dead_list; /* vnode dead list */
200
201 TAILQ_HEAD(ragelst, vnode) vnode_rage_list; /* vnode rapid age list */
202 struct timeval rage_tv;
203 int rage_limit = 0;
204 int ragevnodes = 0;
205
206 #define RAGE_LIMIT_MIN 100
207 #define RAGE_TIME_LIMIT 5
208
209 struct mntlist mountlist; /* mounted filesystem list */
210 static int nummounts = 0;
211
212 #if DIAGNOSTIC
213 #define VLISTCHECK(fun, vp, list) \
214 if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
215 panic("%s: %s vnode not on %slist", (fun), (list), (list));
216 #else
217 #define VLISTCHECK(fun, vp, list)
218 #endif /* DIAGNOSTIC */
219
220 #define VLISTNONE(vp) \
221 do { \
222 (vp)->v_freelist.tqe_next = (struct vnode *)0; \
223 (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb; \
224 } while(0)
225
226 #define VONLIST(vp) \
227 ((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
228
229 /* remove a vnode from free vnode list */
230 #define VREMFREE(fun, vp) \
231 do { \
232 VLISTCHECK((fun), (vp), "free"); \
233 TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist); \
234 VLISTNONE((vp)); \
235 freevnodes--; \
236 } while(0)
237
238
239
240 /* remove a vnode from dead vnode list */
241 #define VREMDEAD(fun, vp) \
242 do { \
243 VLISTCHECK((fun), (vp), "dead"); \
244 TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist); \
245 VLISTNONE((vp)); \
246 vp->v_listflag &= ~VLIST_DEAD; \
247 deadvnodes--; \
248 } while(0)
249
250
251 /* remove a vnode from rage vnode list */
252 #define VREMRAGE(fun, vp) \
253 do { \
254 if ( !(vp->v_listflag & VLIST_RAGE)) \
255 panic("VREMRAGE: vp not on rage list"); \
256 VLISTCHECK((fun), (vp), "rage"); \
257 TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist); \
258 VLISTNONE((vp)); \
259 vp->v_listflag &= ~VLIST_RAGE; \
260 ragevnodes--; \
261 } while(0)
262
263
264 /*
265 * vnodetarget hasn't been used in a long time, but
 266 * it was exported for some reason... I'm leaving it in
 267 * place for now... it should be deprecated out of the
268 * exports and removed eventually.
269 */
270 u_int32_t vnodetarget; /* target for vnreclaim() */
271 #define VNODE_FREE_TARGET 20 /* Default value for vnodetarget */
272
273 /*
 274 * We need quite a few vnodes on the free list to sustain the
 275 * rapid stat() activity the compilation process generates, and still benefit from the name
276 * cache. Having too few vnodes on the free list causes serious disk
277 * thrashing as we cycle through them.
278 */
279 #define VNODE_FREE_MIN CONFIG_VNODE_FREE_MIN /* freelist should have at least this many */
280
281 /*
282 * Initialize the vnode management data structures.
283 */
284 __private_extern__ void
285 vntblinit(void)
286 {
287 TAILQ_INIT(&vnode_free_list);
288 TAILQ_INIT(&vnode_rage_list);
289 TAILQ_INIT(&vnode_dead_list);
290 TAILQ_INIT(&mountlist);
291
292 if (!vnodetarget)
293 vnodetarget = VNODE_FREE_TARGET;
294
295 microuptime(&rage_tv);
296 rage_limit = desiredvnodes / 100;
297
298 if (rage_limit < RAGE_LIMIT_MIN)
299 rage_limit = RAGE_LIMIT_MIN;
300
301 /*
 302 * Scale the vm_object_cache to accommodate the vnodes
303 * we want to cache
304 */
305 (void) adjust_vm_object_cache(0, desiredvnodes - VNODE_FREE_MIN);
306 }
307
308 /* Reset the VM Object Cache with the values passed in */
309 __private_extern__ kern_return_t
310 reset_vmobjectcache(unsigned int val1, unsigned int val2)
311 {
312 vm_size_t oval = val1 - VNODE_FREE_MIN;
313 vm_size_t nval;
314
315 if (val1 == val2) {
316 return KERN_SUCCESS;
317 }
318
319 if(val2 < VNODE_FREE_MIN)
320 nval = 0;
321 else
322 nval = val2 - VNODE_FREE_MIN;
323
324 return(adjust_vm_object_cache(oval, nval));
325 }
326
327
328 /* the timeout is in 10 msecs */
329 int
330 vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg) {
331 int error = 0;
332 struct timespec ts;
333
334 KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);
335
336 if (vp->v_numoutput > output_target) {
337
338 slpflag |= PDROP;
339
340 vnode_lock_spin(vp);
341
342 while ((vp->v_numoutput > output_target) && error == 0) {
343 if (output_target)
344 vp->v_flag |= VTHROTTLED;
345 else
346 vp->v_flag |= VBWAIT;
347
348 ts.tv_sec = (slptimeout/100);
349 ts.tv_nsec = (slptimeout % 1000) * 10 * NSEC_PER_USEC * 1000 ;
350 error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);
351
352 vnode_lock_spin(vp);
353 }
354 vnode_unlock(vp);
355 }
356 KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);
357
358 return error;
359 }
360
361
362 void
363 vnode_startwrite(vnode_t vp) {
364
365 OSAddAtomic(1, &vp->v_numoutput);
366 }
367
368
369 void
370 vnode_writedone(vnode_t vp)
371 {
372 if (vp) {
373 OSAddAtomic(-1, &vp->v_numoutput);
374
375 if (vp->v_numoutput <= 1) {
376 int need_wakeup = 0;
377
378 vnode_lock_spin(vp);
379
380 if (vp->v_numoutput < 0)
381 panic("vnode_writedone: numoutput < 0");
382
383 if ((vp->v_flag & VTHROTTLED) && (vp->v_numoutput <= 1)) {
384 vp->v_flag &= ~VTHROTTLED;
385 need_wakeup = 1;
386 }
387 if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
388 vp->v_flag &= ~VBWAIT;
389 need_wakeup = 1;
390 }
391 vnode_unlock(vp);
392
393 if (need_wakeup)
394 wakeup((caddr_t)&vp->v_numoutput);
395 }
396 }
397 }
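
/*
 * Example (illustrative sketch, compiled out): how the v_numoutput
 * accounting above is typically used.  vnode_startwrite() is called
 * before an asynchronous write is issued, vnode_writedone() from the
 * completion path, and vnode_waitforwrites() to bound the number of
 * writes still in flight (slptimeout is in 10 msec units, per the
 * comment above).  The helper names below are hypothetical.
 */
#if 0
static int
example_issue_async_write(vnode_t vp, buf_t bp)
{
	/* one more write in flight on this vnode */
	vnode_startwrite(vp);

	/*
	 * queue bp here (e.g. via the device strategy routine); the
	 * completion handler must call vnode_writedone(vp) exactly once
	 */
	(void)bp;

	return (0);
}

static int
example_throttle_writes(vnode_t vp)
{
	/* wait until at most 16 writes remain, ~1 second timeout */
	return (vnode_waitforwrites(vp, 16, PCATCH, 100, "example_throttle"));
}
#endif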
398
399
400
401 int
402 vnode_hasdirtyblks(vnode_t vp)
403 {
404 struct cl_writebehind *wbp;
405
406 /*
407 * Not taking the buf_mtxp as there is little
408 * point doing it. Even if the lock is taken the
 409 * state can change right after that. If there
 410 * needs to be synchronization, it must be driven
 411 * by the caller.
412 */
413 if (vp->v_dirtyblkhd.lh_first)
414 return (1);
415
416 if (!UBCINFOEXISTS(vp))
417 return (0);
418
419 wbp = vp->v_ubcinfo->cl_wbehind;
420
421 if (wbp && (wbp->cl_number || wbp->cl_scmap))
422 return (1);
423
424 return (0);
425 }
426
427 int
428 vnode_hascleanblks(vnode_t vp)
429 {
430 /*
431 * Not taking the buf_mtxp as there is little
432 * point doing it. Even if the lock is taken the
 433 * state can change right after that. If there
 434 * needs to be synchronization, it must be driven
 435 * by the caller.
436 */
437 if (vp->v_cleanblkhd.lh_first)
438 return (1);
439 return (0);
440 }
441
442 void
443 vnode_iterate_setup(mount_t mp)
444 {
445 while (mp->mnt_lflag & MNT_LITER) {
446 mp->mnt_lflag |= MNT_LITERWAIT;
447 msleep((caddr_t)mp, &mp->mnt_mlock, PVFS, "vnode_iterate_setup", NULL);
448 }
449
450 mp->mnt_lflag |= MNT_LITER;
451
452 }
453
454 int
455 vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
456 {
457 vnode_t vp;
458
459 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
460 /* disable preflight only for udf, a hack to be removed after 4073176 is fixed */
461 if (vp->v_tag == VT_UDF)
462 return 0;
463 if (vp->v_type == VDIR)
464 continue;
465 if (vp == skipvp)
466 continue;
467 if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
468 (vp->v_flag & VNOFLUSH)))
469 continue;
470 if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP))
471 continue;
472 if ((flags & WRITECLOSE) &&
473 (vp->v_writecount == 0 || vp->v_type != VREG))
474 continue;
475 /* Look for busy vnode */
476 if (((vp->v_usecount != 0) &&
477 ((vp->v_usecount - vp->v_kusecount) != 0)))
478 return(1);
479 }
480
481 return(0);
482 }
483
484 /*
 485 * This routine prepares iteration by moving all the vnodes to the worker queue.
 486 * Called with the mount lock held.
487 */
488 int
489 vnode_iterate_prepare(mount_t mp)
490 {
491 vnode_t vp;
492
493 if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
494 /* nothing to do */
495 return (0);
496 }
497
498 vp = TAILQ_FIRST(&mp->mnt_vnodelist);
499 vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
500 mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
501 mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;
502
503 TAILQ_INIT(&mp->mnt_vnodelist);
504 if (mp->mnt_newvnodes.tqh_first != NULL)
505 panic("vnode_iterate_prepare: newvnode when entering vnode");
506 TAILQ_INIT(&mp->mnt_newvnodes);
507
508 return (1);
509 }
510
511
512 /* called with mount lock held */
513 int
514 vnode_iterate_reloadq(mount_t mp)
515 {
516 int moved = 0;
517
518 /* add the remaining entries in workerq to the end of mount vnode list */
519 if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
520 struct vnode * mvp;
521 mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);
522
 523 /* Join the workerqueue entries to the mount vnode list */
524 if (mvp)
525 mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
526 else
527 mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
528 mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
529 mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
530 TAILQ_INIT(&mp->mnt_workerqueue);
531 }
532
533 /* add the newvnodes to the head of mount vnode list */
534 if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
535 struct vnode * nlvp;
536 nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);
537
538 mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
539 nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
540 if(mp->mnt_vnodelist.tqh_first)
541 mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
542 else
543 mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
544 mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
545 TAILQ_INIT(&mp->mnt_newvnodes);
546 moved = 1;
547 }
548
549 return(moved);
550 }
551
552
553 void
554 vnode_iterate_clear(mount_t mp)
555 {
556 mp->mnt_lflag &= ~MNT_LITER;
557 if (mp->mnt_lflag & MNT_LITERWAIT) {
558 mp->mnt_lflag &= ~MNT_LITERWAIT;
559 wakeup(mp);
560 }
561 }
562
563
564 int
565 vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
566 void *arg)
567 {
568 struct vnode *vp;
569 int vid, retval;
570 int ret = 0;
571
572 mount_lock(mp);
573
574 vnode_iterate_setup(mp);
575
 576 /* if it returns 0 then there is nothing to do */
577 retval = vnode_iterate_prepare(mp);
578
579 if (retval == 0) {
580 vnode_iterate_clear(mp);
581 mount_unlock(mp);
582 return(ret);
583 }
584
585 /* iterate over all the vnodes */
586 while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
587 vp = TAILQ_FIRST(&mp->mnt_workerqueue);
588 TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
589 TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
590 vid = vp->v_id;
591 if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
592 continue;
593 }
594 mount_unlock(mp);
595
596 if ( vget_internal(vp, vid, (flags | VNODE_NODEAD| VNODE_WITHID | VNODE_NOSUSPEND))) {
597 mount_lock(mp);
598 continue;
599 }
600 if (flags & VNODE_RELOAD) {
601 /*
602 * we're reloading the filesystem
603 * cast out any inactive vnodes...
604 */
605 if (vnode_reload(vp)) {
606 /* vnode will be recycled on the refcount drop */
607 vnode_put(vp);
608 mount_lock(mp);
609 continue;
610 }
611 }
612
613 retval = callout(vp, arg);
614
615 switch (retval) {
616 case VNODE_RETURNED:
617 case VNODE_RETURNED_DONE:
618 vnode_put(vp);
619 if (retval == VNODE_RETURNED_DONE) {
620 mount_lock(mp);
621 ret = 0;
622 goto out;
623 }
624 break;
625
626 case VNODE_CLAIMED_DONE:
627 mount_lock(mp);
628 ret = 0;
629 goto out;
630 case VNODE_CLAIMED:
631 default:
632 break;
633 }
634 mount_lock(mp);
635 }
636
637 out:
638 (void)vnode_iterate_reloadq(mp);
639 vnode_iterate_clear(mp);
640 mount_unlock(mp);
641 return (ret);
642 }
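
/*
 * Example (illustrative sketch, compiled out): a minimal callout for
 * vnode_iterate().  Returning VNODE_RETURNED (or VNODE_RETURNED_DONE)
 * lets the iterator drop the iocount it acquired on the vnode;
 * returning VNODE_CLAIMED leaves that responsibility with the callout.
 * The helper names below are hypothetical.
 */
#if 0
static int
example_count_dirty_callout(struct vnode *vp, void *arg)
{
	int *dirty = (int *)arg;

	if (vnode_hasdirtyblks(vp))
		(*dirty)++;

	return (VNODE_RETURNED);
}

static int
example_count_dirty_vnodes(mount_t mp)
{
	int dirty = 0;

	(void) vnode_iterate(mp, 0, example_count_dirty_callout, &dirty);

	return (dirty);
}
#endif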
643
644 void
645 mount_lock_renames(mount_t mp)
646 {
647 lck_mtx_lock(&mp->mnt_renamelock);
648 }
649
650 void
651 mount_unlock_renames(mount_t mp)
652 {
653 lck_mtx_unlock(&mp->mnt_renamelock);
654 }
655
656 void
657 mount_lock(mount_t mp)
658 {
659 lck_mtx_lock(&mp->mnt_mlock);
660 }
661
662 void
663 mount_lock_spin(mount_t mp)
664 {
665 lck_mtx_lock_spin(&mp->mnt_mlock);
666 }
667
668 void
669 mount_unlock(mount_t mp)
670 {
671 lck_mtx_unlock(&mp->mnt_mlock);
672 }
673
674
675 void
676 mount_ref(mount_t mp, int locked)
677 {
678 if ( !locked)
679 mount_lock_spin(mp);
680
681 mp->mnt_count++;
682
683 if ( !locked)
684 mount_unlock(mp);
685 }
686
687
688 void
689 mount_drop(mount_t mp, int locked)
690 {
691 if ( !locked)
692 mount_lock_spin(mp);
693
694 mp->mnt_count--;
695
696 if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN))
697 wakeup(&mp->mnt_lflag);
698
699 if ( !locked)
700 mount_unlock(mp);
701 }
702
703
704 int
705 mount_iterref(mount_t mp, int locked)
706 {
707 int retval = 0;
708
709 if (!locked)
710 mount_list_lock();
711 if (mp->mnt_iterref < 0) {
712 retval = 1;
713 } else {
714 mp->mnt_iterref++;
715 }
716 if (!locked)
717 mount_list_unlock();
718 return(retval);
719 }
720
721 int
722 mount_isdrained(mount_t mp, int locked)
723 {
724 int retval;
725
726 if (!locked)
727 mount_list_lock();
728 if (mp->mnt_iterref < 0)
729 retval = 1;
730 else
731 retval = 0;
732 if (!locked)
733 mount_list_unlock();
734 return(retval);
735 }
736
737 void
738 mount_iterdrop(mount_t mp)
739 {
740 mount_list_lock();
741 mp->mnt_iterref--;
742 wakeup(&mp->mnt_iterref);
743 mount_list_unlock();
744 }
745
746 void
747 mount_iterdrain(mount_t mp)
748 {
749 mount_list_lock();
750 while (mp->mnt_iterref)
751 msleep((caddr_t)&mp->mnt_iterref, mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL);
752 /* mount iterations drained */
753 mp->mnt_iterref = -1;
754 mount_list_unlock();
755 }
756 void
757 mount_iterreset(mount_t mp)
758 {
759 mount_list_lock();
760 if (mp->mnt_iterref == -1)
761 mp->mnt_iterref = 0;
762 mount_list_unlock();
763 }
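
/*
 * Example (illustrative sketch, compiled out): the iteration reference
 * protocol above.  mount_iterref() fails once mount_iterdrain() has
 * marked the mount (mnt_iterref == -1), and each successful
 * mount_iterref() must be balanced by mount_iterdrop() so a pending
 * drain can complete.  The worker name below is hypothetical.
 */
#if 0
static int
example_with_mount_iterref(mount_t mp, int (*work)(mount_t))
{
	int error;

	if (mount_iterref(mp, 0))
		return (ENOENT);	/* mount is draining or drained */

	error = work(mp);

	mount_iterdrop(mp);

	return (error);
}
#endif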
764
765 /* always called with mount lock held */
766 int
767 mount_refdrain(mount_t mp)
768 {
769 if (mp->mnt_lflag & MNT_LDRAIN)
770 panic("already in drain");
771 mp->mnt_lflag |= MNT_LDRAIN;
772
773 while (mp->mnt_count)
774 msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL);
775
776 if (mp->mnt_vnodelist.tqh_first != NULL)
777 panic("mount_refdrain: dangling vnode");
778
779 mp->mnt_lflag &= ~MNT_LDRAIN;
780
781 return(0);
782 }
783
784
785 /*
786 * Mark a mount point as busy. Used to synchronize access and to delay
787 * unmounting.
788 */
789 int
790 vfs_busy(mount_t mp, int flags)
791 {
792
793 restart:
794 if (mp->mnt_lflag & MNT_LDEAD)
795 return(ENOENT);
796
797 if (mp->mnt_lflag & MNT_LUNMOUNT) {
798 if (flags & LK_NOWAIT)
799 return (ENOENT);
800
801 mount_lock(mp);
802
803 if (mp->mnt_lflag & MNT_LDEAD) {
804 mount_unlock(mp);
805 return(ENOENT);
806 }
807 if (mp->mnt_lflag & MNT_LUNMOUNT) {
808 mp->mnt_lflag |= MNT_LWAIT;
809 /*
810 * Since all busy locks are shared except the exclusive
811 * lock granted when unmounting, the only place that a
812 * wakeup needs to be done is at the release of the
813 * exclusive lock at the end of dounmount.
814 */
815 msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
816 return (ENOENT);
817 }
818 mount_unlock(mp);
819 }
820
821 lck_rw_lock_shared(&mp->mnt_rwlock);
822
823 /*
824 * until we are granted the rwlock, it's possible for the mount point to
825 * change state, so reevaluate before granting the vfs_busy
826 */
827 if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
828 lck_rw_done(&mp->mnt_rwlock);
829 goto restart;
830 }
831 return (0);
832 }
833
834 /*
835 * Free a busy filesystem.
836 */
837
838 void
839 vfs_unbusy(mount_t mp)
840 {
841 lck_rw_done(&mp->mnt_rwlock);
842 }
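
/*
 * Example (illustrative sketch, compiled out): the usual
 * vfs_busy()/vfs_unbusy() bracket around work that must not race with
 * an unmount.  With LK_NOWAIT the attempt does not block; a 0 return
 * means the shared mnt_rwlock is held until vfs_unbusy().  The worker
 * name below is hypothetical.
 */
#if 0
static int
example_with_busy_mount(mount_t mp, int (*work)(mount_t))
{
	int error;

	if ((error = vfs_busy(mp, LK_NOWAIT)) != 0)
		return (error);		/* dead or being unmounted */

	error = work(mp);

	vfs_unbusy(mp);

	return (error);
}
#endif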
843
844
845
846 static void
847 vfs_rootmountfailed(mount_t mp) {
848
849 mount_list_lock();
850 mp->mnt_vtable->vfc_refcount--;
851 mount_list_unlock();
852
853 vfs_unbusy(mp);
854
855 mount_lock_destroy(mp);
856
857 #if CONFIG_MACF
858 mac_mount_label_destroy(mp);
859 #endif
860
861 FREE_ZONE(mp, sizeof(struct mount), M_MOUNT);
862 }
863
864 /*
865 * Lookup a filesystem type, and if found allocate and initialize
866 * a mount structure for it.
867 *
868 * Devname is usually updated by mount(8) after booting.
869 */
870 static mount_t
871 vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname)
872 {
873 mount_t mp;
874
875 mp = _MALLOC_ZONE(sizeof(struct mount), M_MOUNT, M_WAITOK);
876 bzero((char *)mp, sizeof(struct mount));
877
878 /* Initialize the default IO constraints */
879 mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
880 mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
881 mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
882 mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
883 mp->mnt_devblocksize = DEV_BSIZE;
884 mp->mnt_alignmentmask = PAGE_MASK;
885 mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
886 mp->mnt_ioscale = 1;
887 mp->mnt_ioflags = 0;
888 mp->mnt_realrootvp = NULLVP;
889 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
890
891 mount_lock_init(mp);
892 (void)vfs_busy(mp, LK_NOWAIT);
893
894 TAILQ_INIT(&mp->mnt_vnodelist);
895 TAILQ_INIT(&mp->mnt_workerqueue);
896 TAILQ_INIT(&mp->mnt_newvnodes);
897
898 mp->mnt_vtable = vfsp;
899 mp->mnt_op = vfsp->vfc_vfsops;
900 mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS;
901 mp->mnt_vnodecovered = NULLVP;
902 //mp->mnt_stat.f_type = vfsp->vfc_typenum;
903 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
904
905 mount_list_lock();
906 vfsp->vfc_refcount++;
907 mount_list_unlock();
908
909 strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
910 mp->mnt_vfsstat.f_mntonname[0] = '/';
911 /* XXX const poisoning layering violation */
912 (void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);
913
914 #if CONFIG_MACF
915 mac_mount_label_init(mp);
916 mac_mount_label_associate(vfs_context_kernel(), mp);
917 #endif
918 return (mp);
919 }
920
921 errno_t
922 vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp)
923 {
924 struct vfstable *vfsp;
925
926 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
927 if (!strncmp(vfsp->vfc_name, fstypename,
928 sizeof(vfsp->vfc_name)))
929 break;
930 if (vfsp == NULL)
931 return (ENODEV);
932
933 *mpp = vfs_rootmountalloc_internal(vfsp, devname);
934
935 if (*mpp)
936 return (0);
937
938 return (ENOMEM);
939 }
940
941
942 /*
943 * Find an appropriate filesystem to use for the root. If a filesystem
944 * has not been preselected, walk through the list of known filesystems
945 * trying those that have mountroot routines, and try them until one
946 * works or we have tried them all.
947 */
948 extern int (*mountroot)(void);
949
950 int
951 vfs_mountroot(void)
952 {
953 #if CONFIG_MACF
954 struct vnode *vp;
955 #endif
956 struct vfstable *vfsp;
957 vfs_context_t ctx = vfs_context_kernel();
958 struct vfs_attr vfsattr;
959 int error;
960 mount_t mp;
961 vnode_t bdevvp_rootvp;
962
963 if (mountroot != NULL) {
964 /*
965 * used for netboot which follows a different set of rules
966 */
967 error = (*mountroot)();
968 return (error);
969 }
970 if ((error = bdevvp(rootdev, &rootvp))) {
971 printf("vfs_mountroot: can't setup bdevvp\n");
972 return (error);
973 }
974 /*
975 * 4951998 - code we call in vfc_mountroot may replace rootvp
 976 * so keep a local copy for some housekeeping.
977 */
978 bdevvp_rootvp = rootvp;
979
980 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
981 if (vfsp->vfc_mountroot == NULL)
982 continue;
983
984 mp = vfs_rootmountalloc_internal(vfsp, "root_device");
985 mp->mnt_devvp = rootvp;
986
987 if ((error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx)) == 0) {
988 if ( bdevvp_rootvp != rootvp ) {
989 /*
990 * rootvp changed...
991 * bump the iocount and fix up mnt_devvp for the
992 * new rootvp (it will already have a usecount taken)...
 993 * drop the iocount and the usecount on the original
994 * since we are no longer going to use it...
995 */
996 vnode_getwithref(rootvp);
997 mp->mnt_devvp = rootvp;
998
999 vnode_rele(bdevvp_rootvp);
1000 vnode_put(bdevvp_rootvp);
1001 }
1002 mp->mnt_devvp->v_specflags |= SI_MOUNTEDON;
1003
1004 vfs_unbusy(mp);
1005
1006 mount_list_add(mp);
1007
1008 /*
1009 * cache the IO attributes for the underlying physical media...
1010 * an error return indicates the underlying driver doesn't
1011 * support all the queries necessary... however, reasonable
1012 * defaults will have been set, so no reason to bail or care
1013 */
1014 vfs_init_io_attributes(rootvp, mp);
1015
1016 /*
1017 * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
1018 */
1019 if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
1020 mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
1021 }
1022 if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) {
1023 mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
1024 }
1025
1026 /*
1027 * Probe root file system for additional features.
1028 */
1029 (void)VFS_START(mp, 0, ctx);
1030
1031 VFSATTR_INIT(&vfsattr);
1032 VFSATTR_WANTED(&vfsattr, f_capabilities);
1033 if (vfs_getattr(mp, &vfsattr, ctx) == 0 &&
1034 VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
1035 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
1036 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
1037 mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
1038 }
1039 #if NAMEDSTREAMS
1040 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) &&
1041 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) {
1042 mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
1043 }
1044 #endif
1045 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) &&
1046 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) {
1047 mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
1048 }
1049 }
1050
1051 /*
1052 * get rid of iocount reference returned
 1053 * by bdevvp (or picked up by us on the substituted
1054 * rootvp)... it (or we) will have also taken
1055 * a usecount reference which we want to keep
1056 */
1057 vnode_put(rootvp);
1058
1059 #if CONFIG_MACF
1060 if ((vfs_flags(mp) & MNT_MULTILABEL) == 0)
1061 return (0);
1062
1063 error = VFS_ROOT(mp, &vp, ctx);
1064 if (error) {
1065 printf("%s() VFS_ROOT() returned %d\n",
1066 __func__, error);
1067 dounmount(mp, MNT_FORCE, 0, ctx);
1068 goto fail;
1069 }
1070 error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
1071 /*
1072 * get rid of reference provided by VFS_ROOT
1073 */
1074 vnode_put(vp);
1075
1076 if (error) {
1077 printf("%s() vnode_label() returned %d\n",
1078 __func__, error);
1079 dounmount(mp, MNT_FORCE, 0, ctx);
1080 goto fail;
1081 }
1082 #endif
1083 return (0);
1084 }
1085 #if CONFIG_MACF
1086 fail:
1087 #endif
1088 vfs_rootmountfailed(mp);
1089
1090 if (error != EINVAL)
1091 printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
1092 }
1093 return (ENODEV);
1094 }
1095
1096 /*
1097 * Lookup a mount point by filesystem identifier.
1098 */
1099
1100 struct mount *
1101 vfs_getvfs(fsid_t *fsid)
1102 {
1103 return (mount_list_lookupby_fsid(fsid, 0, 0));
1104 }
1105
1106 static struct mount *
1107 vfs_getvfs_locked(fsid_t *fsid)
1108 {
1109 return(mount_list_lookupby_fsid(fsid, 1, 0));
1110 }
1111
1112 struct mount *
1113 vfs_getvfs_by_mntonname(char *path)
1114 {
1115 mount_t retmp = (mount_t)0;
1116 mount_t mp;
1117
1118 mount_list_lock();
1119 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1120 if (!strncmp(mp->mnt_vfsstat.f_mntonname, path,
1121 sizeof(mp->mnt_vfsstat.f_mntonname))) {
1122 retmp = mp;
1123 goto out;
1124 }
1125 }
1126 out:
1127 mount_list_unlock();
1128 return (retmp);
1129 }
1130
1131 /* generation number for creation of new fsids */
1132 u_short mntid_gen = 0;
1133 /*
1134 * Get a new unique fsid
1135 */
1136 void
1137 vfs_getnewfsid(struct mount *mp)
1138 {
1139
1140 fsid_t tfsid;
1141 int mtype;
1142 mount_t nmp;
1143
1144 mount_list_lock();
1145
1146 /* generate a new fsid */
1147 mtype = mp->mnt_vtable->vfc_typenum;
1148 if (++mntid_gen == 0)
1149 mntid_gen++;
1150 tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
1151 tfsid.val[1] = mtype;
1152
1153 TAILQ_FOREACH(nmp, &mountlist, mnt_list) {
1154 while (vfs_getvfs_locked(&tfsid)) {
1155 if (++mntid_gen == 0)
1156 mntid_gen++;
1157 tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
1158 }
1159 }
1160 mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0];
1161 mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1];
1162 mount_list_unlock();
1163 }
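
/*
 * Example (illustrative): with hypothetical values nblkdev == 26,
 * vfc_typenum == 17 and mntid_gen bumped to 3, the fsid assigned above
 * would be f_fsid.val[0] == makedev(26 + 17, 3) and
 * f_fsid.val[1] == 17; the inner loop simply keeps advancing mntid_gen
 * until the resulting (major, minor) pair is not already in use by a
 * mounted filesystem.
 */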
1164
1165 /*
1166 * Routines having to do with the management of the vnode table.
1167 */
1168 extern int (**dead_vnodeop_p)(void *);
1169 long numvnodes, freevnodes, deadvnodes;
1170
1171
1172 /*
1173 * Move a vnode from one mount queue to another.
1174 */
1175 static void
1176 insmntque(vnode_t vp, mount_t mp)
1177 {
1178 mount_t lmp;
1179 /*
1180 * Delete from old mount point vnode list, if on one.
1181 */
1182 if ( (lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
1183 if ((vp->v_lflag & VNAMED_MOUNT) == 0)
1184 panic("insmntque: vp not in mount vnode list");
1185 vp->v_lflag &= ~VNAMED_MOUNT;
1186
1187 mount_lock_spin(lmp);
1188
1189 mount_drop(lmp, 1);
1190
1191 if (vp->v_mntvnodes.tqe_next == NULL) {
1192 if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp)
1193 TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
1194 else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp)
1195 TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
1196 else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp)
1197 TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
1198 } else {
1199 vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
1200 *vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
1201 }
1202 vp->v_mntvnodes.tqe_next = NULL;
1203 vp->v_mntvnodes.tqe_prev = NULL;
1204 mount_unlock(lmp);
1205 return;
1206 }
1207
1208 /*
1209 * Insert into list of vnodes for the new mount point, if available.
1210 */
1211 if ((vp->v_mount = mp) != NULL) {
1212 mount_lock_spin(mp);
1213 if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0))
1214 panic("vp already in mount list");
1215 if (mp->mnt_lflag & MNT_LITER)
1216 TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
1217 else
1218 TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
1219 if (vp->v_lflag & VNAMED_MOUNT)
1220 panic("insmntque: vp already in mount vnode list");
1221 vp->v_lflag |= VNAMED_MOUNT;
1222 mount_ref(mp, 1);
1223 mount_unlock(mp);
1224 }
1225 }
1226
1227
1228 /*
1229 * Create a vnode for a block device.
1230 * Used for root filesystem, argdev, and swap areas.
1231 * Also used for memory file system special devices.
1232 */
1233 int
1234 bdevvp(dev_t dev, vnode_t *vpp)
1235 {
1236 vnode_t nvp;
1237 int error;
1238 struct vnode_fsparam vfsp;
1239 struct vfs_context context;
1240
1241 if (dev == NODEV) {
1242 *vpp = NULLVP;
1243 return (ENODEV);
1244 }
1245
1246 context.vc_thread = current_thread();
1247 context.vc_ucred = FSCRED;
1248
1249 vfsp.vnfs_mp = (struct mount *)0;
1250 vfsp.vnfs_vtype = VBLK;
1251 vfsp.vnfs_str = "bdevvp";
1252 vfsp.vnfs_dvp = NULL;
1253 vfsp.vnfs_fsnode = NULL;
1254 vfsp.vnfs_cnp = NULL;
1255 vfsp.vnfs_vops = spec_vnodeop_p;
1256 vfsp.vnfs_rdev = dev;
1257 vfsp.vnfs_filesize = 0;
1258
1259 vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;
1260
1261 vfsp.vnfs_marksystem = 0;
1262 vfsp.vnfs_markroot = 0;
1263
1264 if ( (error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp)) ) {
1265 *vpp = NULLVP;
1266 return (error);
1267 }
1268 vnode_lock_spin(nvp);
1269 nvp->v_flag |= VBDEVVP;
1270 nvp->v_tag = VT_NON; /* set this to VT_NON so during aliasing it can be replaced */
1271 vnode_unlock(nvp);
1272 if ( (error = vnode_ref(nvp)) ) {
1273 panic("bdevvp failed: vnode_ref");
1274 return (error);
1275 }
1276 if ( (error = VNOP_FSYNC(nvp, MNT_WAIT, &context)) ) {
1277 panic("bdevvp failed: fsync");
1278 return (error);
1279 }
1280 if ( (error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0)) ) {
1281 panic("bdevvp failed: invalidateblks");
1282 return (error);
1283 }
1284
1285 #if CONFIG_MACF
1286 /*
1287 * XXXMAC: We can't put a MAC check here, the system will
1288 * panic without this vnode.
1289 */
1290 #endif /* MAC */
1291
1292 if ( (error = VNOP_OPEN(nvp, FREAD, &context)) ) {
1293 panic("bdevvp failed: open");
1294 return (error);
1295 }
1296 *vpp = nvp;
1297
1298 return (0);
1299 }
1300
1301 /*
1302 * Check to see if the new vnode represents a special device
1303 * for which we already have a vnode (either because of
1304 * bdevvp() or because of a different vnode representing
1305 * the same block device). If such an alias exists, deallocate
1306 * the existing contents and return the aliased vnode. The
1307 * caller is responsible for filling it with its new contents.
1308 */
1309 static vnode_t
1310 checkalias(struct vnode *nvp, dev_t nvp_rdev)
1311 {
1312 struct vnode *vp;
1313 struct vnode **vpp;
1314 struct specinfo *sin = NULL;
1315 int vid = 0;
1316
1317 vpp = &speclisth[SPECHASH(nvp_rdev)];
1318 loop:
1319 SPECHASH_LOCK();
1320
1321 for (vp = *vpp; vp; vp = vp->v_specnext) {
1322 if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
1323 vid = vp->v_id;
1324 break;
1325 }
1326 }
1327 SPECHASH_UNLOCK();
1328
1329 if (vp) {
1330 found_alias:
1331 if (vnode_getwithvid(vp,vid)) {
1332 goto loop;
1333 }
1334 /*
1335 * Termination state is checked in vnode_getwithvid
1336 */
1337 vnode_lock(vp);
1338
1339 /*
1340 * Alias, but not in use, so flush it out.
1341 */
1342 if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
1343 vnode_reclaim_internal(vp, 1, 1, 0);
1344 vnode_put_locked(vp);
1345 vnode_unlock(vp);
1346 goto loop;
1347 }
1348
1349 }
1350 if (vp == NULL || vp->v_tag != VT_NON) {
1351 if (sin == NULL) {
1352 MALLOC_ZONE(sin, struct specinfo *, sizeof(struct specinfo),
1353 M_SPECINFO, M_WAITOK);
1354 }
1355
1356 nvp->v_specinfo = sin;
1357 bzero(nvp->v_specinfo, sizeof(struct specinfo));
1358 nvp->v_rdev = nvp_rdev;
1359 nvp->v_specflags = 0;
1360 nvp->v_speclastr = -1;
1361
1362 SPECHASH_LOCK();
1363
1364 /* We dropped the lock, someone could have added */
1365 if (vp == NULLVP) {
1366 for (vp = *vpp; vp; vp = vp->v_specnext) {
1367 if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
1368 vid = vp->v_id;
1369 SPECHASH_UNLOCK();
1370 goto found_alias;
1371 }
1372 }
1373 }
1374
1375 nvp->v_hashchain = vpp;
1376 nvp->v_specnext = *vpp;
1377 *vpp = nvp;
1378
1379 if (vp != NULLVP) {
1380 nvp->v_specflags |= SI_ALIASED;
1381 vp->v_specflags |= SI_ALIASED;
1382 SPECHASH_UNLOCK();
1383 vnode_put_locked(vp);
1384 vnode_unlock(vp);
1385 } else {
1386 SPECHASH_UNLOCK();
1387 }
1388
1389 return (NULLVP);
1390 }
1391
1392 if (sin) {
1393 FREE_ZONE(sin, sizeof(struct specinfo), M_SPECINFO);
1394 }
1395
1396 if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0)
1397 return(vp);
1398
1399 panic("checkalias with VT_NON vp that shouldn't: %p", vp);
1400
1401 return (vp);
1402 }
1403
1404
1405 /*
1406 * Get a reference on a particular vnode and lock it if requested.
1407 * If the vnode was on the inactive list, remove it from the list.
1408 * If the vnode was on the free list, remove it from the list and
1409 * move it to inactive list as needed.
1410 * The vnode lock bit is set if the vnode is being eliminated in
1411 * vgone. The process is awakened when the transition is completed,
1412 * and an error returned to indicate that the vnode is no longer
1413 * usable (possibly having been changed to a new file system type).
1414 */
1415 int
1416 vget_internal(vnode_t vp, int vid, int vflags)
1417 {
1418 int error = 0;
1419 int vpid;
1420
1421 vnode_lock_spin(vp);
1422
1423 if (vflags & VNODE_WITHID)
1424 vpid = vid;
1425 else
1426 vpid = vp->v_id; // save off the original v_id
1427
1428 if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0))
1429 /*
1430 * vnode to be returned only if it has writers opened
1431 */
1432 error = EINVAL;
1433 else
1434 error = vnode_getiocount(vp, vpid, vflags);
1435
1436 vnode_unlock(vp);
1437
1438 return (error);
1439 }
1440
1441 /*
1442 * Returns: 0 Success
1443 * ENOENT No such file or directory [terminating]
1444 */
1445 int
1446 vnode_ref(vnode_t vp)
1447 {
1448
1449 return (vnode_ref_ext(vp, 0));
1450 }
1451
1452 /*
1453 * Returns: 0 Success
1454 * ENOENT No such file or directory [terminating]
1455 */
1456 int
1457 vnode_ref_ext(vnode_t vp, int fmode)
1458 {
1459 int error = 0;
1460
1461 vnode_lock_spin(vp);
1462
1463 /*
 1464 * once all the current call sites have been fixed to ensure they have
 1465 * taken an iocount, we can toughen this assert up and insist that the
 1466 * iocount is non-zero... a non-zero usecount doesn't ensure correctness
1467 */
1468 if (vp->v_iocount <= 0 && vp->v_usecount <= 0)
1469 panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);
1470
1471 /*
1472 * if you are the owner of drain/termination, can acquire usecount
1473 */
1474 if ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) {
1475 if (vp->v_owner != current_thread()) {
1476 error = ENOENT;
1477 goto out;
1478 }
1479 }
1480 vp->v_usecount++;
1481
1482 if (fmode & FWRITE) {
1483 if (++vp->v_writecount <= 0)
1484 panic("vnode_ref_ext: v_writecount");
1485 }
1486 if (fmode & O_EVTONLY) {
1487 if (++vp->v_kusecount <= 0)
1488 panic("vnode_ref_ext: v_kusecount");
1489 }
1490 if (vp->v_flag & VRAGE) {
1491 struct uthread *ut;
1492
1493 ut = get_bsdthread_info(current_thread());
1494
1495 if ( !(current_proc()->p_lflag & P_LRAGE_VNODES) &&
1496 !(ut->uu_flag & UT_RAGE_VNODES)) {
1497 /*
1498 * a 'normal' process accessed this vnode
 1499 * so make sure it's no longer marked
1500 * for rapid aging... also, make sure
1501 * it gets removed from the rage list...
1502 * when v_usecount drops back to 0, it
1503 * will be put back on the real free list
1504 */
1505 vp->v_flag &= ~VRAGE;
1506 vp->v_references = 0;
1507 vnode_list_remove(vp);
1508 }
1509 }
1510 out:
1511 vnode_unlock(vp);
1512
1513 return (error);
1514 }
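
/*
 * Example (illustrative sketch, compiled out): taking a long-term
 * usecount with vnode_ref_ext() while an iocount is already held
 * (e.g. via vnode_getwithref()), and balancing it later with
 * vnode_rele_ext().  The fmode bits (FWRITE, O_EVTONLY) must match
 * between the ref and the rele.  The helper name below is hypothetical.
 */
#if 0
static int
example_hold_for_writing(vnode_t vp)
{
	int error;

	/* assumes the caller already holds an iocount on vp */
	if ((error = vnode_ref_ext(vp, FWRITE)) != 0)
		return (error);		/* vnode is draining or dead */

	/* ... vp stays open for writing across blocking operations ... */

	vnode_rele_ext(vp, FWRITE, 0);

	return (0);
}
#endif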
1515
1516
1517 /*
1518 * put the vnode on appropriate free list.
1519 * called with vnode LOCKED
1520 */
1521 static void
1522 vnode_list_add(vnode_t vp)
1523 {
1524 #if DIAGNOSTIC
1525 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1526 #endif
1527 /*
 1528 * if it is already on a list or has non-zero references, return
1529 */
1530 if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE))
1531 return;
1532
1533 vnode_list_lock();
1534
1535 if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
1536 /*
1537 * add the new guy to the appropriate end of the RAGE list
1538 */
1539 if ((vp->v_flag & VAGE))
1540 TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
1541 else
1542 TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);
1543
1544 vp->v_listflag |= VLIST_RAGE;
1545 ragevnodes++;
1546
1547 /*
1548 * reset the timestamp for the last inserted vp on the RAGE
 1549 * queue to let new_vnode know that it's not ok to start stealing
1550 * from this list... as long as we're actively adding to this list
1551 * we'll push out the vnodes we want to donate to the real free list
1552 * once we stop pushing, we'll let some time elapse before we start
1553 * stealing them in the new_vnode routine
1554 */
1555 microuptime(&rage_tv);
1556 } else {
1557 /*
1558 * if VL_DEAD, insert it at head of the dead list
1559 * else insert at tail of LRU list or at head if VAGE is set
1560 */
1561 if ( (vp->v_lflag & VL_DEAD)) {
1562 TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
1563 vp->v_listflag |= VLIST_DEAD;
1564 deadvnodes++;
1565 } else if ((vp->v_flag & VAGE)) {
1566 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1567 vp->v_flag &= ~VAGE;
1568 freevnodes++;
1569 } else {
1570 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1571 freevnodes++;
1572 }
1573 }
1574 vnode_list_unlock();
1575 }
1576
1577
1578 /*
1579 * remove the vnode from appropriate free list.
1580 * called with vnode LOCKED and
1581 * the list lock held
1582 */
1583 static void
1584 vnode_list_remove_locked(vnode_t vp)
1585 {
1586 if (VONLIST(vp)) {
1587 /*
1588 * the v_listflag field is
1589 * protected by the vnode_list_lock
1590 */
1591 if (vp->v_listflag & VLIST_RAGE)
1592 VREMRAGE("vnode_list_remove", vp);
1593 else if (vp->v_listflag & VLIST_DEAD)
1594 VREMDEAD("vnode_list_remove", vp);
1595 else
1596 VREMFREE("vnode_list_remove", vp);
1597 }
1598 }
1599
1600
1601 /*
1602 * remove the vnode from appropriate free list.
1603 * called with vnode LOCKED
1604 */
1605 static void
1606 vnode_list_remove(vnode_t vp)
1607 {
1608 #if DIAGNOSTIC
1609 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1610 #endif
1611 /*
1612 * we want to avoid taking the list lock
1613 * in the case where we're not on the free
1614 * list... this will be true for most
1615 * directories and any currently in use files
1616 *
1617 * we're guaranteed that we can't go from
1618 * the not-on-list state to the on-list
1619 * state since we hold the vnode lock...
1620 * all calls to vnode_list_add are done
1621 * under the vnode lock... so we can
 1622 * check for that condition (the prevalent one)
1623 * without taking the list lock
1624 */
1625 if (VONLIST(vp)) {
1626 vnode_list_lock();
1627 /*
1628 * however, we're not guaranteed that
1629 * we won't go from the on-list state
1630 * to the not-on-list state until we
1631 * hold the vnode_list_lock... this
1632 * is due to "new_vnode" removing vnodes
 1633 * from the free list under the list_lock
1634 * w/o the vnode lock... so we need to
1635 * check again whether we're currently
1636 * on the free list
1637 */
1638 vnode_list_remove_locked(vp);
1639
1640 vnode_list_unlock();
1641 }
1642 }
1643
1644
1645 void
1646 vnode_rele(vnode_t vp)
1647 {
1648 vnode_rele_internal(vp, 0, 0, 0);
1649 }
1650
1651
1652 void
1653 vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
1654 {
1655 vnode_rele_internal(vp, fmode, dont_reenter, 0);
1656 }
1657
1658
1659 void
1660 vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
1661 {
1662 if ( !locked)
1663 vnode_lock_spin(vp);
1664 #if DIAGNOSTIC
1665 else
1666 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1667 #endif
1668 if (--vp->v_usecount < 0)
1669 panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
1670
1671 if (fmode & FWRITE) {
1672 if (--vp->v_writecount < 0)
1673 panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
1674 }
1675 if (fmode & O_EVTONLY) {
1676 if (--vp->v_kusecount < 0)
1677 panic("vnode_rele_ext: vp %p kusecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
1678 }
1679 if (vp->v_kusecount > vp->v_usecount)
1680 panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). v_tag = %d, v_type = %d, v_flag = %x.",vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
1681
1682 if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
1683 /*
1684 * vnode is still busy... if we're the last
1685 * usecount, mark for a future call to VNOP_INACTIVE
1686 * when the iocount finally drops to 0
1687 */
1688 if (vp->v_usecount == 0) {
1689 vp->v_lflag |= VL_NEEDINACTIVE;
1690 vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
1691 }
1692 if ( !locked)
1693 vnode_unlock(vp);
1694 return;
1695 }
1696 vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
1697
1698 if ( (vp->v_lflag & (VL_TERMINATE | VL_DEAD)) || dont_reenter) {
1699 /*
1700 * vnode is being cleaned, or
1701 * we've requested that we don't reenter
1702 * the filesystem on this release... in
1703 * this case, we'll mark the vnode aged
1704 * if it's been marked for termination
1705 */
1706 if (dont_reenter) {
1707 if ( !(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM)) )
1708 vp->v_lflag |= VL_NEEDINACTIVE;
1709 vp->v_flag |= VAGE;
1710 }
1711 vnode_list_add(vp);
1712 if ( !locked)
1713 vnode_unlock(vp);
1714 return;
1715 }
1716 /*
1717 * at this point both the iocount and usecount
1718 * are zero
1719 * pick up an iocount so that we can call
1720 * VNOP_INACTIVE with the vnode lock unheld
1721 */
1722 vp->v_iocount++;
1723 #ifdef JOE_DEBUG
1724 record_vp(vp, 1);
1725 #endif
1726 vp->v_lflag &= ~VL_NEEDINACTIVE;
1727 vnode_unlock(vp);
1728
1729 VNOP_INACTIVE(vp, vfs_context_current());
1730
1731 vnode_lock_spin(vp);
1732 /*
1733 * because we dropped the vnode lock to call VNOP_INACTIVE
1734 * the state of the vnode may have changed... we may have
1735 * picked up an iocount, usecount or the MARKTERM may have
1736 * been set... we need to reevaluate the reference counts
1737 * to determine if we can call vnode_reclaim_internal at
1738 * this point... if the reference counts are up, we'll pick
1739 * up the MARKTERM state when they get subsequently dropped
1740 */
1741 if ( (vp->v_iocount == 1) && (vp->v_usecount == 0) &&
1742 ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
1743 struct uthread *ut;
1744
1745 ut = get_bsdthread_info(current_thread());
1746
1747 if (ut->uu_defer_reclaims) {
1748 vp->v_defer_reclaimlist = ut->uu_vreclaims;
1749 ut->uu_vreclaims = vp;
1750 goto defer_reclaim;
1751 }
1752 vnode_lock_convert(vp);
1753 vnode_reclaim_internal(vp, 1, 1, 0);
1754 }
1755 vnode_dropiocount(vp);
1756 vnode_list_add(vp);
1757 defer_reclaim:
1758 if ( !locked)
1759 vnode_unlock(vp);
1760 return;
1761 }
1762
1763 /*
1764 * Remove any vnodes in the vnode table belonging to mount point mp.
1765 *
1766 * If MNT_NOFORCE is specified, there should not be any active ones,
1767 * return error if any are found (nb: this is a user error, not a
1768 * system error). If MNT_FORCE is specified, detach any active vnodes
1769 * that are found.
1770 */
1771 #if DIAGNOSTIC
1772 int busyprt = 0; /* print out busy vnodes */
1773 #if 0
1774 struct ctldebug debug1 = { "busyprt", &busyprt };
1775 #endif /* 0 */
1776 #endif
1777
1778 int
1779 vflush(struct mount *mp, struct vnode *skipvp, int flags)
1780 {
1781 struct vnode *vp;
1782 int busy = 0;
1783 int reclaimed = 0;
1784 int retval;
1785 unsigned int vid;
1786
1787 mount_lock(mp);
1788 vnode_iterate_setup(mp);
1789 /*
 1790 * On regular unmounts (not forced) do a
 1791 * quick check for vnodes in use. This
 1792 * preserves the caching of vnodes. The automounter
 1793 * tries unmounting every so often to see whether
1794 * it is still busy or not.
1795 */
1796 if (((flags & FORCECLOSE)==0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) {
1797 if (vnode_umount_preflight(mp, skipvp, flags)) {
1798 vnode_iterate_clear(mp);
1799 mount_unlock(mp);
1800 return(EBUSY);
1801 }
1802 }
1803 loop:
 1804 /* if it returns 0 then there is nothing to do */
1805 retval = vnode_iterate_prepare(mp);
1806
1807 if (retval == 0) {
1808 vnode_iterate_clear(mp);
1809 mount_unlock(mp);
1810 return(retval);
1811 }
1812
1813 /* iterate over all the vnodes */
1814 while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
1815
1816 vp = TAILQ_FIRST(&mp->mnt_workerqueue);
1817 TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
1818 TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
1819
1820 if ( (vp->v_mount != mp) || (vp == skipvp)) {
1821 continue;
1822 }
1823 vid = vp->v_id;
1824 mount_unlock(mp);
1825
1826 vnode_lock_spin(vp);
1827
1828 if ((vp->v_id != vid) || ((vp->v_lflag & (VL_DEAD | VL_TERMINATE)))) {
1829 vnode_unlock(vp);
1830 mount_lock(mp);
1831 continue;
1832 }
1833
1834 /*
1835 * If requested, skip over vnodes marked VSYSTEM.
1836 * Skip over all vnodes marked VNOFLUSH.
1837 */
1838 if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
1839 (vp->v_flag & VNOFLUSH))) {
1840 vnode_unlock(vp);
1841 mount_lock(mp);
1842 continue;
1843 }
1844 /*
1845 * If requested, skip over vnodes marked VSWAP.
1846 */
1847 if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
1848 vnode_unlock(vp);
1849 mount_lock(mp);
1850 continue;
1851 }
1852 /*
1853 * If requested, skip over vnodes marked VROOT.
1854 */
1855 if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
1856 vnode_unlock(vp);
1857 mount_lock(mp);
1858 continue;
1859 }
1860 /*
1861 * If WRITECLOSE is set, only flush out regular file
1862 * vnodes open for writing.
1863 */
1864 if ((flags & WRITECLOSE) &&
1865 (vp->v_writecount == 0 || vp->v_type != VREG)) {
1866 vnode_unlock(vp);
1867 mount_lock(mp);
1868 continue;
1869 }
1870 /*
1871 * If the real usecount is 0, all we need to do is clear
1872 * out the vnode data structures and we are done.
1873 */
1874 if (((vp->v_usecount == 0) ||
1875 ((vp->v_usecount - vp->v_kusecount) == 0))) {
1876
1877 vnode_lock_convert(vp);
 1878 vp->v_iocount++; /* so that drain waits for other iocounts */
1879 #ifdef JOE_DEBUG
1880 record_vp(vp, 1);
1881 #endif
1882 vnode_reclaim_internal(vp, 1, 1, 0);
1883 vnode_dropiocount(vp);
1884 vnode_list_add(vp);
1885 vnode_unlock(vp);
1886
1887 reclaimed++;
1888 mount_lock(mp);
1889 continue;
1890 }
1891 /*
1892 * If FORCECLOSE is set, forcibly close the vnode.
1893 * For block or character devices, revert to an
1894 * anonymous device. For all other files, just kill them.
1895 */
1896 if (flags & FORCECLOSE) {
1897 vnode_lock_convert(vp);
1898
1899 if (vp->v_type != VBLK && vp->v_type != VCHR) {
 1900 vp->v_iocount++; /* so that drain waits for other iocounts */
1901 #ifdef JOE_DEBUG
1902 record_vp(vp, 1);
1903 #endif
1904 vnode_reclaim_internal(vp, 1, 1, 0);
1905 vnode_dropiocount(vp);
1906 vnode_list_add(vp);
1907 vnode_unlock(vp);
1908 } else {
1909 vclean(vp, 0);
1910 vp->v_lflag &= ~VL_DEAD;
1911 vp->v_op = spec_vnodeop_p;
1912 vp->v_flag |= VDEVFLUSH;
1913 vnode_unlock(vp);
1914 }
1915 mount_lock(mp);
1916 continue;
1917 }
1918 #if DIAGNOSTIC
1919 if (busyprt)
1920 vprint("vflush: busy vnode", vp);
1921 #endif
1922 vnode_unlock(vp);
1923 mount_lock(mp);
1924 busy++;
1925 }
1926
1927 /* At this point the worker queue is completed */
1928 if (busy && ((flags & FORCECLOSE)==0) && reclaimed) {
1929 busy = 0;
1930 reclaimed = 0;
1931 (void)vnode_iterate_reloadq(mp);
1932 /* returned with mount lock held */
1933 goto loop;
1934 }
1935
1936 /* if new vnodes were created in between retry the reclaim */
1937 if ( vnode_iterate_reloadq(mp) != 0) {
1938 if (!(busy && ((flags & FORCECLOSE)==0)))
1939 goto loop;
1940 }
1941 vnode_iterate_clear(mp);
1942 mount_unlock(mp);
1943
1944 if (busy && ((flags & FORCECLOSE)==0))
1945 return (EBUSY);
1946 return (0);
1947 }
1948
1949 long num_recycledvnodes = 0;
1950 /*
1951 * Disassociate the underlying file system from a vnode.
1952 * The vnode lock is held on entry.
1953 */
1954 static void
1955 vclean(vnode_t vp, int flags)
1956 {
1957 vfs_context_t ctx = vfs_context_current();
1958 int active;
1959 int need_inactive;
1960 int already_terminating;
1961 int clflags = 0;
1962 #if NAMEDSTREAMS
1963 int is_namedstream;
1964 #endif
1965
1966 /*
1967 * Check to see if the vnode is in use.
1968 * If so we have to reference it before we clean it out
1969 * so that its count cannot fall to zero and generate a
1970 * race against ourselves to recycle it.
1971 */
1972 active = vp->v_usecount;
1973
1974 /*
1975 * just in case we missed sending a needed
1976 * VNOP_INACTIVE, we'll do it now
1977 */
1978 need_inactive = (vp->v_lflag & VL_NEEDINACTIVE);
1979
1980 vp->v_lflag &= ~VL_NEEDINACTIVE;
1981
1982 /*
1983 * Prevent the vnode from being recycled or
1984 * brought into use while we clean it out.
1985 */
1986 already_terminating = (vp->v_lflag & VL_TERMINATE);
1987
1988 vp->v_lflag |= VL_TERMINATE;
1989
1990 /*
1991 * remove the vnode from any mount list
1992 * it might be on...
1993 */
1994 insmntque(vp, (struct mount *)0);
1995
1996 #if NAMEDSTREAMS
1997 is_namedstream = vnode_isnamedstream(vp);
1998 #endif
1999
2000 vnode_unlock(vp);
2001
2002 OSAddAtomicLong(1, &num_recycledvnodes);
2003
2004 if (flags & DOCLOSE)
2005 clflags |= IO_NDELAY;
2006 if (flags & REVOKEALL)
2007 clflags |= IO_REVOKE;
2008
2009 if (active && (flags & DOCLOSE))
2010 VNOP_CLOSE(vp, clflags, ctx);
2011
2012 /*
2013 * Clean out any buffers associated with the vnode.
2014 */
2015 if (flags & DOCLOSE) {
2016 #if NFSCLIENT
2017 if (vp->v_tag == VT_NFS)
2018 nfs_vinvalbuf(vp, V_SAVE, ctx, 0);
2019 else
2020 #endif
2021 {
2022 VNOP_FSYNC(vp, MNT_WAIT, ctx);
2023 buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
2024 }
2025 if (UBCINFOEXISTS(vp))
2026 /*
2027 * Clean the pages in VM.
2028 */
2029 (void)ubc_sync_range(vp, (off_t)0, ubc_getsize(vp), UBC_PUSHALL);
2030 }
2031 if (active || need_inactive)
2032 VNOP_INACTIVE(vp, ctx);
2033
2034 #if NAMEDSTREAMS
2035 if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
2036 vnode_t pvp = vp->v_parent;
2037
2038 /* Delete the shadow stream file before we reclaim its vnode */
2039 if (vnode_isshadow(vp)) {
2040 vnode_relenamedstream(pvp, vp, ctx);
2041 }
2042
2043 /*
2044 * Because vclean calls VNOP_INACTIVE prior to calling vnode_relenamedstream, we may not have
2045 * torn down and/or deleted the shadow file yet. On HFS, if the shadow file is sufficiently large
2046 * and occupies a large number of extents, the deletion will be deferred until VNOP_INACTIVE
2047 * and the file treated like an open-unlinked. To rectify this, call VNOP_INACTIVE again
2048 * explicitly to force its removal.
2049 */
2050 if (vnode_isshadow(vp)) {
2051 VNOP_INACTIVE(vp, ctx);
2052 }
2053
2054 /*
2055 * No more streams associated with the parent. We
2056 * have a ref on it, so its identity is stable.
2057 * If the parent is on an opaque volume, then we need to know
2058 * whether it has associated named streams.
2059 */
2060 if (vfs_authopaque(pvp->v_mount)) {
2061 vnode_lock_spin(pvp);
2062 pvp->v_lflag &= ~VL_HASSTREAMS;
2063 vnode_unlock(pvp);
2064 }
2065 }
2066 #endif
2067
2068 /*
2069 * Destroy ubc named reference
2070 * cluster_release is done on this path
2071 * along with dropping the reference on the ucred
2072 */
2073 ubc_destroy_named(vp);
2074
2075 /*
2076 * Reclaim the vnode.
2077 */
2078 if (VNOP_RECLAIM(vp, ctx))
2079 panic("vclean: cannot reclaim");
2080
2081 // make sure the name & parent ptrs get cleaned out!
2082 vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE);
2083
2084 vnode_lock(vp);
2085
2086 vp->v_mount = dead_mountp;
2087 vp->v_op = dead_vnodeop_p;
2088 vp->v_tag = VT_NON;
2089 vp->v_data = NULL;
2090
2091 vp->v_lflag |= VL_DEAD;
2092
2093 if (already_terminating == 0) {
2094 vp->v_lflag &= ~VL_TERMINATE;
2095 /*
2096 * Done with purge, notify sleepers of the grim news.
2097 */
2098 if (vp->v_lflag & VL_TERMWANT) {
2099 vp->v_lflag &= ~VL_TERMWANT;
2100 wakeup(&vp->v_lflag);
2101 }
2102 }
2103 }
2104
2105 /*
2106 * Eliminate all activity associated with the requested vnode
2107 * and with all vnodes aliased to the requested vnode.
2108 */
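/*
 * Aliases share the same (v_rdev, v_type) and are linked on the spec hash
 * chain; each alias found is pinned with vnode_getwithvid() (the SPECHASH
 * lock is dropped around the blocking calls) and reclaimed, and then the
 * requested vnode itself is reclaimed with REVOKEALL.
 */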
2109 int
2110 #if DIAGNOSTIC
2111 vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
2112 #else
2113 vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
2114 #endif
2115 {
2116 struct vnode *vq;
2117 int vid;
2118
2119 #if DIAGNOSTIC
2120 if ((flags & REVOKEALL) == 0)
2121 panic("vnop_revoke");
2122 #endif
2123
2124 if (vnode_isaliased(vp)) {
2125 /*
2126 * If a vgone (or vclean) is already in progress,
2127 * return an immediate error
2128 */
2129 if (vp->v_lflag & VL_TERMINATE)
2130 return(ENOENT);
2131
2132 /*
2133 * Ensure that vp will not be vgone'd while we
2134 * are eliminating its aliases.
2135 */
2136 SPECHASH_LOCK();
2137 while ((vp->v_specflags & SI_ALIASED)) {
2138 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2139 if (vq->v_rdev != vp->v_rdev ||
2140 vq->v_type != vp->v_type || vp == vq)
2141 continue;
2142 vid = vq->v_id;
2143 SPECHASH_UNLOCK();
2144 if (vnode_getwithvid(vq,vid)){
2145 SPECHASH_LOCK();
2146 break;
2147 }
2148 vnode_reclaim_internal(vq, 0, 1, 0);
2149 vnode_put(vq);
2150 SPECHASH_LOCK();
2151 break;
2152 }
2153 }
2154 SPECHASH_UNLOCK();
2155 }
2156 vnode_reclaim_internal(vp, 0, 0, REVOKEALL);
2157
2158 return (0);
2159 }
2160
2161 /*
2162 * Recycle an unused vnode: if it is not busy, reclaim it immediately and return 1;
2163 * if it has an iocount or usecount, just mark it for termination (VL_MARKTERM) and return 0.
2164 */
2165 int
2166 vnode_recycle(struct vnode *vp)
2167 {
2168 vnode_lock_spin(vp);
2169
2170 if (vp->v_iocount || vp->v_usecount) {
2171 vp->v_lflag |= VL_MARKTERM;
2172 vnode_unlock(vp);
2173 return(0);
2174 }
2175 vnode_lock_convert(vp);
2176 vnode_reclaim_internal(vp, 1, 0, 0);
2177
2178 vnode_unlock(vp);
2179
2180 return (1);
2181 }
2182
2183 static int
2184 vnode_reload(vnode_t vp)
2185 {
2186 vnode_lock_spin(vp);
2187
2188 if ((vp->v_iocount > 1) || vp->v_usecount) {
2189 vnode_unlock(vp);
2190 return(0);
2191 }
2192 if (vp->v_iocount <= 0)
2193 panic("vnode_reload with no iocount %d", vp->v_iocount);
2194
2195 /* mark for release when iocount is dropped */
2196 vp->v_lflag |= VL_MARKTERM;
2197 vnode_unlock(vp);
2198
2199 return (1);
2200 }
2201
2202
2203 static void
2204 vgone(vnode_t vp, int flags)
2205 {
2206 struct vnode *vq;
2207 struct vnode *vx;
2208
2209 /*
2210 * Clean out the filesystem specific data.
2211 * vclean also takes care of removing the
2212 * vnode from any mount list it might be on
2213 */
2214 vclean(vp, flags | DOCLOSE);
2215
2216 /*
2217 * If special device, remove it from special device alias list
2218 * if it is on one.
2219 */
2220 if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
2221 SPECHASH_LOCK();
2222 if (*vp->v_hashchain == vp) {
2223 *vp->v_hashchain = vp->v_specnext;
2224 } else {
2225 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2226 if (vq->v_specnext != vp)
2227 continue;
2228 vq->v_specnext = vp->v_specnext;
2229 break;
2230 }
2231 if (vq == NULL)
2232 panic("missing bdev");
2233 }
2234 if (vp->v_specflags & SI_ALIASED) {
2235 vx = NULL;
2236 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2237 if (vq->v_rdev != vp->v_rdev ||
2238 vq->v_type != vp->v_type)
2239 continue;
2240 if (vx)
2241 break;
2242 vx = vq;
2243 }
2244 if (vx == NULL)
2245 panic("missing alias");
2246 if (vq == NULL)
2247 vx->v_specflags &= ~SI_ALIASED;
2248 vp->v_specflags &= ~SI_ALIASED;
2249 }
2250 SPECHASH_UNLOCK();
2251 {
2252 struct specinfo *tmp = vp->v_specinfo;
2253 vp->v_specinfo = NULL;
2254 FREE_ZONE((void *)tmp, sizeof(struct specinfo), M_SPECINFO);
2255 }
2256 }
2257 }
2258
2259 /*
2260 * Look up the vnode for a device number and check whether a filesystem is mounted on it.
2261 */
2262 int
2263 check_mountedon(dev_t dev, enum vtype type, int *errorp)
2264 {
2265 vnode_t vp;
2266 int rc = 0;
2267 int vid;
2268
2269 loop:
2270 SPECHASH_LOCK();
2271 for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
2272 if (dev != vp->v_rdev || type != vp->v_type)
2273 continue;
2274 vid = vp->v_id;
2275 SPECHASH_UNLOCK();
2276 if (vnode_getwithvid(vp,vid))
2277 goto loop;
2278 vnode_lock_spin(vp);
2279 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
2280 vnode_unlock(vp);
2281 if ((*errorp = vfs_mountedon(vp)) != 0)
2282 rc = 1;
2283 } else
2284 vnode_unlock(vp);
2285 vnode_put(vp);
2286 return(rc);
2287 }
2288 SPECHASH_UNLOCK();
2289 return (0);
2290 }
2291
2292 /*
2293 * Calculate the total number of references to a special device.
2294 */
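/*
 * For an aliased device this walks the spec hash chain, revalidating each
 * entry by vid with vnode_getwithvid() and restarting from the top if a
 * vnode gets recycled out from under us; unused aliases encountered along
 * the way are flushed out.
 */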
2295 int
2296 vcount(vnode_t vp)
2297 {
2298 vnode_t vq, vnext;
2299 int count;
2300 int vid;
2301
2302 loop:
2303 if (!vnode_isaliased(vp))
2304 return (vp->v_usecount - vp->v_kusecount);
2305 count = 0;
2306
2307 SPECHASH_LOCK();
2308 /*
2309 * Grab first vnode and its vid.
2310 */
2311 vq = *vp->v_hashchain;
2312 vid = vq ? vq->v_id : 0;
2313
2314 SPECHASH_UNLOCK();
2315
2316 while (vq) {
2317 /*
2318 * Attempt to get the vnode outside the SPECHASH lock.
2319 */
2320 if (vnode_getwithvid(vq, vid)) {
2321 goto loop;
2322 }
2323 vnode_lock(vq);
2324
2325 if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) {
2326 if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) {
2327 /*
2328 * Alias, but not in use, so flush it out.
2329 */
2330 vnode_reclaim_internal(vq, 1, 1, 0);
2331 vnode_put_locked(vq);
2332 vnode_unlock(vq);
2333 goto loop;
2334 }
2335 count += (vq->v_usecount - vq->v_kusecount);
2336 }
2337 vnode_unlock(vq);
2338
2339 SPECHASH_LOCK();
2340 /*
2341 * must do this with the reference still held on 'vq'
2342 * so that it can't be destroyed while we're poking
2343 * through v_specnext
2344 */
2345 vnext = vq->v_specnext;
2346 vid = vnext ? vnext->v_id : 0;
2347
2348 SPECHASH_UNLOCK();
2349
2350 vnode_put(vq);
2351
2352 vq = vnext;
2353 }
2354
2355 return (count);
2356 }
2357
2358 int prtactive = 0; /* 1 => print out reclaim of active vnodes */
2359
2360 /*
2361 * Print out a description of a vnode.
2362 */
2363 static const char *typename[] =
2364 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
2365
2366 void
2367 vprint(const char *label, struct vnode *vp)
2368 {
2369 char sbuf[64];
2370
2371 if (label != NULL)
2372 printf("%s: ", label);
2373 printf("type %s, usecount %d, writecount %d",
2374 typename[vp->v_type], vp->v_usecount, vp->v_writecount);
2375 sbuf[0] = '\0';
2376 if (vp->v_flag & VROOT)
2377 strlcat(sbuf, "|VROOT", sizeof(sbuf));
2378 if (vp->v_flag & VTEXT)
2379 strlcat(sbuf, "|VTEXT", sizeof(sbuf));
2380 if (vp->v_flag & VSYSTEM)
2381 strlcat(sbuf, "|VSYSTEM", sizeof(sbuf));
2382 if (vp->v_flag & VNOFLUSH)
2383 strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf));
2384 if (vp->v_flag & VBWAIT)
2385 strlcat(sbuf, "|VBWAIT", sizeof(sbuf));
2386 if (vnode_isaliased(vp))
2387 strlcat(sbuf, "|VALIASED", sizeof(sbuf));
2388 if (sbuf[0] != '\0')
2389 printf(" flags (%s)", &sbuf[1]);
2390 }
2391
2392
2393 int
2394 vn_getpath(struct vnode *vp, char *pathbuf, int *len)
2395 {
2396 return build_path(vp, pathbuf, *len, len, BUILDPATH_NO_FS_ENTER, vfs_context_current());
2397 }
2398
2399 int
2400 vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
2401 {
2402 return build_path(vp, pathbuf, *len, len, 0, vfs_context_current());
2403 }
2404
2405 int
2406 vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
2407 {
2408 return ubc_cs_getcdhash(vp, offset, cdhash);
2409 }
2410
2411
2412 static char *extension_table=NULL;
2413 static int nexts;
2414 static int max_ext_width;
2415
2416 static int
2417 extension_cmp(const void *a, const void *b)
2418 {
2419 return (strlen((const char *)a) - strlen((const char *)b));
2420 }
2421
2422
2423 //
2424 // This is the api LaunchServices uses to inform the kernel
2425 // the list of package extensions to ignore.
2426 //
2427 // Internally we keep the list sorted by the length of the
2428 // extension (from shortest to longest). We sort the
2429 // list of extensions so that we can speed up our searches
2430 // when comparing file names -- we only compare extensions
2431 // that could possibly fit into the file name, not all of
2432 // them (i.e. a short 8 character name can't have an 8
2433 // character extension).
2434 //
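//
// The table handed in from userland is laid out as nentries rows of maxwidth
// bytes each, one NUL-padded extension (without the leading '.') per row.
// A purely illustrative layout with maxwidth == 8 and three hypothetical
// extensions would be the 24-byte block:
//
//	"app\0\0\0\0\0" "bundle\0\0" "pkg\0\0\0\0\0"
//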
2435 extern lck_mtx_t *pkg_extensions_lck;
2436
2437 __private_extern__ int
2438 set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
2439 {
2440 char *new_exts, *old_exts;
2441 int error;
2442
2443 if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
2444 return EINVAL;
2445 }
2446
2447
2448 // allocate one byte extra so we can guarantee null termination
2449 MALLOC(new_exts, char *, (nentries * maxwidth) + 1, M_TEMP, M_WAITOK);
2450 if (new_exts == NULL) {
2451 return ENOMEM;
2452 }
2453
2454 error = copyin(data, new_exts, nentries * maxwidth);
2455 if (error) {
2456 FREE(new_exts, M_TEMP);
2457 return error;
2458 }
2459
2460 new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block
2461
2462 qsort(new_exts, nentries, maxwidth, extension_cmp);
2463
2464 lck_mtx_lock(pkg_extensions_lck);
2465
2466 old_exts = extension_table;
2467 extension_table = new_exts;
2468 nexts = nentries;
2469 max_ext_width = maxwidth;
2470
2471 lck_mtx_unlock(pkg_extensions_lck);
2472
2473 if (old_exts) {
2474 FREE(old_exts, M_TEMP);
2475 }
2476
2477 return 0;
2478 }
2479
2480
2481 __private_extern__ int
2482 is_package_name(const char *name, int len)
2483 {
2484 int i, extlen;
2485 const char *ptr, *name_ext;
2486
2487 if (len <= 3) {
2488 return 0;
2489 }
2490
2491 name_ext = NULL;
2492 for(ptr=name; *ptr != '\0'; ptr++) {
2493 if (*ptr == '.') {
2494 name_ext = ptr;
2495 }
2496 }
2497
2498 // if there is no "." extension, it can't match
2499 if (name_ext == NULL) {
2500 return 0;
2501 }
2502
2503 // advance over the "."
2504 name_ext++;
2505
2506 lck_mtx_lock(pkg_extensions_lck);
2507
2508 // now iterate over all the extensions to see if any match
2509 ptr = &extension_table[0];
2510 for(i=0; i < nexts; i++, ptr+=max_ext_width) {
2511 extlen = strlen(ptr);
2512 if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
2513 // aha, a match!
2514 lck_mtx_unlock(pkg_extensions_lck);
2515 return 1;
2516 }
2517 }
2518
2519 lck_mtx_unlock(pkg_extensions_lck);
2520
2521 // if we get here, no extension matched
2522 return 0;
2523 }
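
//
// For illustration, assuming "app" has been registered through
// set_package_extensions_table() above: is_package_name("Foo.app", 7) locates
// the last '.', compares the trailing "app" case-insensitively against the
// table and returns 1, while is_package_name("Foo.txt", 7) returns 0 when
// "txt" is not in the table.
//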
2524
2525 int
2526 vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
2527 {
2528 char *ptr, *end;
2529 int comp=0;
2530
2531 *component = -1;
2532 if (*path != '/') {
2533 return EINVAL;
2534 }
2535
2536 end = path + 1;
2537 while(end < path + pathlen && *end != '\0') {
2538 while(end < path + pathlen && *end == '/' && *end != '\0') {
2539 end++;
2540 }
2541
2542 ptr = end;
2543
2544 while(end < path + pathlen && *end != '/' && *end != '\0') {
2545 end++;
2546 }
2547
2548 if (end > path + pathlen) {
2549 // hmm, string wasn't null terminated
2550 return EINVAL;
2551 }
2552
2553 *end = '\0';
2554 if (is_package_name(ptr, end - ptr)) {
2555 *component = comp;
2556 break;
2557 }
2558
2559 end++;
2560 comp++;
2561 }
2562
2563 return 0;
2564 }
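
//
// Hypothetical example: with "app" registered as a package extension,
// vn_path_package_check(vp, "/Volumes/Stuff/Foo.app/Contents/x", len, &comp)
// stops at the first package component and sets comp to 2, the zero-based
// index of "Foo.app" among the path components; if nothing matches, comp is
// left at -1 and 0 is still returned.
//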
2565
2566 /*
2567 * Determine if a name is inappropriate for a searchfs query.
2568 * The list currently consists of the single name "System".
2569 */
2570
2571 int vn_searchfs_inappropriate_name(const char *name, int len) {
2572 const char *bad_names[] = { "System" };
2573 int bad_len[] = { 6 };
2574 int i;
2575
2576 for(i=0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) {
2577 if (len == bad_len[i] && strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) {
2578 return 1;
2579 }
2580 }
2581
2582 // if we get here, no name matched
2583 return 0;
2584 }
2585
2586 /*
2587 * Top level filesystem related information gathering.
2588 */
2589 extern unsigned int vfs_nummntops;
2590
2591 int
2592 vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
2593 user_addr_t newp, size_t newlen, proc_t p)
2594 {
2595 struct vfstable *vfsp;
2596 int *username;
2597 u_int usernamelen;
2598 int error;
2599 struct vfsconf vfsc;
2600
2601 /* Everything other than VFS_GENERIC, and within VFS_GENERIC the
2602 * VFS_MAXTYPENUM, VFS_CONF and VFS_SET_PACKAGE_EXTS selectors,
2603 * needs root privileges in order to be modified.
2604 * The rest is covered by userland_sysctl (CTLFLAG_ANYBODY).
2605 */
2606 if ((newp != USER_ADDR_NULL) && ((name[0] != VFS_GENERIC) ||
2607 ((name[1] == VFS_MAXTYPENUM) ||
2608 (name[1] == VFS_CONF) ||
2609 (name[1] == VFS_SET_PACKAGE_EXTS)))
2610 && (error = suser(kauth_cred_get(), &p->p_acflag))) {
2611 return(error);
2612 }
2613 /*
2614 * The VFS_NUMMNTOPS shouldn't be at name[0] since it
2615 * is a VFS generic variable. So now we must check
2616 * namelen so we don't end up covering any UFS
2617 * variables (since UFS vfc_typenum is 1).
2618 *
2619 * It should have been:
2620 * name[0]: VFS_GENERIC
2621 * name[1]: VFS_NUMMNTOPS
2622 */
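/*
 * (In other words, a legacy two-element MIB of { CTL_VFS, VFS_NUMMNTOPS },
 * which reaches us here with namelen == 1 since CTL_VFS has already been
 * stripped, is still honored below for compatibility.)
 */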
2623 if (namelen == 1 && name[0] == VFS_NUMMNTOPS) {
2624 return (sysctl_rdint(oldp, oldlenp, newp, vfs_nummntops));
2625 }
2626
2627 /* all sysctl names at this level are at least name and field */
2628 if (namelen < 2)
2629 return (EISDIR); /* overloaded */
2630 if (name[0] != VFS_GENERIC) {
2631
2632 mount_list_lock();
2633 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2634 if (vfsp->vfc_typenum == name[0]) {
2635 vfsp->vfc_refcount++;
2636 break;
2637 }
2638 mount_list_unlock();
2639
2640 if (vfsp == NULL)
2641 return (ENOTSUP);
2642
2643 /* XXX current context proxy for proc p? */
2644 error = ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
2645 oldp, oldlenp, newp, newlen,
2646 vfs_context_current()));
2647
2648 mount_list_lock();
2649 vfsp->vfc_refcount--;
2650 mount_list_unlock();
2651 return error;
2652 }
2653 switch (name[1]) {
2654 case VFS_MAXTYPENUM:
2655 return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
2656 case VFS_CONF:
2657 if (namelen < 3)
2658 return (ENOTDIR); /* overloaded */
2659
2660 mount_list_lock();
2661 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2662 if (vfsp->vfc_typenum == name[2])
2663 break;
2664
2665 if (vfsp == NULL) {
2666 mount_list_unlock();
2667 return (ENOTSUP);
2668 }
2669
2670 vfsc.vfc_reserved1 = 0;
2671 bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
2672 vfsc.vfc_typenum = vfsp->vfc_typenum;
2673 vfsc.vfc_refcount = vfsp->vfc_refcount;
2674 vfsc.vfc_flags = vfsp->vfc_flags;
2675 vfsc.vfc_reserved2 = 0;
2676 vfsc.vfc_reserved3 = 0;
2677
2678 mount_list_unlock();
2679 return (sysctl_rdstruct(oldp, oldlenp, newp, &vfsc,
2680 sizeof(struct vfsconf)));
2681
2682 case VFS_SET_PACKAGE_EXTS:
2683 return set_package_extensions_table((user_addr_t)((unsigned)name[1]), name[2], name[3]);
2684 }
2685 /*
2686 * We need to get back into the general MIB, so we need to re-prepend
2687 * CTL_VFS to our name and try userland_sysctl().
2688 */
2689 usernamelen = namelen + 1;
2690 MALLOC(username, int *, usernamelen * sizeof(*username),
2691 M_TEMP, M_WAITOK);
2692 bcopy(name, username + 1, namelen * sizeof(*name));
2693 username[0] = CTL_VFS;
2694 error = userland_sysctl(p, username, usernamelen, oldp,
2695 oldlenp, newp, newlen, oldlenp);
2696 FREE(username, M_TEMP);
2697 return (error);
2698 }
2699
2700 /*
2701 * Dump vnode list (via sysctl) - defunct
2702 * use "pstat" instead
2703 */
2704 /* ARGSUSED */
2705 int
2706 sysctl_vnode
2707 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
2708 {
2709 return(EINVAL);
2710 }
2711
2712 SYSCTL_PROC(_kern, KERN_VNODE, vnode,
2713 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MASKED,
2714 0, 0, sysctl_vnode, "S,", "");
2715
2716
2717 /*
2718 * Check to see if a filesystem is mounted on a block device.
2719 */
2720 int
2721 vfs_mountedon(struct vnode *vp)
2722 {
2723 struct vnode *vq;
2724 int error = 0;
2725
2726 SPECHASH_LOCK();
2727 if (vp->v_specflags & SI_MOUNTEDON) {
2728 error = EBUSY;
2729 goto out;
2730 }
2731 if (vp->v_specflags & SI_ALIASED) {
2732 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2733 if (vq->v_rdev != vp->v_rdev ||
2734 vq->v_type != vp->v_type)
2735 continue;
2736 if (vq->v_specflags & SI_MOUNTEDON) {
2737 error = EBUSY;
2738 break;
2739 }
2740 }
2741 }
2742 out:
2743 SPECHASH_UNLOCK();
2744 return (error);
2745 }
2746
2747 /*
2748 * Unmount all filesystems. The list is traversed in reverse order
2749 * of mounting to avoid dependencies.
2750 */
2751 __private_extern__ void
2752 vfs_unmountall(void)
2753 {
2754 struct mount *mp;
2755 int error;
2756
2757 /*
2758 * Since this only runs when rebooting, it is not interlocked.
2759 */
2760 mount_list_lock();
2761 while(!TAILQ_EMPTY(&mountlist)) {
2762 mp = TAILQ_LAST(&mountlist, mntlist);
2763 mount_list_unlock();
2764 error = dounmount(mp, MNT_FORCE, 0, vfs_context_current());
2765 if ((error != 0) && (error != EBUSY)) {
2766 printf("unmount of %s failed (", mp->mnt_vfsstat.f_mntonname);
2767 printf("%d)\n", error);
2768 mount_list_lock();
2769 TAILQ_REMOVE(&mountlist, mp, mnt_list);
2770 continue;
2771 } else if (error == EBUSY) {
2772 /* If EBUSY is returned, the unmount was already in progress */
2773 printf("unmount of %p failed (", mp);
2774 printf("BUSY)\n");
2775 }
2776 mount_list_lock();
2777 }
2778 mount_list_unlock();
2779 }
2780
2781
2782 /*
2783 * This routine is called from vnode_pager_deallocate out of the VM
2784 * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
2785 * on a vnode that has a UBCINFO
2786 */
2787 __private_extern__ void
2788 vnode_pager_vrele(vnode_t vp)
2789 {
2790 struct ubc_info *uip;
2791
2792 vnode_lock_spin(vp);
2793
2794 vp->v_lflag &= ~VNAMED_UBC;
2795
2796 uip = vp->v_ubcinfo;
2797 vp->v_ubcinfo = UBC_INFO_NULL;
2798
2799 vnode_unlock(vp);
2800
2801 ubc_info_deallocate(uip);
2802 }
2803
2804
2805 #include <sys/disk.h>
2806
2807 errno_t
2808 vfs_init_io_attributes(vnode_t devvp, mount_t mp)
2809 {
2810 int error;
2811 off_t readblockcnt = 0;
2812 off_t writeblockcnt = 0;
2813 off_t readmaxcnt = 0;
2814 off_t writemaxcnt = 0;
2815 off_t readsegcnt = 0;
2816 off_t writesegcnt = 0;
2817 off_t readsegsize = 0;
2818 off_t writesegsize = 0;
2819 off_t alignment = 0;
2820 off_t ioqueue_depth = 0;
2821 u_int32_t blksize;
2822 u_int64_t temp;
2823 u_int32_t features;
2824 vfs_context_t ctx = vfs_context_current();
2825 int isssd = 0;
2826 int isvirtual = 0;
2827 /*
2828 * determine if this mount point exists on the same device as the root
2829 * partition... if so, then it comes under the hard throttle control
2830 */
2831 int thisunit = -1;
2832 static int rootunit = -1;
2833
2834 if (rootunit == -1) {
2835 if (VNOP_IOCTL(rootvp, DKIOCGETBSDUNIT, (caddr_t)&rootunit, 0, ctx))
2836 rootunit = -1;
2837 else if (rootvp == devvp)
2838 mp->mnt_kern_flag |= MNTK_ROOTDEV;
2839 }
2840 if (devvp != rootvp && rootunit != -1) {
2841 if (VNOP_IOCTL(devvp, DKIOCGETBSDUNIT, (caddr_t)&thisunit, 0, ctx) == 0) {
2842 if (thisunit == rootunit)
2843 mp->mnt_kern_flag |= MNTK_ROOTDEV;
2844 }
2845 }
2846 /*
2847 * force the spec device to re-cache
2848 * the underlying block size in case
2849 * the filesystem overrode the initial value
2850 */
2851 set_fsblocksize(devvp);
2852
2853
2854 if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE,
2855 (caddr_t)&blksize, 0, ctx)))
2856 return (error);
2857
2858 mp->mnt_devblocksize = blksize;
2859
2860 /*
2861 * set the maximum possible I/O size
2862 * this may get clipped to a smaller value
2863 * based on which constraints are being advertised
2864 * and if those advertised constraints result in a smaller
2865 * limit for a given I/O
2866 */
2867 mp->mnt_maxreadcnt = MAX_UPL_SIZE * PAGE_SIZE;
2868 mp->mnt_maxwritecnt = MAX_UPL_SIZE * PAGE_SIZE;
2869
2870 if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
2871 if (isvirtual)
2872 mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
2873 }
2874 if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) {
2875 if (isssd)
2876 mp->mnt_kern_flag |= MNTK_SSD;
2877 }
2878
2879 if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
2880 (caddr_t)&features, 0, ctx)))
2881 return (error);
2882
2883 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD,
2884 (caddr_t)&readblockcnt, 0, ctx)))
2885 return (error);
2886
2887 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE,
2888 (caddr_t)&writeblockcnt, 0, ctx)))
2889 return (error);
2890
2891 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD,
2892 (caddr_t)&readmaxcnt, 0, ctx)))
2893 return (error);
2894
2895 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE,
2896 (caddr_t)&writemaxcnt, 0, ctx)))
2897 return (error);
2898
2899 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD,
2900 (caddr_t)&readsegcnt, 0, ctx)))
2901 return (error);
2902
2903 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE,
2904 (caddr_t)&writesegcnt, 0, ctx)))
2905 return (error);
2906
2907 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD,
2908 (caddr_t)&readsegsize, 0, ctx)))
2909 return (error);
2910
2911 if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE,
2912 (caddr_t)&writesegsize, 0, ctx)))
2913 return (error);
2914
2915 if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT,
2916 (caddr_t)&alignment, 0, ctx)))
2917 return (error);
2918
2919 if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
2920 (caddr_t)&ioqueue_depth, 0, ctx)))
2921 return (error);
2922
2923 if (readmaxcnt)
2924 mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt;
2925
2926 if (readblockcnt) {
2927 temp = readblockcnt * blksize;
2928 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
2929
2930 if (temp < mp->mnt_maxreadcnt)
2931 mp->mnt_maxreadcnt = (u_int32_t)temp;
2932 }
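/*
 * Worked example (numbers purely illustrative): with blksize == 4096 and a
 * DKIOCGETMAXBLOCKCOUNTREAD of 256, temp comes to 1MB, so mnt_maxreadcnt is
 * clipped down to 1MB provided the prevailing limit (readmaxcnt or the
 * MAX_UPL_SIZE * PAGE_SIZE default) is larger. The write-side limits below
 * are derived the same way.
 */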
2933
2934 if (writemaxcnt)
2935 mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : writemaxcnt;
2936
2937 if (writeblockcnt) {
2938 temp = writeblockcnt * blksize;
2939 temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
2940
2941 if (temp < mp->mnt_maxwritecnt)
2942 mp->mnt_maxwritecnt = (u_int32_t)temp;
2943 }
2944
2945 if (readsegcnt) {
2946 temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
2947 } else {
2948 temp = mp->mnt_maxreadcnt / PAGE_SIZE;
2949
2950 if (temp > UINT16_MAX)
2951 temp = UINT16_MAX;
2952 }
2953 mp->mnt_segreadcnt = (u_int16_t)temp;
2954
2955 if (writesegcnt) {
2956 temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
2957 } else {
2958 temp = mp->mnt_maxwritecnt / PAGE_SIZE;
2959
2960 if (temp > UINT16_MAX)
2961 temp = UINT16_MAX;
2962 }
2963 mp->mnt_segwritecnt = (u_int16_t)temp;
2964
2965 if (readsegsize)
2966 temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
2967 else
2968 temp = mp->mnt_maxreadcnt;
2969 mp->mnt_maxsegreadsize = (u_int32_t)temp;
2970
2971 if (writesegsize)
2972 temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize;
2973 else
2974 temp = mp->mnt_maxwritecnt;
2975 mp->mnt_maxsegwritesize = (u_int32_t)temp;
2976
2977 if (alignment)
2978 temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1;
2979 else
2980 temp = 0;
2981 mp->mnt_alignmentmask = temp;
2982
2983
2984 if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH)
2985 temp = ioqueue_depth;
2986 else
2987 temp = MNT_DEFAULT_IOQUEUE_DEPTH;
2988
2989 mp->mnt_ioqueue_depth = temp;
2990 mp->mnt_ioscale = (mp->mnt_ioqueue_depth + (MNT_DEFAULT_IOQUEUE_DEPTH - 1)) / MNT_DEFAULT_IOQUEUE_DEPTH;
2991
2992 if (mp->mnt_ioscale > 1)
2993 printf("ioqueue_depth = %d, ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
2994
2995 if (features & DK_FEATURE_FORCE_UNIT_ACCESS)
2996 mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
2997 if (features & DK_FEATURE_UNMAP)
2998 mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED;
2999 return (error);
3000 }
3001
3002 static struct klist fs_klist;
3003 lck_grp_t *fs_klist_lck_grp;
3004 lck_mtx_t *fs_klist_lock;
3005
3006 void
3007 vfs_event_init(void)
3008 {
3009
3010 klist_init(&fs_klist);
3011 fs_klist_lck_grp = lck_grp_alloc_init("fs_klist", NULL);
3012 fs_klist_lock = lck_mtx_alloc_init(fs_klist_lck_grp, NULL);
3013 }
3014
3015 void
3016 vfs_event_signal(__unused fsid_t *fsid, u_int32_t event, __unused intptr_t data)
3017 {
3018 lck_mtx_lock(fs_klist_lock);
3019 KNOTE(&fs_klist, event);
3020 lck_mtx_unlock(fs_klist_lock);
3021 }
3022
3023 /*
3024 * return the number of mounted filesystems.
3025 */
3026 static int
3027 sysctl_vfs_getvfscnt(void)
3028 {
3029 return(mount_getvfscnt());
3030 }
3031
3032
3033 static int
3034 mount_getvfscnt(void)
3035 {
3036 int ret;
3037
3038 mount_list_lock();
3039 ret = nummounts;
3040 mount_list_unlock();
3041 return (ret);
3042
3043 }
3044
3045
3046
3047 static int
3048 mount_fillfsids(fsid_t *fsidlst, int count)
3049 {
3050 struct mount *mp;
3051 int actual=0;
3052
3053 actual = 0;
3054 mount_list_lock();
3055 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3056 if (actual < count) {
3057 fsidlst[actual] = mp->mnt_vfsstat.f_fsid;
3058 actual++;
3059 }
3060 }
3061 mount_list_unlock();
3062 return (actual);
3063
3064 }
3065
3066 /*
3067 * fill in the array of fsid_t's up to a max of 'count', the actual
3068 * number filled in will be set in '*actual'. If there are more fsid_t's
3069 * than room in fsidlst then ENOMEM will be returned and '*actual' will
3070 * have the actual count.
3071 * callers depend on *actual being filled out even in the error case.
3072 */
3073 static int
3074 sysctl_vfs_getvfslist(fsid_t *fsidlst, int count, int *actual)
3075 {
3076 struct mount *mp;
3077
3078 *actual = 0;
3079 mount_list_lock();
3080 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3081 (*actual)++;
3082 if (*actual <= count)
3083 fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid;
3084 }
3085 mount_list_unlock();
3086 return (*actual <= count ? 0 : ENOMEM);
3087 }
3088
3089 static int
3090 sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1,
3091 __unused int arg2, struct sysctl_req *req)
3092 {
3093 int actual, error;
3094 size_t space;
3095 fsid_t *fsidlst;
3096
3097 /* This is a readonly node. */
3098 if (req->newptr != USER_ADDR_NULL)
3099 return (EPERM);
3100
3101 /* they are querying us so just return the space required. */
3102 if (req->oldptr == USER_ADDR_NULL) {
3103 req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
3104 return 0;
3105 }
3106 again:
3107 /*
3108 * Retrieve an accurate count of the amount of space required to copy
3109 * out all the fsids in the system.
3110 */
3111 space = req->oldlen;
3112 req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
3113
3114 /* they didn't give us enough space. */
3115 if (space < req->oldlen)
3116 return (ENOMEM);
3117
3118 MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK);
3119 if (fsidlst == NULL) {
3120 return (ENOMEM);
3121 }
3122
3123 error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
3124 &actual);
3125 /*
3126 * If we get back ENOMEM, then another mount has been added while we
3127 * slept in malloc above. If this is the case then try again.
3128 */
3129 if (error == ENOMEM) {
3130 FREE(fsidlst, M_TEMP);
3131 req->oldlen = space;
3132 goto again;
3133 }
3134 if (error == 0) {
3135 error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t));
3136 }
3137 FREE(fsidlst, M_TEMP);
3138 return (error);
3139 }
3140
3141 /*
3142 * Do a sysctl by fsid.
3143 */
3144 static int
3145 sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
3146 struct sysctl_req *req)
3147 {
3148 union union_vfsidctl vc;
3149 struct mount *mp;
3150 struct vfsstatfs *sp;
3151 int *name, flags, namelen;
3152 int error=0, gotref=0;
3153 vfs_context_t ctx = vfs_context_current();
3154 proc_t p = req->p; /* XXX req->p != current_proc()? */
3155 boolean_t is_64_bit;
3156
3157 name = arg1;
3158 namelen = arg2;
3159 is_64_bit = proc_is64bit(p);
3160
3161 error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
3162 if (error)
3163 goto out;
3164 if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
3165 error = EINVAL;
3166 goto out;
3167 }
3168 mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
3169 if (mp == NULL) {
3170 error = ENOENT;
3171 goto out;
3172 }
3173 gotref = 1;
3174 /* reset so that the fs specific code can fetch it. */
3175 req->newidx = 0;
3176 /*
3177 * Note if this is a VFS_CTL then we pass the actual sysctl req
3178 * in for "oldp" so that the lower layer can DTRT and use the
3179 * SYSCTL_IN/OUT routines.
3180 */
3181 if (mp->mnt_op->vfs_sysctl != NULL) {
3182 if (is_64_bit) {
3183 if (vfs_64bitready(mp)) {
3184 error = mp->mnt_op->vfs_sysctl(name, namelen,
3185 CAST_USER_ADDR_T(req),
3186 NULL, USER_ADDR_NULL, 0,
3187 ctx);
3188 }
3189 else {
3190 error = ENOTSUP;
3191 }
3192 }
3193 else {
3194 error = mp->mnt_op->vfs_sysctl(name, namelen,
3195 CAST_USER_ADDR_T(req),
3196 NULL, USER_ADDR_NULL, 0,
3197 ctx);
3198 }
3199 if (error != ENOTSUP) {
3200 goto out;
3201 }
3202 }
3203 switch (name[0]) {
3204 case VFS_CTL_UMOUNT:
3205 req->newidx = 0;
3206 if (is_64_bit) {
3207 req->newptr = vc.vc64.vc_ptr;
3208 req->newlen = (size_t)vc.vc64.vc_len;
3209 }
3210 else {
3211 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
3212 req->newlen = vc.vc32.vc_len;
3213 }
3214 error = SYSCTL_IN(req, &flags, sizeof(flags));
3215 if (error)
3216 break;
3217
3218 mount_ref(mp, 0);
3219 mount_iterdrop(mp);
3220 gotref = 0;
3221 /* safedounmount consumes a ref */
3222 error = safedounmount(mp, flags, ctx);
3223 break;
3224 case VFS_CTL_STATFS:
3225 req->newidx = 0;
3226 if (is_64_bit) {
3227 req->newptr = vc.vc64.vc_ptr;
3228 req->newlen = (size_t)vc.vc64.vc_len;
3229 }
3230 else {
3231 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
3232 req->newlen = vc.vc32.vc_len;
3233 }
3234 error = SYSCTL_IN(req, &flags, sizeof(flags));
3235 if (error)
3236 break;
3237 sp = &mp->mnt_vfsstat;
3238 if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
3239 (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT)))
3240 goto out;
3241 if (is_64_bit) {
3242 struct user64_statfs sfs;
3243 bzero(&sfs, sizeof(sfs));
3244 sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3245 sfs.f_type = mp->mnt_vtable->vfc_typenum;
3246 sfs.f_bsize = (user64_long_t)sp->f_bsize;
3247 sfs.f_iosize = (user64_long_t)sp->f_iosize;
3248 sfs.f_blocks = (user64_long_t)sp->f_blocks;
3249 sfs.f_bfree = (user64_long_t)sp->f_bfree;
3250 sfs.f_bavail = (user64_long_t)sp->f_bavail;
3251 sfs.f_files = (user64_long_t)sp->f_files;
3252 sfs.f_ffree = (user64_long_t)sp->f_ffree;
3253 sfs.f_fsid = sp->f_fsid;
3254 sfs.f_owner = sp->f_owner;
3255
3256 strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
3257 strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
3258 strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);
3259
3260 error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
3261 }
3262 else {
3263 struct user32_statfs sfs;
3264 bzero(&sfs, sizeof(sfs));
3265 sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3266 sfs.f_type = mp->mnt_vtable->vfc_typenum;
3267
3268 /*
3269 * It's possible for there to be more than 2^31 blocks in the filesystem, so we
3270 * have to fudge the numbers here in that case. We inflate the blocksize in order
3271 * to reflect the filesystem size as best we can.
3272 */
3273 if (sp->f_blocks > INT_MAX) {
3274 int shift;
3275
3276 /*
3277 * Work out how far we have to shift the block count down to make it fit.
3278 * Note that it's possible to have to shift so far that the resulting
3279 * blocksize would be unreportably large. At that point, we will clip
3280 * any values that don't fit.
3281 *
3282 * For safety's sake, we also ensure that f_iosize is never reported as
3283 * being smaller than f_bsize.
3284 */
3285 for (shift = 0; shift < 32; shift++) {
3286 if ((sp->f_blocks >> shift) <= INT_MAX)
3287 break;
3288 if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX)
3289 break;
3290 }
3291 #define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
3292 sfs.f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
3293 sfs.f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
3294 sfs.f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
3295 #undef __SHIFT_OR_CLIP
3296 sfs.f_bsize = (user32_long_t)(sp->f_bsize << shift);
3297 sfs.f_iosize = lmax(sp->f_iosize, sp->f_bsize);
3298 } else {
3299 sfs.f_bsize = (user32_long_t)sp->f_bsize;
3300 sfs.f_iosize = (user32_long_t)sp->f_iosize;
3301 sfs.f_blocks = (user32_long_t)sp->f_blocks;
3302 sfs.f_bfree = (user32_long_t)sp->f_bfree;
3303 sfs.f_bavail = (user32_long_t)sp->f_bavail;
3304 }
3305 sfs.f_files = (user32_long_t)sp->f_files;
3306 sfs.f_ffree = (user32_long_t)sp->f_ffree;
3307 sfs.f_fsid = sp->f_fsid;
3308 sfs.f_owner = sp->f_owner;
3309
3310 strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
3311 strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
3312 strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);
3313
3314 error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
3315 }
3316 break;
3317 default:
3318 error = ENOTSUP;
3319 goto out;
3320 }
3321 out:
3322 if(gotref != 0)
3323 mount_iterdrop(mp);
3324 return (error);
3325 }
3326
3327 static int filt_fsattach(struct knote *kn);
3328 static void filt_fsdetach(struct knote *kn);
3329 static int filt_fsevent(struct knote *kn, long hint);
3330 struct filterops fs_filtops = {
3331 .f_attach = filt_fsattach,
3332 .f_detach = filt_fsdetach,
3333 .f_event = filt_fsevent,
3334 };
3335
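/*
 * These filterops back the EVFILT_FS kqueue filter: vfs_event_signal() above
 * KNOTEs fs_klist and filt_fsevent() accumulates the hint bits into kn_fflags.
 * A rough sketch of a userland watcher (error handling omitted, purely
 * illustrative):
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *	EV_SET(&kev, 0, EVFILT_FS, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	...
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 *	(kev.fflags now holds the accumulated VQ_* event bits)
 */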
3336 static int
3337 filt_fsattach(struct knote *kn)
3338 {
3339
3340 lck_mtx_lock(fs_klist_lock);
3341 kn->kn_flags |= EV_CLEAR;
3342 KNOTE_ATTACH(&fs_klist, kn);
3343 lck_mtx_unlock(fs_klist_lock);
3344 return (0);
3345 }
3346
3347 static void
3348 filt_fsdetach(struct knote *kn)
3349 {
3350 lck_mtx_lock(fs_klist_lock);
3351 KNOTE_DETACH(&fs_klist, kn);
3352 lck_mtx_unlock(fs_klist_lock);
3353 }
3354
3355 static int
3356 filt_fsevent(struct knote *kn, long hint)
3357 {
3358 /*
3359 * Backwards compatibility:
3360 * Other filters would do nothing if kn->kn_sfflags == 0
3361 */
3362
3363 if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
3364 kn->kn_fflags |= hint;
3365 }
3366
3367 return (kn->kn_fflags != 0);
3368 }
3369
3370 static int
3371 sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp,
3372 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3373 {
3374 int out, error;
3375 pid_t pid;
3376 proc_t p;
3377
3378 /* We need a pid. */
3379 if (req->newptr == USER_ADDR_NULL)
3380 return (EINVAL);
3381
3382 error = SYSCTL_IN(req, &pid, sizeof(pid));
3383 if (error)
3384 return (error);
3385
3386 p = proc_find(pid < 0 ? -pid : pid);
3387 if (p == NULL)
3388 return (ESRCH);
3389
3390 /*
3391 * Fetching the value is ok, but we only fetch if the old
3392 * pointer is given.
3393 */
3394 if (req->oldptr != USER_ADDR_NULL) {
3395 out = !((p->p_flag & P_NOREMOTEHANG) == 0);
3396 proc_rele(p);
3397 error = SYSCTL_OUT(req, &out, sizeof(out));
3398 return (error);
3399 }
3400
3401 /* cansignal offers us enough security. */
3402 if (p != req->p && proc_suser(req->p) != 0) {
3403 proc_rele(p);
3404 return (EPERM);
3405 }
3406
3407 if (pid < 0)
3408 OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
3409 else
3410 OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
3411 proc_rele(p);
3412
3413 return (0);
3414 }
3415
3416 /* the vfs.generic. branch. */
3417 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RW|CTLFLAG_LOCKED, NULL, "vfs generic hinge");
3418 /* retrieve a list of mounted filesystem fsid_t */
3419 SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist, CTLFLAG_RD,
3420 NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
3421 /* perform operations on filesystem via fsid_t */
3422 SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW|CTLFLAG_LOCKED,
3423 sysctl_vfs_ctlbyfsid, "ctlbyfsid");
3424 SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW|CTLFLAG_ANYBODY,
3425 NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
3426
3427
3428 long num_reusedvnodes = 0;
3429
3430 static int
3431 new_vnode(vnode_t *vpp)
3432 {
3433 vnode_t vp;
3434 int retries = 0; /* retry in case of tablefull */
3435 int force_alloc = 0, walk_count = 0;
3436 unsigned int vpid;
3437 struct timespec ts;
3438 struct timeval current_tv;
3439 #ifndef __LP64__
3440 struct unsafe_fsnode *l_unsafefs = 0;
3441 #endif /* __LP64__ */
3442 proc_t curproc = current_proc();
3443
3444 retry:
3445 microuptime(&current_tv);
3446
3447 vp = NULLVP;
3448
3449 vnode_list_lock();
3450
3451 if ( !TAILQ_EMPTY(&vnode_dead_list)) {
3452 /*
3453 * Can always reuse a dead one
3454 */
3455 vp = TAILQ_FIRST(&vnode_dead_list);
3456 goto steal_this_vp;
3457 }
3458 /*
3459 * no dead vnodes available... if we're under
3460 * the limit, we'll create a new vnode
3461 */
3462 if (numvnodes < desiredvnodes || force_alloc) {
3463 numvnodes++;
3464 vnode_list_unlock();
3465
3466 MALLOC_ZONE(vp, struct vnode *, sizeof(*vp), M_VNODE, M_WAITOK);
3467 bzero((char *)vp, sizeof(*vp));
3468 VLISTNONE(vp); /* avoid double queue removal */
3469 lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr);
3470
3471 klist_init(&vp->v_knotes);
3472 nanouptime(&ts);
3473 vp->v_id = ts.tv_nsec;
3474 vp->v_flag = VSTANDARD;
3475
3476 #if CONFIG_MACF
3477 if (mac_vnode_label_init_needed(vp))
3478 mac_vnode_label_init(vp);
3479 #endif /* MAC */
3480
3481 vp->v_iocount = 1;
3482 goto done;
3483 }
3484
3485 #define MAX_WALK_COUNT 1000
3486
3487 if ( !TAILQ_EMPTY(&vnode_rage_list) &&
3488 (ragevnodes >= rage_limit ||
3489 (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
3490
3491 TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
3492 if ( !(vp->v_listflag & VLIST_RAGE))
3493 panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
3494
3495 // if we're a dependency-capable process, skip vnodes that can
3496 // cause recycling deadlocks. (i.e. this process is diskimages
3497 // helper and the vnode is in a disk image).
3498 //
3499 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
3500 break;
3501 }
3502
3503 // don't iterate more than MAX_WALK_COUNT vnodes to
3504 // avoid keeping the vnode list lock held for too long.
3505 if (walk_count++ > MAX_WALK_COUNT) {
3506 vp = NULL;
3507 break;
3508 }
3509 }
3510
3511 }
3512
3513 if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
3514 /*
3515 * Pick the first vp for possible reuse
3516 */
3517 walk_count = 0;
3518 TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
3519 // if we're a dependency-capable process, skip vnodes that can
3520 // cause recycling deadlocks. (i.e. this process is diskimages
3521 // helper and the vnode is in a disk image)
3522 //
3523 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
3524 break;
3525 }
3526
3527 // don't iterate more than MAX_WALK_COUNT vnodes to
3528 // avoid keeping the vnode list lock held for too long.
3529 if (walk_count++ > MAX_WALK_COUNT) {
3530 vp = NULL;
3531 break;
3532 }
3533 }
3534
3535 }
3536
3537 //
3538 // if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
3539 // then we're trying to create a vnode on behalf of a
3540 // process like diskimages-helper that has file systems
3541 // mounted on top of itself (and thus we can't reclaim
3542 // vnodes in the file systems on top of us). if we can't
3543 // find a vnode to reclaim then we'll just have to force
3544 // the allocation.
3545 //
3546 if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
3547 force_alloc = 1;
3548 vnode_list_unlock();
3549 goto retry;
3550 }
3551
3552 if (vp == NULL) {
3553 /*
3554 * we've reached the system-imposed maximum number of vnodes
3555 * but there isn't a single one available
3556 * wait a bit and then retry... if we can't get a vnode
3557 * after 100 retries, then log a complaint
3558 */
3559 if (++retries <= 100) {
3560 vnode_list_unlock();
3561 delay_for_interval(1, 1000 * 1000);
3562 goto retry;
3563 }
3564
3565 vnode_list_unlock();
3566 tablefull("vnode");
3567 log(LOG_EMERG, "%d desired, %d numvnodes, "
3568 "%d free, %d dead, %d rage\n",
3569 desiredvnodes, numvnodes, freevnodes, deadvnodes, ragevnodes);
3570 #if CONFIG_EMBEDDED
3571 /*
3572 * Running out of vnodes tends to make a system unusable. Start killing
3573 * processes that jetsam knows are killable.
3574 */
3575 if (jetsam_kill_top_proc() < 0) {
3576 /*
3577 * If jetsam can't find any more processes to kill and there
3578 * still aren't any free vnodes, panic. Hopefully we'll get a
3579 * panic log to tell us why we ran out.
3580 */
3581 panic("vnode table is full\n");
3582 }
3583
3584 delay_for_interval(1, 1000 * 1000);
3585 goto retry;
3586 #endif
3587
3588 *vpp = NULL;
3589 return (ENFILE);
3590 }
3591 steal_this_vp:
3592 vpid = vp->v_id;
3593
3594 vnode_list_remove_locked(vp);
3595
3596 vnode_list_unlock();
3597
3598 vnode_lock_spin(vp);
3599
3600 /*
3601 * We may have had to wait for the vnode_lock after removing the vp from the
3602 * freelist, and the vid is bumped only at the very end of reclaim. So it is
3603 * possible that we are looking at a vnode that is being terminated. If so, skip it.
3604 */
3605 if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
3606 VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
3607 /*
3608 * we lost the race between dropping the list lock
3609 * and picking up the vnode_lock... someone else
3610 * used this vnode and it is now in a new state
3611 * so we need to go back and try again
3612 */
3613 vnode_unlock(vp);
3614 goto retry;
3615 }
3616 if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) {
3617 /*
3618 * we did a vnode_rele_ext that asked for
3619 * us not to reenter the filesystem during
3620 * the release even though VL_NEEDINACTIVE was
3621 * set... we'll do it here by doing a
3622 * vnode_get/vnode_put
3623 *
3624 * pick up an iocount so that we can call
3625 * vnode_put and drive the VNOP_INACTIVE...
3626 * vnode_put will either leave us off
3627 * the freelist if a new ref comes in,
3628 * or put us back on the end of the freelist
3629 * or recycle us if we were marked for termination...
3630 * so we'll just go grab a new candidate
3631 */
3632 vp->v_iocount++;
3633 #ifdef JOE_DEBUG
3634 record_vp(vp, 1);
3635 #endif
3636 vnode_put_locked(vp);
3637 vnode_unlock(vp);
3638 goto retry;
3639 }
3640 OSAddAtomicLong(1, &num_reusedvnodes);
3641
3642 /* Checks for anyone racing us for recycle */
3643 if (vp->v_type != VBAD) {
3644 if (vp->v_lflag & VL_DEAD)
3645 panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
3646 vnode_lock_convert(vp);
3647 (void)vnode_reclaim_internal(vp, 1, 1, 0);
3648
3649 if ((VONLIST(vp)))
3650 panic("new_vnode(%p): vp on list", vp);
3651 if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
3652 (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH)))
3653 panic("new_vnode(%p): free vnode still referenced", vp);
3654 if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0))
3655 panic("new_vnode(%p): vnode seems to be on mount list", vp);
3656 if ( !LIST_EMPTY(&vp->v_nclinks) || !LIST_EMPTY(&vp->v_ncchildren))
3657 panic("new_vnode(%p): vnode still hooked into the name cache", vp);
3658 }
3659
3660 #ifndef __LP64__
3661 if (vp->v_unsafefs) {
3662 l_unsafefs = vp->v_unsafefs;
3663 vp->v_unsafefs = (struct unsafe_fsnode *)NULL;
3664 }
3665 #endif /* __LP64__ */
3666
3667 #if CONFIG_MACF
3668 /*
3669 * We should never see VL_LABELWAIT or VL_LABEL here.
3670 * as those operations hold a reference.
3671 */
3672 assert ((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
3673 assert ((vp->v_lflag & VL_LABEL) != VL_LABEL);
3674 if (vp->v_lflag & VL_LABELED) {
3675 vnode_lock_convert(vp);
3676 mac_vnode_label_recycle(vp);
3677 } else if (mac_vnode_label_init_needed(vp)) {
3678 vnode_lock_convert(vp);
3679 mac_vnode_label_init(vp);
3680 }
3681
3682 #endif /* MAC */
3683
3684 vp->v_iocount = 1;
3685 vp->v_lflag = 0;
3686 vp->v_writecount = 0;
3687 vp->v_references = 0;
3688 vp->v_iterblkflags = 0;
3689 vp->v_flag = VSTANDARD;
3690 /* vbad vnodes can point to dead_mountp */
3691 vp->v_mount = NULL;
3692 vp->v_defer_reclaimlist = (vnode_t)0;
3693
3694 vnode_unlock(vp);
3695
3696 #ifndef __LP64__
3697 if (l_unsafefs) {
3698 lck_mtx_destroy(&l_unsafefs->fsnodelock, vnode_lck_grp);
3699 FREE_ZONE((void *)l_unsafefs, sizeof(struct unsafe_fsnode), M_UNSAFEFS);
3700 }
3701 #endif /* __LP64__ */
3702
3703 done:
3704 *vpp = vp;
3705
3706 return (0);
3707 }
3708
3709 void
3710 vnode_lock(vnode_t vp)
3711 {
3712 lck_mtx_lock(&vp->v_lock);
3713 }
3714
3715 void
3716 vnode_lock_spin(vnode_t vp)
3717 {
3718 lck_mtx_lock_spin(&vp->v_lock);
3719 }
3720
3721 void
3722 vnode_unlock(vnode_t vp)
3723 {
3724 lck_mtx_unlock(&vp->v_lock);
3725 }
3726
3727
3728
3729 int
3730 vnode_get(struct vnode *vp)
3731 {
3732 int retval;
3733
3734 vnode_lock_spin(vp);
3735 retval = vnode_get_locked(vp);
3736 vnode_unlock(vp);
3737
3738 return(retval);
3739 }
3740
3741 int
3742 vnode_get_locked(struct vnode *vp)
3743 {
3744 #if DIAGNOSTIC
3745 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
3746 #endif
3747 if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
3748 return(ENOENT);
3749 }
3750 vp->v_iocount++;
3751 #ifdef JOE_DEBUG
3752 record_vp(vp, 1);
3753 #endif
3754 return (0);
3755 }
3756
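/*
 * vnode_getwithvid() takes an iocount only if the vnode still carries the
 * identity the caller captured in 'vid'; if the vnode has been recycled in
 * the meantime (v_id is bumped in vnode_reclaim_internal) it fails with
 * ENOENT. vnode_getwithref() is intended for callers that already hold a
 * reference (usecount) on the vnode and just need an iocount for the
 * duration of an operation.
 */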
3757 int
3758 vnode_getwithvid(vnode_t vp, uint32_t vid)
3759 {
3760 return(vget_internal(vp, vid, ( VNODE_NODEAD| VNODE_WITHID)));
3761 }
3762
3763 int
3764 vnode_getwithref(vnode_t vp)
3765 {
3766 return(vget_internal(vp, 0, 0));
3767 }
3768
3769
3770 __private_extern__ int
3771 vnode_getalways(vnode_t vp)
3772 {
3773 return(vget_internal(vp, 0, VNODE_ALWAYS));
3774 }
3775
3776 int
3777 vnode_put(vnode_t vp)
3778 {
3779 int retval;
3780
3781 vnode_lock_spin(vp);
3782 retval = vnode_put_locked(vp);
3783 vnode_unlock(vp);
3784
3785 return(retval);
3786 }
3787
3788 int
3789 vnode_put_locked(vnode_t vp)
3790 {
3791 vfs_context_t ctx = vfs_context_current(); /* hoist outside loop */
3792
3793 #if DIAGNOSTIC
3794 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
3795 #endif
3796 retry:
3797 if (vp->v_iocount < 1)
3798 panic("vnode_put(%p): iocount < 1", vp);
3799
3800 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
3801 vnode_dropiocount(vp);
3802 return(0);
3803 }
3804 if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE) {
3805
3806 vp->v_lflag &= ~VL_NEEDINACTIVE;
3807 vnode_unlock(vp);
3808
3809 VNOP_INACTIVE(vp, ctx);
3810
3811 vnode_lock_spin(vp);
3812 /*
3813 * because we had to drop the vnode lock before calling
3814 * VNOP_INACTIVE, the state of this vnode may have changed...
3815 * we may pick up both VL_MARKTERM and either
3816 * an iocount or a usecount while in the VNOP_INACTIVE call
3817 * we don't want to call vnode_reclaim_internal on a vnode
3818 * that has active references on it... so loop back around
3819 * and reevaluate the state
3820 */
3821 goto retry;
3822 }
3823 vp->v_lflag &= ~VL_NEEDINACTIVE;
3824
3825 if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
3826 vnode_lock_convert(vp);
3827 vnode_reclaim_internal(vp, 1, 1, 0);
3828 }
3829 vnode_dropiocount(vp);
3830 vnode_list_add(vp);
3831
3832 return(0);
3833 }
3834
3835 /* is vnode_t in use by others? */
3836 int
3837 vnode_isinuse(vnode_t vp, int refcnt)
3838 {
3839 return(vnode_isinuse_locked(vp, refcnt, 0));
3840 }
3841
3842
3843 static int
3844 vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
3845 {
3846 int retval = 0;
3847
3848 if (!locked)
3849 vnode_lock_spin(vp);
3850 if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
3851 retval = 1;
3852 goto out;
3853 }
3854 if (vp->v_type == VREG) {
3855 retval = ubc_isinuse_locked(vp, refcnt, 1);
3856 }
3857
3858 out:
3859 if (!locked)
3860 vnode_unlock(vp);
3861 return(retval);
3862 }
3863
3864
3865 /* resume vnode_t */
3866 errno_t
3867 vnode_resume(vnode_t vp)
3868 {
3869 if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
3870
3871 vnode_lock_spin(vp);
3872 vp->v_lflag &= ~VL_SUSPENDED;
3873 vp->v_owner = NULL;
3874 vnode_unlock(vp);
3875
3876 wakeup(&vp->v_iocount);
3877 }
3878 return(0);
3879 }
3880
3881 /* suspend vnode_t
3882 * Please do not use on more than one vnode at a time as it may
3883 * cause deadlocks.
3884 * xxx should we explicitly prevent this from happening?
3885 */
3886
3887 errno_t
3888 vnode_suspend(vnode_t vp)
3889 {
3890 if (vp->v_lflag & VL_SUSPENDED) {
3891 return(EBUSY);
3892 }
3893
3894 vnode_lock_spin(vp);
3895
3896 /*
3897 * xxx is this sufficient to check if a vnode_drain is in
3898 * progress?
3899 */
3900
3901 if (vp->v_owner == NULL) {
3902 vp->v_lflag |= VL_SUSPENDED;
3903 vp->v_owner = current_thread();
3904 }
3905 vnode_unlock(vp);
3906
3907 return(0);
3908 }
3909
3910
3911
3912 static errno_t
3913 vnode_drain(vnode_t vp)
3914 {
3915
3916 if (vp->v_lflag & VL_DRAIN) {
3917 panic("vnode_drain: recursive drain");
3918 return(ENOENT);
3919 }
3920 vp->v_lflag |= VL_DRAIN;
3921 vp->v_owner = current_thread();
3922
3923 while (vp->v_iocount > 1)
3924 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);
3925 return(0);
3926 }
3927
3928
3929 /*
3930 * if the number of recent references via vnode_getwithvid or vnode_getwithref
3931 * exceeds this threshold, then 'UN-AGE' the vnode by removing it from
3932 * the LRU list if it's currently on it... once the iocount and usecount both drop
3933 * to 0, it will get put back on the end of the list, effectively making it younger.
3934 * This allows us to keep actively referenced vnodes in the list without having
3935 * to constantly remove and re-add them each time a vnode w/o a usecount is
3936 * referenced, which would cost us taking and dropping a global lock twice.
3937 */
3938 #define UNAGE_THRESHHOLD 25
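/*
 * e.g. once a vnode has accumulated 25 recent references through
 * vnode_getiocount() below, it is pulled off whatever free/rage list it may
 * be on; it only rejoins the tail of a list when both its iocount and
 * usecount drop back to 0 (see vnode_put_locked / vnode_list_add).
 */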
3939
3940 static errno_t
3941 vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
3942 {
3943 int nodead = vflags & VNODE_NODEAD;
3944 int nosusp = vflags & VNODE_NOSUSPEND;
3945 int always = vflags & VNODE_ALWAYS;
3946
3947 for (;;) {
3948 /*
3949 * if it is a dead vnode with deadfs
3950 */
3951 if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
3952 return(ENOENT);
3953 }
3954 /*
3955 * will return VL_DEAD ones
3956 */
3957 if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0 ) {
3958 break;
3959 }
3960 /*
3961 * if suspended vnodes are to be failed
3962 */
3963 if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
3964 return(ENOENT);
3965 }
3966 /*
3967 * if you are the owner of the drain/suspend/termination, you can acquire the iocount;
3968 * note that VL_TERMINATE does not set an owner
3969 */
3970 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
3971 (vp->v_owner == current_thread())) {
3972 break;
3973 }
3974
3975 if (always != 0)
3976 break;
3977 vnode_lock_convert(vp);
3978
3979 if (vp->v_lflag & VL_TERMINATE) {
3980 vp->v_lflag |= VL_TERMWANT;
3981
3982 msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vnode getiocount", NULL);
3983 } else
3984 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
3985 }
3986 if (vid != vp->v_id) {
3987 return(ENOENT);
3988 }
3989 if (++vp->v_references >= UNAGE_THRESHHOLD) {
3990 vp->v_references = 0;
3991 vnode_list_remove(vp);
3992 }
3993 vp->v_iocount++;
3994 #ifdef JOE_DEBUG
3995 record_vp(vp, 1);
3996 #endif
3997 return(0);
3998 }
3999
4000 static void
4001 vnode_dropiocount (vnode_t vp)
4002 {
4003 if (vp->v_iocount < 1)
4004 panic("vnode_dropiocount(%p): v_iocount < 1", vp);
4005
4006 vp->v_iocount--;
4007 #ifdef JOE_DEBUG
4008 record_vp(vp, -1);
4009 #endif
4010 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1))
4011 wakeup(&vp->v_iocount);
4012 }
4013
4014
4015 void
4016 vnode_reclaim(struct vnode * vp)
4017 {
4018 vnode_reclaim_internal(vp, 0, 0, 0);
4019 }
4020
4021 __private_extern__
4022 void
4023 vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
4024 {
4025 int isfifo = 0;
4026
4027 if (!locked)
4028 vnode_lock(vp);
4029
4030 if (vp->v_lflag & VL_TERMINATE) {
4031 panic("vnode reclaim in progress");
4032 }
4033 vp->v_lflag |= VL_TERMINATE;
4034
4035 vn_clearunionwait(vp, 1);
4036
4037 vnode_drain(vp);
4038
4039 isfifo = (vp->v_type == VFIFO);
4040
4041 if (vp->v_type != VBAD)
4042 vgone(vp, flags); /* clean and reclaim the vnode */
4043
4044 /*
4045 * give the vnode a new identity so that vnode_getwithvid will fail
4046 * on any stale cache accesses...
4047 * grab the list_lock so that if we're in "new_vnode"
4048 * behind the list_lock trying to steal this vnode, the v_id is stable...
4049 * once new_vnode drops the list_lock, it will block trying to take
4050 * the vnode lock until we release it... at that point it will evaluate
4051 * whether the v_id has changed
4052 * also need to make sure that the vnode isn't on a list where "new_vnode"
4053 * can find it after the v_id has been bumped until we are completely done
4054 * with the vnode (i.e. putting it back on a list has to be the very last
4055 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
4056 * are holding an io_count on the vnode... they need to drop the io_count
4057 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
4058 * they are completely done with the vnode
4059 */
4060 vnode_list_lock();
4061
4062 vnode_list_remove_locked(vp);
4063 vp->v_id++;
4064
4065 vnode_list_unlock();
4066
4067 if (isfifo) {
4068 struct fifoinfo * fip;
4069
4070 fip = vp->v_fifoinfo;
4071 vp->v_fifoinfo = NULL;
4072 FREE(fip, M_TEMP);
4073 }
4074 vp->v_type = VBAD;
4075
4076 if (vp->v_data)
4077 panic("vnode_reclaim_internal: cleaned vnode isn't");
4078 if (vp->v_numoutput)
4079 panic("vnode_reclaim_internal: clean vnode has pending I/O's");
4080 if (UBCINFOEXISTS(vp))
4081 panic("vnode_reclaim_internal: ubcinfo not cleaned");
4082 if (vp->v_parent)
4083 panic("vnode_reclaim_internal: vparent not removed");
4084 if (vp->v_name)
4085 panic("vnode_reclaim_internal: vname not removed");
4086
4087 vp->v_socket = NULL;
4088
4089 vp->v_lflag &= ~VL_TERMINATE;
4090 vp->v_lflag &= ~VL_DRAIN;
4091 vp->v_owner = NULL;
4092
4093 KNOTE(&vp->v_knotes, NOTE_REVOKE);
4094
4095 /* Make sure that when we reuse the vnode, no knotes left over */
4096 klist_init(&vp->v_knotes);
4097
4098 if (vp->v_lflag & VL_TERMWANT) {
4099 vp->v_lflag &= ~VL_TERMWANT;
4100 wakeup(&vp->v_lflag);
4101 }
4102 if (!reuse) {
4103 /*
4104 * make sure we get on the
4105 * dead list if appropriate
4106 */
4107 vnode_list_add(vp);
4108 }
4109 if (!locked)
4110 vnode_unlock(vp);
4111 }
4112
4113 /* USAGE:
4114 * The following api creates a vnode and associates all the parameter specified in vnode_fsparam
4115 * structure and returns a vnode handle with a reference. device aliasing is handled here so checkalias
4116 * is obsoleted by this.
4117 * vnode_create(int flavor, size_t size, void * param, vnode_t *vp)
4118 */
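/*
 * A minimal sketch of a filesystem calling this to wire up a regular file
 * vnode. Only fields actually consumed below are shown; myfs_vnodeop_p,
 * 'node' and 'node_size' are hypothetical filesystem-private names:
 *
 *	struct vnode_fsparam vfsp;
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	bzero(&vfsp, sizeof(vfsp));
 *	vfsp.vnfs_mp = mp;
 *	vfsp.vnfs_vtype = VREG;
 *	vfsp.vnfs_fsnode = node;
 *	vfsp.vnfs_vops = myfs_vnodeop_p;
 *	vfsp.vnfs_dvp = dvp;
 *	vfsp.vnfs_cnp = cnp;
 *	vfsp.vnfs_filesize = node_size;
 *	vfsp.vnfs_flags = VNFS_ADDFSREF;
 *
 *	error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vp);
 */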
4119 int
4120 vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
4121 {
4122 int error;
4123 int insert = 1;
4124 vnode_t vp;
4125 vnode_t nvp;
4126 vnode_t dvp;
4127 struct uthread *ut;
4128 struct componentname *cnp;
4129 struct vnode_fsparam *param = (struct vnode_fsparam *)data;
4130
4131 if (flavor == VNCREATE_FLAVOR && (size == VCREATESIZE) && param) {
4132 if ( (error = new_vnode(&vp)) ) {
4133 return(error);
4134 } else {
4135 dvp = param->vnfs_dvp;
4136 cnp = param->vnfs_cnp;
4137
4138 vp->v_op = param->vnfs_vops;
4139 vp->v_type = param->vnfs_vtype;
4140 vp->v_data = param->vnfs_fsnode;
4141
4142 if (param->vnfs_markroot)
4143 vp->v_flag |= VROOT;
4144 if (param->vnfs_marksystem)
4145 vp->v_flag |= VSYSTEM;
4146 if (vp->v_type == VREG) {
4147 error = ubc_info_init_withsize(vp, param->vnfs_filesize);
4148 if (error) {
4149 #ifdef JOE_DEBUG
4150 record_vp(vp, 1);
4151 #endif
4152 vp->v_mount = NULL;
4153 vp->v_op = dead_vnodeop_p;
4154 vp->v_tag = VT_NON;
4155 vp->v_data = NULL;
4156 vp->v_type = VBAD;
4157 vp->v_lflag |= VL_DEAD;
4158
4159 vnode_put(vp);
4160 return(error);
4161 }
4162 }
4163 #ifdef JOE_DEBUG
4164 record_vp(vp, 1);
4165 #endif
4166 if (vp->v_type == VCHR || vp->v_type == VBLK) {
4167
4168 vp->v_tag = VT_DEVFS; /* callers will reset if needed (bdevvp) */
4169
4170 if ( (nvp = checkalias(vp, param->vnfs_rdev)) ) {
4171 /*
4172 * if checkalias returns a vnode, it will be locked
4173 *
4174 * first get rid of the unneeded vnode we acquired
4175 */
4176 vp->v_data = NULL;
4177 vp->v_op = spec_vnodeop_p;
4178 vp->v_type = VBAD;
4179 vp->v_lflag = VL_DEAD;
4180 vp->v_data = NULL;
4181 vp->v_tag = VT_NON;
4182 vnode_put(vp);
4183
4184 /*
4185 * switch to aliased vnode and finish
4186 * preparing it
4187 */
4188 vp = nvp;
4189
4190 vclean(vp, 0);
4191 vp->v_op = param->vnfs_vops;
4192 vp->v_type = param->vnfs_vtype;
4193 vp->v_data = param->vnfs_fsnode;
4194 vp->v_lflag = 0;
4195 vp->v_mount = NULL;
4196 insmntque(vp, param->vnfs_mp);
4197 insert = 0;
4198 vnode_unlock(vp);
4199 }
4200 }
4201
4202 if (vp->v_type == VFIFO) {
4203 struct fifoinfo *fip;
4204
4205 MALLOC(fip, struct fifoinfo *,
4206 sizeof(*fip), M_TEMP, M_WAITOK);
4207 bzero(fip, sizeof(struct fifoinfo ));
4208 vp->v_fifoinfo = fip;
4209 }
4210 /* The file system must pass the address of the location where
4211 * it stores the vnode pointer. Once we add the vnode to the mount
4212 * list and name cache it becomes discoverable, so the file system node
4213 * must have its connection to the vnode set up by then.
4214 */
4215 *vpp = vp;
4216
4217 /* Add fs named reference. */
4218 if (param->vnfs_flags & VNFS_ADDFSREF) {
4219 vp->v_lflag |= VNAMED_FSHASH;
4220 }
4221 if (param->vnfs_mp) {
4222 if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL)
4223 vp->v_flag |= VLOCKLOCAL;
4224 if (insert) {
4225 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4226 panic("insmntque: vp on the free list\n");
4227 /*
4228 * enter in mount vnode list
4229 */
4230 insmntque(vp, param->vnfs_mp);
4231 }
4232 #ifndef __LP64__
4233 if ((param->vnfs_mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE) == 0) {
4234 MALLOC_ZONE(vp->v_unsafefs, struct unsafe_fsnode *,
4235 sizeof(struct unsafe_fsnode), M_UNSAFEFS, M_WAITOK);
4236 vp->v_unsafefs->fsnode_count = 0;
4237 vp->v_unsafefs->fsnodeowner = (void *)NULL;
4238 lck_mtx_init(&vp->v_unsafefs->fsnodelock, vnode_lck_grp, vnode_lck_attr);
4239 }
4240 #endif /* __LP64__ */
4241 }
4242 if (dvp && vnode_ref(dvp) == 0) {
4243 vp->v_parent = dvp;
4244 }
4245 if (cnp) {
4246 if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
4247 /*
4248 * enter into name cache
4249 * we've got the info to enter it into the name cache now
4250 * cache_enter_create will pick up an extra reference on
4251 * the name entered into the string cache
4252 */
4253 vp->v_name = cache_enter_create(dvp, vp, cnp);
4254 } else
4255 vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
4256
4257 if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED)
4258 vp->v_flag |= VISUNION;
4259 }
4260 if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
4261 /*
4262 * this vnode is being created as cacheable in the name cache
4263 * this allows us to re-enter it in the cache
4264 */
4265 vp->v_flag |= VNCACHEABLE;
4266 }
4267 ut = get_bsdthread_info(current_thread());
4268
4269 if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
4270 (ut->uu_flag & UT_RAGE_VNODES)) {
4271 /*
4272 * process has indicated that it wants any
4273 * vnodes created on its behalf to be rapidly
4274 * aged to reduce the impact on the cached set
4275 * of vnodes
4276 */
4277 vp->v_flag |= VRAGE;
4278 }
4279 return(0);
4280 }
4281 }
4282 return (EINVAL);
4283 }
4284
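/*
 * Illustrative sketch (hypothetical usage, not compiled): how a filesystem's
 * node-creation path might fill in a vnode_fsparam and call vnode_create().
 * The myfs_* names are invented; only the vnfs_* fields and flags referenced
 * in the routine above are assumed.
 */
#if 0	/* example only -- never compiled */
static int
myfs_build_vnode(mount_t mp, vnode_t dvp, struct componentname *cnp,
    struct myfs_node *np, off_t filesize, vnode_t *vpp)
{
	struct vnode_fsparam vfsp;

	bzero(&vfsp, sizeof(vfsp));
	vfsp.vnfs_mp       = mp;		/* mount this vnode belongs to */
	vfsp.vnfs_vtype    = VREG;		/* object type */
	vfsp.vnfs_dvp      = dvp;		/* parent; referenced if vnode_ref() succeeds */
	vfsp.vnfs_cnp      = cnp;		/* name to enter in the name cache */
	vfsp.vnfs_vops     = myfs_vnodeop_p;	/* vnode operations vector (hypothetical) */
	vfsp.vnfs_fsnode   = np;		/* filesystem-private node (becomes v_data) */
	vfsp.vnfs_filesize = filesize;		/* used by ubc_info_init_withsize() for VREG */
	vfsp.vnfs_flags    = VNFS_ADDFSREF;	/* take a named fs reference (VNAMED_FSHASH) */

	/* on success *vpp carries an iocount owned by the caller */
	return vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, vpp);
}
#endif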
4285 int
4286 vnode_addfsref(vnode_t vp)
4287 {
4288 vnode_lock_spin(vp);
4289 if (vp->v_lflag & VNAMED_FSHASH)
4290 panic("add_fsref: vp already has named reference");
4291 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4292 panic("addfsref: vp on the free list\n");
4293 vp->v_lflag |= VNAMED_FSHASH;
4294 vnode_unlock(vp);
4295 return(0);
4296
4297 }
4298 int
4299 vnode_removefsref(vnode_t vp)
4300 {
4301 vnode_lock_spin(vp);
4302 if ((vp->v_lflag & VNAMED_FSHASH) == 0)
4303 panic("remove_fsref: no named reference");
4304 vp->v_lflag &= ~VNAMED_FSHASH;
4305 vnode_unlock(vp);
4306 return(0);
4307
4308 }
4309
4310
4311 int
4312 vfs_iterate(__unused int flags, int (*callout)(mount_t, void *), void *arg)
4313 {
4314 mount_t mp;
4315 int ret = 0;
4316 fsid_t * fsid_list;
4317 int count, actualcount, i;
4318 void * allocmem;
4319
4320 count = mount_getvfscnt();
4321 count += 10;
4322
4323 fsid_list = (fsid_t *)kalloc(count * sizeof(fsid_t));
4324 allocmem = (void *)fsid_list;
4325
4326 actualcount = mount_fillfsids(fsid_list, count);
4327
4328 for (i=0; i< actualcount; i++) {
4329
4330 /* obtain the mount point with iteration reference */
4331 mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);
4332
4333 if(mp == (struct mount *)0)
4334 continue;
4335 mount_lock(mp);
4336 if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
4337 mount_unlock(mp);
4338 mount_iterdrop(mp);
4339 continue;
4340
4341 }
4342 mount_unlock(mp);
4343
4344 /* iterate over all the vnodes */
4345 ret = callout(mp, arg);
4346
4347 mount_iterdrop(mp);
4348
4349 switch (ret) {
4350 case VFS_RETURNED:
4351 case VFS_RETURNED_DONE:
4352 if (ret == VFS_RETURNED_DONE) {
4353 ret = 0;
4354 goto out;
4355 }
4356 break;
4357
4358 case VFS_CLAIMED_DONE:
4359 ret = 0;
4360 goto out;
4361 case VFS_CLAIMED:
4362 default:
4363 break;
4364 }
4365 ret = 0;
4366 }
4367
4368 out:
4369 kfree(allocmem, (count * sizeof(fsid_t)));
4370 return (ret);
4371 }
4372
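/*
 * Illustrative sketch (hypothetical usage, not compiled): a vfs_iterate()
 * callout is handed each live mount with an iteration reference already
 * held, and steers the walk through the VFS_RETURNED / VFS_RETURNED_DONE /
 * VFS_CLAIMED return values handled above.
 */
#if 0	/* example only -- never compiled */
static int
count_rdonly_callback(mount_t mp, void *arg)
{
	int *countp = (int *)arg;

	if (vfs_flags(mp) & MNT_RDONLY)
		(*countp)++;

	return (VFS_RETURNED);	/* keep iterating; VFS_RETURNED_DONE would stop the walk */
}

static int
count_rdonly_mounts(void)
{
	int count = 0;

	(void) vfs_iterate(0, count_rdonly_callback, &count);
	return (count);
}
#endif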
4373 /*
4374 * Update the vfsstatfs structure in the mountpoint.
4375 * MAC: Parameter eventtype added, indicating whether the event that
4376 * triggered this update came from user space, via a system call
4377 * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
4378 */
4379 int
4380 vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
4381 {
4382 struct vfs_attr va;
4383 int error;
4384
4385 /*
4386 * Request the attributes we want to propagate into
4387 * the per-mount vfsstat structure.
4388 */
4389 VFSATTR_INIT(&va);
4390 VFSATTR_WANTED(&va, f_iosize);
4391 VFSATTR_WANTED(&va, f_blocks);
4392 VFSATTR_WANTED(&va, f_bfree);
4393 VFSATTR_WANTED(&va, f_bavail);
4394 VFSATTR_WANTED(&va, f_bused);
4395 VFSATTR_WANTED(&va, f_files);
4396 VFSATTR_WANTED(&va, f_ffree);
4397 VFSATTR_WANTED(&va, f_bsize);
4398 VFSATTR_WANTED(&va, f_fssubtype);
4399 #if CONFIG_MACF
4400 if (eventtype == VFS_USER_EVENT) {
4401 error = mac_mount_check_getattr(ctx, mp, &va);
4402 if (error != 0)
4403 return (error);
4404 }
4405 #endif
4406
4407 if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
4408 KAUTH_DEBUG("STAT - filesystem returned error %d", error);
4409 return(error);
4410 }
4411
4412 /*
4413 * Unpack into the per-mount structure.
4414 *
4415 * We only overwrite these fields, which are likely to change:
4416 * f_blocks
4417 * f_bfree
4418 * f_bavail
4419 * f_bused
4420 * f_files
4421 * f_ffree
4422 *
4423 * And these which are not, but which the FS has no other way
4424 * of providing to us:
4425 * f_bsize
4426 * f_iosize
4427 * f_fssubtype
4428 *
4429 */
4430 if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
4431 /* 4822056 - protect against malformed server mount */
4432 mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
4433 } else {
4434 mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */
4435 }
4436 if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
4437 mp->mnt_vfsstat.f_iosize = va.f_iosize;
4438 } else {
4439 mp->mnt_vfsstat.f_iosize = 1024 * 1024; /* 1MB sensible I/O size */
4440 }
4441 if (VFSATTR_IS_SUPPORTED(&va, f_blocks))
4442 mp->mnt_vfsstat.f_blocks = va.f_blocks;
4443 if (VFSATTR_IS_SUPPORTED(&va, f_bfree))
4444 mp->mnt_vfsstat.f_bfree = va.f_bfree;
4445 if (VFSATTR_IS_SUPPORTED(&va, f_bavail))
4446 mp->mnt_vfsstat.f_bavail = va.f_bavail;
4447 if (VFSATTR_IS_SUPPORTED(&va, f_bused))
4448 mp->mnt_vfsstat.f_bused = va.f_bused;
4449 if (VFSATTR_IS_SUPPORTED(&va, f_files))
4450 mp->mnt_vfsstat.f_files = va.f_files;
4451 if (VFSATTR_IS_SUPPORTED(&va, f_ffree))
4452 mp->mnt_vfsstat.f_ffree = va.f_ffree;
4453
4454 /* this is unlikely to change, but has to be queried for */
4455 if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype))
4456 mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;
4457
4458 return(0);
4459 }
4460
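/*
 * Illustrative sketch (hypothetical usage, not compiled): the attributes
 * requested above are only copied into the per-mount vfsstat when the
 * filesystem marks them supported, which its VFS_GETATTR handler would
 * typically do with VFSATTR_RETURN().  The myfs_* names are invented.
 */
#if 0	/* example only -- never compiled */
static int
myfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, __unused vfs_context_t ctx)
{
	struct myfs_mount *mmp = vfs_fsprivate(mp);

	VFSATTR_RETURN(fsap, f_bsize,  mmp->block_size);
	VFSATTR_RETURN(fsap, f_iosize, mmp->preferred_io_size);
	VFSATTR_RETURN(fsap, f_blocks, mmp->total_blocks);
	VFSATTR_RETURN(fsap, f_bfree,  mmp->free_blocks);
	VFSATTR_RETURN(fsap, f_bavail, mmp->free_blocks);
	/* anything not returned falls back to the defaults chosen above */
	return (0);
}
#endif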
4461 int
4462 mount_list_add(mount_t mp)
4463 {
4464 int res;
4465
4466 mount_list_lock();
4467 if (system_inshutdown != 0) {
4468 res = -1;
4469 } else {
4470 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
4471 nummounts++;
4472 res = 0;
4473 }
4474 mount_list_unlock();
4475
4476 return res;
4477 }
4478
4479 void
4480 mount_list_remove(mount_t mp)
4481 {
4482 mount_list_lock();
4483 TAILQ_REMOVE(&mountlist, mp, mnt_list);
4484 nummounts--;
4485 mp->mnt_list.tqe_next = NULL;
4486 mp->mnt_list.tqe_prev = NULL;
4487 mount_list_unlock();
4488 }
4489
4490 mount_t
4491 mount_lookupby_volfsid(int volfs_id, int withref)
4492 {
4493 mount_t cur_mount = (mount_t)0;
4494 mount_t mp;
4495
4496 mount_list_lock();
4497 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4498 if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
4499 (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
4500 (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
4501 cur_mount = mp;
4502 if (withref) {
4503 if (mount_iterref(cur_mount, 1)) {
4504 cur_mount = (mount_t)0;
4505 mount_list_unlock();
4506 goto out;
4507 }
4508 }
4509 break;
4510 }
4511 }
4512 mount_list_unlock();
4513 if (withref && (cur_mount != (mount_t)0)) {
4514 mp = cur_mount;
4515 if (vfs_busy(mp, LK_NOWAIT) != 0) {
4516 cur_mount = (mount_t)0;
4517 }
4518 mount_iterdrop(mp);
4519 }
4520 out:
4521 return(cur_mount);
4522 }
4523
4524 mount_t
4525 mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
4526 {
4527 mount_t retmp = (mount_t)0;
4528 mount_t mp;
4529
4530 if (!locked)
4531 mount_list_lock();
4532 TAILQ_FOREACH(mp, &mountlist, mnt_list)
4533 if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
4534 mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
4535 retmp = mp;
4536 if (withref) {
4537 if (mount_iterref(retmp, 1))
4538 retmp = (mount_t)0;
4539 }
4540 goto out;
4541 }
4542 out:
4543 if (!locked)
4544 mount_list_unlock();
4545 return (retmp);
4546 }
4547
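/*
 * Illustrative sketch (hypothetical usage, not compiled): when 'withref' is
 * non-zero, the mount returned by mount_list_lookupby_fsid() carries an
 * iteration reference which the caller must drop with mount_iterdrop(),
 * exactly as vfs_iterate() does above.
 */
#if 0	/* example only -- never compiled */
	fsid_t fsid;	/* filled in by the caller */
	mount_t mp;

	mp = mount_list_lookupby_fsid(&fsid, 0 /* list not locked */, 1 /* withref */);
	if (mp != NULL) {
		/* ... inspect mp ... */
		mount_iterdrop(mp);
	}
#endif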
4548 errno_t
4549 vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
4550 {
4551 struct nameidata nd;
4552 int error;
4553 u_int32_t ndflags = 0;
4554
4555 if (ctx == NULL) { /* XXX technically an error */
4556 ctx = vfs_context_current();
4557 }
4558
4559 if (flags & VNODE_LOOKUP_NOFOLLOW)
4560 ndflags = NOFOLLOW;
4561 else
4562 ndflags = FOLLOW;
4563
4564 if (flags & VNODE_LOOKUP_NOCROSSMOUNT)
4565 ndflags |= NOCROSSMOUNT;
4566 if (flags & VNODE_LOOKUP_DOWHITEOUT)
4567 ndflags |= DOWHITEOUT;
4568
4569 /* XXX AUDITVNPATH1 needed ? */
4570 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
4571
4572 if ((error = namei(&nd)))
4573 return (error);
4574 *vpp = nd.ni_vp;
4575 nameidone(&nd);
4576
4577 return (0);
4578 }
4579
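/*
 * Illustrative sketch (hypothetical usage, not compiled): vnode_lookup()
 * hands back a vnode holding an iocount, which the caller drops with
 * vnode_put() when finished.  The path shown is arbitrary.
 */
#if 0	/* example only -- never compiled */
	vnode_t vp = NULLVP;
	errno_t error;

	error = vnode_lookup("/var/tmp/some_file", VNODE_LOOKUP_NOFOLLOW, &vp,
	    vfs_context_current());
	if (error == 0) {
		/* ... use vp ... */
		vnode_put(vp);
	}
#endif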
4580 errno_t
4581 vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
4582 {
4583 struct nameidata nd;
4584 int error;
4585 u_int32_t ndflags = 0;
4586 int lflags = flags;
4587
4588 if (ctx == NULL) { /* XXX technically an error */
4589 ctx = vfs_context_current();
4590 }
4591
4592 if (fmode & O_NOFOLLOW)
4593 lflags |= VNODE_LOOKUP_NOFOLLOW;
4594
4595 if (lflags & VNODE_LOOKUP_NOFOLLOW)
4596 ndflags = NOFOLLOW;
4597 else
4598 ndflags = FOLLOW;
4599
4600 if (lflags & VNODE_LOOKUP_NOCROSSMOUNT)
4601 ndflags |= NOCROSSMOUNT;
4602 if (lflags & VNODE_LOOKUP_DOWHITEOUT)
4603 ndflags |= DOWHITEOUT;
4604
4605 /* XXX AUDITVNPATH1 needed ? */
4606 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
4607
4608 if ((error = vn_open(&nd, fmode, cmode)))
4609 *vpp = NULL;
4610 else
4611 *vpp = nd.ni_vp;
4612
4613 return (error);
4614 }
4615
4616 errno_t
4617 vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
4618 {
4619 int error;
4620
4621 if (ctx == NULL) {
4622 ctx = vfs_context_current();
4623 }
4624
4625 error = vn_close(vp, flags, ctx);
4626 vnode_put(vp);
4627 return (error);
4628 }
4629
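/*
 * Illustrative sketch (hypothetical usage, not compiled): vnode_open() pairs
 * with vnode_close(), which as shown above calls vn_close() and then drops
 * the iocount with vnode_put().  The path and mode are arbitrary.
 */
#if 0	/* example only -- never compiled */
	vfs_context_t ctx = vfs_context_current();
	vnode_t vp = NULLVP;
	errno_t error;

	error = vnode_open("/var/tmp/scratch", (O_CREAT | FWRITE), 0600, 0, &vp, ctx);
	if (error == 0) {
		/* ... perform I/O through vp ... */
		error = vnode_close(vp, FWRITE, ctx);
	}
#endif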
4630 /*
4631 * Returns: 0 Success
4632 * vnode_getattr:???
4633 */
4634 errno_t
4635 vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
4636 {
4637 struct vnode_attr va;
4638 int error;
4639
4640 VATTR_INIT(&va);
4641 VATTR_WANTED(&va, va_data_size);
4642 error = vnode_getattr(vp, &va, ctx);
4643 if (!error)
4644 *sizep = va.va_data_size;
4645 return(error);
4646 }
4647
4648 errno_t
4649 vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
4650 {
4651 struct vnode_attr va;
4652
4653 VATTR_INIT(&va);
4654 VATTR_SET(&va, va_data_size, size);
4655 va.va_vaflags = ioflag & 0xffff;
4656 return(vnode_setattr(vp, &va, ctx));
4657 }
4658
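/*
 * Illustrative sketch (hypothetical usage, not compiled): both helpers above
 * are thin wrappers around vnode_getattr()/vnode_setattr() on va_data_size;
 * the ioflag is simply folded into va_vaflags.  'vp' and 'ctx' are assumed
 * to be supplied by the caller.
 */
#if 0	/* example only -- never compiled */
	off_t cursize;
	errno_t error;

	error = vnode_size(vp, &cursize, ctx);
	if (error == 0 && cursize > wanted_size)
		error = vnode_setsize(vp, wanted_size, 0 /* ioflag */, ctx);
#endif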
4659 /*
4660 * Create a filesystem object of arbitrary type with arbitrary attributes in
4661 * the specified directory with the specified name.
4662 *
4663 * Parameters: dvp Pointer to the vnode of the directory
4664 * in which to create the object.
4665 * vpp Pointer to the area into which to
4666 * return the vnode of the created object.
4667 * cnp Component name pointer from the namei
4668 * data structure, containing the name to
4669 * use for the create object.
4670 * vap Pointer to the vnode_attr structure
4671 * describing the object to be created,
4672 * including the type of object.
4673 * flags VN_* flags controlling ACL inheritance
4674 * and whether or not authorization is to
4675 * be required for the operation.
4676 *
4677 * Returns: 0 Success
4678 * !0 errno value
4679 *
4680 * Implicit: *vpp Contains the vnode of the object that
4681 * was created, if successful.
4682 * *cnp May be modified by the underlying VFS.
4683 * *vap May be modified by the underlying VFS;
4684 * in particular, it may be modified by either
4685 * ACL inheritance or by the filesystem itself.
4686 *
4687 * Both *cnp and *vap may be modified, even if the operation is
4688 * unsuccessful or only partially successful.
4689 *
4690 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
4691 *
4692 * Modification of '*cnp' and '*vap' by the underlying VFS is
4693 * strongly discouraged.
4694 *
4695 * XXX: This function is a 'vn_*' function; it belongs in vfs_vnops.c
4696 *
4697 * XXX: We should enumerate the possible errno values here, and where
4698 * in the code they originated.
4699 */
4700 errno_t
4701 vn_create(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, struct vnode_attr *vap, int flags, vfs_context_t ctx)
4702 {
4703 kauth_acl_t oacl, nacl;
4704 int initial_acl;
4705 errno_t error;
4706 vnode_t vp = (vnode_t)0;
4707
4708 error = 0;
4709 oacl = nacl = NULL;
4710 initial_acl = 0;
4711
4712 KAUTH_DEBUG("%p CREATE - '%s'", dvp, cnp->cn_nameptr);
4713
4714 /*
4715 * Handle ACL inheritance.
4716 */
4717 if (!(flags & VN_CREATE_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
4718 /* save the original filesec */
4719 if (VATTR_IS_ACTIVE(vap, va_acl)) {
4720 initial_acl = 1;
4721 oacl = vap->va_acl;
4722 }
4723
4724 vap->va_acl = NULL;
4725 if ((error = kauth_acl_inherit(dvp,
4726 oacl,
4727 &nacl,
4728 vap->va_type == VDIR,
4729 ctx)) != 0) {
4730 KAUTH_DEBUG("%p CREATE - error %d processing inheritance", dvp, error);
4731 return(error);
4732 }
4733
4734 /*
4735 * If the generated ACL is NULL, then we can save ourselves some effort
4736 * by clearing the active bit.
4737 */
4738 if (nacl == NULL) {
4739 VATTR_CLEAR_ACTIVE(vap, va_acl);
4740 } else {
4741 VATTR_SET(vap, va_acl, nacl);
4742 }
4743 }
4744
4745 /*
4746 * Check and default new attributes.
4747 * This will set va_uid, va_gid, va_mode and va_create_time at least, if the caller
4748 * hasn't supplied them.
4749 */
4750 if ((error = vnode_authattr_new(dvp, vap, flags & VN_CREATE_NOAUTH, ctx)) != 0) {
4751 KAUTH_DEBUG("%p CREATE - error %d handling/defaulting attributes", dvp, error);
4752 goto out;
4753 }
4754
4755
4756 /*
4757 * Create the requested node.
4758 */
4759 switch(vap->va_type) {
4760 case VREG:
4761 error = VNOP_CREATE(dvp, vpp, cnp, vap, ctx);
4762 break;
4763 case VDIR:
4764 error = VNOP_MKDIR(dvp, vpp, cnp, vap, ctx);
4765 break;
4766 case VSOCK:
4767 case VFIFO:
4768 case VBLK:
4769 case VCHR:
4770 error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
4771 break;
4772 default:
4773 panic("vnode_create: unknown vtype %d", vap->va_type);
4774 }
4775 if (error != 0) {
4776 KAUTH_DEBUG("%p CREATE - error %d returned by filesystem", dvp, error);
4777 goto out;
4778 }
4779
4780 vp = *vpp;
4781 #if CONFIG_MACF
4782 if (!(flags & VN_CREATE_NOLABEL)) {
4783 error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
4784 if (error)
4785 goto error;
4786 }
4787 #endif
4788
4789 /*
4790 * If some of the requested attributes weren't handled by the VNOP,
4791 * use our fallback code.
4792 */
4793 if (!VATTR_ALL_SUPPORTED(vap) && *vpp) {
4794 KAUTH_DEBUG(" CREATE - doing fallback with ACL %p", vap->va_acl);
4795 error = vnode_setattr_fallback(*vpp, vap, ctx);
4796 }
4797 #if CONFIG_MACF
4798 error:
4799 #endif
4800 if ((error != 0 ) && (vp != (vnode_t)0)) {
4801 *vpp = (vnode_t) 0;
4802 vnode_put(vp);
4803 }
4804
4805 out:
4806 /*
4807 * If the caller supplied a filesec in vap, it has been replaced
4808 * now by the post-inheritance copy. We need to put the original back
4809 * and free the inherited product.
4810 */
4811 if (initial_acl) {
4812 VATTR_SET(vap, va_acl, oacl);
4813 } else {
4814 VATTR_CLEAR_ACTIVE(vap, va_acl);
4815 }
4816 if (nacl != NULL)
4817 kauth_acl_free(nacl);
4818
4819 return(error);
4820 }
4821
4822 static kauth_scope_t vnode_scope;
4823 static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
4824 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
4825 static int vnode_authorize_callback_int(__unused kauth_cred_t credential, __unused void *idata, kauth_action_t action,
4826 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
4827
4828 typedef struct _vnode_authorize_context {
4829 vnode_t vp;
4830 struct vnode_attr *vap;
4831 vnode_t dvp;
4832 struct vnode_attr *dvap;
4833 vfs_context_t ctx;
4834 int flags;
4835 int flags_valid;
4836 #define _VAC_IS_OWNER (1<<0)
4837 #define _VAC_IN_GROUP (1<<1)
4838 #define _VAC_IS_DIR_OWNER (1<<2)
4839 #define _VAC_IN_DIR_GROUP (1<<3)
4840 } *vauth_ctx;
4841
4842 void
4843 vnode_authorize_init(void)
4844 {
4845 vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
4846 }
4847
4848 /*
4849 * Authorize an operation on a vnode.
4850 *
4851 * This is KPI, but here because it needs vnode_scope.
4852 *
4853 * Returns: 0 Success
4854 * kauth_authorize_action:EPERM ...
4855 * xlate => EACCES Permission denied
4856 * kauth_authorize_action:0 Success
4857 * kauth_authorize_action: Depends on callback return; this is
4858 * usually only vnode_authorize_callback(),
4859 * but may include other listeners, if any
4860 * exist.
4861 * EROFS
4862 * EACCES
4863 * EPERM
4864 * ???
4865 */
4866 int
4867 vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
4868 {
4869 int error, result;
4870
4871 /*
4872 * We can't authorize against a dead vnode; allow all operations through so that
4873 * the correct error can be returned.
4874 */
4875 if (vp->v_type == VBAD)
4876 return(0);
4877
4878 error = 0;
4879 result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
4880 (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
4881 if (result == EPERM) /* traditional behaviour */
4882 result = EACCES;
4883 /* did the lower layers give a better error return? */
4884 if ((result != 0) && (error != 0))
4885 return(error);
4886 return(result);
4887 }
4888
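/*
 * Illustrative sketch (hypothetical usage, not compiled): a typical caller
 * asking whether the current context may write to a vnode; dvp is only
 * needed for directory-relative rights such as KAUTH_VNODE_DELETE.
 */
#if 0	/* example only -- never compiled */
	int error;

	error = vnode_authorize(vp, NULLVP, KAUTH_VNODE_WRITE_DATA, ctx);
	if (error != 0) {
		/* typically EACCES, EPERM or EROFS, possibly refined by the lower layers */
	}
#endif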
4889 /*
4890 * Test for vnode immutability.
4891 *
4892 * The 'append' flag is set when the authorization request is constrained
4893 * to operations which only request the right to append to a file.
4894 *
4895 * The 'ignore' flag is set when an operation modifying the immutability flags
4896 * is being authorized. We check the system securelevel to determine which
4897 * immutability flags we can ignore.
4898 */
4899 static int
4900 vnode_immutable(struct vnode_attr *vap, int append, int ignore)
4901 {
4902 int mask;
4903
4904 /* start with all bits precluding the operation */
4905 mask = IMMUTABLE | APPEND;
4906
4907 /* if appending only, remove the append-only bits */
4908 if (append)
4909 mask &= ~APPEND;
4910
4911 /* ignore only set when authorizing flags changes */
4912 if (ignore) {
4913 if (securelevel <= 0) {
4914 /* in insecure state, flags do not inhibit changes */
4915 mask = 0;
4916 } else {
4917 /* in secure state, user flags don't inhibit */
4918 mask &= ~(UF_IMMUTABLE | UF_APPEND);
4919 }
4920 }
4921 KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
4922 if ((vap->va_flags & mask) != 0)
4923 return(EPERM);
4924 return(0);
4925 }
4926
4927 static int
4928 vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
4929 {
4930 int result;
4931
4932 /* default assumption is not-owner */
4933 result = 0;
4934
4935 /*
4936 * If the filesystem has given us a UID, we treat this as authoritative.
4937 */
4938 if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
4939 result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
4940 }
4941 /* we could test the owner UUID here if we had a policy for it */
4942
4943 return(result);
4944 }
4945
4946 /*
4947 * vauth_node_group
4948 *
4949 * Description: Ask if a cred is a member of the group owning the vnode object
4950 *
4951 * Parameters: vap vnode attribute
4952 * vap->va_gid group owner of vnode object
4953 * cred credential to check
4954 * ismember pointer to where to put the answer
4955 * idontknow Return this if we can't get an answer
4956 *
4957 * Returns: 0 Success
4958 * idontknow Can't get information
4959 * kauth_cred_ismember_gid:? Error from kauth subsystem
4960 * kauth_cred_ismember_gid:? Error from kauth subsystem
4961 */
4962 static int
4963 vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow)
4964 {
4965 int error;
4966 int result;
4967
4968 error = 0;
4969 result = 0;
4970
4971 /*
4972 * The caller is expected to have asked the filesystem for a group
4973 * at some point prior to calling this function. The answer may
4974 * have been that there is no group ownership supported for the
4975 * vnode object, in which case we simply return success and report the credential as not a member.
4976 */
4977 if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
4978 error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
4979 /*
4980 * Credentials which are opted into external group membership
4981 * resolution which are not known to the external resolver
4982 * will result in an ENOENT error. We translate this into
4983 * the appropriate 'idontknow' response for our caller.
4984 *
4985 * XXX We do not make a distinction here between an ENOENT
4986 * XXX arising from a response from the external resolver,
4987 * XXX and an ENOENT which is internally generated. This is
4988 * XXX a deficiency of the published kauth_cred_ismember_gid()
4989 * XXX KPI which can not be overcome without new KPI. For
4990 * XXX all currently known cases, however, this will result
4991 * XXX in correct behaviour.
4992 */
4993 if (error == ENOENT)
4994 error = idontknow;
4995 }
4996 /*
4997 * XXX We could test the group UUID here if we had a policy for it,
4998 * XXX but this is problematic from the perspective of synchronizing
4999 * XXX group UUID and POSIX GID ownership of a file and keeping the
5000 * XXX values coherent over time. The problem is that the local
5001 * XXX system will vend transient group UUIDs for unknown POSIX GID
5002 * XXX values, and these are not persistent, whereas storage of values
5003 * XXX is persistent. One potential solution to this is a local
5004 * XXX (persistent) replica of remote directory entries and vended
5005 * XXX local ids in a local directory server (think in terms of a
5006 * XXX caching DNS server).
5007 */
5008
5009 if (!error)
5010 *ismember = result;
5011 return(error);
5012 }
5013
5014 static int
5015 vauth_file_owner(vauth_ctx vcp)
5016 {
5017 int result;
5018
5019 if (vcp->flags_valid & _VAC_IS_OWNER) {
5020 result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
5021 } else {
5022 result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);
5023
5024 /* cache our result */
5025 vcp->flags_valid |= _VAC_IS_OWNER;
5026 if (result) {
5027 vcp->flags |= _VAC_IS_OWNER;
5028 } else {
5029 vcp->flags &= ~_VAC_IS_OWNER;
5030 }
5031 }
5032 return(result);
5033 }
5034
5035
5036 /*
5037 * vauth_file_ingroup
5038 *
5039 * Description: Ask if a user is a member of the group owning the file
5040 *
5041 * Parameters: vcp The vnode authorization context that
5042 * contains the user and directory info
5043 * vcp->flags_valid Valid flags
5044 * vcp->flags Flags values
5045 * vcp->vap File vnode attributes
5046 * vcp->ctx VFS Context (for user)
5047 * ismember pointer to where to put the answer
5048 * idontknow Return this if we can't get an answer
5049 *
5050 * Returns: 0 Success
5051 * vauth_node_group:? Error from vauth_node_group()
5052 *
5053 * Implicit returns: *ismember 0 The user is not a group member
5054 * 1 The user is a group member
5055 */
5056 static int
5057 vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
5058 {
5059 int error;
5060
5061 /* Check for a cached answer first, to avoid the check if possible */
5062 if (vcp->flags_valid & _VAC_IN_GROUP) {
5063 *ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
5064 error = 0;
5065 } else {
5066 /* Otherwise, go look for it */
5067 error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember, idontknow);
5068
5069 if (!error) {
5070 /* cache our result */
5071 vcp->flags_valid |= _VAC_IN_GROUP;
5072 if (*ismember) {
5073 vcp->flags |= _VAC_IN_GROUP;
5074 } else {
5075 vcp->flags &= ~_VAC_IN_GROUP;
5076 }
5077 }
5078
5079 }
5080 return(error);
5081 }
5082
5083 static int
5084 vauth_dir_owner(vauth_ctx vcp)
5085 {
5086 int result;
5087
5088 if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
5089 result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
5090 } else {
5091 result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);
5092
5093 /* cache our result */
5094 vcp->flags_valid |= _VAC_IS_DIR_OWNER;
5095 if (result) {
5096 vcp->flags |= _VAC_IS_DIR_OWNER;
5097 } else {
5098 vcp->flags &= ~_VAC_IS_DIR_OWNER;
5099 }
5100 }
5101 return(result);
5102 }
5103
5104 /*
5105 * vauth_dir_ingroup
5106 *
5107 * Description: Ask if a user is a member of the group owning the directory
5108 *
5109 * Parameters: vcp The vnode authorization context that
5110 * contains the user and directory info
5111 * vcp->flags_valid Valid flags
5112 * vcp->flags Flags values
5113 * vcp->dvap Dir vnode attributes
5114 * vcp->ctx VFS Context (for user)
5115 * ismember pointer to where to put the answer
5116 * idontknow Return this if we can't get an answer
5117 *
5118 * Returns: 0 Success
5119 * vauth_node_group:? Error from vauth_node_group()
5120 *
5121 * Implicit returns: *ismember 0 The user is not a group member
5122 * 1 The user is a group member
5123 */
5124 static int
5125 vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
5126 {
5127 int error;
5128
5129 /* Check for a cached answer first, to avoid the check if possible */
5130 if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
5131 *ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
5132 error = 0;
5133 } else {
5134 /* Otherwise, go look for it */
5135 error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember, idontknow);
5136
5137 if (!error) {
5138 /* cache our result */
5139 vcp->flags_valid |= _VAC_IN_DIR_GROUP;
5140 if (*ismember) {
5141 vcp->flags |= _VAC_IN_DIR_GROUP;
5142 } else {
5143 vcp->flags &= ~_VAC_IN_DIR_GROUP;
5144 }
5145 }
5146 }
5147 return(error);
5148 }
5149
5150 /*
5151 * Test the posix permissions in (vap) to determine whether (credential)
5152 * may perform (action)
5153 */
5154 static int
5155 vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
5156 {
5157 struct vnode_attr *vap;
5158 int needed, error, owner_ok, group_ok, world_ok, ismember;
5159 #ifdef KAUTH_DEBUG_ENABLE
5160 const char *where = "uninitialized";
5161 # define _SETWHERE(c) where = c;
5162 #else
5163 # define _SETWHERE(c)
5164 #endif
5165
5166 /* checking file or directory? */
5167 if (on_dir) {
5168 vap = vcp->dvap;
5169 } else {
5170 vap = vcp->vap;
5171 }
5172
5173 error = 0;
5174
5175 /*
5176 * We want to do as little work here as possible. So first we check
5177 * which sets of permissions grant us the access we need, and avoid checking
5178 * whether specific permissions grant access when more generic ones would.
5179 */
5180
5181 /* owner permissions */
5182 needed = 0;
5183 if (action & VREAD)
5184 needed |= S_IRUSR;
5185 if (action & VWRITE)
5186 needed |= S_IWUSR;
5187 if (action & VEXEC)
5188 needed |= S_IXUSR;
5189 owner_ok = (needed & vap->va_mode) == needed;
5190
5191 /* group permissions */
5192 needed = 0;
5193 if (action & VREAD)
5194 needed |= S_IRGRP;
5195 if (action & VWRITE)
5196 needed |= S_IWGRP;
5197 if (action & VEXEC)
5198 needed |= S_IXGRP;
5199 group_ok = (needed & vap->va_mode) == needed;
5200
5201 /* world permissions */
5202 needed = 0;
5203 if (action & VREAD)
5204 needed |= S_IROTH;
5205 if (action & VWRITE)
5206 needed |= S_IWOTH;
5207 if (action & VEXEC)
5208 needed |= S_IXOTH;
5209 world_ok = (needed & vap->va_mode) == needed;
5210
5211 /* If granted/denied by all three, we're done */
5212 if (owner_ok && group_ok && world_ok) {
5213 _SETWHERE("all");
5214 goto out;
5215 }
5216 if (!owner_ok && !group_ok && !world_ok) {
5217 _SETWHERE("all");
5218 error = EACCES;
5219 goto out;
5220 }
5221
5222 /* Check ownership (relatively cheap) */
5223 if ((on_dir && vauth_dir_owner(vcp)) ||
5224 (!on_dir && vauth_file_owner(vcp))) {
5225 _SETWHERE("user");
5226 if (!owner_ok)
5227 error = EACCES;
5228 goto out;
5229 }
5230
5231 /* Not owner; if group and world both grant it we're done */
5232 if (group_ok && world_ok) {
5233 _SETWHERE("group/world");
5234 goto out;
5235 }
5236 if (!group_ok && !world_ok) {
5237 _SETWHERE("group/world");
5238 error = EACCES;
5239 goto out;
5240 }
5241
5242 /* Check group membership (most expensive) */
5243 ismember = 0; /* Default to allow, if the target has no group owner */
5244
5245 /*
5246 * In the case we can't get an answer about the user from the call to
5247 * vauth_dir_ingroup() or vauth_file_ingroup(), we want to fail on
5248 * the side of caution, rather than simply granting access, or we will
5249 * fail to correctly implement exclusion groups, so we set the third
5250 * parameter on the basis of the state of 'group_ok'.
5251 */
5252 if (on_dir) {
5253 error = vauth_dir_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
5254 } else {
5255 error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
5256 }
5257 if (error)
5258 goto out;
5259 if (ismember) {
5260 _SETWHERE("group");
5261 if (!group_ok)
5262 error = EACCES;
5263 goto out;
5264 }
5265
5266 /* Not owner, not in group, use world result */
5267 _SETWHERE("world");
5268 if (!world_ok)
5269 error = EACCES;
5270
5271 /* FALLTHROUGH */
5272
5273 out:
5274 KAUTH_DEBUG("%p %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
5275 vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
5276 (action & VREAD) ? "r" : "-",
5277 (action & VWRITE) ? "w" : "-",
5278 (action & VEXEC) ? "x" : "-",
5279 needed,
5280 (vap->va_mode & S_IRUSR) ? "r" : "-",
5281 (vap->va_mode & S_IWUSR) ? "w" : "-",
5282 (vap->va_mode & S_IXUSR) ? "x" : "-",
5283 (vap->va_mode & S_IRGRP) ? "r" : "-",
5284 (vap->va_mode & S_IWGRP) ? "w" : "-",
5285 (vap->va_mode & S_IXGRP) ? "x" : "-",
5286 (vap->va_mode & S_IROTH) ? "r" : "-",
5287 (vap->va_mode & S_IWOTH) ? "w" : "-",
5288 (vap->va_mode & S_IXOTH) ? "x" : "-",
5289 kauth_cred_getuid(vcp->ctx->vc_ucred),
5290 on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
5291 on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
5292 return(error);
5293 }
5294
5295 /*
5296 * Authorize the deletion of the node vp from the directory dvp.
5297 *
5298 * We assume that:
5299 * - Neither the node nor the directory are immutable.
5300 * - The user is not the superuser.
5301 *
5302 * Deletion is not permitted if the directory is sticky and the caller is
5303 * not owner of the node or directory.
5304 *
5305 * If either the node grants DELETE, or the directory grants DELETE_CHILD,
5306 * the node may be deleted. If neither denies the permission, and the
5307 * caller has Posix write access to the directory, then the node may be
5308 * deleted.
5309 *
5310 * As an optimization, we cache whether or not delete child is permitted
5311 * on directories without the sticky bit set.
5312 */
5313 int
5314 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child);
5315 /*static*/ int
5316 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
5317 {
5318 struct vnode_attr *vap = vcp->vap;
5319 struct vnode_attr *dvap = vcp->dvap;
5320 kauth_cred_t cred = vcp->ctx->vc_ucred;
5321 struct kauth_acl_eval eval;
5322 int error, delete_denied, delete_child_denied, ismember;
5323
5324 /* check the ACL on the directory */
5325 delete_child_denied = 0;
5326 if (!cached_delete_child && VATTR_IS_NOT(dvap, va_acl, NULL)) {
5327 errno_t posix_error;
5328
5329 eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
5330 eval.ae_acl = &dvap->va_acl->acl_ace[0];
5331 eval.ae_count = dvap->va_acl->acl_entrycount;
5332 eval.ae_options = 0;
5333 if (vauth_dir_owner(vcp))
5334 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5335 /*
5336 * We use ENOENT as a marker to indicate we could not get
5337 * information in order to delay evaluation until after we
5338 * have the ACL evaluation answer. Previously, we would
5339 * always deny the operation at this point.
5340 */
5341 if ((posix_error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && posix_error != ENOENT)
5342 return(posix_error);
5343 if (ismember)
5344 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5345 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5346 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5347 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5348 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5349
5350 /*
5351 * If there is no entry, we are going to defer to other
5352 * authorization mechanisms.
5353 */
5354 error = kauth_acl_evaluate(cred, &eval);
5355
5356 if (error != 0) {
5357 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5358 return(error);
5359 }
5360 switch(eval.ae_result) {
5361 case KAUTH_RESULT_DENY:
5362 delete_child_denied = 1;
5363 break;
5364 case KAUTH_RESULT_ALLOW:
5365 KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp);
5366 return(0);
5367 case KAUTH_RESULT_DEFER:
5368 /*
5369 * If we don't have a POSIX answer of "yes", and we
5370 * can't get an ACL answer, then we deny it now.
5371 */
5372 if (posix_error == ENOENT) {
5373 delete_child_denied = 1;
5374 break;
5375 }
5376 default:
5377 /* Effectively the same as !delete_child_denied */
5378 KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
5379 break;
5380 }
5381 }
5382
5383 /* check the ACL on the node */
5384 delete_denied = 0;
5385 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
5386 errno_t posix_error;
5387
5388 eval.ae_requested = KAUTH_VNODE_DELETE;
5389 eval.ae_acl = &vap->va_acl->acl_ace[0];
5390 eval.ae_count = vap->va_acl->acl_entrycount;
5391 eval.ae_options = 0;
5392 if (vauth_file_owner(vcp))
5393 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5394 /*
5395 * We use ENOENT as a marker to indicate we could not get
5396 * information in order to delay evaluation until after we
5397 * have the ACL evaluation answer. Previously, we would
5398 * always deny the operation at this point.
5399 */
5400 if ((posix_error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && posix_error != ENOENT)
5401 return(posix_error);
5402 if (ismember)
5403 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5404 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5405 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5406 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5407 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5408
5409 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
5410 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5411 return(error);
5412 }
5413
5414 switch(eval.ae_result) {
5415 case KAUTH_RESULT_DENY:
5416 delete_denied = 1;
5417 break;
5418 case KAUTH_RESULT_ALLOW:
5419 KAUTH_DEBUG("%p ALLOWED - granted by file ACL", vcp->vp);
5420 return(0);
5421 case KAUTH_RESULT_DEFER:
5422 /*
5423 * If we don't have a POSIX answer of "yes", and we
5424 * can't get an ACL answer, then we deny it now.
5425 */
5426 if (posix_error == ENOENT) {
5427 delete_denied = 1;
5428 }
5429 default:
5430 /* Effectively the same as !delete_denied */
5431 KAUTH_DEBUG("%p DEFERRED%s - by file ACL", vcp->vp, delete_denied ? "(DENY)" : "");
5432 break;
5433 }
5434 }
5435
5436 /* if denied by ACL on directory or node, return denial */
5437 if (delete_denied || delete_child_denied) {
5438 KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp);
5439 return(EACCES);
5440 }
5441
5442 /*
5443 * enforce sticky bit behaviour; the cached_delete_child property will
5444 * be false and the dvap contents valid for sticky bit directories;
5445 * this makes us check the directory each time, but it's unavoidable,
5446 * as sticky bit is an exception to caching.
5447 */
5448 if (!cached_delete_child && (dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
5449 KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)",
5450 vcp->vp, cred->cr_uid, vap->va_uid, dvap->va_uid);
5451 return(EACCES);
5452 }
5453
5454 /* check the directory */
5455 if (!cached_delete_child && (error = vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */)) != 0) {
5456 KAUTH_DEBUG("%p DENIED - denied by posix permissions", vcp->vp);
5457 return(error);
5458 }
5459
5460 /* not denied, must be OK */
5461 return(0);
5462 }
5463
5464
5465 /*
5466 * Authorize an operation based on the node's attributes.
5467 */
5468 static int
5469 vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
5470 {
5471 struct vnode_attr *vap = vcp->vap;
5472 kauth_cred_t cred = vcp->ctx->vc_ucred;
5473 struct kauth_acl_eval eval;
5474 int error, ismember;
5475 mode_t posix_action;
5476
5477 /*
5478 * If we are the file owner, we automatically have some rights.
5479 *
5480 * Do we need to expand this to support group ownership?
5481 */
5482 if (vauth_file_owner(vcp))
5483 acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);
5484
5485 /*
5486 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
5487 * mask the latter. If TAKE_OWNERSHIP is requested the caller is about to
5488 * change ownership to themselves, and WRITE_SECURITY is implicitly
5489 * granted to the owner. We need to do this because at this point
5490 * WRITE_SECURITY may not be granted as the caller is not currently
5491 * the owner.
5492 */
5493 if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
5494 (acl_rights & KAUTH_VNODE_WRITE_SECURITY))
5495 acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;
5496
5497 if (acl_rights == 0) {
5498 KAUTH_DEBUG("%p ALLOWED - implicit or no rights required", vcp->vp);
5499 return(0);
5500 }
5501
5502 /* if we have an ACL, evaluate it */
5503 if (VATTR_IS_NOT(vap, va_acl, NULL)) {
5504 errno_t posix_error;
5505
5506 eval.ae_requested = acl_rights;
5507 eval.ae_acl = &vap->va_acl->acl_ace[0];
5508 eval.ae_count = vap->va_acl->acl_entrycount;
5509 eval.ae_options = 0;
5510 if (vauth_file_owner(vcp))
5511 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
5512 /*
5513 * We use ENOENT as a marker to indicate we could not get
5514 * information in order to delay evaluation until after we
5515 * have the ACL evaluation answer. Previously, we would
5516 * always deny the operation at this point.
5517 */
5518 if ((posix_error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && posix_error != ENOENT)
5519 return(posix_error);
5520 if (ismember)
5521 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
5522 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
5523 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
5524 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
5525 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
5526
5527 if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
5528 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
5529 return(error);
5530 }
5531
5532 switch(eval.ae_result) {
5533 case KAUTH_RESULT_DENY:
5534 KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp);
5535 return(EACCES); /* deny, deny, counter-allege */
5536 case KAUTH_RESULT_ALLOW:
5537 KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp);
5538 return(0);
5539 case KAUTH_RESULT_DEFER:
5540 /*
5541 * If we don't have a POSIX answer of "yes", and we
5542 * can't get an ACL answer, then we deny it now.
5543 */
5544 if (posix_error == ENOENT) {
5545 KAUTH_DEBUG("%p DENIED(DEFERRED) - by ACL", vcp->vp);
5546 return(EACCES); /* deny, deny, counter-allege */
5547 }
5548 default:
5549 /* No ACL decision; defer to the residual/POSIX checks below */
5550 KAUTH_DEBUG("%p DEFERRED - by file ACL", vcp->vp);
5551 break;
5552 }
5553
5554 *found_deny = eval.ae_found_deny;
5555
5556 /* fall through and evaluate residual rights */
5557 } else {
5558 /* no ACL, everything is residual */
5559 eval.ae_residual = acl_rights;
5560 }
5561
5562 /*
5563 * Grant residual rights that have been pre-authorized.
5564 */
5565 eval.ae_residual &= ~preauth_rights;
5566
5567 /*
5568 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
5569 */
5570 if (vauth_file_owner(vcp))
5571 eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;
5572
5573 if (eval.ae_residual == 0) {
5574 KAUTH_DEBUG("%p ALLOWED - rights already authorized", vcp->vp);
5575 return(0);
5576 }
5577
5578 /*
5579 * Bail if we have residual rights that can't be granted by posix permissions,
5580 * or aren't presumed granted at this point.
5581 *
5582 * XXX these can be collapsed for performance
5583 */
5584 if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
5585 KAUTH_DEBUG("%p DENIED - CHANGE_OWNER not permitted", vcp->vp);
5586 return(EACCES);
5587 }
5588 if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
5589 KAUTH_DEBUG("%p DENIED - WRITE_SECURITY not permitted", vcp->vp);
5590 return(EACCES);
5591 }
5592
5593 #if DIAGNOSTIC
5594 if (eval.ae_residual & KAUTH_VNODE_DELETE)
5595 panic("vnode_authorize: can't be checking delete permission here");
5596 #endif
5597
5598 /*
5599 * Compute the fallback posix permissions that will satisfy the remaining
5600 * rights.
5601 */
5602 posix_action = 0;
5603 if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
5604 KAUTH_VNODE_LIST_DIRECTORY |
5605 KAUTH_VNODE_READ_EXTATTRIBUTES))
5606 posix_action |= VREAD;
5607 if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
5608 KAUTH_VNODE_ADD_FILE |
5609 KAUTH_VNODE_ADD_SUBDIRECTORY |
5610 KAUTH_VNODE_DELETE_CHILD |
5611 KAUTH_VNODE_WRITE_ATTRIBUTES |
5612 KAUTH_VNODE_WRITE_EXTATTRIBUTES))
5613 posix_action |= VWRITE;
5614 if (eval.ae_residual & (KAUTH_VNODE_EXECUTE |
5615 KAUTH_VNODE_SEARCH))
5616 posix_action |= VEXEC;
5617
5618 if (posix_action != 0) {
5619 return(vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */));
5620 } else {
5621 KAUTH_DEBUG("%p ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
5622 vcp->vp,
5623 (eval.ae_residual & KAUTH_VNODE_READ_DATA)
5624 ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
5625 (eval.ae_residual & KAUTH_VNODE_WRITE_DATA)
5626 ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
5627 (eval.ae_residual & KAUTH_VNODE_EXECUTE)
5628 ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
5629 (eval.ae_residual & KAUTH_VNODE_DELETE)
5630 ? " DELETE" : "",
5631 (eval.ae_residual & KAUTH_VNODE_APPEND_DATA)
5632 ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
5633 (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD)
5634 ? " DELETE_CHILD" : "",
5635 (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES)
5636 ? " READ_ATTRIBUTES" : "",
5637 (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES)
5638 ? " WRITE_ATTRIBUTES" : "",
5639 (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES)
5640 ? " READ_EXTATTRIBUTES" : "",
5641 (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES)
5642 ? " WRITE_EXTATTRIBUTES" : "",
5643 (eval.ae_residual & KAUTH_VNODE_READ_SECURITY)
5644 ? " READ_SECURITY" : "",
5645 (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY)
5646 ? " WRITE_SECURITY" : "",
5647 (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE)
5648 ? " CHECKIMMUTABLE" : "",
5649 (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER)
5650 ? " CHANGE_OWNER" : "");
5651 }
5652
5653 /*
5654 * Lack of required Posix permissions implies no reason to deny access.
5655 */
5656 return(0);
5657 }
5658
5659 /*
5660 * Check for file immutability.
5661 */
5662 static int
5663 vnode_authorize_checkimmutable(vnode_t vp, struct vnode_attr *vap, int rights, int ignore)
5664 {
5665 mount_t mp;
5666 int error;
5667 int append;
5668
5669 /*
5670 * Perform immutability checks for operations that change data.
5671 *
5672 * Sockets, fifos and devices require special handling.
5673 */
5674 switch(vp->v_type) {
5675 case VSOCK:
5676 case VFIFO:
5677 case VBLK:
5678 case VCHR:
5679 /*
5680 * Writing to these nodes does not change the filesystem data,
5681 * so forget that it's being tried.
5682 */
5683 rights &= ~KAUTH_VNODE_WRITE_DATA;
5684 break;
5685 default:
5686 break;
5687 }
5688
5689 error = 0;
5690 if (rights & KAUTH_VNODE_WRITE_RIGHTS) {
5691
5692 /* check per-filesystem options if possible */
5693 mp = vp->v_mount;
5694 if (mp != NULL) {
5695
5696 /* check for no-EA filesystems */
5697 if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) &&
5698 (vfs_flags(mp) & MNT_NOUSERXATTR)) {
5699 KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vp);
5700 error = EACCES; /* User attributes disabled */
5701 goto out;
5702 }
5703 }
5704
5705 /*
5706 * check for file immutability. first, check if the requested rights are
5707 * allowable for a UF_APPEND file.
5708 */
5709 append = 0;
5710 if (vp->v_type == VDIR) {
5711 if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
5712 append = 1;
5713 } else {
5714 if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
5715 append = 1;
5716 }
5717 if ((error = vnode_immutable(vap, append, ignore)) != 0) {
5718 KAUTH_DEBUG("%p DENIED - file is immutable", vp);
5719 goto out;
5720 }
5721 }
5722 out:
5723 return(error);
5724 }
5725
5726 /*
5727 * Handle authorization actions for filesystems that advertise that the
5728 * server will be enforcing.
5729 *
5730 * Returns: 0 Authorization should be handled locally
5731 * 1 Authorization was handled by the FS
5732 *
5733 * Note: Imputed returns will only occur if the authorization request
5734 * was handled by the FS.
5735 *
5736 * Imputed: *resultp, modified Return code from FS when the request is
5737 * handled by the FS.
5738 * VNOP_ACCESS:???
5739 * VNOP_OPEN:???
5740 */
5741 static int
5742 vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
5743 {
5744 int error;
5745
5746 /*
5747 * If the vp is a device node, socket or FIFO it actually represents a local
5748 * endpoint, so we need to handle it locally.
5749 */
5750 switch(vp->v_type) {
5751 case VBLK:
5752 case VCHR:
5753 case VSOCK:
5754 case VFIFO:
5755 return(0);
5756 default:
5757 break;
5758 }
5759
5760 /*
5761 * In the advisory request case, if the filesystem doesn't think it's reliable
5762 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
5763 */
5764 if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount))
5765 return(0);
5766
5767 /*
5768 * Let the filesystem have a say in the matter. It's OK for it to not implement
5769 * VNOP_ACCESS, as most will authorise inline with the actual request.
5770 */
5771 if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
5772 *resultp = error;
5773 KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
5774 return(1);
5775 }
5776
5777 /*
5778 * Typically opaque filesystems do authorisation in-line, but exec is a special case. In
5779 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
5780 */
5781 if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
5782 /* try a VNOP_OPEN for readonly access */
5783 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
5784 *resultp = error;
5785 KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp);
5786 return(1);
5787 }
5788 VNOP_CLOSE(vp, FREAD, ctx);
5789 }
5790
5791 /*
5792 * We don't have any reason to believe that the request has to be denied at this point,
5793 * so go ahead and allow it.
5794 */
5795 *resultp = 0;
5796 KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp);
5797 return(1);
5798 }
5799
5800
5801
5802
5803 /*
5804 * Returns: KAUTH_RESULT_ALLOW
5805 * KAUTH_RESULT_DENY
5806 *
5807 * Imputed: *arg3, modified Error code in the deny case
5808 * EROFS Read-only file system
5809 * EACCES Permission denied
5810 * EPERM Operation not permitted [no execute]
5811 * vnode_getattr:ENOMEM Not enough space [only if has filesec]
5812 * vnode_getattr:???
5813 * vnode_authorize_opaque:*arg2 ???
5814 * vnode_authorize_checkimmutable:???
5815 * vnode_authorize_delete:???
5816 * vnode_authorize_simple:???
5817 */
5818
5819
5820 static int
5821 vnode_authorize_callback(kauth_cred_t cred, void *idata, kauth_action_t action,
5822 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
5823 {
5824 vfs_context_t ctx;
5825 vnode_t cvp = NULLVP;
5826 vnode_t vp, dvp;
5827 int result = KAUTH_RESULT_DENY;
5828 int parent_iocount = 0;
5829 int parent_action; /* In case we need to use namedstream's data fork for cached rights*/
5830
5831 ctx = (vfs_context_t)arg0;
5832 vp = (vnode_t)arg1;
5833 dvp = (vnode_t)arg2;
5834
5835 /*
5836 * if there are 2 vnodes passed in, we don't know at
5837 * this point which rights to look at based on the
5838 * combined action being passed in... defer until later...
5839 * otherwise check the kauth 'rights' cache hung
5840 * off of the vnode we're interested in... if we've already
5841 * been granted the right we're currently interested in,
5842 * we can just return success... otherwise we'll go through
5843 * the process of authorizing the requested right(s)... if that
5844 * succeeds, we'll add the right(s) to the cache.
5845 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
5846 */
5847 if (dvp && vp)
5848 goto defer;
5849 if (dvp) {
5850 cvp = dvp;
5851 } else {
5852 /*
5853 * For named streams on local-authorization volumes, rights are cached on the parent;
5854 * authorization is determined by looking at the parent's properties anyway, so storing
5855 * on the parent means that we don't recompute for the named stream and that if
5856 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
5857 * stream to flush its cache separately. If we miss in the cache, then we authorize
5858 * as if there were no cached rights (passing the named stream vnode and desired rights to
5859 * vnode_authorize_callback_int()).
5860 *
5861 * On an opaquely authorized volume, we don't know the relationship between the
5862 * data fork's properties and the rights granted on a stream. Thus, named stream vnodes
5863 * on such a volume are authorized directly (rather than using the parent) and have their
5864 * own caches. When a named stream vnode is created, we mark the parent as having a named
5865 * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
5866 * find the stream and flush its cache.
5867 */
5868 if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
5869 cvp = vp->v_parent;
5870 if ((cvp != NULLVP) && (vnode_getwithref(cvp) == 0)) {
5871 parent_iocount = 1;
5872 } else {
5873 cvp = NULL;
5874 goto defer; /* If we can't use the parent, take the slow path */
5875 }
5876
5877 /* Have to translate some actions */
5878 parent_action = action;
5879 if (parent_action & KAUTH_VNODE_READ_DATA) {
5880 parent_action &= ~KAUTH_VNODE_READ_DATA;
5881 parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
5882 }
5883 if (parent_action & KAUTH_VNODE_WRITE_DATA) {
5884 parent_action &= ~KAUTH_VNODE_WRITE_DATA;
5885 parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
5886 }
5887
5888 } else {
5889 cvp = vp;
5890 }
5891 }
5892
5893 if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
5894 result = KAUTH_RESULT_ALLOW;
5895 goto out;
5896 }
5897 defer:
5898 result = vnode_authorize_callback_int(cred, idata, action, arg0, arg1, arg2, arg3);
5899
5900 if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP)
5901 vnode_cache_authorized_action(cvp, ctx, action);
5902
5903 out:
5904 if (parent_iocount) {
5905 vnode_put(cvp);
5906 }
5907
5908 return result;
5909 }
5910
5911
5912 static int
5913 vnode_authorize_callback_int(__unused kauth_cred_t unused_cred, __unused void *idata, kauth_action_t action,
5914 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
5915 {
5916 struct _vnode_authorize_context auth_context;
5917 vauth_ctx vcp;
5918 vfs_context_t ctx;
5919 vnode_t vp, dvp;
5920 kauth_cred_t cred;
5921 kauth_ace_rights_t rights;
5922 struct vnode_attr va, dva;
5923 int result;
5924 int *errorp;
5925 int noimmutable;
5926 boolean_t parent_authorized_for_delete_child = FALSE;
5927 boolean_t found_deny = FALSE;
5928 boolean_t parent_ref= FALSE;
5929
5930 vcp = &auth_context;
5931 ctx = vcp->ctx = (vfs_context_t)arg0;
5932 vp = vcp->vp = (vnode_t)arg1;
5933 dvp = vcp->dvp = (vnode_t)arg2;
5934 errorp = (int *)arg3;
5935 /*
5936 * Note that we authorize against the context, not the passed cred
5937 * (the same thing anyway)
5938 */
5939 cred = ctx->vc_ucred;
5940
5941 VATTR_INIT(&va);
5942 vcp->vap = &va;
5943 VATTR_INIT(&dva);
5944 vcp->dvap = &dva;
5945
5946 vcp->flags = vcp->flags_valid = 0;
5947
5948 #if DIAGNOSTIC
5949 if ((ctx == NULL) || (vp == NULL) || (cred == NULL))
5950 panic("vnode_authorize: bad arguments (context %p vp %p cred %p)", ctx, vp, cred);
5951 #endif
5952
5953 KAUTH_DEBUG("%p AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
5954 vp, vfs_context_proc(ctx)->p_comm,
5955 (action & KAUTH_VNODE_ACCESS) ? "access" : "auth",
5956 (action & KAUTH_VNODE_READ_DATA) ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
5957 (action & KAUTH_VNODE_WRITE_DATA) ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
5958 (action & KAUTH_VNODE_EXECUTE) ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
5959 (action & KAUTH_VNODE_DELETE) ? " DELETE" : "",
5960 (action & KAUTH_VNODE_APPEND_DATA) ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
5961 (action & KAUTH_VNODE_DELETE_CHILD) ? " DELETE_CHILD" : "",
5962 (action & KAUTH_VNODE_READ_ATTRIBUTES) ? " READ_ATTRIBUTES" : "",
5963 (action & KAUTH_VNODE_WRITE_ATTRIBUTES) ? " WRITE_ATTRIBUTES" : "",
5964 (action & KAUTH_VNODE_READ_EXTATTRIBUTES) ? " READ_EXTATTRIBUTES" : "",
5965 (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES) ? " WRITE_EXTATTRIBUTES" : "",
5966 (action & KAUTH_VNODE_READ_SECURITY) ? " READ_SECURITY" : "",
5967 (action & KAUTH_VNODE_WRITE_SECURITY) ? " WRITE_SECURITY" : "",
5968 (action & KAUTH_VNODE_CHANGE_OWNER) ? " CHANGE_OWNER" : "",
5969 (action & KAUTH_VNODE_NOIMMUTABLE) ? " (noimmutable)" : "",
5970 vnode_isdir(vp) ? "directory" : "file",
5971 vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);
5972
5973 /*
5974 * Extract the control bits from the action, everything else is
5975 * requested rights.
5976 */
5977 noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
5978 rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);
5979
5980 if (rights & KAUTH_VNODE_DELETE) {
5981 #if DIAGNOSTIC
5982 if (dvp == NULL)
5983 panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
5984 #endif
5985 /*
5986 * check to see if we've already authorized the parent
5987 * directory for deletion of its children... if so, we
5988 * can skip a whole bunch of work... we will still have to
5989 * authorize that this specific child can be removed
5990 */
5991 if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE)
5992 parent_authorized_for_delete_child = TRUE;
5993 } else {
5994 dvp = NULL;
5995 }
5996
5997 /*
5998 * Check for read-only filesystems.
5999 */
6000 if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
6001 (vp->v_mount->mnt_flag & MNT_RDONLY) &&
6002 ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
6003 (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
6004 (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) {
6005 result = EROFS;
6006 goto out;
6007 }
6008
6009 /*
6010 * Check for noexec filesystems.
6011 */
6012 if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
6013 result = EACCES;
6014 goto out;
6015 }
6016
6017 /*
6018 * Handle cases related to filesystems with non-local enforcement.
6019 * This call can return 0, in which case we will fall through to perform a
6020 * check based on VNOP_GETATTR data. Otherwise it returns 1 and sets
6021 * an appropriate result, at which point we can return immediately.
6022 */
6023 if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx))
6024 goto out;
6025
6026 /*
6027 * Get vnode attributes and extended security information for the vnode
6028 * and directory if required.
6029 */
6030 VATTR_WANTED(&va, va_mode);
6031 VATTR_WANTED(&va, va_uid);
6032 VATTR_WANTED(&va, va_gid);
6033 VATTR_WANTED(&va, va_flags);
6034 VATTR_WANTED(&va, va_acl);
6035 if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
6036 KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result);
6037 goto out;
6038 }
6039 if (dvp && parent_authorized_for_delete_child == FALSE) {
6040 VATTR_WANTED(&dva, va_mode);
6041 VATTR_WANTED(&dva, va_uid);
6042 VATTR_WANTED(&dva, va_gid);
6043 VATTR_WANTED(&dva, va_flags);
6044 VATTR_WANTED(&dva, va_acl);
6045 if ((result = vnode_getattr(dvp, &dva, ctx)) != 0) {
6046 KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp, result);
6047 goto out;
6048 }
6049 }
6050
6051 /*
6052 * If the vnode is an extended attribute data vnode (e.g. a resource fork), *_DATA becomes
6053 * *_EXTATTRIBUTES.
6054 */
6055 if (vnode_isnamedstream(vp)) {
6056 if (rights & KAUTH_VNODE_READ_DATA) {
6057 rights &= ~KAUTH_VNODE_READ_DATA;
6058 rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
6059 }
6060 if (rights & KAUTH_VNODE_WRITE_DATA) {
6061 rights &= ~KAUTH_VNODE_WRITE_DATA;
6062 rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
6063 }
6064 }
6065
6066 /*
6067 * Point 'vp' to the resource fork's parent for ACL checking
6068 */
6069 if (vnode_isnamedstream(vp) &&
6070 (vp->v_parent != NULL) &&
6071 (vget_internal(vp->v_parent, 0, VNODE_NODEAD) == 0)) {
6072 parent_ref = TRUE;
6073 vcp->vp = vp = vp->v_parent;
6074 if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
6075 kauth_acl_free(va.va_acl);
6076 VATTR_INIT(&va);
6077 VATTR_WANTED(&va, va_mode);
6078 VATTR_WANTED(&va, va_uid);
6079 VATTR_WANTED(&va, va_gid);
6080 VATTR_WANTED(&va, va_flags);
6081 VATTR_WANTED(&va, va_acl);
6082 if ((result = vnode_getattr(vp, &va, ctx)) != 0)
6083 goto out;
6084 }
6085
6086 /*
6087 * Check for immutability.
6088 *
6089 * In the deletion case, parent directory immutability vetoes specific
6090 * file rights.
6091 */
6092 if ((result = vnode_authorize_checkimmutable(vp, &va, rights, noimmutable)) != 0)
6093 goto out;
6094 if ((rights & KAUTH_VNODE_DELETE) &&
6095 parent_authorized_for_delete_child == FALSE &&
6096 ((result = vnode_authorize_checkimmutable(dvp, &dva, KAUTH_VNODE_DELETE_CHILD, 0)) != 0))
6097 goto out;
6098
6099 /*
6100 * Clear rights that have been authorized by reaching this point, bail if nothing left to
6101 * check.
6102 */
6103 rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE);
6104 if (rights == 0)
6105 goto out;
6106
6107 /*
6108 * If we're not the superuser, authorize based on file properties;
6109 * note that even if parent_authorized_for_delete_child is TRUE, we
6110 * need to check on the node itself.
6111 */
6112 if (!vfs_context_issuser(ctx)) {
6113 /* process delete rights */
6114 if ((rights & KAUTH_VNODE_DELETE) &&
6115 ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0))
6116 goto out;
6117
6118 /* process remaining rights */
6119 if ((rights & ~KAUTH_VNODE_DELETE) &&
6120 (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, &found_deny)) != 0)
6121 goto out;
6122 } else {
6123
6124 /*
6125 * Execute is only granted to root if one of the x bits is set. This check only
6126 * makes sense if the posix mode bits are actually supported.
6127 */
6128 if ((rights & KAUTH_VNODE_EXECUTE) &&
6129 (vp->v_type == VREG) &&
6130 VATTR_IS_SUPPORTED(&va, va_mode) &&
6131 !(va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
6132 result = EPERM;
6133 KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vp, va.va_mode);
6134 goto out;
6135 }
6136
6137 KAUTH_DEBUG("%p ALLOWED - caller is superuser", vp);
6138 }
6139 out:
6140 if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
6141 kauth_acl_free(va.va_acl);
6142 if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL))
6143 kauth_acl_free(dva.va_acl);
6144
6145 if (result) {
6146 if (parent_ref)
6147 vnode_put(vp);
6148 *errorp = result;
6149 KAUTH_DEBUG("%p DENIED - auth denied", vp);
6150 return(KAUTH_RESULT_DENY);
6151 }
6152 if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
6153 /*
6154 * if we were successfully granted the right to search this directory
6155 * and there were NO ACL DENYs for search and the posix permissions also don't
6156 * deny execute, we can synthesize a global right that allows anyone to
6157 * traverse this directory during a pathname lookup without having to
6158 * match the credential associated with this cache of rights.
6159 */
6160 if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
6161 ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
6162 (S_IXUSR | S_IXGRP | S_IXOTH))) {
6163 vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
6164 }
6165 }
6166 if ((rights & KAUTH_VNODE_DELETE) && parent_authorized_for_delete_child == FALSE) {
6167 /*
6168 * parent was successfully and newly authorized for content deletions;
6169 * add it to the cache, but only if it doesn't have the sticky
6170 * bit set on it. This same check is done earlier guarding
6171 * fetching of dva, and if we jumped to out without having done
6172 * this, we will have returned already because of a non-zero
6173 * 'result' value.
6174 */
6175 if (VATTR_IS_SUPPORTED(&dva, va_mode) &&
6176 !(dva.va_mode & (S_ISVTX))) {
6177 /* OK to cache delete rights */
6178 vnode_cache_authorized_action(dvp, ctx, KAUTH_VNODE_DELETE_CHILD);
6179 }
6180 }
6181 if (parent_ref)
6182 vnode_put(vp);
6183 /*
6184 * Note that this implies that we will allow requests for no rights, as well as
6185 * for rights that we do not recognise. There should be none of these.
6186 */
6187 KAUTH_DEBUG("%p ALLOWED - auth granted", vp);
6188 return(KAUTH_RESULT_ALLOW);
6189 }
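
/*
 * Usage sketch (illustrative): in-kernel callers normally do not invoke this
 * listener directly; they go through the vnode_authorize() KPI, e.g.
 *
 *	error = vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
 *
 * which dispatches into the KAUTH_SCOPE_VNODE scope, whose registered
 * listener (vnode_authorize_callback, above) in turn calls this routine.
 */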
6190
6191 /*
6192 * Check that the attribute information in vattr can be legally applied to
6193 * a new file by the context.
6194 */
6195 int
6196 vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
6197 {
6198 int error;
6199 int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode;
6200 kauth_cred_t cred;
6201 guid_t changer;
6202 mount_t dmp;
6203
6204 error = 0;
6205 defaulted_owner = defaulted_group = defaulted_mode = 0;
6206
6207 /*
6208 * Require that the filesystem support extended security to apply any.
6209 */
6210 if (!vfs_extendedsecurity(dvp->v_mount) &&
6211 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
6212 error = EINVAL;
6213 goto out;
6214 }
6215
6216 /*
6217 * Default some fields.
6218 */
6219 dmp = dvp->v_mount;
6220
6221 /*
6222 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
6223 * owner takes ownership of all new files.
6224 */
6225 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) {
6226 VATTR_SET(vap, va_uid, dmp->mnt_fsowner);
6227 defaulted_owner = 1;
6228 } else {
6229 if (!VATTR_IS_ACTIVE(vap, va_uid)) {
6230 /* default owner is current user */
6231 VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx)));
6232 defaulted_owner = 1;
6233 }
6234 }
6235
6236 /*
6237 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that
6238 * group takes ownership of all new files.
6239 */
6240 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) {
6241 VATTR_SET(vap, va_gid, dmp->mnt_fsgroup);
6242 defaulted_group = 1;
6243 } else {
6244 if (!VATTR_IS_ACTIVE(vap, va_gid)) {
6245 /* default group comes from parent object, fallback to current user */
6246 struct vnode_attr dva;
6247 VATTR_INIT(&dva);
6248 VATTR_WANTED(&dva, va_gid);
6249 if ((error = vnode_getattr(dvp, &dva, ctx)) != 0)
6250 goto out;
6251 if (VATTR_IS_SUPPORTED(&dva, va_gid)) {
6252 VATTR_SET(vap, va_gid, dva.va_gid);
6253 } else {
6254 VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx)));
6255 }
6256 defaulted_group = 1;
6257 }
6258 }
6259
6260 if (!VATTR_IS_ACTIVE(vap, va_flags))
6261 VATTR_SET(vap, va_flags, 0);
6262
6263 /* default mode is everything, masked with current umask */
6264 if (!VATTR_IS_ACTIVE(vap, va_mode)) {
6265 VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd->fd_cmask);
6266 KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o", vap->va_mode, vfs_context_proc(ctx)->p_fd->fd_cmask);
6267 defaulted_mode = 1;
6268 }
6269 /* set timestamps to now */
6270 if (!VATTR_IS_ACTIVE(vap, va_create_time)) {
6271 nanotime(&vap->va_create_time);
6272 VATTR_SET_ACTIVE(vap, va_create_time);
6273 }
6274
6275 /*
6276 * Check for attempts to set nonsensical fields.
6277 */
6278 if (vap->va_active & ~VNODE_ATTR_NEWOBJ) {
6279 error = EINVAL;
6280 KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
6281 vap->va_active & ~VNODE_ATTR_NEWOBJ);
6282 goto out;
6283 }
6284
6285 /*
6286 * Quickly check for the applicability of any enforcement here.
6287 * Tests below maintain the integrity of the local security model.
6288 */
6289 if (vfs_authopaque(dvp->v_mount))
6290 goto out;
6291
6292 /*
6293 * We need to know if the caller is the superuser, or if the work is
6294 * otherwise already authorised.
6295 */
6296 cred = vfs_context_ucred(ctx);
6297 if (noauth) {
6298 /* doing work for the kernel */
6299 has_priv_suser = 1;
6300 } else {
6301 has_priv_suser = vfs_context_issuser(ctx);
6302 }
6303
6304
6305 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6306 if (has_priv_suser) {
6307 if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) {
6308 error = EPERM;
6309 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
6310 goto out;
6311 }
6312 } else {
6313 if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) {
6314 error = EPERM;
6315 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
6316 goto out;
6317 }
6318 }
6319 }
6320
6321 /* if not superuser, validate legality of new-item attributes */
6322 if (!has_priv_suser) {
6323 if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) {
6324 /* setgid? */
6325 if (vap->va_mode & S_ISGID) {
6326 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6327 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
6328 goto out;
6329 }
6330 if (!ismember) {
6331 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", vap->va_gid);
6332 error = EPERM;
6333 goto out;
6334 }
6335 }
6336
6337 /* setuid? */
6338 if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) {
6339 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
6340 error = EPERM;
6341 goto out;
6342 }
6343 }
6344 if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) {
6345 KAUTH_DEBUG(" DENIED - cannot create new item owned by %d", vap->va_uid);
6346 error = EPERM;
6347 goto out;
6348 }
6349 if (!defaulted_group) {
6350 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6351 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
6352 goto out;
6353 }
6354 if (!ismember) {
6355 KAUTH_DEBUG(" DENIED - cannot create new item with group %d - not a member", vap->va_gid);
6356 error = EPERM;
6357 goto out;
6358 }
6359 }
6360
6361 /* initialising owner/group UUID */
6362 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
6363 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
6364 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
6365 /* XXX ENOENT here - no GUID - should perhaps become EPERM */
6366 goto out;
6367 }
6368 if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
6369 KAUTH_DEBUG(" ERROR - cannot create item with supplied owner UUID - not us");
6370 error = EPERM;
6371 goto out;
6372 }
6373 }
6374 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
6375 if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
6376 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
6377 goto out;
6378 }
6379 if (!ismember) {
6380 KAUTH_DEBUG(" ERROR - cannot create item with supplied group UUID - not a member");
6381 error = EPERM;
6382 goto out;
6383 }
6384 }
6385 }
6386 out:
6387 return(error);
6388 }
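
/*
 * Usage sketch (illustrative): object-creation paths typically call this
 * routine before issuing the create VNOP, roughly
 *
 *	if ((error = vnode_authattr_new(dvp, vap, 0, ctx)) != 0)
 *		return (error);
 *	error = VNOP_CREATE(dvp, vpp, cnp, vap, ctx);
 *
 * so that defaulted owner/group/mode values are filled in and validated
 * before the filesystem sees them.
 */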
6389
6390 /*
6391 * Check that the attribute information in vap can be legally written by the
6392 * context.
6393 *
6394 * Call this when you're not sure about the vnode_attr; either its contents
6395 * have come from an unknown source, or they are variable.
6396 *
6397 * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
6398 * must be authorized to be permitted to write the vattr.
6399 */
6400 int
6401 vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
6402 {
6403 struct vnode_attr ova;
6404 kauth_action_t required_action;
6405 int error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid;
6406 guid_t changer;
6407 gid_t group;
6408 uid_t owner;
6409 mode_t newmode;
6410 kauth_cred_t cred;
6411 uint32_t fdelta;
6412
6413 VATTR_INIT(&ova);
6414 required_action = 0;
6415 error = 0;
6416
6417 /*
6418 * Quickly check for enforcement applicability.
6419 */
6420 if (vfs_authopaque(vp->v_mount))
6421 goto out;
6422
6423 /*
6424 * Check for attempts to set nonsensical fields.
6425 */
6426 if (vap->va_active & VNODE_ATTR_RDONLY) {
6427 KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
6428 error = EINVAL;
6429 goto out;
6430 }
6431
6432 /*
6433 * We need to know if the caller is the superuser.
6434 */
6435 cred = vfs_context_ucred(ctx);
6436 has_priv_suser = kauth_cred_issuser(cred);
6437
6438 /*
6439 * If any of the following are changing, we need information from the old file:
6440 * va_uid
6441 * va_gid
6442 * va_mode
6443 * va_uuuid
6444 * va_guuid
6445 */
6446 if (VATTR_IS_ACTIVE(vap, va_uid) ||
6447 VATTR_IS_ACTIVE(vap, va_gid) ||
6448 VATTR_IS_ACTIVE(vap, va_mode) ||
6449 VATTR_IS_ACTIVE(vap, va_uuuid) ||
6450 VATTR_IS_ACTIVE(vap, va_guuid)) {
6451 VATTR_WANTED(&ova, va_mode);
6452 VATTR_WANTED(&ova, va_uid);
6453 VATTR_WANTED(&ova, va_gid);
6454 VATTR_WANTED(&ova, va_uuuid);
6455 VATTR_WANTED(&ova, va_guuid);
6456 KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
6457 }
6458
6459 /*
6460 * If timestamps are being changed, we need to know who the file is owned
6461 * by.
6462 */
6463 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
6464 VATTR_IS_ACTIVE(vap, va_change_time) ||
6465 VATTR_IS_ACTIVE(vap, va_modify_time) ||
6466 VATTR_IS_ACTIVE(vap, va_access_time) ||
6467 VATTR_IS_ACTIVE(vap, va_backup_time)) {
6468
6469 VATTR_WANTED(&ova, va_uid);
6470 #if 0 /* enable this when we support UUIDs as official owners */
6471 VATTR_WANTED(&ova, va_uuuid);
6472 #endif
6473 KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
6474 }
6475
6476 /*
6477 * If flags are being changed, we need the old flags.
6478 */
6479 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6480 KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
6481 VATTR_WANTED(&ova, va_flags);
6482 }
6483
6484 /*
6485 * If the size is being set, make sure it's not a directory.
6486 */
6487 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
6488 /* size is meaningless on a directory, don't permit this */
6489 if (vnode_isdir(vp)) {
6490 KAUTH_DEBUG("ATTR - ERROR: size change requested on a directory");
6491 error = EISDIR;
6492 goto out;
6493 }
6494 }
6495
6496 /*
6497 * Get old data.
6498 */
6499 KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active);
6500 if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
6501 KAUTH_DEBUG(" ERROR - got %d trying to get attributes", error);
6502 goto out;
6503 }
6504
6505 /*
6506 * Size changes require write access to the file data.
6507 */
6508 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
6509 /* if we can't get the size, or it's different, we need write access */
6510 KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
6511 required_action |= KAUTH_VNODE_WRITE_DATA;
6512 }
6513
6514 /*
6515 * Changing timestamps?
6516 *
6517 * Note that we are only called to authorize user-requested time changes;
6518 * side-effect time changes are not authorized. Authorisation is only
6519 * required for existing files.
6520 *
6521 * Non-owners are not permitted to change the time on an existing
6522 * file to anything other than the current time.
6523 */
6524 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
6525 VATTR_IS_ACTIVE(vap, va_change_time) ||
6526 VATTR_IS_ACTIVE(vap, va_modify_time) ||
6527 VATTR_IS_ACTIVE(vap, va_access_time) ||
6528 VATTR_IS_ACTIVE(vap, va_backup_time)) {
6529 /*
6530 * The owner and root may set any timestamps they like,
6531 * provided that the file is not immutable. The owner still needs
6532 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
6533 */
6534 if (has_priv_suser || vauth_node_owner(&ova, cred)) {
6535 KAUTH_DEBUG("ATTR - root or owner changing timestamps");
6536 required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES;
6537 } else {
6538 /* just setting the current time? */
6539 if (vap->va_vaflags & VA_UTIMES_NULL) {
6540 KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
6541 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
6542 } else {
6543 KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
6544 error = EACCES;
6545 goto out;
6546 }
6547 }
6548 }
6549
6550 /*
6551 * Changing file mode?
6552 */
6553 if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) {
6554 KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode);
6555
6556 /*
6557 * Mode changes always have the same basic auth requirements.
6558 */
6559 if (has_priv_suser) {
6560 KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
6561 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
6562 } else {
6563 /* need WRITE_SECURITY */
6564 KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
6565 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6566 }
6567
6568 /*
6569 * Can't set the setgid bit if you're not in the group and not root. Have to have
6570 * existing group information in the case we're not setting it right now.
6571 */
6572 if (vap->va_mode & S_ISGID) {
6573 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
6574 if (!has_priv_suser) {
6575 if (VATTR_IS_ACTIVE(vap, va_gid)) {
6576 group = vap->va_gid;
6577 } else if (VATTR_IS_SUPPORTED(&ova, va_gid)) {
6578 group = ova.va_gid;
6579 } else {
6580 KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
6581 error = EINVAL;
6582 goto out;
6583 }
6584 /*
6585 * This might be too restrictive; WRITE_SECURITY might be implied by
6586 * membership in this case, rather than being an additional requirement.
6587 */
6588 if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) {
6589 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
6590 goto out;
6591 }
6592 if (!ismember) {
6593 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", group);
6594 error = EPERM;
6595 goto out;
6596 }
6597 }
6598 }
6599
6600 /*
6601 * Can't set the setuid bit unless you're root or the file's owner.
6602 */
6603 if (vap->va_mode & S_ISUID) {
6604 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
6605 if (!has_priv_suser) {
6606 if (VATTR_IS_ACTIVE(vap, va_uid)) {
6607 owner = vap->va_uid;
6608 } else if (VATTR_IS_SUPPORTED(&ova, va_uid)) {
6609 owner = ova.va_uid;
6610 } else {
6611 KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
6612 error = EINVAL;
6613 goto out;
6614 }
6615 if (owner != kauth_cred_getuid(cred)) {
6616 /*
6617 * We could allow this if WRITE_SECURITY is permitted, perhaps.
6618 */
6619 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
6620 error = EPERM;
6621 goto out;
6622 }
6623 }
6624 }
6625 }
6626
6627 /*
6628 * Validate/mask flags changes. This checks that only the flags in
6629 * the UF_SETTABLE mask are being set, and preserves the flags in
6630 * the SF_SETTABLE case.
6631 *
6632 * Since flags changes may be made in conjunction with other changes,
6633 * we will ask the auth code to ignore immutability in the case that
6634 * the SF_* flags are not set and we are only manipulating the file flags.
6635 *
6636 */
6637 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6638 /* compute changing flags bits */
6639 if (VATTR_IS_SUPPORTED(&ova, va_flags)) {
6640 fdelta = vap->va_flags ^ ova.va_flags;
6641 } else {
6642 fdelta = vap->va_flags;
6643 }
6644
6645 if (fdelta != 0) {
6646 KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
6647 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6648
6649 /* check that changing bits are legal */
6650 if (has_priv_suser) {
6651 /*
6652 * The immutability check will prevent us from clearing the SF_*
6653 * flags unless the system securelevel permits it, so just check
6654 * for legal flags here.
6655 */
6656 if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) {
6657 error = EPERM;
6658 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
6659 goto out;
6660 }
6661 } else {
6662 if (fdelta & ~UF_SETTABLE) {
6663 error = EPERM;
6664 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
6665 goto out;
6666 }
6667 }
6668 /*
6669 * If the caller has the ability to manipulate file flags,
6670 * security is not reduced by ignoring them for this operation.
6671 *
6672 * A more complete test here would consider the 'after' states of the flags
6673 * to determine whether it would permit the operation, but this becomes
6674 * very complex.
6675 *
6676 * Ignoring immutability is conditional on securelevel; this does not bypass
6677 * the SF_* flags if securelevel > 0.
6678 */
6679 required_action |= KAUTH_VNODE_NOIMMUTABLE;
6680 }
6681 }
6682
6683 /*
6684 * Validate ownership information.
6685 */
6686 chowner = 0;
6687 chgroup = 0;
6688 clear_suid = 0;
6689 clear_sgid = 0;
6690
6691 /*
6692 * uid changing
6693 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
6694 * support them in general, and will ignore it if/when we try to set it.
6695 * We might want to clear the uid out of vap completely here.
6696 */
6697 if (VATTR_IS_ACTIVE(vap, va_uid)) {
6698 if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) {
6699 if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) {
6700 KAUTH_DEBUG(" DENIED - non-superuser cannot change ownership to a third party");
6701 error = EPERM;
6702 goto out;
6703 }
6704 chowner = 1;
6705 }
6706 clear_suid = 1;
6707 }
6708
6709 /*
6710 * gid changing
6711 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
6712 * support GIDs in general, and will ignore the value if/when we try to set it.
6713 * We might want to clear the gid out of vap completely here.
6714 */
6715 if (VATTR_IS_ACTIVE(vap, va_gid)) {
6716 if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) {
6717 if (!has_priv_suser) {
6718 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
6719 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
6720 goto out;
6721 }
6722 if (!ismember) {
6723 KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group",
6724 ova.va_gid, vap->va_gid);
6725 error = EPERM;
6726 goto out;
6727 }
6728 }
6729 chgroup = 1;
6730 }
6731 clear_sgid = 1;
6732 }
6733
6734 /*
6735 * Owner UUID being set or changed.
6736 */
6737 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
6738 /* if the owner UUID is not actually changing ... */
6739 if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
6740 if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid))
6741 goto no_uuuid_change;
6742
6743 /*
6744 * If the current owner UUID is a null GUID, check
6745 * it against the UUID corresponding to the owner UID.
6746 */
6747 if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
6748 VATTR_IS_SUPPORTED(&ova, va_uid)) {
6749 guid_t uid_guid;
6750
6751 if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
6752 kauth_guid_equal(&vap->va_uuuid, &uid_guid))
6753 goto no_uuuid_change;
6754 }
6755 }
6756
6757 /*
6758 * The owner UUID cannot be set by a non-superuser to anything other than
6759 * their own or a null GUID (to "unset" the owner UUID).
6760 * Note that file systems must be prepared to handle the
6761 * null UUID case in a manner appropriate for that file
6762 * system.
6763 */
6764 if (!has_priv_suser) {
6765 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
6766 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
6767 /* XXX ENOENT here - no UUID - should perhaps become EPERM */
6768 goto out;
6769 }
6770 if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
6771 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
6772 KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us / null");
6773 error = EPERM;
6774 goto out;
6775 }
6776 }
6777 chowner = 1;
6778 clear_suid = 1;
6779 }
6780 no_uuuid_change:
6781 /*
6782 * Group UUID being set or changed.
6783 */
6784 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
6785 /* if the group UUID is not actually changing ... */
6786 if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
6787 if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid))
6788 goto no_guuid_change;
6789
6790 /*
6791 * If the current group UUID is a null UUID, check
6792 * it against the UUID corresponding to the group GID.
6793 */
6794 if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
6795 VATTR_IS_SUPPORTED(&ova, va_gid)) {
6796 guid_t gid_guid;
6797
6798 if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
6799 kauth_guid_equal(&vap->va_guuid, &gid_guid))
6800 goto no_guuid_change;
6801 }
6802 }
6803
6804 /*
6805 * The group UUID cannot be set by a non-superuser to anything other than
6806 * one of which they are a member or a null GUID (to "unset"
6807 * the group UUID).
6808 * Note that file systems must be prepared to handle the
6809 * null UUID case in a manner appropriate for that file
6810 * system.
6811 */
6812 if (!has_priv_suser) {
6813 if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid))
6814 ismember = 1;
6815 else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
6816 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
6817 goto out;
6818 }
6819 if (!ismember) {
6820 KAUTH_DEBUG(" ERROR - cannot set supplied group UUID - not a member / null");
6821 error = EPERM;
6822 goto out;
6823 }
6824 }
6825 chgroup = 1;
6826 }
6827 no_guuid_change:
6828
6829 /*
6830 * Compute authorisation for group/ownership changes.
6831 */
6832 if (chowner || chgroup || clear_suid || clear_sgid) {
6833 if (has_priv_suser) {
6834 KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
6835 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
6836 } else {
6837 if (chowner) {
6838 KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
6839 required_action |= KAUTH_VNODE_TAKE_OWNERSHIP;
6840 }
6841 if (chgroup && !chowner) {
6842 KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
6843 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6844 }
6845
6846 /* clear set-uid and set-gid bits as required by Posix */
6847 if (VATTR_IS_ACTIVE(vap, va_mode)) {
6848 newmode = vap->va_mode;
6849 } else if (VATTR_IS_SUPPORTED(&ova, va_mode)) {
6850 newmode = ova.va_mode;
6851 } else {
6852 KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
6853 newmode = 0;
6854 }
6855 if (newmode & (S_ISUID | S_ISGID)) {
6856 VATTR_SET(vap, va_mode, newmode & ~(S_ISUID | S_ISGID));
6857 KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o", newmode, vap->va_mode);
6858 }
6859 }
6860 }
6861
6862 /*
6863 * Authorise changes in the ACL.
6864 */
6865 if (VATTR_IS_ACTIVE(vap, va_acl)) {
6866
6867 /* no existing ACL */
6868 if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) {
6869
6870 /* adding an ACL */
6871 if (vap->va_acl != NULL) {
6872 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6873 KAUTH_DEBUG("CHMOD - adding ACL");
6874 }
6875
6876 /* removing an existing ACL */
6877 } else if (vap->va_acl == NULL) {
6878 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6879 KAUTH_DEBUG("CHMOD - removing ACL");
6880
6881 /* updating an existing ACL */
6882 } else {
6883 if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) {
6884 /* entry count changed, must be different */
6885 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6886 KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
6887 } else if (vap->va_acl->acl_entrycount > 0) {
6888 /* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
6889 if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0],
6890 sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount)) {
6891 required_action |= KAUTH_VNODE_WRITE_SECURITY;
6892 KAUTH_DEBUG("CHMOD - changing ACL entries");
6893 }
6894 }
6895 }
6896 }
6897
6898 /*
6899 * Other attributes that require authorisation.
6900 */
6901 if (VATTR_IS_ACTIVE(vap, va_encoding))
6902 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
6903
6904 out:
6905 if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL))
6906 kauth_acl_free(ova.va_acl);
6907 if (error == 0)
6908 *actionp = required_action;
6909 return(error);
6910 }
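
/*
 * Usage sketch (illustrative): a setattr path typically pairs this routine
 * with vnode_authorize() and vnode_setattr(), e.g.
 *
 *	kauth_action_t action;
 *
 *	if ((error = vnode_authattr(vp, vap, &action, ctx)) != 0)
 *		return (error);
 *	if (action && ((error = vnode_authorize(vp, NULL, action, ctx)) != 0))
 *		return (error);
 *	error = vnode_setattr(vp, vap, ctx);
 */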
6911
6912
6913 void
6914 vfs_setlocklocal(mount_t mp)
6915 {
6916 vnode_t vp;
6917
6918 mount_lock(mp);
6919 mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
6920
6921 /*
6922 * We do not expect anyone to be using any vnodes at the
6923 * time this routine is called, so there is no need for vnode locking.
6924 */
6925 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
6926 vp->v_flag |= VLOCKLOCAL;
6927 }
6928 TAILQ_FOREACH(vp, &mp->mnt_workerqueue, v_mntvnodes) {
6929 vp->v_flag |= VLOCKLOCAL;
6930 }
6931 TAILQ_FOREACH(vp, &mp->mnt_newvnodes, v_mntvnodes) {
6932 vp->v_flag |= VLOCKLOCAL;
6933 }
6934 mount_unlock(mp);
6935 }
6936
6937 void
6938 vfs_setunmountpreflight(mount_t mp)
6939 {
6940 mount_lock_spin(mp);
6941 mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
6942 mount_unlock(mp);
6943 }
6944
6945 void
6946 vn_setunionwait(vnode_t vp)
6947 {
6948 vnode_lock_spin(vp);
6949 vp->v_flag |= VISUNION;
6950 vnode_unlock(vp);
6951 }
6952
6953
6954 void
6955 vn_checkunionwait(vnode_t vp)
6956 {
6957 vnode_lock_spin(vp);
6958 while ((vp->v_flag & VISUNION) == VISUNION)
6959 msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
6960 vnode_unlock(vp);
6961 }
6962
6963 void
6964 vn_clearunionwait(vnode_t vp, int locked)
6965 {
6966 if (!locked)
6967 vnode_lock_spin(vp);
6968 if((vp->v_flag & VISUNION) == VISUNION) {
6969 vp->v_flag &= ~VISUNION;
6970 wakeup((caddr_t)&vp->v_flag);
6971 }
6972 if (!locked)
6973 vnode_unlock(vp);
6974 }
6975
6976 /*
6977 * XXX - get "don't trigger mounts" flag for thread; used by autofs.
6978 */
6979 extern int thread_notrigger(void);
6980
6981 int
6982 thread_notrigger(void)
6983 {
6984 struct uthread *uth = (struct uthread *)get_bsdthread_info(current_thread());
6985 return (uth->uu_notrigger);
6986 }
6987
6988 /*
6989 * Removes orphaned AppleDouble files during an rmdir.
6990 * Works by:
6991 * 1. vnode_suspend().
6992 * 2. Call VNOP_READDIR() until the end of the directory is reached.
6993 * 3. Check whether the directory entries returned are regular files with names starting with "._". If not, return ENOTEMPTY.
6994 * 4. Continue (2) and (3) until the end of the directory is reached.
6995 * 5. If all the entries in the directory were "._" files, delete all of them.
6996 * 6. vnode_resume().
6997 * 7. If deletion of all files succeeded, call VNOP_RMDIR() again.
6998 */
6999
7000 errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * restart_flag)
7001 {
7002
7003 #define UIO_BUFF_SIZE 2048
7004 uio_t auio = NULL;
7005 int eofflag, siz = UIO_BUFF_SIZE, nentries = 0;
7006 int open_flag = 0, full_erase_flag = 0;
7007 char uio_buf[ UIO_SIZEOF(1) ];
7008 char *rbuf = NULL, *cpos, *cend;
7009 struct nameidata nd_temp;
7010 struct dirent *dp;
7011 errno_t error;
7012
7013 error = vnode_suspend(vp);
7014
7015 /*
7016 * restart_flag is set so that the calling rmdir sleeps and resets
7017 */
7018 if (error == EBUSY)
7019 *restart_flag = 1;
7020 if (error != 0)
7021 goto outsc;
7022
7023 /*
7024 * set up UIO
7025 */
7026 MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK);
7027 if (rbuf)
7028 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
7029 &uio_buf[0], sizeof(uio_buf));
7030 if (!rbuf || !auio) {
7031 error = ENOMEM;
7032 goto outsc;
7033 }
7034
7035 uio_setoffset(auio,0);
7036
7037 eofflag = 0;
7038
7039 if ((error = VNOP_OPEN(vp, FREAD, ctx)))
7040 goto outsc;
7041 else
7042 open_flag = 1;
7043
7044 /*
7045 * First pass checks whether all the files are AppleDouble files.
7046 */
7047
7048 do {
7049 siz = UIO_BUFF_SIZE;
7050 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
7051 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
7052
7053 if((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx)))
7054 goto outsc;
7055
7056 if (uio_resid(auio) != 0)
7057 siz -= uio_resid(auio);
7058
7059 /*
7060 * Iterate through directory
7061 */
7062 cpos = rbuf;
7063 cend = rbuf + siz;
7064 dp = (struct dirent*) cpos;
7065
7066 if (cpos == cend)
7067 eofflag = 1;
7068
7069 while ((cpos < cend)) {
7070 /*
7071 * Check for . and .. as well as directories
7072 */
7073 if (dp->d_ino != 0 &&
7074 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
7075 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) {
7076 /*
7077 * Check for irregular files and ._ files.
7078 * If there is a ._._ file, abort the operation.
7079 */
7080 if ( dp->d_namlen < 2 ||
7081 strncmp(dp->d_name,"._",2) ||
7082 (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._",2))) {
7083 error = ENOTEMPTY;
7084 goto outsc;
7085 }
7086 }
7087 cpos += dp->d_reclen;
7088 dp = (struct dirent*)cpos;
7089 }
7090
7091 /*
7092 * workaround for HFS/NFS setting eofflag before end of file
7093 */
7094 if (vp->v_tag == VT_HFS && nentries > 2)
7095 eofflag=0;
7096
7097 if (vp->v_tag == VT_NFS) {
7098 if (eofflag && !full_erase_flag) {
7099 full_erase_flag = 1;
7100 eofflag = 0;
7101 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
7102 }
7103 else if (!eofflag && full_erase_flag)
7104 full_erase_flag = 0;
7105 }
7106
7107 } while (!eofflag);
7108 /*
7109 * If we've made it here, all the files in the dir are ._ files.
7110 * We can delete the files even though the node is suspended
7111 * because we are the owner of the file.
7112 */
7113
7114 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
7115 eofflag = 0;
7116 full_erase_flag = 0;
7117
7118 do {
7119 siz = UIO_BUFF_SIZE;
7120 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
7121 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
7122
7123 error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);
7124
7125 if (error != 0)
7126 goto outsc;
7127
7128 if (uio_resid(auio) != 0)
7129 siz -= uio_resid(auio);
7130
7131 /*
7132 * Iterate through directory
7133 */
7134 cpos = rbuf;
7135 cend = rbuf + siz;
7136 dp = (struct dirent*) cpos;
7137
7138 if (cpos == cend)
7139 eofflag = 1;
7140
7141 while ((cpos < cend)) {
7142 /*
7143 * Check for . and .. as well as directories
7144 */
7145 if (dp->d_ino != 0 &&
7146 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
7147 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
7148 ) {
7149
7150 NDINIT(&nd_temp, DELETE, USEDVP, UIO_SYSSPACE, CAST_USER_ADDR_T(dp->d_name), ctx);
7151 nd_temp.ni_dvp = vp;
7152 error = unlink1(ctx, &nd_temp, 0);
7153 if (error && error != ENOENT) {
7154 goto outsc;
7155 }
7156 }
7157 cpos += dp->d_reclen;
7158 dp = (struct dirent*)cpos;
7159 }
7160
7161 /*
7162 * workaround for HFS/NFS setting eofflag before end of file
7163 */
7164 if (vp->v_tag == VT_HFS && nentries > 2)
7165 eofflag=0;
7166
7167 if (vp->v_tag == VT_NFS) {
7168 if (eofflag && !full_erase_flag) {
7169 full_erase_flag = 1;
7170 eofflag = 0;
7171 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
7172 }
7173 else if (!eofflag && full_erase_flag)
7174 full_erase_flag = 0;
7175 }
7176
7177 } while (!eofflag);
7178
7179
7180 error = 0;
7181
7182 outsc:
7183 if (open_flag)
7184 VNOP_CLOSE(vp, FREAD, ctx);
7185
7186 uio_free(auio);
7187 FREE(rbuf, M_TEMP);
7188
7189 vnode_resume(vp);
7190
7191
7192 return(error);
7193
7194 }
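
/*
 * Usage sketch (illustrative, per the "Works by" comment above): the rmdir
 * path is expected to retry the directory removal once this helper succeeds,
 * roughly
 *
 *	error = VNOP_RMDIR(dvp, vp, cnp, ctx);
 *	if (error == ENOTEMPTY) {
 *		error = rmdir_remove_orphaned_appleDouble(vp, ctx, &restart_flag);
 *		if (error == 0)
 *			error = VNOP_RMDIR(dvp, vp, cnp, ctx);
 *	}
 */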
7195
7196
7197 void
7198 lock_vnode_and_post(vnode_t vp, int kevent_num)
7199 {
7200 /* Only take the lock if there's something there! */
7201 if (vp->v_knotes.slh_first != NULL) {
7202 vnode_lock(vp);
7203 KNOTE(&vp->v_knotes, kevent_num);
7204 vnode_unlock(vp);
7205 }
7206 }
7207
7208 #ifdef JOE_DEBUG
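/*
 * Note: record_vp() tracks, per uthread, the net iocount delta and up to 32
 * distinct vnodes the thread has touched, presumably so that leaked iocounts
 * in JOE_DEBUG builds can be traced back to the thread that took them.
 */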
7209 static void record_vp(vnode_t vp, int count) {
7210 struct uthread *ut;
7211 int i;
7212
7213 if ((vp->v_flag & VSYSTEM))
7214 return;
7215
7216 ut = get_bsdthread_info(current_thread());
7217 ut->uu_iocount += count;
7218
7219 if (ut->uu_vpindex < 32) {
7220 for (i = 0; i < ut->uu_vpindex; i++) {
7221 if (ut->uu_vps[i] == vp)
7222 return;
7223 }
7224 ut->uu_vps[ut->uu_vpindex] = vp;
7225 ut->uu_vpindex++;
7226 }
7227 }
7228 #endif