Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
cf7d32b8 | 2 | * Copyright (c) 2000-2008 Apple Inc. All rights reserved. |
5d5c5d0d | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
1c79356b | 5 | * |
2d21ac55 | 6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
8f6c56a5 | 14 | * |
2d21ac55 | 15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
8f6c56a5 | 20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
2d21ac55 | 22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
8f6c56a5 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
1c79356b | 27 | */ |
28 | /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ | |
29 | /* | |
30 | * Copyright (c) 1989, 1993 | |
31 | * The Regents of the University of California. All rights reserved. | |
32 | * (c) UNIX System Laboratories, Inc. | |
33 | * All or some portions of this file are derived from material licensed | |
34 | * to the University of California by American Telephone and Telegraph | |
35 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with | |
36 | * the permission of UNIX System Laboratories, Inc. | |
37 | * | |
38 | * Redistribution and use in source and binary forms, with or without | |
39 | * modification, are permitted provided that the following conditions | |
40 | * are met: | |
41 | * 1. Redistributions of source code must retain the above copyright | |
42 | * notice, this list of conditions and the following disclaimer. | |
43 | * 2. Redistributions in binary form must reproduce the above copyright | |
44 | * notice, this list of conditions and the following disclaimer in the | |
45 | * documentation and/or other materials provided with the distribution. | |
46 | * 3. All advertising materials mentioning features or use of this software | |
47 | * must display the following acknowledgement: | |
48 | * This product includes software developed by the University of | |
49 | * California, Berkeley and its contributors. | |
50 | * 4. Neither the name of the University nor the names of its contributors | |
51 | * may be used to endorse or promote products derived from this software | |
52 | * without specific prior written permission. | |
53 | * | |
54 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
55 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
56 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
57 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
58 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
59 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
60 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
61 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
62 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
63 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
64 | * SUCH DAMAGE. | |
65 | * | |
66 | * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 | |
67 | */ | |
2d21ac55 | 68 | /* |
69 | * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce |
70 | * support for mandatory and extensible security protections. This notice |
71 | * is included in support of clause 2.2 (b) of the Apple Public License, |
72 | * Version 2.0. |
73 | */ |
1c79356b | 74 | |
75 | /* |
76 | * External virtual filesystem routines |
77 | */ |
78 | ||
1c79356b | 79 | |
80 | #include <sys/param.h> |
81 | #include <sys/systm.h> |
91447636 | 82 | #include <sys/proc_internal.h> |
83 | #include <sys/kauth.h> |
84 | #include <sys/mount_internal.h> |
1c79356b | 85 | #include <sys/time.h> |
91447636 | 86 | #include <sys/lock.h> |
2d21ac55 | 87 | #include <sys/vnode.h> |
91447636 | 88 | #include <sys/vnode_internal.h> |
1c79356b | 89 | #include <sys/stat.h> |
90 | #include <sys/namei.h> |
91 | #include <sys/ucred.h> |
91447636 | 92 | #include <sys/buf_internal.h> |
1c79356b | 93 | #include <sys/errno.h> |
94 | #include <sys/malloc.h> |
2d21ac55 | 95 | #include <sys/uio_internal.h> |
96 | #include <sys/uio.h> |
1c79356b | 97 | #include <sys/domain.h> |
98 | #include <sys/mbuf.h> |
99 | #include <sys/syslog.h> |
91447636 | 100 | #include <sys/ubc_internal.h> |
1c79356b | 101 | #include <sys/vm.h> |
102 | #include <sys/sysctl.h> |
55e303ae | 103 | #include <sys/filedesc.h> |
104 | #include <sys/event.h> |
91447636 | 105 | #include <sys/kdebug.h> |
106 | #include <sys/kauth.h> |
107 | #include <sys/user.h> |
108 | #include <miscfs/fifofs/fifo.h> |
55e303ae | 109 | |
110 | #include <string.h> |
111 | #include <machine/spl.h> |
112 | |
1c79356b | 113 | |
114 | #include <kern/assert.h> |
115 | |
116 | #include <miscfs/specfs/specdev.h> |
117 | |
0b4e3aa0 | 118 | #include <mach/mach_types.h> |
119 | #include <mach/memory_object_types.h> |
120 | |
2d21ac55 | 121 | #include <kern/kalloc.h> /* kalloc()/kfree() */ |
122 | #include <kern/clock.h> /* delay_for_interval() */ |
123 | #include <libkern/OSAtomic.h> /* OSAddAtomic() */ |
124 | |
125 | |
126 | #include <vm/vm_protos.h> /* vnode_pager_vrele() */ |
127 | |
128 | #if CONFIG_MACF |
129 | #include <security/mac_framework.h> |
130 | #endif |
131 | |
91447636 | 132 | extern lck_grp_t *vnode_lck_grp; |
133 | extern lck_attr_t *vnode_lck_attr; |
134 | |
135 | |
136 | extern lck_mtx_t * mnt_list_mtx_lock; |
0b4e3aa0 | 137 | |
1c79356b | 138 | enum vtype iftovt_tab[16] = { |
139 | VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, | |
140 | VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, | |
141 | }; | |
142 | int vttoif_tab[9] = { | |
143 | 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, | |
144 | S_IFSOCK, S_IFIFO, S_IFMT, | |
145 | }; | |
146 | ||
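The two tables above map between inode mode type bits and vnode types. A minimal sketch of how they are typically consumed is shown below; the IFTOVT()/VTTOIF() macro forms follow the classic BSD convention and are assumed for this illustration rather than taken from this file.

```c
/* Sketch only: classic BSD-style accessors over the tables above. */
#define IFTOVT(mode)    (iftovt_tab[((mode) & S_IFMT) >> 12])
#define VTTOIF(indx)    (vttoif_tab[(int)(indx)])

static enum vtype
example_mode_to_vtype(mode_t mode)
{
	/* S_IFREG >> 12 == 8, and iftovt_tab[8] is VREG */
	return IFTOVT(mode);
}
```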
2d21ac55 | 147 | /* XXX next prototype should be from <nfs/nfs.h> */ |
148 | extern int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int); |
149 | |
150 | /* XXX next prototype should be from <libsa/stdlib.h> but conflicts with libkern */ |
151 | __private_extern__ void qsort( | |
152 | void * array, | |
153 | size_t nmembers, | |
154 | size_t member_size, | |
155 | int (*)(const void *, const void *)); | |
156 | ||
91447636 | 157 | extern kern_return_t adjust_vm_object_cache(vm_size_t oval, vm_size_t nval); |
2d21ac55 | 158 | __private_extern__ void vntblinit(void); |
159 | __private_extern__ kern_return_t reset_vmobjectcache(unsigned int val1, |
160 | unsigned int val2); |
161 | __private_extern__ int unlink1(vfs_context_t, struct nameidata *, int); |
91447636 | 162 | |
163 | static void vnode_list_add(vnode_t); |
164 | static void vnode_list_remove(vnode_t); |
cf7d32b8 | 165 | static void vnode_list_remove_locked(vnode_t); |
91447636 | 166 | |
167 | static errno_t vnode_drain(vnode_t); |
2d21ac55 | 168 | static void vgone(vnode_t, int flags); |
169 | static void vclean(vnode_t vp, int flag); | |
170 | static void vnode_reclaim_internal(vnode_t, int, int, int); | |
91447636 | 171 | |
2d21ac55 | 172 | static void vnode_dropiocount (vnode_t); |
173 | static errno_t vnode_getiocount(vnode_t vp, int vid, int vflags); | |
91447636 | 174 | static int vget_internal(vnode_t, int, int); |
175 | ||
176 | static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev); | |
177 | static int vnode_reload(vnode_t); | |
178 | static int vnode_isinuse_locked(vnode_t, int, int); | |
179 | ||
180 | static void insmntque(vnode_t vp, mount_t mp); | |
91447636 | 181 | static int mount_getvfscnt(void); |
182 | static int mount_fillfsids(fsid_t *, int ); | |
183 | static void vnode_iterate_setup(mount_t); | |
184 | static int vnode_umount_preflight(mount_t, vnode_t, int); | |
185 | static int vnode_iterate_prepare(mount_t); | |
186 | static int vnode_iterate_reloadq(mount_t); | |
187 | static void vnode_iterate_clear(mount_t); | |
1c79356b | 188 | |
2d21ac55 | 189 | errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *); |
190 | ||
1c79356b | 191 | TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */ |
2d21ac55 | 192 | TAILQ_HEAD(deadlst, vnode) vnode_dead_list; /* vnode dead list */ |
193 | ||
194 | TAILQ_HEAD(ragelst, vnode) vnode_rage_list; /* vnode rapid age list */ | |
195 | struct timeval rage_tv; | |
196 | int rage_limit = 0; | |
197 | int ragevnodes = 0; | |
198 | ||
199 | #define RAGE_LIMIT_MIN 100 | |
200 | #define RAGE_TIME_LIMIT 5 | |
201 | ||
1c79356b | 202 | struct mntlist mountlist; /* mounted filesystem list */ |
91447636 | 203 | static int nummounts = 0; |
1c79356b A |
204 | |
205 | #if DIAGNOSTIC | |
206 | #define VLISTCHECK(fun, vp, list) \ | |
207 | if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \ | |
208 | panic("%s: %s vnode not on %slist", (fun), (list), (list)); | |
1c79356b | 209 | #else |
210 | #define VLISTCHECK(fun, vp, list) | |
1c79356b | 211 | #endif /* DIAGNOSTIC */ |
212 | ||
213 | #define VLISTNONE(vp) \ | |
214 | do { \ | |
215 | (vp)->v_freelist.tqe_next = (struct vnode *)0; \ | |
216 | (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb; \ | |
217 | } while(0) | |
218 | ||
219 | #define VONLIST(vp) \ | |
220 | ((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb) | |
221 | ||
222 | /* remove a vnode from free vnode list */ | |
223 | #define VREMFREE(fun, vp) \ | |
224 | do { \ | |
225 | VLISTCHECK((fun), (vp), "free"); \ | |
226 | TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist); \ | |
227 | VLISTNONE((vp)); \ | |
228 | freevnodes--; \ | |
229 | } while(0) | |
230 | ||
2d21ac55 A |
231 | |
232 | ||
233 | /* remove a vnode from dead vnode list */ | |
234 | #define VREMDEAD(fun, vp) \ | |
1c79356b | 235 | do { \ |
2d21ac55 | 236 | VLISTCHECK((fun), (vp), "dead"); \ |
237 | TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist); \ | |
1c79356b | 238 | VLISTNONE((vp)); \ |
2d21ac55 | 239 | vp->v_listflag &= ~VLIST_DEAD; \ |
240 | deadvnodes--; \ | |
241 | } while(0) | |
242 | ||
243 | ||
244 | /* remove a vnode from rage vnode list */ | |
245 | #define VREMRAGE(fun, vp) \ | |
246 | do { \ | |
247 | if ( !(vp->v_listflag & VLIST_RAGE)) \ | |
248 | panic("VREMRAGE: vp not on rage list"); \ | |
249 | VLISTCHECK((fun), (vp), "rage"); \ | |
250 | TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist); \ | |
251 | VLISTNONE((vp)); \ | |
252 | vp->v_listflag &= ~VLIST_RAGE; \ | |
253 | ragevnodes--; \ | |
1c79356b | 254 | } while(0) |
255 | ||
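The list macros above rely on a sentinel value, 0xdeadb, parked in v_freelist.tqe_prev to mean "this vnode is on no free, dead, or rage list": VLISTNONE() installs the sentinel, VONLIST() tests for its absence, and the VREM*() macros restore it after unlinking. A hedged illustration follows; the helper name is hypothetical.

```c
static void
example_mark_unlisted(vnode_t vp)
{
	VLISTNONE(vp);          /* vp now claims membership in no list */

	if (VONLIST(vp))
		panic("example_mark_unlisted: sentinel not installed");
}
```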
1c79356b | 256 | |
257 | /* |
2d21ac55 | 258 | * vnodetarget hasn't been used in a long time, but |
259 | * it was exported for some reason... I'm leaving in | |
260 | * place for now... it should be deprecated out of the | |
261 | * exports and removed eventually. | |
1c79356b | 262 | */ |
263 | unsigned long vnodetarget; /* target for vnreclaim() */ | |
264 | #define VNODE_FREE_TARGET 20 /* Default value for vnodetarget */ | |
265 | ||
266 | /* | |
267 | * We need quite a few vnodes on the free list to sustain the | |
268 | * rapid stat() the compilation process does, and still benefit from the name | |
269 | * cache. Having too few vnodes on the free list causes serious disk | |
270 | * thrashing as we cycle through them. | |
271 | */ | |
2d21ac55 | 272 | #define VNODE_FREE_MIN CONFIG_VNODE_FREE_MIN /* freelist should have at least this many */ |
1c79356b A |
273 | |
274 | /* | |
275 | * Initialize the vnode management data structures. | |
276 | */ | |
0b4e3aa0 | 277 | __private_extern__ void |
91447636 | 278 | vntblinit(void) |
1c79356b | 279 | { |
1c79356b | 280 | TAILQ_INIT(&vnode_free_list); |
2d21ac55 | 281 | TAILQ_INIT(&vnode_rage_list); |
282 | TAILQ_INIT(&vnode_dead_list); | |
91447636 | 283 | TAILQ_INIT(&mountlist); |
1c79356b A |
284 | |
285 | if (!vnodetarget) | |
286 | vnodetarget = VNODE_FREE_TARGET; | |
287 | ||
2d21ac55 | 288 | microuptime(&rage_tv); |
289 | rage_limit = desiredvnodes / 100; | |
290 | ||
291 | if (rage_limit < RAGE_LIMIT_MIN) | |
292 | rage_limit = RAGE_LIMIT_MIN; | |
293 | ||
1c79356b | 294 | /* |
295 | * Scale the vm_object_cache to accommodate the vnodes |
296 | * we want to cache |
297 | */ |
298 | (void) adjust_vm_object_cache(0, desiredvnodes - VNODE_FREE_MIN); | |
299 | } | |
300 | ||
301 | /* Reset the VM Object Cache with the values passed in */ | |
0b4e3aa0 | 302 | __private_extern__ kern_return_t |
1c79356b | 303 | reset_vmobjectcache(unsigned int val1, unsigned int val2) |
304 | { | |
305 | vm_size_t oval = val1 - VNODE_FREE_MIN; | |
9bccf70c | 306 | vm_size_t nval; |
307 | ||
308 | if(val2 < VNODE_FREE_MIN) | |
309 | nval = 0; | |
310 | else | |
311 | nval = val2 - VNODE_FREE_MIN; | |
1c79356b A |
312 | |
313 | return(adjust_vm_object_cache(oval, nval)); | |
314 | } | |
315 | ||
91447636 | 316 | |
317 | /* the timeout is in 10 msecs */ |
1c79356b | 318 | int |
2d21ac55 | 319 | vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg) { |
91447636 | 320 | int error = 0; |
321 | struct timespec ts; | |
1c79356b | 322 | |
91447636 | 323 | KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0); |
324 | ||
325 | if (vp->v_numoutput > output_target) { | |
326 | ||
327 | slpflag &= ~PDROP; | |
328 | ||
329 | vnode_lock(vp); | |
330 | ||
331 | while ((vp->v_numoutput > output_target) && error == 0) { | |
332 | if (output_target) | |
333 | vp->v_flag |= VTHROTTLED; | |
334 | else | |
335 | vp->v_flag |= VBWAIT; | |
2d21ac55 | 336 | |
91447636 | 337 | ts.tv_sec = (slptimeout/100); |
338 | ts.tv_nsec = (slptimeout % 1000) * 10 * NSEC_PER_USEC * 1000 ; | |
339 | error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts); | |
340 | } | |
341 | vnode_unlock(vp); | |
1c79356b | 342 | } |
91447636 | 343 | KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0); |
344 | ||
345 | return error; | |
1c79356b | 346 | } |
347 | ||
91447636 | 348 | |
1c79356b | 349 | void |
91447636 | 350 | vnode_startwrite(vnode_t vp) { |
351 | ||
352 | OSAddAtomic(1, &vp->v_numoutput); | |
353 | } | |
354 | ||
355 | ||
356 | void | |
357 | vnode_writedone(vnode_t vp) | |
1c79356b | 358 | { |
91447636 | 359 | if (vp) { |
91447636 | 360 | OSAddAtomic(-1, &vp->v_numoutput); |
361 | ||
2d21ac55 | 362 | if (vp->v_numoutput <= 1) { |
363 | int need_wakeup = 0; | |
1c79356b | 364 | |
2d21ac55 | 365 | vnode_lock_spin(vp); |
91447636 | 366 | |
2d21ac55 | 367 | if (vp->v_numoutput < 0) |
368 | panic("vnode_writedone: numoutput < 0"); | |
369 | ||
370 | if ((vp->v_flag & VTHROTTLED) && (vp->v_numoutput <= 1)) { | |
371 | vp->v_flag &= ~VTHROTTLED; | |
372 | need_wakeup = 1; | |
373 | } | |
374 | if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) { | |
375 | vp->v_flag &= ~VBWAIT; | |
376 | need_wakeup = 1; | |
377 | } | |
378 | vnode_unlock(vp); | |
91447636 | 379 | |
2d21ac55 | 380 | if (need_wakeup) |
381 | wakeup((caddr_t)&vp->v_numoutput); | |
382 | } | |
91447636 | 383 | } |
1c79356b | 384 | } |
385 | ||
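vnode_waitforwrites(), vnode_startwrite(), and vnode_writedone() above form the per-vnode write-throttling protocol: each queued write bumps v_numoutput, each completion drops it and wakes throttled waiters, and the slptimeout argument to vnode_waitforwrites() is expressed in 10-millisecond ticks. A hedged sketch of how a caller might pair them; the example_* names are hypothetical.

```c
static void
example_issue_write(vnode_t vp, buf_t bp)
{
	vnode_startwrite(vp);        /* account for one in-flight write */
	example_submit_io(bp);       /* hypothetical async submission; its
	                              * completion path calls vnode_writedone(vp) */
}

static int
example_throttle_writes(vnode_t vp)
{
	/* block until no more than 16 writes remain; 100 ticks == 1 second */
	return vnode_waitforwrites(vp, 16, 0, 100, "example_throttle_writes");
}
```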
91447636 A |
386 | |
387 | ||
1c79356b | 388 | int |
91447636 | 389 | vnode_hasdirtyblks(vnode_t vp) |
1c79356b | 390 | { |
91447636 | 391 | struct cl_writebehind *wbp; |
1c79356b | 392 | |
91447636 | 393 | /* |
394 | * Not taking the buf_mtxp as there is little |
395 | * point in doing it. Even if the lock is taken the |
396 | * state can change right after that. If there |
397 | * needs to be synchronization, it must be driven |
398 | * by the caller |
399 | */ |
400 | if (vp->v_dirtyblkhd.lh_first) | |
401 | return (1); | |
402 | ||
403 | if (!UBCINFOEXISTS(vp)) | |
404 | return (0); | |
0b4e3aa0 | 405 | |
91447636 | 406 | wbp = vp->v_ubcinfo->cl_wbehind; |
407 | ||
408 | if (wbp && (wbp->cl_number || wbp->cl_scmap)) | |
409 | return (1); | |
0b4e3aa0 | 410 | |
1c79356b | 411 | return (0); |
412 | } | |
413 | ||
1c79356b | 414 | int |
91447636 | 415 | vnode_hascleanblks(vnode_t vp) |
1c79356b | 416 | { |
91447636 | 417 | /* |
418 | * Not taking the buf_mtxp as there is little |
419 | * point in doing it. Even if the lock is taken the |
420 | * state can change right after that. If there |
421 | * needs to be synchronization, it must be driven |
422 | * by the caller |
423 | */ |
424 | if (vp->v_cleanblkhd.lh_first) | |
425 | return (1); | |
426 | return (0); | |
427 | } | |
1c79356b | 428 | |
91447636 | 429 | void |
430 | vnode_iterate_setup(mount_t mp) | |
431 | { | |
432 | while (mp->mnt_lflag & MNT_LITER) { | |
433 | mp->mnt_lflag |= MNT_LITERWAIT; | |
2d21ac55 | 434 | msleep((caddr_t)mp, &mp->mnt_mlock, PVFS, "vnode_iterate_setup", NULL); |
1c79356b | 435 | } |
91447636 A |
436 | |
437 | mp->mnt_lflag |= MNT_LITER; | |
438 | ||
1c79356b A |
439 | } |
440 | ||
91447636 A |
441 | static int |
442 | vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags) | |
1c79356b | 443 | { |
91447636 | 444 | vnode_t vp; |
1c79356b | 445 | |
91447636 | 446 | TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) { |
2d21ac55 A |
447 | /* disable preflight only for udf, a hack to be removed after 4073176 is fixed */ |
448 | if (vp->v_tag == VT_UDF) | |
449 | return 0; | |
450 | if (vp->v_type == VDIR) | |
451 | continue; | |
91447636 A |
452 | if (vp == skipvp) |
453 | continue; | |
454 | if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || | |
455 | (vp->v_flag & VNOFLUSH))) | |
456 | continue; | |
457 | if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) | |
458 | continue; | |
459 | if ((flags & WRITECLOSE) && | |
460 | (vp->v_writecount == 0 || vp->v_type != VREG)) | |
461 | continue; | |
462 | /* Look for busy vnode */ | |
463 | if (((vp->v_usecount != 0) && | |
464 | ((vp->v_usecount - vp->v_kusecount) != 0))) | |
465 | return(1); | |
1c79356b | 466 | } |
91447636 A |
467 | |
468 | return(0); | |
1c79356b A |
469 | } |
470 | ||
91447636 A |
471 | /* |
472 | * This routine prepares iteration by moving all the vnodes to worker queue | |
473 | * called with mount lock held | |
1c79356b | 474 | */ |
91447636 A |
475 | int |
476 | vnode_iterate_prepare(mount_t mp) | |
1c79356b | 477 | { |
91447636 | 478 | vnode_t vp; |
1c79356b | 479 | |
91447636 A |
480 | if (TAILQ_EMPTY(&mp->mnt_vnodelist)) { |
481 | /* nothing to do */ | |
482 | return (0); | |
483 | } | |
1c79356b | 484 | |
91447636 A |
485 | vp = TAILQ_FIRST(&mp->mnt_vnodelist); |
486 | vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first); | |
487 | mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first; | |
488 | mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last; | |
489 | ||
490 | TAILQ_INIT(&mp->mnt_vnodelist); | |
491 | if (mp->mnt_newvnodes.tqh_first != NULL) | |
492 | panic("vnode_iterate_prepare: newvnode when entering vnode"); | |
493 | TAILQ_INIT(&mp->mnt_newvnodes); | |
494 | ||
495 | return (1); | |
1c79356b A |
496 | } |
497 | ||
91447636 A |
498 | |
499 | /* called with mount lock held */ | |
500 | int | |
501 | vnode_iterate_reloadq(mount_t mp) | |
1c79356b | 502 | { |
91447636 A |
503 | int moved = 0; |
504 | ||
505 | /* add the remaining entries in workerq to the end of mount vnode list */ | |
506 | if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) { | |
507 | struct vnode * mvp; | |
508 | mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst); | |
509 | ||
510 | /* Join the workerqueue entries to the mount vnode list */ |
511 | if (mvp) | |
512 | mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first; | |
513 | else | |
514 | mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first; | |
515 | mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last; | |
516 | mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last; | |
517 | TAILQ_INIT(&mp->mnt_workerqueue); | |
518 | } | |
519 | ||
520 | /* add the newvnodes to the head of mount vnode list */ | |
521 | if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) { | |
522 | struct vnode * nlvp; | |
523 | nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst); | |
524 | ||
525 | mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first; | |
526 | nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first; | |
527 | if(mp->mnt_vnodelist.tqh_first) | |
528 | mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next; | |
529 | else | |
530 | mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last; | |
531 | mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first; | |
532 | TAILQ_INIT(&mp->mnt_newvnodes); | |
533 | moved = 1; | |
534 | } | |
1c79356b | 535 | |
91447636 | 536 | return(moved); |
1c79356b A |
537 | } |
538 | ||
1c79356b | 539 | |
91447636 A |
540 | void |
541 | vnode_iterate_clear(mount_t mp) | |
542 | { | |
543 | mp->mnt_lflag &= ~MNT_LITER; | |
544 | if (mp->mnt_lflag & MNT_LITERWAIT) { | |
545 | mp->mnt_lflag &= ~MNT_LITERWAIT; | |
546 | wakeup(mp); | |
547 | } | |
548 | } | |
1c79356b | 549 | |
1c79356b | 550 | |
1c79356b | 551 | int |
2d21ac55 A |
552 | vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *), |
553 | void *arg) | |
1c79356b | 554 | { |
1c79356b | 555 | struct vnode *vp; |
91447636 A |
556 | int vid, retval; |
557 | int ret = 0; | |
1c79356b | 558 | |
91447636 | 559 | mount_lock(mp); |
1c79356b | 560 | |
91447636 | 561 | vnode_iterate_setup(mp); |
1c79356b | 562 | |
91447636 A |
563 | /* if it returns 0 then there is nothing to do */ |
564 | retval = vnode_iterate_prepare(mp); | |
1c79356b | 565 | |
91447636 A |
566 | if (retval == 0) { |
567 | vnode_iterate_clear(mp); | |
568 | mount_unlock(mp); | |
569 | return(ret); | |
1c79356b | 570 | } |
91447636 A |
571 | |
572 | /* iterate over all the vnodes */ | |
573 | while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) { | |
574 | vp = TAILQ_FIRST(&mp->mnt_workerqueue); | |
575 | TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes); | |
576 | TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes); | |
577 | vid = vp->v_id; | |
578 | if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) { | |
579 | continue; | |
580 | } | |
581 | mount_unlock(mp); | |
1c79356b | 582 | |
91447636 A |
583 | if ( vget_internal(vp, vid, (flags | VNODE_NODEAD| VNODE_WITHID | VNODE_NOSUSPEND))) { |
584 | mount_lock(mp); | |
585 | continue; | |
586 | } | |
587 | if (flags & VNODE_RELOAD) { | |
588 | /* | |
589 | * we're reloading the filesystem | |
590 | * cast out any inactive vnodes... | |
591 | */ | |
592 | if (vnode_reload(vp)) { | |
593 | /* vnode will be recycled on the refcount drop */ | |
594 | vnode_put(vp); | |
595 | mount_lock(mp); | |
596 | continue; | |
597 | } | |
598 | } | |
55e303ae | 599 | |
91447636 A |
600 | retval = callout(vp, arg); |
601 | ||
602 | switch (retval) { | |
603 | case VNODE_RETURNED: | |
604 | case VNODE_RETURNED_DONE: | |
605 | vnode_put(vp); | |
606 | if (retval == VNODE_RETURNED_DONE) { | |
607 | mount_lock(mp); | |
608 | ret = 0; | |
609 | goto out; | |
610 | } | |
611 | break; | |
612 | ||
613 | case VNODE_CLAIMED_DONE: | |
614 | mount_lock(mp); | |
615 | ret = 0; | |
616 | goto out; | |
617 | case VNODE_CLAIMED: | |
618 | default: | |
619 | break; | |
620 | } | |
621 | mount_lock(mp); | |
55e303ae | 622 | } |
1c79356b | 623 | |
91447636 A |
624 | out: |
625 | (void)vnode_iterate_reloadq(mp); | |
626 | vnode_iterate_clear(mp); | |
627 | mount_unlock(mp); | |
628 | return (ret); | |
629 | } | |
55e303ae | 630 | |
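vnode_iterate() above hands each vnode to the callout with an iocount already held. Returning VNODE_RETURNED gives that iocount back to the iterator, VNODE_CLAIMED tells the iterator not to drop it (the callout has taken responsibility for it), and the *_DONE variants additionally stop the walk. A hedged sketch of a minimal callout; the example_* names are hypothetical.

```c
static int
example_count_regular(struct vnode *vp, void *arg)
{
	int *count = arg;

	if (vp->v_type == VREG)
		(*count)++;

	return (VNODE_RETURNED);    /* let vnode_iterate() drop the iocount */
}

/* usage: err = vnode_iterate(mp, 0, example_count_regular, &count); */
```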
91447636 A |
631 | void |
632 | mount_lock_renames(mount_t mp) | |
633 | { | |
634 | lck_mtx_lock(&mp->mnt_renamelock); | |
1c79356b A |
635 | } |
636 | ||
1c79356b | 637 | void |
91447636 | 638 | mount_unlock_renames(mount_t mp) |
1c79356b | 639 | { |
91447636 A |
640 | lck_mtx_unlock(&mp->mnt_renamelock); |
641 | } | |
1c79356b | 642 | |
91447636 A |
643 | void |
644 | mount_lock(mount_t mp) | |
645 | { | |
646 | lck_mtx_lock(&mp->mnt_mlock); | |
1c79356b A |
647 | } |
648 | ||
91447636 A |
649 | void |
650 | mount_unlock(mount_t mp) | |
fa4905b1 | 651 | { |
91447636 | 652 | lck_mtx_unlock(&mp->mnt_mlock); |
fa4905b1 A |
653 | } |
654 | ||
91447636 | 655 | |
1c79356b | 656 | void |
91447636 | 657 | mount_ref(mount_t mp, int locked) |
1c79356b | 658 | { |
91447636 A |
659 | if ( !locked) |
660 | mount_lock(mp); | |
661 | ||
662 | mp->mnt_count++; | |
663 | ||
664 | if ( !locked) | |
665 | mount_unlock(mp); | |
1c79356b A |
666 | } |
667 | ||
91447636 A |
668 | |
669 | void | |
670 | mount_drop(mount_t mp, int locked) | |
671 | { | |
672 | if ( !locked) | |
673 | mount_lock(mp); | |
674 | ||
675 | mp->mnt_count--; | |
676 | ||
677 | if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN)) | |
678 | wakeup(&mp->mnt_lflag); | |
679 | ||
680 | if ( !locked) | |
681 | mount_unlock(mp); | |
682 | } | |
683 | ||
684 | ||
1c79356b | 685 | int |
91447636 | 686 | mount_iterref(mount_t mp, int locked) |
1c79356b | 687 | { |
91447636 | 688 | int retval = 0; |
1c79356b | 689 | |
91447636 A |
690 | if (!locked) |
691 | mount_list_lock(); | |
692 | if (mp->mnt_iterref < 0) { | |
693 | retval = 1; | |
694 | } else { | |
695 | mp->mnt_iterref++; | |
1c79356b | 696 | } |
91447636 A |
697 | if (!locked) |
698 | mount_list_unlock(); | |
699 | return(retval); | |
700 | } | |
1c79356b | 701 | |
91447636 A |
702 | int |
703 | mount_isdrained(mount_t mp, int locked) | |
704 | { | |
705 | int retval; | |
1c79356b | 706 | |
91447636 A |
707 | if (!locked) |
708 | mount_list_lock(); | |
709 | if (mp->mnt_iterref < 0) | |
710 | retval = 1; | |
711 | else | |
712 | retval = 0; | |
713 | if (!locked) | |
714 | mount_list_unlock(); | |
715 | return(retval); | |
716 | } | |
717 | ||
718 | void | |
719 | mount_iterdrop(mount_t mp) | |
720 | { | |
721 | mount_list_lock(); | |
722 | mp->mnt_iterref--; | |
723 | wakeup(&mp->mnt_iterref); | |
724 | mount_list_unlock(); | |
725 | } | |
726 | ||
727 | void | |
728 | mount_iterdrain(mount_t mp) | |
729 | { | |
730 | mount_list_lock(); | |
731 | while (mp->mnt_iterref) | |
2d21ac55 | 732 | msleep((caddr_t)&mp->mnt_iterref, mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL); |
91447636 A |
733 | /* mount iterations drained */ |
734 | mp->mnt_iterref = -1; | |
735 | mount_list_unlock(); | |
736 | } | |
737 | void | |
738 | mount_iterreset(mount_t mp) | |
739 | { | |
740 | mount_list_lock(); | |
741 | if (mp->mnt_iterref == -1) | |
742 | mp->mnt_iterref = 0; | |
743 | mount_list_unlock(); | |
744 | } | |
745 | ||
746 | /* always called with mount lock held */ | |
747 | int | |
748 | mount_refdrain(mount_t mp) | |
749 | { | |
750 | if (mp->mnt_lflag & MNT_LDRAIN) | |
751 | panic("already in drain"); | |
752 | mp->mnt_lflag |= MNT_LDRAIN; | |
753 | ||
754 | while (mp->mnt_count) | |
2d21ac55 | 755 | msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL); |
91447636 A |
756 | |
757 | if (mp->mnt_vnodelist.tqh_first != NULL) | |
758 | panic("mount_refdrain: dangling vnode"); | |
759 | ||
760 | mp->mnt_lflag &= ~MNT_LDRAIN; | |
761 | ||
762 | return(0); | |
763 | } | |
764 | ||
765 | ||
766 | /* | |
767 | * Mark a mount point as busy. Used to synchronize access and to delay | |
768 | * unmounting. | |
769 | */ | |
770 | int | |
771 | vfs_busy(mount_t mp, int flags) | |
772 | { | |
773 | ||
774 | restart: | |
775 | if (mp->mnt_lflag & MNT_LDEAD) | |
776 | return(ENOENT); | |
777 | ||
778 | if (mp->mnt_lflag & MNT_LUNMOUNT) { | |
779 | if (flags & LK_NOWAIT) | |
780 | return (ENOENT); | |
781 | ||
782 | mount_lock(mp); | |
783 | ||
784 | if (mp->mnt_lflag & MNT_LDEAD) { | |
785 | mount_unlock(mp); | |
786 | return(ENOENT); | |
787 | } | |
788 | if (mp->mnt_lflag & MNT_LUNMOUNT) { | |
789 | mp->mnt_lflag |= MNT_LWAIT; | |
1c79356b | 790 | /* |
91447636 A |
791 | * Since all busy locks are shared except the exclusive |
792 | * lock granted when unmounting, the only place that a | |
793 | * wakeup needs to be done is at the release of the | |
794 | * exclusive lock at the end of dounmount. | |
1c79356b | 795 | */ |
2d21ac55 | 796 | msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL); |
91447636 | 797 | return (ENOENT); |
1c79356b | 798 | } |
91447636 A |
799 | mount_unlock(mp); |
800 | } | |
801 | ||
802 | lck_rw_lock_shared(&mp->mnt_rwlock); | |
803 | ||
804 | /* | |
805 | * until we are granted the rwlock, it's possible for the mount point to | |
806 | * change state, so reevaluate before granting the vfs_busy | |
807 | */ | |
808 | if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) { | |
809 | lck_rw_done(&mp->mnt_rwlock); | |
810 | goto restart; | |
1c79356b | 811 | } |
1c79356b A |
812 | return (0); |
813 | } | |
814 | ||
91447636 A |
815 | /* |
816 | * Free a busy filesystem. | |
817 | */ | |
818 | ||
819 | void | |
820 | vfs_unbusy(mount_t mp) | |
821 | { | |
822 | lck_rw_done(&mp->mnt_rwlock); | |
823 | } | |
824 | ||
825 | ||
826 | ||
827 | static void | |
828 | vfs_rootmountfailed(mount_t mp) { | |
829 | ||
830 | mount_list_lock(); | |
831 | mp->mnt_vtable->vfc_refcount--; | |
832 | mount_list_unlock(); | |
833 | ||
834 | vfs_unbusy(mp); | |
835 | ||
836 | mount_lock_destroy(mp); | |
837 | ||
2d21ac55 A |
838 | #if CONFIG_MACF |
839 | mac_mount_label_destroy(mp); | |
840 | #endif | |
841 | ||
91447636 A |
842 | FREE_ZONE(mp, sizeof(struct mount), M_MOUNT); |
843 | } | |
844 | ||
845 | /* | |
846 | * Lookup a filesystem type, and if found allocate and initialize | |
847 | * a mount structure for it. | |
848 | * | |
849 | * Devname is usually updated by mount(8) after booting. | |
850 | */ | |
851 | static mount_t | |
852 | vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname) | |
853 | { | |
854 | mount_t mp; | |
855 | ||
856 | mp = _MALLOC_ZONE((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK); | |
857 | bzero((char *)mp, (u_long)sizeof(struct mount)); | |
858 | ||
859 | /* Initialize the default IO constraints */ | |
860 | mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS; | |
861 | mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32; | |
862 | mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt; | |
863 | mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt; | |
864 | mp->mnt_devblocksize = DEV_BSIZE; | |
2d21ac55 A |
865 | mp->mnt_alignmentmask = PAGE_MASK; |
866 | mp->mnt_ioflags = 0; | |
867 | mp->mnt_realrootvp = NULLVP; | |
868 | mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL; | |
91447636 A |
869 | |
870 | mount_lock_init(mp); | |
871 | (void)vfs_busy(mp, LK_NOWAIT); | |
872 | ||
873 | TAILQ_INIT(&mp->mnt_vnodelist); | |
874 | TAILQ_INIT(&mp->mnt_workerqueue); | |
875 | TAILQ_INIT(&mp->mnt_newvnodes); | |
876 | ||
877 | mp->mnt_vtable = vfsp; | |
878 | mp->mnt_op = vfsp->vfc_vfsops; | |
879 | mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS; | |
880 | mp->mnt_vnodecovered = NULLVP; | |
881 | //mp->mnt_stat.f_type = vfsp->vfc_typenum; | |
882 | mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; | |
883 | ||
884 | mount_list_lock(); | |
885 | vfsp->vfc_refcount++; | |
886 | mount_list_unlock(); | |
887 | ||
888 | strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN); | |
889 | mp->mnt_vfsstat.f_mntonname[0] = '/'; | |
2d21ac55 A |
890 | /* XXX const poisoning layering violation */ |
891 | (void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL); | |
91447636 | 892 | |
2d21ac55 A |
893 | #if CONFIG_MACF |
894 | mac_mount_label_init(mp); | |
895 | mac_mount_label_associate(vfs_context_kernel(), mp); | |
896 | #endif | |
91447636 A |
897 | return (mp); |
898 | } | |
899 | ||
900 | errno_t | |
901 | vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp) | |
902 | { | |
903 | struct vfstable *vfsp; | |
904 | ||
905 | for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) | |
2d21ac55 A |
906 | if (!strncmp(vfsp->vfc_name, fstypename, |
907 | sizeof(vfsp->vfc_name))) | |
91447636 A |
908 | break; |
909 | if (vfsp == NULL) | |
910 | return (ENODEV); | |
911 | ||
912 | *mpp = vfs_rootmountalloc_internal(vfsp, devname); | |
913 | ||
914 | if (*mpp) | |
915 | return (0); | |
916 | ||
917 | return (ENOMEM); | |
918 | } | |
919 | ||
920 | ||
921 | /* | |
922 | * Find an appropriate filesystem to use for the root. If a filesystem | |
923 | * has not been preselected, walk through the list of known filesystems | |
924 | * trying those that have mountroot routines, and try them until one | |
925 | * works or we have tried them all. | |
926 | */ | |
927 | extern int (*mountroot)(void); | |
928 | ||
929 | int | |
2d21ac55 | 930 | vfs_mountroot(void) |
91447636 | 931 | { |
2d21ac55 A |
932 | #if CONFIG_MACF |
933 | struct vnode *vp; | |
934 | #endif | |
91447636 | 935 | struct vfstable *vfsp; |
2d21ac55 A |
936 | vfs_context_t ctx = vfs_context_kernel(); |
937 | struct vfs_attr vfsattr; | |
91447636 A |
938 | int error; |
939 | mount_t mp; | |
2d21ac55 | 940 | vnode_t bdevvp_rootvp; |
91447636 A |
941 | |
942 | if (mountroot != NULL) { | |
2d21ac55 | 943 | /* |
91447636 A |
944 | * used for netboot which follows a different set of rules |
945 | */ | |
2d21ac55 | 946 | error = (*mountroot)(); |
91447636 A |
947 | return (error); |
948 | } | |
949 | if ((error = bdevvp(rootdev, &rootvp))) { | |
2d21ac55 | 950 | printf("vfs_mountroot: can't setup bdevvp\n"); |
91447636 A |
951 | return (error); |
952 | } | |
2d21ac55 A |
953 | /* |
954 | * 4951998 - code we call in vfc_mountroot may replace rootvp | |
955 | * so keep a local copy for some house keeping. | |
956 | */ | |
957 | bdevvp_rootvp = rootvp; | |
91447636 A |
958 | |
959 | for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { | |
960 | if (vfsp->vfc_mountroot == NULL) | |
961 | continue; | |
962 | ||
963 | mp = vfs_rootmountalloc_internal(vfsp, "root_device"); | |
964 | mp->mnt_devvp = rootvp; | |
965 | ||
2d21ac55 A |
966 | if ((error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx)) == 0) { |
967 | if ( bdevvp_rootvp != rootvp ) { | |
968 | /* | |
969 | * rootvp changed... | |
970 | * bump the iocount and fix up mnt_devvp for the | |
971 | * new rootvp (it will already have a usecount taken)... | |
972 | * drop the iocount and the usecount on the original |
973 | * since we are no longer going to use it... | |
974 | */ | |
975 | vnode_getwithref(rootvp); | |
976 | mp->mnt_devvp = rootvp; | |
977 | ||
978 | vnode_rele(bdevvp_rootvp); | |
979 | vnode_put(bdevvp_rootvp); | |
980 | } | |
981 | mp->mnt_devvp->v_specflags |= SI_MOUNTEDON; | |
91447636 | 982 | |
2d21ac55 | 983 | vfs_unbusy(mp); |
91447636 A |
984 | |
985 | mount_list_add(mp); | |
986 | ||
987 | /* | |
988 | * cache the IO attributes for the underlying physical media... | |
989 | * an error return indicates the underlying driver doesn't | |
990 | * support all the queries necessary... however, reasonable | |
991 | * defaults will have been set, so no reason to bail or care | |
992 | */ | |
993 | vfs_init_io_attributes(rootvp, mp); | |
2d21ac55 A |
994 | |
995 | /* | |
996 | * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS. | |
997 | */ | |
998 | if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) { | |
999 | mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS; | |
1000 | } | |
1001 | if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) { | |
1002 | mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT; | |
1003 | } | |
1004 | ||
1005 | /* | |
1006 | * Probe root file system for additional features. | |
1007 | */ | |
1008 | (void)VFS_START(mp, 0, ctx); | |
1009 | ||
1010 | VFSATTR_INIT(&vfsattr); | |
1011 | VFSATTR_WANTED(&vfsattr, f_capabilities); | |
1012 | if (vfs_getattr(mp, &vfsattr, ctx) == 0 && | |
1013 | VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) { | |
1014 | if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) && | |
1015 | (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) { | |
1016 | mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS; | |
1017 | } | |
1018 | #if NAMEDSTREAMS | |
1019 | if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) && | |
1020 | (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) { | |
1021 | mp->mnt_kern_flag |= MNTK_NAMED_STREAMS; | |
1022 | } | |
1023 | #endif | |
1024 | if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) && | |
1025 | (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) { | |
1026 | mp->mnt_kern_flag |= MNTK_PATH_FROM_ID; | |
1027 | } | |
1028 | } | |
1029 | ||
91447636 A |
1030 | /* |
1031 | * get rid of iocount reference returned | |
2d21ac55 A |
1032 | * by bdevvp (or picked up by us on the substituted |
1033 | * rootvp)... it (or we) will have also taken | |
91447636 A |
1034 | * a usecount reference which we want to keep |
1035 | */ | |
1036 | vnode_put(rootvp); | |
1037 | ||
2d21ac55 A |
1038 | #if CONFIG_MACF |
1039 | if ((vfs_flags(mp) & MNT_MULTILABEL) == 0) | |
1040 | return (0); | |
1041 | ||
1042 | error = VFS_ROOT(mp, &vp, ctx); | |
1043 | if (error) { | |
1044 | printf("%s() VFS_ROOT() returned %d\n", | |
1045 | __func__, error); | |
1046 | dounmount(mp, MNT_FORCE, 0, ctx); | |
1047 | goto fail; | |
1048 | } | |
1049 | ||
1050 | /* VFS_ROOT provides reference so flags = 0 */ | |
1051 | error = vnode_label(mp, NULL, vp, NULL, 0, ctx); | |
1052 | if (error) { | |
1053 | printf("%s() vnode_label() returned %d\n", | |
1054 | __func__, error); | |
1055 | dounmount(mp, MNT_FORCE, 0, ctx); | |
1056 | goto fail; | |
1057 | } | |
1058 | #endif | |
91447636 A |
1059 | return (0); |
1060 | } | |
2d21ac55 A |
1061 | #if CONFIG_MACF |
1062 | fail: | |
1063 | #endif | |
91447636 A |
1064 | vfs_rootmountfailed(mp); |
1065 | ||
1066 | if (error != EINVAL) | |
1067 | printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error); | |
1068 | } | |
1069 | return (ENODEV); | |
1070 | } | |
1071 | ||
1072 | /* | |
1073 | * Lookup a mount point by filesystem identifier. | |
1074 | */ | |
1075 | extern mount_t vfs_getvfs_locked(fsid_t *); | |
1076 | ||
1077 | struct mount * | |
2d21ac55 | 1078 | vfs_getvfs(fsid_t *fsid) |
91447636 A |
1079 | { |
1080 | return (mount_list_lookupby_fsid(fsid, 0, 0)); | |
1081 | } | |
1082 | ||
1083 | struct mount * | |
2d21ac55 | 1084 | vfs_getvfs_locked(fsid_t *fsid) |
91447636 A |
1085 | { |
1086 | return(mount_list_lookupby_fsid(fsid, 1, 0)); | |
1087 | } | |
1088 | ||
1089 | struct mount * | |
2d21ac55 | 1090 | vfs_getvfs_by_mntonname(char *path) |
91447636 A |
1091 | { |
1092 | mount_t retmp = (mount_t)0; | |
1093 | mount_t mp; | |
1094 | ||
1095 | mount_list_lock(); | |
1096 | TAILQ_FOREACH(mp, &mountlist, mnt_list) { | |
2d21ac55 A |
1097 | if (!strncmp(mp->mnt_vfsstat.f_mntonname, path, |
1098 | sizeof(mp->mnt_vfsstat.f_mntonname))) { | |
91447636 A |
1099 | retmp = mp; |
1100 | goto out; | |
1101 | } | |
1102 | } | |
1103 | out: | |
1104 | mount_list_unlock(); | |
1105 | return (retmp); | |
1106 | } | |
1107 | ||
1108 | /* generation number for creation of new fsids */ | |
1109 | u_short mntid_gen = 0; | |
1110 | /* | |
1111 | * Get a new unique fsid | |
1112 | */ | |
1113 | void | |
2d21ac55 | 1114 | vfs_getnewfsid(struct mount *mp) |
91447636 A |
1115 | { |
1116 | ||
1117 | fsid_t tfsid; | |
1118 | int mtype; | |
1119 | mount_t nmp; | |
1120 | ||
1121 | mount_list_lock(); | |
1122 | ||
1123 | /* generate a new fsid */ | |
1124 | mtype = mp->mnt_vtable->vfc_typenum; | |
1125 | if (++mntid_gen == 0) | |
1126 | mntid_gen++; | |
1127 | tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen); | |
1128 | tfsid.val[1] = mtype; | |
1129 | ||
1130 | TAILQ_FOREACH(nmp, &mountlist, mnt_list) { | |
1131 | while (vfs_getvfs_locked(&tfsid)) { | |
1132 | if (++mntid_gen == 0) | |
1133 | mntid_gen++; | |
1134 | tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen); | |
1135 | } | |
1136 | } | |
1137 | mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0]; | |
1138 | mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1]; | |
1139 | mount_list_unlock(); | |
1140 | } | |
1141 | ||
1142 | /* | |
1143 | * Routines having to do with the management of the vnode table. | |
1144 | */ | |
1145 | extern int (**dead_vnodeop_p)(void *); | |
2d21ac55 | 1146 | long numvnodes, freevnodes, deadvnodes; |
91447636 A |
1147 | |
1148 | ||
1149 | /* | |
1150 | * Move a vnode from one mount queue to another. | |
1151 | */ | |
1152 | static void | |
1153 | insmntque(vnode_t vp, mount_t mp) | |
1154 | { | |
1155 | mount_t lmp; | |
1156 | /* | |
1157 | * Delete from old mount point vnode list, if on one. | |
1158 | */ | |
3a60a9f5 | 1159 | if ( (lmp = vp->v_mount) != NULL && lmp != dead_mountp) { |
91447636 A |
1160 | if ((vp->v_lflag & VNAMED_MOUNT) == 0) |
1161 | panic("insmntque: vp not in mount vnode list"); | |
1162 | vp->v_lflag &= ~VNAMED_MOUNT; | |
1163 | ||
1164 | mount_lock(lmp); | |
1165 | ||
1166 | mount_drop(lmp, 1); | |
1167 | ||
1168 | if (vp->v_mntvnodes.tqe_next == NULL) { | |
1169 | if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp) | |
1170 | TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes); | |
1171 | else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp) | |
1172 | TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes); | |
1173 | else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp) | |
1174 | TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes); | |
1175 | } else { | |
1176 | vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev; | |
1177 | *vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next; | |
1178 | } | |
2d21ac55 A |
1179 | vp->v_mntvnodes.tqe_next = NULL; |
1180 | vp->v_mntvnodes.tqe_prev = NULL; | |
91447636 A |
1181 | mount_unlock(lmp); |
1182 | return; | |
1183 | } | |
1184 | ||
1185 | /* | |
1186 | * Insert into list of vnodes for the new mount point, if available. | |
1187 | */ | |
1188 | if ((vp->v_mount = mp) != NULL) { | |
1189 | mount_lock(mp); | |
1190 | if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0)) | |
1191 | panic("vp already in mount list"); | |
1192 | if (mp->mnt_lflag & MNT_LITER) | |
1193 | TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes); | |
1194 | else | |
1195 | TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes); | |
1196 | if (vp->v_lflag & VNAMED_MOUNT) | |
1197 | panic("insmntque: vp already in mount vnode list"); | |
1198 | if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) | |
1199 | panic("insmntque: vp on the free list\n"); | |
1200 | vp->v_lflag |= VNAMED_MOUNT; | |
1201 | mount_ref(mp, 1); | |
1202 | mount_unlock(mp); | |
1203 | } | |
1204 | } | |
1205 | ||
1206 | ||
1c79356b A |
1207 | /* |
1208 | * Create a vnode for a block device. | |
1209 | * Used for root filesystem, argdev, and swap areas. | |
1210 | * Also used for memory file system special devices. | |
1211 | */ | |
1212 | int | |
91447636 | 1213 | bdevvp(dev_t dev, vnode_t *vpp) |
1c79356b | 1214 | { |
91447636 A |
1215 | vnode_t nvp; |
1216 | int error; | |
1217 | struct vnode_fsparam vfsp; | |
1218 | struct vfs_context context; | |
1c79356b A |
1219 | |
1220 | if (dev == NODEV) { | |
1221 | *vpp = NULLVP; | |
1222 | return (ENODEV); | |
1223 | } | |
91447636 | 1224 | |
2d21ac55 | 1225 | context.vc_thread = current_thread(); |
91447636 A |
1226 | context.vc_ucred = FSCRED; |
1227 | ||
1228 | vfsp.vnfs_mp = (struct mount *)0; | |
1229 | vfsp.vnfs_vtype = VBLK; | |
1230 | vfsp.vnfs_str = "bdevvp"; | |
2d21ac55 A |
1231 | vfsp.vnfs_dvp = NULL; |
1232 | vfsp.vnfs_fsnode = NULL; | |
1233 | vfsp.vnfs_cnp = NULL; | |
91447636 A |
1234 | vfsp.vnfs_vops = spec_vnodeop_p; |
1235 | vfsp.vnfs_rdev = dev; | |
1236 | vfsp.vnfs_filesize = 0; | |
1237 | ||
1238 | vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE; | |
1239 | ||
1240 | vfsp.vnfs_marksystem = 0; | |
1241 | vfsp.vnfs_markroot = 0; | |
1242 | ||
1243 | if ( (error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp)) ) { | |
1c79356b A |
1244 | *vpp = NULLVP; |
1245 | return (error); | |
1246 | } | |
2d21ac55 A |
1247 | vnode_lock_spin(nvp); |
1248 | nvp->v_flag |= VBDEVVP; | |
1249 | nvp->v_tag = VT_NON; /* set this to VT_NON so during aliasing it can be replaced */ | |
1250 | vnode_unlock(nvp); | |
91447636 A |
1251 | if ( (error = vnode_ref(nvp)) ) { |
1252 | panic("bdevvp failed: vnode_ref"); | |
1253 | return (error); | |
1c79356b | 1254 | } |
91447636 A |
1255 | if ( (error = VNOP_FSYNC(nvp, MNT_WAIT, &context)) ) { |
1256 | panic("bdevvp failed: fsync"); | |
1257 | return (error); | |
1258 | } | |
1259 | if ( (error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0)) ) { | |
1260 | panic("bdevvp failed: invalidateblks"); | |
1261 | return (error); | |
1262 | } | |
2d21ac55 A |
1263 | |
1264 | #if CONFIG_MACF | |
1265 | /* | |
1266 | * XXXMAC: We can't put a MAC check here, the system will | |
1267 | * panic without this vnode. | |
1268 | */ | |
1269 | #endif /* MAC */ | |
1270 | ||
91447636 A |
1271 | if ( (error = VNOP_OPEN(nvp, FREAD, &context)) ) { |
1272 | panic("bdevvp failed: open"); | |
1273 | return (error); | |
1274 | } | |
1275 | *vpp = nvp; | |
1276 | ||
1c79356b A |
1277 | return (0); |
1278 | } | |
1279 | ||
1280 | /* | |
1281 | * Check to see if the new vnode represents a special device | |
1282 | * for which we already have a vnode (either because of | |
1283 | * bdevvp() or because of a different vnode representing | |
1284 | * the same block device). If such an alias exists, deallocate | |
1285 | * the existing contents and return the aliased vnode. The | |
1286 | * caller is responsible for filling it with its new contents. | |
1287 | */ | |
91447636 | 1288 | static vnode_t |
2d21ac55 | 1289 | checkalias(struct vnode *nvp, dev_t nvp_rdev) |
1c79356b | 1290 | { |
1c79356b A |
1291 | struct vnode *vp; |
1292 | struct vnode **vpp; | |
91447636 | 1293 | int vid = 0; |
1c79356b | 1294 | |
1c79356b A |
1295 | vpp = &speclisth[SPECHASH(nvp_rdev)]; |
1296 | loop: | |
91447636 A |
1297 | SPECHASH_LOCK(); |
1298 | ||
1c79356b | 1299 | for (vp = *vpp; vp; vp = vp->v_specnext) { |
91447636 A |
1300 | if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) { |
1301 | vid = vp->v_id; | |
1302 | break; | |
1303 | } | |
1304 | } | |
1305 | SPECHASH_UNLOCK(); | |
1306 | ||
1307 | if (vp) { | |
1308 | if (vnode_getwithvid(vp,vid)) { | |
1309 | goto loop; | |
1310 | } | |
1311 | /* | |
1312 | * Termination state is checked in vnode_getwithvid | |
1313 | */ | |
1314 | vnode_lock(vp); | |
1315 | ||
1c79356b A |
1316 | /* |
1317 | * Alias, but not in use, so flush it out. | |
1318 | */ | |
91447636 | 1319 | if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) { |
cf7d32b8 A |
1320 | vnode_reclaim_internal(vp, 1, 1, 0); |
1321 | vnode_put_locked(vp); | |
91447636 | 1322 | vnode_unlock(vp); |
1c79356b A |
1323 | goto loop; |
1324 | } | |
1c79356b A |
1325 | } |
1326 | if (vp == NULL || vp->v_tag != VT_NON) { | |
2d21ac55 | 1327 | retnullvp: |
91447636 A |
1328 | MALLOC_ZONE(nvp->v_specinfo, struct specinfo *, sizeof(struct specinfo), |
1329 | M_SPECINFO, M_WAITOK); | |
1c79356b A |
1330 | bzero(nvp->v_specinfo, sizeof(struct specinfo)); |
1331 | nvp->v_rdev = nvp_rdev; | |
91447636 A |
1332 | nvp->v_specflags = 0; |
1333 | nvp->v_speclastr = -1; | |
1334 | ||
1335 | SPECHASH_LOCK(); | |
1c79356b A |
1336 | nvp->v_hashchain = vpp; |
1337 | nvp->v_specnext = *vpp; | |
1c79356b | 1338 | *vpp = nvp; |
91447636 A |
1339 | SPECHASH_UNLOCK(); |
1340 | ||
1c79356b A |
1341 | if (vp != NULLVP) { |
1342 | nvp->v_flag |= VALIASED; | |
1343 | vp->v_flag |= VALIASED; | |
cf7d32b8 | 1344 | vnode_put_locked(vp); |
91447636 | 1345 | vnode_unlock(vp); |
1c79356b | 1346 | } |
1c79356b A |
1347 | return (NULLVP); |
1348 | } | |
2d21ac55 A |
1349 | if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0) |
1350 | return(vp); | |
1351 | else { | |
1352 | panic("checkalias with VT_NON vp that shouldn't: %x", (unsigned int)vp); | |
1353 | goto retnullvp; | |
1354 | } | |
1c79356b A |
1355 | return (vp); |
1356 | } | |
1357 | ||
91447636 | 1358 | |
1c79356b | 1359 | /* |
0b4e3aa0 A |
1360 | * Get a reference on a particular vnode and lock it if requested. |
1361 | * If the vnode was on the inactive list, remove it from the list. | |
1362 | * If the vnode was on the free list, remove it from the list and | |
1363 | * move it to inactive list as needed. | |
1364 | * The vnode lock bit is set if the vnode is being eliminated in | |
1365 | * vgone. The process is awakened when the transition is completed, | |
1366 | * and an error returned to indicate that the vnode is no longer | |
1367 | * usable (possibly having been changed to a new file system type). | |
1c79356b | 1368 | */ |
91447636 A |
1369 | static int |
1370 | vget_internal(vnode_t vp, int vid, int vflags) | |
1c79356b A |
1371 | { |
1372 | int error = 0; | |
2d21ac55 | 1373 | int vpid; |
55e303ae | 1374 | |
2d21ac55 | 1375 | vnode_lock_spin(vp); |
55e303ae | 1376 | |
91447636 A |
1377 | if (vflags & VNODE_WITHID) |
1378 | vpid = vid; | |
1379 | else | |
1380 | vpid = vp->v_id; // save off the original v_id | |
0b4e3aa0 | 1381 | |
91447636 A |
1382 | if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0)) |
1383 | /* | |
1384 | * vnode to be returned only if it has writers opened | |
1385 | */ | |
1386 | error = EINVAL; | |
1387 | else | |
2d21ac55 | 1388 | error = vnode_getiocount(vp, vpid, vflags); |
55e303ae | 1389 | |
91447636 | 1390 | vnode_unlock(vp); |
55e303ae | 1391 | |
0b4e3aa0 A |
1392 | return (error); |
1393 | } | |
1394 | ||
2d21ac55 A |
1395 | /* |
1396 | * Returns: 0 Success | |
1397 | * ENOENT No such file or directory [terminating] | |
1398 | */ | |
1c79356b | 1399 | int |
91447636 | 1400 | vnode_ref(vnode_t vp) |
1c79356b | 1401 | { |
1c79356b | 1402 | |
91447636 | 1403 | return (vnode_ref_ext(vp, 0)); |
1c79356b A |
1404 | } |
1405 | ||
2d21ac55 A |
1406 | /* |
1407 | * Returns: 0 Success | |
1408 | * ENOENT No such file or directory [terminating] | |
1409 | */ | |
1c79356b | 1410 | int |
91447636 | 1411 | vnode_ref_ext(vnode_t vp, int fmode) |
1c79356b | 1412 | { |
91447636 | 1413 | int error = 0; |
1c79356b | 1414 | |
2d21ac55 | 1415 | vnode_lock_spin(vp); |
1c79356b | 1416 | |
91447636 A |
1417 | /* |
1418 | * once all the current call sites have been fixed to ensure they have |
1419 | * taken an iocount, we can toughen this assert up and insist that the | |
1420 | * iocount is non-zero... a non-zero usecount doesn't ensure correctness |
1421 | */ | |
1422 | if (vp->v_iocount <= 0 && vp->v_usecount <= 0) | |
2d21ac55 | 1423 | panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount); |
1c79356b | 1424 | |
91447636 A |
1425 | /* |
1426 | * if you are the owner of drain/termination, can acquire usecount | |
1427 | */ | |
1428 | if ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) { | |
1429 | if (vp->v_owner != current_thread()) { | |
1430 | error = ENOENT; | |
1431 | goto out; | |
1432 | } | |
1433 | } | |
1434 | vp->v_usecount++; | |
1c79356b | 1435 | |
91447636 A |
1436 | if (fmode & FWRITE) { |
1437 | if (++vp->v_writecount <= 0) | |
1438 | panic("vnode_ref_ext: v_writecount"); | |
55e303ae | 1439 | } |
91447636 A |
1440 | if (fmode & O_EVTONLY) { |
1441 | if (++vp->v_kusecount <= 0) | |
1442 | panic("vnode_ref_ext: v_kusecount"); | |
55e303ae | 1443 | } |
2d21ac55 A |
1444 | if (vp->v_flag & VRAGE) { |
1445 | struct uthread *ut; | |
1446 | ||
1447 | ut = get_bsdthread_info(current_thread()); | |
1448 | ||
1449 | if ( !(current_proc()->p_lflag & P_LRAGE_VNODES) && | |
1450 | !(ut->uu_flag & UT_RAGE_VNODES)) { | |
1451 | /* | |
1452 | * a 'normal' process accessed this vnode | |
1453 | * so make sure its no longer marked | |
1454 | * for rapid aging... also, make sure | |
1455 | * it gets removed from the rage list... | |
1456 | * when v_usecount drops back to 0, it | |
1457 | * will be put back on the real free list | |
1458 | */ | |
1459 | vp->v_flag &= ~VRAGE; | |
1460 | vp->v_references = 0; | |
1461 | vnode_list_remove(vp); | |
1462 | } | |
1463 | } | |
91447636 A |
1464 | out: |
1465 | vnode_unlock(vp); | |
1466 | ||
1467 | return (error); | |
55e303ae A |
1468 | } |
1469 | ||
1470 | ||
1c79356b A |
1471 | /* |
1472 | * put the vnode on appropriate free list. | |
91447636 | 1473 | * called with vnode LOCKED |
1c79356b A |
1474 | */ |
1475 | static void | |
91447636 | 1476 | vnode_list_add(vnode_t vp) |
1c79356b A |
1477 | { |
1478 | /* | |
91447636 | 1479 | * if it is already on a list or non zero references return |
1c79356b | 1480 | */ |
91447636 | 1481 | if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0)) |
1c79356b | 1482 | return; |
91447636 | 1483 | vnode_list_lock(); |
1c79356b | 1484 | |
2d21ac55 A |
1485 | if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) { |
1486 | /* | |
1487 | * add the new guy to the appropriate end of the RAGE list | |
1488 | */ | |
1489 | if ((vp->v_flag & VAGE)) | |
1490 | TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist); | |
1491 | else | |
1492 | TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist); | |
1493 | ||
1494 | vp->v_listflag |= VLIST_RAGE; | |
1495 | ragevnodes++; | |
1496 | ||
1497 | /* | |
1498 | * reset the timestamp for the last inserted vp on the RAGE | |
1499 | * queue to let new_vnode know that its not ok to start stealing | |
1500 | * from this list... as long as we're actively adding to this list | |
1501 | * we'll push out the vnodes we want to donate to the real free list | |
1502 | * once we stop pushing, we'll let some time elapse before we start | |
1503 | * stealing them in the new_vnode routine | |
1504 | */ | |
1505 | microuptime(&rage_tv); | |
91447636 | 1506 | } else { |
2d21ac55 A |
1507 | /* |
1508 | * if VL_DEAD, insert it at head of the dead list | |
1509 | * else insert at tail of LRU list or at head if VAGE is set | |
1510 | */ | |
1511 | if ( (vp->v_lflag & VL_DEAD)) { | |
1512 | TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist); | |
1513 | vp->v_listflag |= VLIST_DEAD; | |
1514 | deadvnodes++; | |
1515 | } else if ((vp->v_flag & VAGE)) { | |
1516 | TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); | |
1517 | vp->v_flag &= ~VAGE; | |
1518 | freevnodes++; | |
1519 | } else { | |
1520 | TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); | |
1521 | freevnodes++; | |
1522 | } | |
91447636 | 1523 | } |
91447636 | 1524 | vnode_list_unlock(); |
1c79356b A |
1525 | } |
1526 | ||
cf7d32b8 A |
1527 | |
1528 | /* | |
1529 | * remove the vnode from appropriate free list. | |
1530 | * called with vnode LOCKED and | |
1531 | * the list lock held | |
1532 | */ | |
1533 | static void | |
1534 | vnode_list_remove_locked(vnode_t vp) | |
1535 | { | |
1536 | if (VONLIST(vp)) { | |
1537 | /* | |
1538 | * the v_listflag field is | |
1539 | * protected by the vnode_list_lock | |
1540 | */ | |
1541 | if (vp->v_listflag & VLIST_RAGE) | |
1542 | VREMRAGE("vnode_list_remove", vp); | |
1543 | else if (vp->v_listflag & VLIST_DEAD) | |
1544 | VREMDEAD("vnode_list_remove", vp); | |
1545 | else | |
1546 | VREMFREE("vnode_list_remove", vp); | |
1547 | } | |
1548 | } | |
1549 | ||
1550 | ||
1c79356b | 1551 | /* |
91447636 | 1552 | * remove the vnode from appropriate free list. |
cf7d32b8 | 1553 | * called with vnode LOCKED |
1c79356b A |
1554 | */ |
1555 | static void | |
91447636 | 1556 | vnode_list_remove(vnode_t vp) |
1c79356b | 1557 | { |
91447636 A |
1558 | /* |
1559 | * we want to avoid taking the list lock | |
1560 | * in the case where we're not on the free | |
1561 | * list... this will be true for most | |
1562 | * directories and any currently in use files | |
1563 | * | |
1564 | * we're guaranteed that we can't go from | |
1565 | * the not-on-list state to the on-list | |
1566 | * state since we hold the vnode lock... | |
1567 | * all calls to vnode_list_add are done | |
1568 | * under the vnode lock... so we can | |
1569 | * check for that condition (the prevalent one) |
1570 | * without taking the list lock | |
1571 | */ | |
1572 | if (VONLIST(vp)) { | |
1573 | vnode_list_lock(); | |
1574 | /* | |
1575 | * however, we're not guaranteed that | |
1576 | * we won't go from the on-list state | |
cf7d32b8 | 1577 | * to the not-on-list state until we |
91447636 | 1578 | * hold the vnode_list_lock... this |
cf7d32b8 | 1579 | * is due to "new_vnode" removing vnodes |
91447636 A |
1580 | * from the free list under the list_lock |
1581 | * w/o the vnode lock... so we need to | |
1582 | * check again whether we're currently | |
1583 | * on the free list | |
1584 | */ | |
cf7d32b8 | 1585 | vnode_list_remove_locked(vp); |
2d21ac55 | 1586 | |
91447636 A |
1587 | vnode_list_unlock(); |
1588 | } | |
1c79356b A |
1589 | } |
1590 | ||
1591 | ||
1c79356b | 1592 | void |
91447636 | 1593 | vnode_rele(vnode_t vp) |
1c79356b | 1594 | { |
91447636 A |
1595 | vnode_rele_internal(vp, 0, 0, 0); |
1596 | } | |
1c79356b | 1597 | |
1c79356b | 1598 | |
91447636 A |
1599 | void |
1600 | vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter) | |
1601 | { | |
1602 | vnode_rele_internal(vp, fmode, dont_reenter, 0); | |
1c79356b A |
1603 | } |
1604 | ||
91447636 | 1605 | |
1c79356b | 1606 | void |
91447636 | 1607 | vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked) |
1c79356b | 1608 | { |
91447636 | 1609 | if ( !locked) |
2d21ac55 | 1610 | vnode_lock_spin(vp); |
1c79356b | 1611 | |
91447636 | 1612 | if (--vp->v_usecount < 0) |
2d21ac55 | 1613 | panic("vnode_rele_ext: vp %p usecount -ve : %d", vp, vp->v_usecount); |
91447636 A |
1614 | |
1615 | if (fmode & FWRITE) { | |
1616 | if (--vp->v_writecount < 0) | |
2d21ac55 | 1617 | panic("vnode_rele_ext: vp %p writecount -ve : %ld", vp, vp->v_writecount); |
1c79356b | 1618 | } |
91447636 A |
1619 | if (fmode & O_EVTONLY) { |
1620 | if (--vp->v_kusecount < 0) | |
2d21ac55 | 1621 | panic("vnode_rele_ext: vp %p kusecount -ve : %d", vp, vp->v_kusecount); |
1c79356b | 1622 | } |
2d21ac55 A |
1623 | if (vp->v_kusecount > vp->v_usecount) |
1624 | panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d)\n",vp, vp->v_kusecount, vp->v_usecount); | |
91447636 A |
1625 | if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) { |
1626 | /* | |
1627 | * vnode is still busy... if we're the last | |
1628 | * usecount, mark for a future call to VNOP_INACTIVE | |
1629 | * when the iocount finally drops to 0 | |
1630 | */ | |
1631 | if (vp->v_usecount == 0) { | |
1632 | vp->v_lflag |= VL_NEEDINACTIVE; | |
2d21ac55 | 1633 | vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT); |
91447636 A |
1634 | } |
1635 | if ( !locked) | |
1636 | vnode_unlock(vp); | |
1c79356b A |
1637 | return; |
1638 | } | |
2d21ac55 | 1639 | vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT); |
91447636 A |
1640 | |
1641 | if ( (vp->v_lflag & (VL_TERMINATE | VL_DEAD)) || dont_reenter) { | |
1642 | /* | |
1643 | * vnode is being cleaned, or | |
1644 | * we've requested that we don't reenter | |
1645 | * the filesystem on this release... in | |
1646 | * this case, we'll mark the vnode aged | |
1647 | * if it's been marked for termination | |
1c79356b | 1648 | */ |
91447636 A |
1649 | if (dont_reenter) { |
1650 | if ( !(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM)) ) | |
1651 | vp->v_lflag |= VL_NEEDINACTIVE; | |
1652 | vp->v_flag |= VAGE; | |
1c79356b | 1653 | } |
91447636 A |
1654 | vnode_list_add(vp); |
1655 | if ( !locked) | |
1656 | vnode_unlock(vp); | |
1657 | return; | |
1c79356b | 1658 | } |
91447636 A |
1659 | /* |
1660 | * at this point both the iocount and usecount | |
1661 | * are zero | |
1662 | * pick up an iocount so that we can call | |
1663 | * VNOP_INACTIVE with the vnode lock unheld | |
1664 | */ | |
1665 | vp->v_iocount++; | |
1666 | #ifdef JOE_DEBUG | |
1667 | record_vp(vp, 1); | |
1c79356b | 1668 | #endif |
91447636 A |
1669 | vp->v_lflag &= ~VL_NEEDINACTIVE; |
1670 | vnode_unlock(vp); | |
1c79356b | 1671 | |
2d21ac55 | 1672 | VNOP_INACTIVE(vp, vfs_context_current()); |
1c79356b | 1673 | |
2d21ac55 | 1674 | vnode_lock_spin(vp); |
91447636 A |
1675 | /* |
1676 | * because we dropped the vnode lock to call VNOP_INACTIVE | |
1677 | * the state of the vnode may have changed... we may have | |
1678 | * picked up an iocount, usecount or the MARKTERM may have | |
1679 | * been set... we need to reevaluate the reference counts | |
1680 | * to determine if we can call vnode_reclaim_internal at | |
1681 | * this point... if the reference counts are up, we'll pick | |
1682 | * up the MARKTERM state when they get subsequently dropped | |
1683 | */ | |
1684 | if ( (vp->v_iocount == 1) && (vp->v_usecount == 0) && | |
1685 | ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) { | |
1686 | struct uthread *ut; | |
1c79356b | 1687 | |
91447636 A |
1688 | ut = get_bsdthread_info(current_thread()); |
1689 | ||
1690 | if (ut->uu_defer_reclaims) { | |
1691 | vp->v_defer_reclaimlist = ut->uu_vreclaims; | |
1692 | ut->uu_vreclaims = vp; | |
1693 | goto defer_reclaim; | |
1694 | } | |
2d21ac55 | 1695 | vnode_lock_convert(vp); |
cf7d32b8 | 1696 | vnode_reclaim_internal(vp, 1, 1, 0); |
91447636 | 1697 | } |
2d21ac55 | 1698 | vnode_dropiocount(vp); |
91447636 A |
1699 | vnode_list_add(vp); |
1700 | defer_reclaim: | |
1701 | if ( !locked) | |
1702 | vnode_unlock(vp); | |
1703 | return; | |
1c79356b A |
1704 | } |
1705 | ||
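/*
 * Editor's note (not part of the original file): a hedged sketch of the
 * reference pairing that feeds the counts released above.  vnode_get()/
 * vnode_put() bracket short-term use via the iocount, while vnode_ref()/
 * vnode_rele() take and drop the long-lived usecount whose final release
 * is what eventually drives VNOP_INACTIVE in vnode_rele_internal().
 */
#if 0	/* illustrative sketch only */
static void
example_hold_and_release(vnode_t vp)
{
	if (vnode_get(vp) == 0) {		/* short-term hold (iocount) */
		if (vnode_ref(vp) == 0) {	/* long-term hold (usecount) */
			/* ... stash vp in a cache, use it later ... */
			vnode_rele(vp);		/* drop the usecount when done */
		}
		vnode_put(vp);			/* drop the iocount */
	}
}
#endif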
1706 | /* | |
1707 | * Remove any vnodes in the vnode table belonging to mount point mp. | |
1708 | * | |
1709 | * If MNT_NOFORCE is specified, there should not be any active ones, | |
1710 | * return error if any are found (nb: this is a user error, not a | |
1711 | * system error). If MNT_FORCE is specified, detach any active vnodes | |
1712 | * that are found. | |
1713 | */ | |
1714 | #if DIAGNOSTIC | |
1715 | int busyprt = 0; /* print out busy vnodes */ | |
1716 | #if 0 | |
1717 | struct ctldebug debug1 = { "busyprt", &busyprt }; | |
1718 | #endif /* 0 */ | |
1719 | #endif | |
1720 | ||
1721 | int | |
2d21ac55 | 1722 | vflush(struct mount *mp, struct vnode *skipvp, int flags) |
1c79356b | 1723 | { |
91447636 | 1724 | struct vnode *vp; |
1c79356b | 1725 | int busy = 0; |
91447636 | 1726 | int reclaimed = 0; |
2d21ac55 A |
1727 | int retval; |
1728 | int vid; | |
1c79356b | 1729 | |
91447636 A |
1730 | mount_lock(mp); |
1731 | vnode_iterate_setup(mp); | |
1732 | /* | |
1733 | * On regular unmounts (not forced) do a |
1734 | * quick check for vnodes to be in use. This | |
1735 | * preserves the caching of vnodes. The automounter |
1736 | * tries unmounting every so often to see whether | |
1737 | * it is still busy or not. | |
1738 | */ | |
2d21ac55 | 1739 | if (((flags & FORCECLOSE)==0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) { |
91447636 A |
1740 | if (vnode_umount_preflight(mp, skipvp, flags)) { |
1741 | vnode_iterate_clear(mp); | |
1742 | mount_unlock(mp); | |
1743 | return(EBUSY); | |
1744 | } | |
1745 | } | |
1c79356b | 1746 | loop: |
91447636 A |
1747 | /* if it returns 0 then there is nothing to do */ |
1748 | retval = vnode_iterate_prepare(mp); | |
1749 | ||
1750 | if (retval == 0) { | |
1751 | vnode_iterate_clear(mp); | |
1752 | mount_unlock(mp); | |
1753 | return(retval); | |
1754 | } | |
1755 | ||
1756 | /* iterate over all the vnodes */ | |
1757 | while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) { | |
1758 | vp = TAILQ_FIRST(&mp->mnt_workerqueue); | |
1759 | TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes); | |
1760 | TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes); | |
1761 | if ( (vp->v_mount != mp) || (vp == skipvp)) { | |
1762 | continue; | |
1763 | } | |
1764 | vid = vp->v_id; | |
1765 | mount_unlock(mp); | |
1766 | vnode_lock(vp); | |
1767 | ||
1768 | if ((vp->v_id != vid) || ((vp->v_lflag & (VL_DEAD | VL_TERMINATE)))) { | |
1769 | vnode_unlock(vp); | |
1770 | mount_lock(mp); | |
1771 | continue; | |
1772 | } | |
1773 | ||
1c79356b | 1774 | /* |
91447636 A |
1775 | * If requested, skip over vnodes marked VSYSTEM. |
1776 | * Skip over all vnodes marked VNOFLUSH. | |
1777 | */ | |
1778 | if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || | |
1779 | (vp->v_flag & VNOFLUSH))) { | |
1780 | vnode_unlock(vp); | |
1781 | mount_lock(mp); | |
1c79356b | 1782 | continue; |
91447636 | 1783 | } |
1c79356b | 1784 | /* |
91447636 | 1785 | * If requested, skip over vnodes marked VSWAP. |
1c79356b | 1786 | */ |
91447636 A |
1787 | if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) { |
1788 | vnode_unlock(vp); | |
1789 | mount_lock(mp); | |
1c79356b A |
1790 | continue; |
1791 | } | |
1792 | /* | |
91447636 | 1793 | * If requested, skip over vnodes marked VROOT. |
1c79356b | 1794 | */ |
91447636 A |
1795 | if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) { |
1796 | vnode_unlock(vp); | |
1797 | mount_lock(mp); | |
1c79356b A |
1798 | continue; |
1799 | } | |
1800 | /* | |
1801 | * If WRITECLOSE is set, only flush out regular file | |
1802 | * vnodes open for writing. | |
1803 | */ | |
1804 | if ((flags & WRITECLOSE) && | |
1805 | (vp->v_writecount == 0 || vp->v_type != VREG)) { | |
91447636 A |
1806 | vnode_unlock(vp); |
1807 | mount_lock(mp); | |
1c79356b A |
1808 | continue; |
1809 | } | |
1810 | /* | |
91447636 | 1811 | * If the real usecount is 0, all we need to do is clear |
1c79356b A |
1812 | * out the vnode data structures and we are done. |
1813 | */ | |
91447636 A |
1814 | if (((vp->v_usecount == 0) || |
1815 | ((vp->v_usecount - vp->v_kusecount) == 0))) { | |
1816 | vp->v_iocount++; /* so that drain waits for other iocounts */ |
1817 | #ifdef JOE_DEBUG | |
1818 | record_vp(vp, 1); | |
1819 | #endif | |
cf7d32b8 | 1820 | vnode_reclaim_internal(vp, 1, 1, 0); |
2d21ac55 | 1821 | vnode_dropiocount(vp); |
91447636 | 1822 | vnode_list_add(vp); |
91447636 | 1823 | vnode_unlock(vp); |
cf7d32b8 | 1824 | |
91447636 A |
1825 | reclaimed++; |
1826 | mount_lock(mp); | |
1c79356b A |
1827 | continue; |
1828 | } | |
1829 | /* | |
1830 | * If FORCECLOSE is set, forcibly close the vnode. | |
1831 | * For block or character devices, revert to an | |
1832 | * anonymous device. For all other files, just kill them. | |
1833 | */ | |
1834 | if (flags & FORCECLOSE) { | |
1c79356b | 1835 | if (vp->v_type != VBLK && vp->v_type != VCHR) { |
91447636 A |
1836 | vp->v_iocount++; /* so that drain waits for other iocounts */ |
1837 | #ifdef JOE_DEBUG | |
1838 | record_vp(vp, 1); | |
1839 | #endif | |
cf7d32b8 | 1840 | vnode_reclaim_internal(vp, 1, 1, 0); |
2d21ac55 | 1841 | vnode_dropiocount(vp); |
91447636 A |
1842 | vnode_list_add(vp); |
1843 | vnode_unlock(vp); | |
1c79356b | 1844 | } else { |
2d21ac55 | 1845 | vclean(vp, 0); |
91447636 | 1846 | vp->v_lflag &= ~VL_DEAD; |
1c79356b | 1847 | vp->v_op = spec_vnodeop_p; |
2d21ac55 | 1848 | vp->v_flag |= VDEVFLUSH; |
91447636 | 1849 | vnode_unlock(vp); |
1c79356b | 1850 | } |
91447636 | 1851 | mount_lock(mp); |
1c79356b A |
1852 | continue; |
1853 | } | |
1854 | #if DIAGNOSTIC | |
1855 | if (busyprt) | |
1856 | vprint("vflush: busy vnode", vp); | |
1857 | #endif | |
91447636 A |
1858 | vnode_unlock(vp); |
1859 | mount_lock(mp); | |
1c79356b A |
1860 | busy++; |
1861 | } | |
91447636 A |
1862 | |
1863 | /* At this point the worker queue is completed */ | |
1864 | if (busy && ((flags & FORCECLOSE)==0) && reclaimed) { | |
1865 | busy = 0; | |
1866 | reclaimed = 0; | |
1867 | (void)vnode_iterate_reloadq(mp); | |
1868 | /* returned with mount lock held */ | |
1869 | goto loop; | |
1870 | } | |
1871 | ||
1872 | /* if new vnodes were created in between retry the reclaim */ | |
1873 | if ( vnode_iterate_reloadq(mp) != 0) { | |
1874 | if (!(busy && ((flags & FORCECLOSE)==0))) | |
1875 | goto loop; | |
1876 | } | |
1877 | vnode_iterate_clear(mp); | |
1878 | mount_unlock(mp); | |
1879 | ||
9bccf70c | 1880 | if (busy && ((flags & FORCECLOSE)==0)) |
1c79356b A |
1881 | return (EBUSY); |
1882 | return (0); | |
1883 | } | |
1884 | ||
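/*
 * Editor's note (not part of the original file): a sketch of the typical
 * vflush() call site, a filesystem's unmount handler.  The function name
 * and the exact flag mix are illustrative; only the FORCECLOSE/MNT_FORCE
 * relationship mirrors the code above.
 */
#if 0	/* illustrative sketch only */
static int
example_fs_unmount(mount_t mp, int mntflags)
{
	int flags = SKIPSYSTEM;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	/* reclaim every vnode on this mount (no vnode is exempted here) */
	return (vflush(mp, NULLVP, flags));
}
#endif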
2d21ac55 | 1885 | long num_recycledvnodes = 0; /* long for OSAddAtomic */ |
1c79356b A |
1886 | /* |
1887 | * Disassociate the underlying file system from a vnode. | |
91447636 | 1888 | * The vnode lock is held on entry. |
1c79356b A |
1889 | */ |
1890 | static void | |
2d21ac55 | 1891 | vclean(vnode_t vp, int flags) |
1c79356b | 1892 | { |
2d21ac55 | 1893 | vfs_context_t ctx = vfs_context_current(); |
1c79356b | 1894 | int active; |
91447636 A |
1895 | int need_inactive; |
1896 | int already_terminating; | |
2d21ac55 | 1897 | int clflags = 0; |
1c79356b | 1898 | |
cf7d32b8 A |
1899 | #if NAMEDSTREAMS |
1900 | int is_namedstream; | |
1901 | #endif | |
1902 | ||
1c79356b A |
1903 | /* |
1904 | * Check to see if the vnode is in use. | |
1905 | * If so we have to reference it before we clean it out | |
1906 | * so that its count cannot fall to zero and generate a | |
1907 | * race against ourselves to recycle it. | |
1908 | */ | |
91447636 | 1909 | active = vp->v_usecount; |
55e303ae | 1910 | |
91447636 A |
1911 | /* |
1912 | * just in case we missed sending a needed | |
1913 | * VNOP_INACTIVE, we'll do it now | |
1914 | */ | |
1915 | need_inactive = (vp->v_lflag & VL_NEEDINACTIVE); | |
1916 | ||
1917 | vp->v_lflag &= ~VL_NEEDINACTIVE; | |
55e303ae | 1918 | |
1c79356b A |
1919 | /* |
1920 | * Prevent the vnode from being recycled or | |
1921 | * brought into use while we clean it out. | |
1922 | */ | |
91447636 A |
1923 | already_terminating = (vp->v_lflag & VL_TERMINATE); |
1924 | ||
1925 | vp->v_lflag |= VL_TERMINATE; | |
1c79356b A |
1926 | |
1927 | /* | |
91447636 A |
1928 | * remove the vnode from any mount list |
1929 | * it might be on... | |
1c79356b | 1930 | */ |
91447636 | 1931 | insmntque(vp, (struct mount *)0); |
1c79356b | 1932 | |
cf7d32b8 A |
1933 | #if NAMEDSTREAMS |
1934 | is_namedstream = vnode_isnamedstream(vp); | |
1935 | #endif | |
1936 | ||
91447636 A |
1937 | vnode_unlock(vp); |
1938 | ||
91447636 | 1939 | OSAddAtomic(1, &num_recycledvnodes); |
1c79356b | 1940 | /* |
91447636 | 1941 | * purge from the name cache as early as possible... |
1c79356b | 1942 | */ |
91447636 | 1943 | cache_purge(vp); |
1c79356b | 1944 | |
2d21ac55 A |
1945 | if (flags & DOCLOSE) |
1946 | clflags |= IO_NDELAY; | |
1947 | if (flags & REVOKEALL) | |
1948 | clflags |= IO_REVOKE; | |
1949 | ||
0b4e3aa0 | 1950 | if (active && (flags & DOCLOSE)) |
2d21ac55 | 1951 | VNOP_CLOSE(vp, clflags, ctx); |
1c79356b A |
1952 | |
1953 | /* | |
1954 | * Clean out any buffers associated with the vnode. | |
1955 | */ | |
1956 | if (flags & DOCLOSE) { | |
91447636 | 1957 | #if NFSCLIENT |
1c79356b | 1958 | if (vp->v_tag == VT_NFS) |
2d21ac55 | 1959 | nfs_vinvalbuf(vp, V_SAVE, ctx, 0); |
55e303ae | 1960 | else |
91447636 A |
1961 | #endif |
1962 | { | |
2d21ac55 | 1963 | VNOP_FSYNC(vp, MNT_WAIT, ctx); |
91447636 A |
1964 | buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0); |
1965 | } | |
1966 | if (UBCINFOEXISTS(vp)) | |
1967 | /* | |
1968 | * Clean the pages in VM. | |
1969 | */ | |
1970 | (void)ubc_sync_range(vp, (off_t)0, ubc_getsize(vp), UBC_PUSHALL); | |
55e303ae | 1971 | } |
91447636 | 1972 | if (active || need_inactive) |
2d21ac55 | 1973 | VNOP_INACTIVE(vp, ctx); |
0b4e3aa0 | 1974 | |
cf7d32b8 A |
1975 | #if NAMEDSTREAMS |
1976 | /* Delete the shadow stream file before we reclaim its vnode */ | |
1977 | if ((is_namedstream != 0) && | |
1978 | (vp->v_parent != NULLVP) && | |
1979 | ((vp->v_parent->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0)) { | |
1980 | vnode_relenamedstream(vp->v_parent, vp, ctx); | |
1981 | } | |
1982 | #endif | |
1983 | ||
2d21ac55 A |
1984 | /* |
1985 | * Destroy ubc named reference | |
1986 | * cluster_release is done on this path | |
1987 | * along with dropping the reference on the ucred | |
1988 | */ | |
91447636 | 1989 | ubc_destroy_named(vp); |
0b4e3aa0 | 1990 | |
1c79356b A |
1991 | /* |
1992 | * Reclaim the vnode. | |
1993 | */ | |
2d21ac55 | 1994 | if (VNOP_RECLAIM(vp, ctx)) |
1c79356b | 1995 | panic("vclean: cannot reclaim"); |
55e303ae A |
1996 | |
1997 | // make sure the name & parent ptrs get cleaned out! | |
91447636 | 1998 | vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME); |
55e303ae | 1999 | |
91447636 | 2000 | vnode_lock(vp); |
1c79356b | 2001 | |
91447636 | 2002 | vp->v_mount = dead_mountp; |
1c79356b A |
2003 | vp->v_op = dead_vnodeop_p; |
2004 | vp->v_tag = VT_NON; | |
91447636 | 2005 | vp->v_data = NULL; |
1c79356b | 2006 | |
91447636 | 2007 | vp->v_lflag |= VL_DEAD; |
55e303ae | 2008 | |
91447636 A |
2009 | if (already_terminating == 0) { |
2010 | vp->v_lflag &= ~VL_TERMINATE; | |
2011 | /* | |
2012 | * Done with purge, notify sleepers of the grim news. | |
2013 | */ | |
2014 | if (vp->v_lflag & VL_TERMWANT) { | |
2015 | vp->v_lflag &= ~VL_TERMWANT; | |
2016 | wakeup(&vp->v_lflag); | |
2017 | } | |
1c79356b A |
2018 | } |
2019 | } | |
2020 | ||
2021 | /* | |
2022 | * Eliminate all activity associated with the requested vnode | |
2023 | * and with all vnodes aliased to the requested vnode. | |
2024 | */ | |
2025 | int | |
2d21ac55 | 2026 | #if DIAGNOSTIC |
91447636 | 2027 | vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context) |
2d21ac55 A |
2028 | #else |
2029 | vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context) | |
2030 | #endif | |
1c79356b | 2031 | { |
91447636 A |
2032 | struct vnode *vq; |
2033 | int vid; | |
1c79356b A |
2034 | |
2035 | #if DIAGNOSTIC | |
91447636 A |
2036 | if ((flags & REVOKEALL) == 0) |
2037 | panic("vnop_revoke"); | |
1c79356b A |
2038 | #endif |
2039 | ||
1c79356b A |
2040 | if (vp->v_flag & VALIASED) { |
2041 | /* | |
2042 | * If a vgone (or vclean) is already in progress, | |
2043 | * wait until it is done and return. | |
2044 | */ | |
91447636 A |
2045 | vnode_lock(vp); |
2046 | if (vp->v_lflag & VL_TERMINATE) { | |
2047 | vnode_unlock(vp); | |
2048 | return(ENOENT); | |
1c79356b | 2049 | } |
91447636 | 2050 | vnode_unlock(vp); |
1c79356b A |
2051 | /* |
2052 | * Ensure that vp will not be vgone'd while we | |
2053 | * are eliminating its aliases. | |
2054 | */ | |
91447636 | 2055 | SPECHASH_LOCK(); |
1c79356b | 2056 | while (vp->v_flag & VALIASED) { |
1c79356b A |
2057 | for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { |
2058 | if (vq->v_rdev != vp->v_rdev || | |
2059 | vq->v_type != vp->v_type || vp == vq) | |
2060 | continue; | |
91447636 A |
2061 | vid = vq->v_id; |
2062 | SPECHASH_UNLOCK(); | |
2063 | if (vnode_getwithvid(vq,vid)){ | |
2064 | SPECHASH_LOCK(); | |
2065 | break; | |
2066 | } | |
cf7d32b8 | 2067 | vnode_reclaim_internal(vq, 0, 1, 0); |
91447636 A |
2068 | vnode_put(vq); |
2069 | SPECHASH_LOCK(); | |
1c79356b A |
2070 | break; |
2071 | } | |
1c79356b | 2072 | } |
91447636 | 2073 | SPECHASH_UNLOCK(); |
1c79356b | 2074 | } |
2d21ac55 | 2075 | vnode_reclaim_internal(vp, 0, 0, REVOKEALL); |
91447636 | 2076 | |
1c79356b A |
2077 | return (0); |
2078 | } | |
2079 | ||
2080 | /* | |
2081 | * Recycle an unused vnode to the front of the free list. | |
2082 | * Release the passed interlock if the vnode will be recycled. | |
2083 | */ | |
2084 | int | |
2d21ac55 | 2085 | vnode_recycle(struct vnode *vp) |
1c79356b | 2086 | { |
91447636 | 2087 | vnode_lock(vp); |
1c79356b | 2088 | |
91447636 A |
2089 | if (vp->v_iocount || vp->v_usecount) { |
2090 | vp->v_lflag |= VL_MARKTERM; | |
2091 | vnode_unlock(vp); | |
2092 | return(0); | |
2093 | } | |
2d21ac55 | 2094 | vnode_reclaim_internal(vp, 1, 0, 0); |
cf7d32b8 | 2095 | |
91447636 A |
2096 | vnode_unlock(vp); |
2097 | ||
2098 | return (1); | |
1c79356b A |
2099 | } |
2100 | ||
91447636 A |
2101 | static int |
2102 | vnode_reload(vnode_t vp) | |
1c79356b | 2103 | { |
2d21ac55 | 2104 | vnode_lock_spin(vp); |
1c79356b | 2105 | |
91447636 A |
2106 | if ((vp->v_iocount > 1) || vp->v_usecount) { |
2107 | vnode_unlock(vp); | |
2108 | return(0); | |
2109 | } | |
2110 | if (vp->v_iocount <= 0) | |
2111 | panic("vnode_reload with no iocount %d", vp->v_iocount); | |
2112 | ||
2113 | /* mark for release when iocount is dropped */ |
2114 | vp->v_lflag |= VL_MARKTERM; | |
2115 | vnode_unlock(vp); | |
2116 | ||
2117 | return (1); | |
1c79356b A |
2118 | } |
2119 | ||
91447636 A |
2120 | |
2121 | static void | |
2d21ac55 | 2122 | vgone(vnode_t vp, int flags) |
1c79356b A |
2123 | { |
2124 | struct vnode *vq; | |
2125 | struct vnode *vx; | |
2126 | ||
1c79356b A |
2127 | /* |
2128 | * Clean out the filesystem specific data. | |
91447636 A |
2129 | * vclean also takes care of removing the |
2130 | * vnode from any mount list it might be on | |
1c79356b | 2131 | */ |
2d21ac55 | 2132 | vclean(vp, flags | DOCLOSE); |
91447636 | 2133 | |
1c79356b A |
2134 | /* |
2135 | * If special device, remove it from special device alias list | |
2136 | * if it is on one. | |
2137 | */ | |
2138 | if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) { | |
91447636 A |
2139 | SPECHASH_LOCK(); |
2140 | if (*vp->v_hashchain == vp) { | |
2141 | *vp->v_hashchain = vp->v_specnext; | |
2142 | } else { | |
2143 | for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { | |
2144 | if (vq->v_specnext != vp) | |
2145 | continue; | |
2146 | vq->v_specnext = vp->v_specnext; | |
2147 | break; | |
2148 | } | |
1c79356b A |
2149 | if (vq == NULL) |
2150 | panic("missing bdev"); | |
1c79356b | 2151 | } |
91447636 A |
2152 | if (vp->v_flag & VALIASED) { |
2153 | vx = NULL; | |
2154 | for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { | |
2155 | if (vq->v_rdev != vp->v_rdev || | |
2156 | vq->v_type != vp->v_type) | |
2157 | continue; | |
2158 | if (vx) | |
2159 | break; | |
2160 | vx = vq; | |
2161 | } | |
2162 | if (vx == NULL) | |
2163 | panic("missing alias"); | |
2164 | if (vq == NULL) | |
2165 | vx->v_flag &= ~VALIASED; | |
2166 | vp->v_flag &= ~VALIASED; | |
2167 | } | |
2168 | SPECHASH_UNLOCK(); | |
2169 | { | |
2170 | struct specinfo *tmp = vp->v_specinfo; | |
2171 | vp->v_specinfo = NULL; | |
2172 | FREE_ZONE((void *)tmp, sizeof(struct specinfo), M_SPECINFO); | |
2173 | } | |
1c79356b | 2174 | } |
1c79356b A |
2175 | } |
2176 | ||
2177 | /* | |
2178 | * Lookup a vnode by device number. | |
2179 | */ | |
2180 | int | |
91447636 | 2181 | check_mountedon(dev_t dev, enum vtype type, int *errorp) |
1c79356b | 2182 | { |
91447636 | 2183 | vnode_t vp; |
1c79356b | 2184 | int rc = 0; |
91447636 | 2185 | int vid; |
1c79356b | 2186 | |
91447636 A |
2187 | loop: |
2188 | SPECHASH_LOCK(); | |
1c79356b A |
2189 | for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) { |
2190 | if (dev != vp->v_rdev || type != vp->v_type) | |
2191 | continue; | |
91447636 A |
2192 | vid = vp->v_id; |
2193 | SPECHASH_UNLOCK(); | |
2194 | if (vnode_getwithvid(vp,vid)) | |
2195 | goto loop; | |
2d21ac55 | 2196 | vnode_lock_spin(vp); |
91447636 A |
2197 | if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) { |
2198 | vnode_unlock(vp); | |
2199 | if ((*errorp = vfs_mountedon(vp)) != 0) | |
2200 | rc = 1; | |
2201 | } else | |
2202 | vnode_unlock(vp); | |
2203 | vnode_put(vp); | |
2204 | return(rc); | |
1c79356b | 2205 | } |
91447636 A |
2206 | SPECHASH_UNLOCK(); |
2207 | return (0); | |
1c79356b A |
2208 | } |
2209 | ||
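/*
 * Editor's note (not part of the original file): the v_id revalidation
 * idiom used by the hash-chain walks above, shown in isolation.  A caller
 * snapshots v_id while the vnode is known stable, drops its locks, and
 * later lets vnode_getwithvid() confirm the vnode was not recycled before
 * handing back an iocount.
 */
#if 0	/* illustrative sketch only */
static int
example_revalidate(vnode_t vp)
{
	int vid = vp->v_id;		/* capture identity while stable */

	/* ... drop whatever lock protected vp ... */

	if (vnode_getwithvid(vp, vid))
		return (ENOENT);	/* recycled or reused in the meantime */

	/* same vnode, now pinned by an iocount */
	vnode_put(vp);
	return (0);
}
#endif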
2210 | /* | |
2211 | * Calculate the total number of references to a special device. | |
2212 | */ | |
2213 | int | |
91447636 | 2214 | vcount(vnode_t vp) |
1c79356b | 2215 | { |
91447636 | 2216 | vnode_t vq, vnext; |
1c79356b | 2217 | int count; |
91447636 | 2218 | int vid; |
1c79356b A |
2219 | |
2220 | loop: | |
2221 | if ((vp->v_flag & VALIASED) == 0) | |
91447636 | 2222 | return (vp->v_usecount - vp->v_kusecount); |
2d21ac55 | 2223 | count = 0; |
91447636 A |
2224 | |
2225 | SPECHASH_LOCK(); | |
2d21ac55 A |
2226 | /* |
2227 | * Grab first vnode and its vid. | |
2228 | */ | |
2229 | vq = *vp->v_hashchain; | |
2230 | vid = vq ? vq->v_id : 0; | |
2231 | ||
2232 | SPECHASH_UNLOCK(); | |
91447636 | 2233 | |
2d21ac55 A |
2234 | while (vq) { |
2235 | /* | |
2236 | * Attempt to get the vnode outside the SPECHASH lock. | |
2237 | */ | |
91447636 A |
2238 | if (vnode_getwithvid(vq, vid)) { |
2239 | goto loop; | |
2240 | } | |
91447636 | 2241 | vnode_lock(vq); |
2d21ac55 A |
2242 | |
2243 | if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) { | |
2244 | if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) { | |
2245 | /* | |
2246 | * Alias, but not in use, so flush it out. | |
2247 | */ | |
cf7d32b8 A |
2248 | vnode_reclaim_internal(vq, 1, 1, 0); |
2249 | vnode_put_locked(vq); | |
2d21ac55 | 2250 | vnode_unlock(vq); |
2d21ac55 A |
2251 | goto loop; |
2252 | } | |
2253 | count += (vq->v_usecount - vq->v_kusecount); | |
1c79356b | 2254 | } |
91447636 | 2255 | vnode_unlock(vq); |
91447636 A |
2256 | |
2257 | SPECHASH_LOCK(); | |
2d21ac55 A |
2258 | /* |
2259 | * must do this with the reference still held on 'vq' | |
2260 | * so that it can't be destroyed while we're poking | |
2261 | * through v_specnext | |
2262 | */ | |
2263 | vnext = vq->v_specnext; | |
2264 | vid = vnext ? vnext->v_id : 0; | |
2265 | ||
2266 | SPECHASH_UNLOCK(); | |
2267 | ||
2268 | vnode_put(vq); | |
2269 | ||
2270 | vq = vnext; | |
1c79356b | 2271 | } |
91447636 | 2272 | |
1c79356b A |
2273 | return (count); |
2274 | } | |
2275 | ||
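/*
 * Editor's note (not part of the original file): vcount() is typically
 * consulted from a device close path to distinguish the last close from an
 * intermediate one across every alias of the special device.  A minimal
 * sketch:
 */
#if 0	/* illustrative sketch only */
static int
example_spec_close(vnode_t devvp)
{
	if (vcount(devvp) > 1)
		return (0);	/* other opens remain; keep the device up */

	/* last close: quiesce the underlying device here */
	return (0);
}
#endif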
2276 | int prtactive = 0; /* 1 => print out reclaim of active vnodes */ | |
2277 | ||
2278 | /* | |
2279 | * Print out a description of a vnode. | |
2280 | */ | |
2d21ac55 A |
2281 | #if !CONFIG_NO_PRINTF_STRINGS |
2282 | static const char *typename[] = | |
1c79356b | 2283 | { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" }; |
2d21ac55 | 2284 | #endif |
1c79356b A |
2285 | |
2286 | void | |
91447636 | 2287 | vprint(const char *label, struct vnode *vp) |
1c79356b | 2288 | { |
91447636 | 2289 | char sbuf[64]; |
1c79356b A |
2290 | |
2291 | if (label != NULL) | |
2292 | printf("%s: ", label); | |
2d21ac55 | 2293 | printf("type %s, usecount %d, writecount %ld", |
91447636 A |
2294 | typename[vp->v_type], vp->v_usecount, vp->v_writecount); |
2295 | sbuf[0] = '\0'; | |
1c79356b | 2296 | if (vp->v_flag & VROOT) |
2d21ac55 | 2297 | strlcat(sbuf, "|VROOT", sizeof(sbuf)); |
1c79356b | 2298 | if (vp->v_flag & VTEXT) |
2d21ac55 | 2299 | strlcat(sbuf, "|VTEXT", sizeof(sbuf)); |
1c79356b | 2300 | if (vp->v_flag & VSYSTEM) |
2d21ac55 | 2301 | strlcat(sbuf, "|VSYSTEM", sizeof(sbuf)); |
9bccf70c | 2302 | if (vp->v_flag & VNOFLUSH) |
2d21ac55 | 2303 | strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf)); |
1c79356b | 2304 | if (vp->v_flag & VBWAIT) |
2d21ac55 | 2305 | strlcat(sbuf, "|VBWAIT", sizeof(sbuf)); |
1c79356b | 2306 | if (vp->v_flag & VALIASED) |
2d21ac55 | 2307 | strlcat(sbuf, "|VALIASED", sizeof(sbuf)); |
91447636 A |
2308 | if (sbuf[0] != '\0') |
2309 | printf(" flags (%s)", &sbuf[1]); | |
1c79356b A |
2310 | } |
2311 | ||
1c79356b | 2312 | |
91447636 A |
2313 | int |
2314 | vn_getpath(struct vnode *vp, char *pathbuf, int *len) | |
2315 | { | |
2d21ac55 A |
2316 | return build_path(vp, pathbuf, *len, len, BUILDPATH_NO_FS_ENTER, vfs_context_current()); |
2317 | } | |
2318 | ||
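/*
 * Editor's note (not part of the original file): typical vn_getpath()
 * usage.  The caller supplies the buffer and passes its size by reference;
 * on success *len is updated to the length actually produced.
 */
#if 0	/* illustrative sketch only */
static void
example_log_path(vnode_t vp)
{
	char	path[MAXPATHLEN];
	int	len = MAXPATHLEN;

	if (vn_getpath(vp, path, &len) == 0)
		printf("vnode %p resolves to %s\n", vp, path);
}
#endif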
2319 | ||
2320 | int | |
2321 | vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash) | |
2322 | { | |
2323 | return ubc_cs_getcdhash(vp, offset, cdhash); | |
1c79356b | 2324 | } |
91447636 A |
2325 | |
2326 | ||
2327 | static char *extension_table=NULL; | |
2328 | static int nexts; | |
2329 | static int max_ext_width; | |
1c79356b | 2330 | |
55e303ae | 2331 | static int |
2d21ac55 | 2332 | extension_cmp(const void *a, const void *b) |
91447636 | 2333 | { |
2d21ac55 | 2334 | return (strlen((const char *)a) - strlen((const char *)b)); |
91447636 | 2335 | } |
55e303ae | 2336 | |
55e303ae | 2337 | |
91447636 A |
2338 | // |
2339 | // This is the api LaunchServices uses to inform the kernel | |
2340 | // the list of package extensions to ignore. | |
2341 | // | |
2342 | // Internally we keep the list sorted by the length of |
2343 | // the extension (from longest to shortest). We sort the | |
2344 | // list of extensions so that we can speed up our searches | |
2345 | // when comparing file names -- we only compare extensions | |
2346 | // that could possibly fit into the file name, not all of | |
2347 | // them (i.e. a short 8 character name can't have an 8 | |
2348 | // character extension). | |
2349 | // | |
2350 | __private_extern__ int | |
2351 | set_package_extensions_table(void *data, int nentries, int maxwidth) | |
2352 | { | |
2d21ac55 A |
2353 | char *new_exts; |
2354 | int error; | |
91447636 A |
2355 | |
2356 | if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) { | |
2357 | return EINVAL; | |
2358 | } | |
2359 | ||
2360 | MALLOC(new_exts, char *, nentries * maxwidth, M_TEMP, M_WAITOK); | |
2361 | ||
2362 | error = copyin(CAST_USER_ADDR_T(data), new_exts, nentries * maxwidth); | |
2363 | if (error) { | |
2364 | FREE(new_exts, M_TEMP); | |
2365 | return error; | |
2366 | } | |
2367 | ||
2368 | if (extension_table) { | |
2369 | FREE(extension_table, M_TEMP); | |
2370 | } | |
2371 | extension_table = new_exts; | |
2372 | nexts = nentries; | |
2373 | max_ext_width = maxwidth; | |
2374 | ||
2375 | qsort(extension_table, nexts, maxwidth, extension_cmp); | |
2376 | ||
2377 | return 0; | |
2378 | } | |
2379 | ||
2380 | ||
2381 | __private_extern__ int | |
2d21ac55 | 2382 | is_package_name(const char *name, int len) |
91447636 A |
2383 | { |
2384 | int i, extlen; | |
2d21ac55 | 2385 | const char *ptr, *name_ext; |
91447636 A |
2386 | |
2387 | if (len <= 3) { | |
2388 | return 0; | |
2389 | } | |
2390 | ||
2391 | name_ext = NULL; | |
2392 | for(ptr=name; *ptr != '\0'; ptr++) { | |
2393 | if (*ptr == '.') { | |
2394 | name_ext = ptr; | |
55e303ae | 2395 | } |
91447636 | 2396 | } |
55e303ae | 2397 | |
91447636 A |
2398 | // if there is no "." extension, it can't match |
2399 | if (name_ext == NULL) { | |
2400 | return 0; | |
2401 | } | |
55e303ae | 2402 | |
91447636 A |
2403 | // advance over the "." |
2404 | name_ext++; | |
55e303ae | 2405 | |
91447636 A |
2406 | // now iterate over all the extensions to see if any match |
2407 | ptr = &extension_table[0]; | |
2408 | for(i=0; i < nexts; i++, ptr+=max_ext_width) { | |
2409 | extlen = strlen(ptr); | |
2410 | if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') { | |
2411 | // aha, a match! | |
2412 | return 1; | |
55e303ae A |
2413 | } |
2414 | } | |
2415 | ||
91447636 A |
2416 | // if we get here, no extension matched |
2417 | return 0; | |
55e303ae A |
2418 | } |
2419 | ||
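/*
 * Editor's note (not part of the original file): a worked example of the
 * matcher above, assuming the extension table currently contains "app".
 * The sample names are hypothetical.
 *
 *	is_package_name("Foo.app", 7)       -> 1  (last extension "app" matches)
 *	is_package_name("Foo.app.bak", 11)  -> 0  (last extension is "bak")
 *	is_package_name("Foo", 3)           -> 0  (no extension, and len <= 3)
 */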
91447636 A |
2420 | int |
2421 | vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component) | |
55e303ae | 2422 | { |
91447636 A |
2423 | char *ptr, *end; |
2424 | int comp=0; | |
2425 | ||
2426 | *component = -1; | |
2427 | if (*path != '/') { | |
2428 | return EINVAL; | |
2429 | } | |
2430 | ||
2431 | end = path + 1; | |
2432 | while(end < path + pathlen && *end != '\0') { | |
2433 | while(end < path + pathlen && *end == '/' && *end != '\0') { | |
2434 | end++; | |
2435 | } | |
2436 | ||
2437 | ptr = end; | |
2438 | ||
2439 | while(end < path + pathlen && *end != '/' && *end != '\0') { | |
2440 | end++; | |
2441 | } | |
2442 | ||
2443 | if (end > path + pathlen) { | |
2444 | // hmm, string wasn't null terminated | |
2445 | return EINVAL; | |
2446 | } | |
2447 | ||
2448 | *end = '\0'; | |
2449 | if (is_package_name(ptr, end - ptr)) { | |
2450 | *component = comp; | |
2451 | break; | |
2452 | } | |
55e303ae | 2453 | |
91447636 A |
2454 | end++; |
2455 | comp++; | |
2456 | } | |
2457 | ||
2458 | return 0; | |
2459 | } | |
55e303ae A |
2460 | |
2461 | ||
1c79356b A |
2462 | /* |
2463 | * Top level filesystem related information gathering. | |
2464 | */ | |
91447636 A |
2465 | extern unsigned int vfs_nummntops; |
2466 | ||
1c79356b | 2467 | int |
91447636 | 2468 | vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, |
2d21ac55 | 2469 | user_addr_t newp, size_t newlen, proc_t p) |
1c79356b | 2470 | { |
91447636 | 2471 | struct vfstable *vfsp; |
55e303ae A |
2472 | int *username; |
2473 | u_int usernamelen; | |
2474 | int error; | |
91447636 | 2475 | struct vfsconf *vfsc; |
1c79356b | 2476 | |
2d21ac55 A |
2477 | /* All non-VFS_GENERIC selectors, and within VFS_GENERIC the |
2478 | * VFS_MAXTYPENUM, VFS_CONF and VFS_SET_PACKAGE_EXTS selectors, |
2479 | * need root privilege in order to apply modifications. |
2480 | * For the rest, userland_sysctl(CTLFLAG_ANYBODY) covers it. |
2481 | */ | |
2482 | if ((newp != USER_ADDR_NULL) && ((name[0] != VFS_GENERIC) || | |
2483 | ((name[1] == VFS_MAXTYPENUM) || | |
2484 | (name[1] == VFS_CONF) || | |
2485 | (name[1] == VFS_SET_PACKAGE_EXTS))) | |
2486 | && (error = suser(kauth_cred_get(), &p->p_acflag))) { | |
2487 | return(error); | |
2488 | } | |
9bccf70c A |
2489 | /* |
2490 | * The VFS_NUMMNTOPS shouldn't be at name[0] since | |
2491 | * it is a VFS generic variable. So now we must check |
2492 | * namelen so we don't end up covering any UFS | |
2493 | * variables (since UFS vfc_typenum is 1). |
2494 | * | |
2495 | * It should have been: | |
2496 | * name[0]: VFS_GENERIC | |
2497 | * name[1]: VFS_NUMMNTOPS | |
2498 | */ | |
2499 | if (namelen == 1 && name[0] == VFS_NUMMNTOPS) { | |
1c79356b A |
2500 | return (sysctl_rdint(oldp, oldlenp, newp, vfs_nummntops)); |
2501 | } | |
2502 | ||
2503 | /* all sysctl names at this level are at least name and field */ | |
2504 | if (namelen < 2) | |
55e303ae | 2505 | return (EISDIR); /* overloaded */ |
1c79356b A |
2506 | if (name[0] != VFS_GENERIC) { |
2507 | for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) | |
2508 | if (vfsp->vfc_typenum == name[0]) | |
2509 | break; | |
2510 | if (vfsp == NULL) | |
91447636 | 2511 | return (ENOTSUP); |
91447636 | 2512 | |
2d21ac55 | 2513 | /* XXX current context proxy for proc p? */ |
1c79356b | 2514 | return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1, |
2d21ac55 A |
2515 | oldp, oldlenp, newp, newlen, |
2516 | vfs_context_current())); | |
1c79356b A |
2517 | } |
2518 | switch (name[1]) { | |
2519 | case VFS_MAXTYPENUM: | |
2520 | return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf)); | |
2521 | case VFS_CONF: | |
2522 | if (namelen < 3) | |
2523 | return (ENOTDIR); /* overloaded */ | |
2524 | for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) | |
2525 | if (vfsp->vfc_typenum == name[2]) | |
2526 | break; | |
2527 | if (vfsp == NULL) | |
91447636 A |
2528 | return (ENOTSUP); |
2529 | vfsc = (struct vfsconf *)vfsp; | |
2530 | if (proc_is64bit(p)) { | |
2531 | struct user_vfsconf usr_vfsc; | |
2532 | usr_vfsc.vfc_vfsops = CAST_USER_ADDR_T(vfsc->vfc_vfsops); | |
2533 | bcopy(vfsc->vfc_name, usr_vfsc.vfc_name, sizeof(usr_vfsc.vfc_name)); | |
2534 | usr_vfsc.vfc_typenum = vfsc->vfc_typenum; | |
2535 | usr_vfsc.vfc_refcount = vfsc->vfc_refcount; | |
2536 | usr_vfsc.vfc_flags = vfsc->vfc_flags; | |
2537 | usr_vfsc.vfc_mountroot = CAST_USER_ADDR_T(vfsc->vfc_mountroot); | |
2538 | usr_vfsc.vfc_next = CAST_USER_ADDR_T(vfsc->vfc_next); | |
2539 | return (sysctl_rdstruct(oldp, oldlenp, newp, &usr_vfsc, | |
2540 | sizeof(usr_vfsc))); | |
2541 | } | |
2542 | else { | |
2543 | return (sysctl_rdstruct(oldp, oldlenp, newp, vfsc, | |
2544 | sizeof(struct vfsconf))); | |
2545 | } | |
2546 | ||
2547 | case VFS_SET_PACKAGE_EXTS: | |
2548 | return set_package_extensions_table((void *)name[1], name[2], name[3]); | |
1c79356b | 2549 | } |
55e303ae A |
2550 | /* |
2551 | * We need to get back into the general MIB, so we need to re-prepend | |
2552 | * CTL_VFS to our name and try userland_sysctl(). | |
2553 | */ | |
2554 | usernamelen = namelen + 1; | |
2555 | MALLOC(username, int *, usernamelen * sizeof(*username), | |
2556 | M_TEMP, M_WAITOK); | |
2557 | bcopy(name, username + 1, namelen * sizeof(*name)); | |
2558 | username[0] = CTL_VFS; | |
91447636 A |
2559 | error = userland_sysctl(p, username, usernamelen, oldp, |
2560 | oldlenp, 1, newp, newlen, oldlenp); | |
55e303ae A |
2561 | FREE(username, M_TEMP); |
2562 | return (error); | |
1c79356b A |
2563 | } |
2564 | ||
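/*
 * Editor's note (not part of the original file): the MIB layout handled
 * above, as seen from user space.  This is ordinary user-level C, shown
 * only to make the name[0]/name[1] arrangement concrete.
 */
#if 0	/* illustrative sketch only; build as a user-space program */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <stdio.h>

int
main(void)
{
	int	mib[] = { CTL_VFS, VFS_GENERIC, VFS_MAXTYPENUM };
	int	maxtype = 0;
	size_t	len = sizeof(maxtype);

	if (sysctl(mib, 3, &maxtype, &len, NULL, 0) == 0)
		printf("highest registered filesystem type number: %d\n", maxtype);
	return 0;
}
#endif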
1c79356b | 2565 | /* |
2d21ac55 A |
2566 | * Dump vnode list (via sysctl) - defunct |
2567 | * use "pstat" instead | |
1c79356b A |
2568 | */ |
2569 | /* ARGSUSED */ | |
2570 | int | |
2d21ac55 A |
2571 | sysctl_vnode |
2572 | (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req) | |
1c79356b | 2573 | { |
91447636 | 2574 | return(EINVAL); |
1c79356b A |
2575 | } |
2576 | ||
2d21ac55 A |
2577 | SYSCTL_PROC(_kern, KERN_VNODE, vnode, |
2578 | CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MASKED, | |
2579 | 0, 0, sysctl_vnode, "S,", ""); | |
2580 | ||
2581 | ||
1c79356b A |
2582 | /* |
2583 | * Check to see if a filesystem is mounted on a block device. | |
2584 | */ | |
2585 | int | |
2d21ac55 | 2586 | vfs_mountedon(struct vnode *vp) |
1c79356b A |
2587 | { |
2588 | struct vnode *vq; | |
2589 | int error = 0; | |
2590 | ||
91447636 A |
2591 | SPECHASH_LOCK(); |
2592 | if (vp->v_specflags & SI_MOUNTEDON) { | |
2593 | error = EBUSY; | |
2594 | goto out; | |
2595 | } | |
1c79356b | 2596 | if (vp->v_flag & VALIASED) { |
1c79356b A |
2597 | for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { |
2598 | if (vq->v_rdev != vp->v_rdev || | |
2599 | vq->v_type != vp->v_type) | |
2600 | continue; | |
2601 | if (vq->v_specflags & SI_MOUNTEDON) { | |
2602 | error = EBUSY; | |
2603 | break; | |
2604 | } | |
2605 | } | |
1c79356b | 2606 | } |
91447636 A |
2607 | out: |
2608 | SPECHASH_UNLOCK(); | |
1c79356b A |
2609 | return (error); |
2610 | } | |
2611 | ||
2612 | /* | |
2613 | * Unmount all filesystems. The list is traversed in reverse order | |
2614 | * of mounting to avoid dependencies. | |
2615 | */ | |
0b4e3aa0 | 2616 | __private_extern__ void |
2d21ac55 | 2617 | vfs_unmountall(void) |
1c79356b | 2618 | { |
91447636 | 2619 | struct mount *mp; |
91447636 | 2620 | int error; |
1c79356b A |
2621 | |
2622 | /* | |
2623 | * Since this only runs when rebooting, it is not interlocked. | |
2624 | */ | |
91447636 A |
2625 | mount_list_lock(); |
2626 | while(!TAILQ_EMPTY(&mountlist)) { | |
2627 | mp = TAILQ_LAST(&mountlist, mntlist); | |
2628 | mount_list_unlock(); | |
2d21ac55 | 2629 | error = dounmount(mp, MNT_FORCE, 0, vfs_context_current()); |
6601e61a A |
2630 | if ((error != 0) && (error != EBUSY)) { |
2631 | printf("unmount of %s failed (", mp->mnt_vfsstat.f_mntonname); | |
2632 | printf("%d)\n", error); | |
91447636 | 2633 | mount_list_lock(); |
2d21ac55 | 2634 | TAILQ_REMOVE(&mountlist, mp, mnt_list); |
91447636 | 2635 | continue; |
6601e61a A |
2636 | } else if (error == EBUSY) { |
2637 | /* If EBUSY is returned, the unmount was already in progress */ | |
2638 | printf("unmount of %x failed (", (unsigned int)mp); | |
2d21ac55 | 2639 | printf("BUSY)\n"); |
6601e61a | 2640 | } |
91447636 | 2641 | mount_list_lock(); |
1c79356b | 2642 | } |
91447636 | 2643 | mount_list_unlock(); |
1c79356b A |
2644 | } |
2645 | ||
1c79356b | 2646 | |
91447636 | 2647 | /* |
2d21ac55 A |
2648 | * This routine is called from vnode_pager_deallocate out of the VM |
2649 | * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named | |
2650 | * on a vnode that has a UBCINFO | |
1c79356b | 2651 | */ |
91447636 | 2652 | __private_extern__ void |
2d21ac55 | 2653 | vnode_pager_vrele(vnode_t vp) |
1c79356b | 2654 | { |
2d21ac55 A |
2655 | struct ubc_info *uip; |
2656 | ||
91447636 | 2657 | vnode_lock(vp); |
1c79356b | 2658 | |
91447636 | 2659 | vp->v_lflag &= ~VNAMED_UBC; |
1c79356b | 2660 | |
2d21ac55 A |
2661 | uip = vp->v_ubcinfo; |
2662 | vp->v_ubcinfo = UBC_INFO_NULL; | |
1c79356b | 2663 | |
2d21ac55 | 2664 | ubc_info_deallocate(uip); |
91447636 | 2665 | |
91447636 | 2666 | vnode_unlock(vp); |
1c79356b A |
2667 | } |
2668 | ||
91447636 A |
2669 | |
2670 | #include <sys/disk.h> | |
2671 | ||
2672 | errno_t | |
2673 | vfs_init_io_attributes(vnode_t devvp, mount_t mp) | |
1c79356b | 2674 | { |
91447636 A |
2675 | int error; |
2676 | off_t readblockcnt; | |
2677 | off_t writeblockcnt; | |
2678 | off_t readmaxcnt; | |
2679 | off_t writemaxcnt; | |
2680 | off_t readsegcnt; | |
2681 | off_t writesegcnt; | |
2682 | off_t readsegsize; | |
2683 | off_t writesegsize; | |
2d21ac55 | 2684 | off_t alignment; |
91447636 A |
2685 | u_long blksize; |
2686 | u_int64_t temp; | |
2d21ac55 A |
2687 | u_int32_t features; |
2688 | vfs_context_t ctx = vfs_context_current(); | |
0b4e3aa0 | 2689 | |
55e303ae A |
2690 | int isvirtual = 0; |
2691 | /* | |
2692 | * determine if this mount point exists on the same device as the root | |
2693 | * partition... if so, then it comes under the hard throttle control | |
2694 | */ | |
2695 | int thisunit = -1; | |
2696 | static int rootunit = -1; | |
55e303ae A |
2697 | |
2698 | if (rootunit == -1) { | |
2d21ac55 | 2699 | if (VNOP_IOCTL(rootvp, DKIOCGETBSDUNIT, (caddr_t)&rootunit, 0, ctx)) |
55e303ae A |
2700 | rootunit = -1; |
2701 | else if (rootvp == devvp) | |
2702 | mp->mnt_kern_flag |= MNTK_ROOTDEV; | |
2703 | } | |
2704 | if (devvp != rootvp && rootunit != -1) { | |
2d21ac55 | 2705 | if (VNOP_IOCTL(devvp, DKIOCGETBSDUNIT, (caddr_t)&thisunit, 0, ctx) == 0) { |
55e303ae A |
2706 | if (thisunit == rootunit) |
2707 | mp->mnt_kern_flag |= MNTK_ROOTDEV; | |
2708 | } | |
2709 | } | |
91447636 A |
2710 | /* |
2711 | * force the spec device to re-cache | |
2712 | * the underlying block size in case | |
2713 | * the filesystem overrode the initial value | |
2714 | */ | |
2715 | set_fsblocksize(devvp); | |
2716 | ||
2717 | ||
2718 | if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, | |
2d21ac55 | 2719 | (caddr_t)&blksize, 0, ctx))) |
91447636 A |
2720 | return (error); |
2721 | ||
2722 | mp->mnt_devblocksize = blksize; | |
2723 | ||
2d21ac55 | 2724 | if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) { |
55e303ae A |
2725 | if (isvirtual) |
2726 | mp->mnt_kern_flag |= MNTK_VIRTUALDEV; | |
2727 | } | |
2728 | ||
2d21ac55 A |
2729 | if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES, |
2730 | (caddr_t)&features, 0, ctx))) | |
2731 | return (error); | |
2732 | ||
91447636 | 2733 | if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD, |
2d21ac55 | 2734 | (caddr_t)&readblockcnt, 0, ctx))) |
0b4e3aa0 A |
2735 | return (error); |
2736 | ||
91447636 | 2737 | if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE, |
2d21ac55 | 2738 | (caddr_t)&writeblockcnt, 0, ctx))) |
55e303ae A |
2739 | return (error); |
2740 | ||
91447636 | 2741 | if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD, |
2d21ac55 | 2742 | (caddr_t)&readmaxcnt, 0, ctx))) |
55e303ae A |
2743 | return (error); |
2744 | ||
91447636 | 2745 | if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE, |
2d21ac55 | 2746 | (caddr_t)&writemaxcnt, 0, ctx))) |
0b4e3aa0 A |
2747 | return (error); |
2748 | ||
91447636 | 2749 | if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD, |
2d21ac55 | 2750 | (caddr_t)&readsegcnt, 0, ctx))) |
0b4e3aa0 A |
2751 | return (error); |
2752 | ||
91447636 | 2753 | if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE, |
2d21ac55 | 2754 | (caddr_t)&writesegcnt, 0, ctx))) |
55e303ae A |
2755 | return (error); |
2756 | ||
91447636 | 2757 | if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD, |
2d21ac55 | 2758 | (caddr_t)&readsegsize, 0, ctx))) |
55e303ae A |
2759 | return (error); |
2760 | ||
91447636 | 2761 | if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE, |
2d21ac55 A |
2762 | (caddr_t)&writesegsize, 0, ctx))) |
2763 | return (error); | |
2764 | ||
2765 | if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT, | |
2766 | (caddr_t)&alignment, 0, ctx))) | |
0b4e3aa0 A |
2767 | return (error); |
2768 | ||
55e303ae A |
2769 | if (readmaxcnt) |
2770 | temp = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt; | |
2771 | else { | |
2772 | if (readblockcnt) { | |
2773 | temp = readblockcnt * blksize; | |
2774 | temp = (temp > UINT32_MAX) ? UINT32_MAX : temp; | |
2775 | } else | |
2776 | temp = MAXPHYS; | |
2777 | } | |
91447636 | 2778 | mp->mnt_maxreadcnt = (u_int32_t)temp; |
55e303ae A |
2779 | |
2780 | if (writemaxcnt) | |
2781 | temp = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : writemaxcnt; | |
2782 | else { | |
2783 | if (writeblockcnt) { | |
2784 | temp = writeblockcnt * blksize; | |
2785 | temp = (temp > UINT32_MAX) ? UINT32_MAX : temp; | |
2786 | } else | |
2787 | temp = MAXPHYS; | |
2788 | } | |
0b4e3aa0 A |
2789 | mp->mnt_maxwritecnt = (u_int32_t)temp; |
2790 | ||
55e303ae A |
2791 | if (readsegcnt) { |
2792 | temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt; | |
2793 | mp->mnt_segreadcnt = (u_int16_t)temp; | |
2794 | } | |
2795 | if (writesegcnt) { | |
2796 | temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt; | |
2797 | mp->mnt_segwritecnt = (u_int16_t)temp; | |
2798 | } | |
2799 | if (readsegsize) | |
2800 | temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize; | |
2801 | else | |
2802 | temp = mp->mnt_maxreadcnt; | |
91447636 | 2803 | mp->mnt_maxsegreadsize = (u_int32_t)temp; |
0b4e3aa0 | 2804 | |
55e303ae A |
2805 | if (writesegsize) |
2806 | temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize; | |
2807 | else | |
2808 | temp = mp->mnt_maxwritecnt; | |
91447636 | 2809 | mp->mnt_maxsegwritesize = (u_int32_t)temp; |
0b4e3aa0 | 2810 | |
2d21ac55 A |
2811 | if (alignment) |
2812 | temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1; | |
2813 | else | |
2814 | temp = 0; | |
2815 | mp->mnt_alignmentmask = temp; | |
2816 | ||
2817 | if (features & DK_FEATURE_FORCE_UNIT_ACCESS) | |
2818 | mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED; | |
2819 | ||
55e303ae A |
2820 | return (error); |
2821 | } | |
2822 | ||
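/*
 * Editor's note (not part of the original file): the same DKIOC selectors
 * queried above can be issued from user space against the raw disk node,
 * which is handy for sanity-checking the values a mount picked up.  The
 * device path is an example only.
 */
#if 0	/* illustrative sketch only; build as a user-space program */
#include <sys/disk.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	uint32_t	blksize = 0;
	int		fd = open("/dev/rdisk0", O_RDONLY);

	if (fd >= 0) {
		if (ioctl(fd, DKIOCGETBLOCKSIZE, &blksize) == 0)
			printf("device block size: %u bytes\n", blksize);
		close(fd);
	}
	return 0;
}
#endif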
2823 | static struct klist fs_klist; | |
4a3eedf9 A |
2824 | lck_grp_t *fs_klist_lck_grp; |
2825 | lck_mtx_t *fs_klist_lock; | |
55e303ae A |
2826 | |
2827 | void | |
2828 | vfs_event_init(void) | |
2829 | { | |
55e303ae | 2830 | klist_init(&fs_klist); |
4a3eedf9 A |
2831 | fs_klist_lck_grp = lck_grp_alloc_init("fs_klist", NULL); |
2832 | fs_klist_lock = lck_mtx_alloc_init(fs_klist_lck_grp, NULL); | |
55e303ae A |
2833 | } |
2834 | ||
2835 | void | |
91447636 | 2836 | vfs_event_signal(__unused fsid_t *fsid, u_int32_t event, __unused intptr_t data) |
55e303ae | 2837 | { |
4a3eedf9 | 2838 | lck_mtx_lock(fs_klist_lock); |
55e303ae | 2839 | KNOTE(&fs_klist, event); |
4a3eedf9 | 2840 | lck_mtx_unlock(fs_klist_lock); |
55e303ae A |
2841 | } |
2842 | ||
2843 | /* | |
2844 | * return the number of mounted filesystems. | |
2845 | */ | |
2846 | static int | |
2847 | sysctl_vfs_getvfscnt(void) | |
2848 | { | |
91447636 A |
2849 | return(mount_getvfscnt()); |
2850 | } | |
2851 | ||
0b4e3aa0 | 2852 | |
91447636 A |
2853 | static int |
2854 | mount_getvfscnt(void) | |
2855 | { | |
2856 | int ret; | |
2857 | ||
2858 | mount_list_lock(); | |
2859 | ret = nummounts; | |
2860 | mount_list_unlock(); | |
55e303ae | 2861 | return (ret); |
91447636 A |
2862 | |
2863 | } | |
2864 | ||
2865 | ||
2866 | ||
2867 | static int | |
2868 | mount_fillfsids(fsid_t *fsidlst, int count) | |
2869 | { | |
2870 | struct mount *mp; | |
2871 | int actual=0; | |
2872 | ||
2873 | actual = 0; | |
2874 | mount_list_lock(); | |
2875 | TAILQ_FOREACH(mp, &mountlist, mnt_list) { | |
2876 | if (actual <= count) { | |
2877 | fsidlst[actual] = mp->mnt_vfsstat.f_fsid; | |
2878 | actual++; | |
2879 | } | |
2880 | } | |
2881 | mount_list_unlock(); | |
2882 | return (actual); | |
2883 | ||
55e303ae A |
2884 | } |
2885 | ||
2886 | /* | |
2887 | * fill in the array of fsid_t's up to a max of 'count', the actual | |
2888 | * number filled in will be set in '*actual'. If there are more fsid_t's | |
2889 | * than room in fsidlst then ENOMEM will be returned and '*actual' will | |
2890 | * have the actual count. | |
2891 | * having *actual filled out even in the error case is depended upon. | |
2892 | */ | |
2893 | static int | |
2894 | sysctl_vfs_getvfslist(fsid_t *fsidlst, int count, int *actual) | |
2895 | { | |
2896 | struct mount *mp; | |
2897 | ||
2898 | *actual = 0; | |
91447636 A |
2899 | mount_list_lock(); |
2900 | TAILQ_FOREACH(mp, &mountlist, mnt_list) { | |
55e303ae A |
2901 | (*actual)++; |
2902 | if (*actual <= count) | |
91447636 | 2903 | fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid; |
55e303ae | 2904 | } |
91447636 | 2905 | mount_list_unlock(); |
55e303ae A |
2906 | return (*actual <= count ? 0 : ENOMEM); |
2907 | } | |
2908 | ||
2909 | static int | |
2d21ac55 A |
2910 | sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1, |
2911 | __unused int arg2, struct sysctl_req *req) | |
55e303ae A |
2912 | { |
2913 | int actual, error; | |
2914 | size_t space; | |
2915 | fsid_t *fsidlst; | |
2916 | ||
2917 | /* This is a readonly node. */ | |
91447636 | 2918 | if (req->newptr != USER_ADDR_NULL) |
55e303ae A |
2919 | return (EPERM); |
2920 | ||
2921 | /* they are querying us so just return the space required. */ | |
91447636 | 2922 | if (req->oldptr == USER_ADDR_NULL) { |
55e303ae A |
2923 | req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t); |
2924 | return 0; | |
2925 | } | |
2926 | again: | |
2927 | /* | |
2928 | * Retrieve an accurate count of the amount of space required to copy | |
2929 | * out all the fsids in the system. | |
2930 | */ | |
2931 | space = req->oldlen; | |
2932 | req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t); | |
2933 | ||
2934 | /* they didn't give us enough space. */ | |
2935 | if (space < req->oldlen) | |
2936 | return (ENOMEM); | |
2937 | ||
2938 | MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK); | |
2939 | error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t), | |
2940 | &actual); | |
2941 | /* | |
2942 | * If we get back ENOMEM, then another mount has been added while we | |
2943 | * slept in malloc above. If this is the case then try again. | |
2944 | */ | |
2945 | if (error == ENOMEM) { | |
2946 | FREE(fsidlst, M_TEMP); | |
2947 | req->oldlen = space; | |
2948 | goto again; | |
2949 | } | |
2950 | if (error == 0) { | |
2951 | error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t)); | |
2952 | } | |
2953 | FREE(fsidlst, M_TEMP); | |
2954 | return (error); | |
2955 | } | |
2956 | ||
2957 | /* | |
2958 | * Do a sysctl by fsid. | |
2959 | */ | |
2960 | static int | |
2d21ac55 A |
2961 | sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, |
2962 | struct sysctl_req *req) | |
55e303ae A |
2963 | { |
2964 | struct vfsidctl vc; | |
91447636 | 2965 | struct user_vfsidctl user_vc; |
55e303ae | 2966 | struct mount *mp; |
91447636 | 2967 | struct vfsstatfs *sp; |
2d21ac55 A |
2968 | int *name, flags, namelen; |
2969 | int error=0, gotref=0; | |
2970 | vfs_context_t ctx = vfs_context_current(); | |
2971 | proc_t p = req->p; /* XXX req->p != current_proc()? */ | |
91447636 | 2972 | boolean_t is_64_bit; |
55e303ae A |
2973 | |
2974 | name = arg1; | |
2975 | namelen = arg2; | |
91447636 | 2976 | is_64_bit = proc_is64bit(p); |
55e303ae | 2977 | |
91447636 A |
2978 | if (is_64_bit) { |
2979 | error = SYSCTL_IN(req, &user_vc, sizeof(user_vc)); | |
2980 | if (error) | |
2d21ac55 A |
2981 | goto out; |
2982 | if (user_vc.vc_vers != VFS_CTL_VERS1) { | |
2983 | error = EINVAL; | |
2984 | goto out; | |
2985 | } | |
2986 | mp = mount_list_lookupby_fsid(&user_vc.vc_fsid, 0, 1); | |
91447636 A |
2987 | } |
2988 | else { | |
2989 | error = SYSCTL_IN(req, &vc, sizeof(vc)); | |
2990 | if (error) | |
2d21ac55 A |
2991 | goto out; |
2992 | if (vc.vc_vers != VFS_CTL_VERS1) { | |
2993 | error = EINVAL; | |
2994 | goto out; | |
2995 | } | |
2996 | mp = mount_list_lookupby_fsid(&vc.vc_fsid, 0, 1); | |
2997 | } | |
2998 | if (mp == NULL) { | |
2999 | error = ENOENT; | |
3000 | goto out; | |
91447636 | 3001 | } |
2d21ac55 | 3002 | gotref = 1; |
55e303ae A |
3003 | /* reset so that the fs specific code can fetch it. */ |
3004 | req->newidx = 0; | |
3005 | /* | |
3006 | * Note if this is a VFS_CTL then we pass the actual sysctl req | |
3007 | * in for "oldp" so that the lower layer can DTRT and use the | |
3008 | * SYSCTL_IN/OUT routines. | |
3009 | */ | |
3010 | if (mp->mnt_op->vfs_sysctl != NULL) { | |
91447636 A |
3011 | if (is_64_bit) { |
3012 | if (vfs_64bitready(mp)) { | |
3013 | error = mp->mnt_op->vfs_sysctl(name, namelen, | |
3014 | CAST_USER_ADDR_T(req), | |
3015 | NULL, USER_ADDR_NULL, 0, | |
2d21ac55 | 3016 | ctx); |
91447636 A |
3017 | } |
3018 | else { | |
3019 | error = ENOTSUP; | |
3020 | } | |
3021 | } | |
3022 | else { | |
3023 | error = mp->mnt_op->vfs_sysctl(name, namelen, | |
3024 | CAST_USER_ADDR_T(req), | |
3025 | NULL, USER_ADDR_NULL, 0, | |
2d21ac55 A |
3026 | ctx); |
3027 | } | |
3028 | if (error != ENOTSUP) { | |
3029 | goto out; | |
91447636 | 3030 | } |
55e303ae A |
3031 | } |
3032 | switch (name[0]) { | |
3033 | case VFS_CTL_UMOUNT: | |
91447636 A |
3034 | req->newidx = 0; |
3035 | if (is_64_bit) { | |
3036 | req->newptr = user_vc.vc_ptr; | |
3037 | req->newlen = (size_t)user_vc.vc_len; | |
3038 | } | |
3039 | else { | |
3040 | req->newptr = CAST_USER_ADDR_T(vc.vc_ptr); | |
3041 | req->newlen = vc.vc_len; | |
3042 | } | |
55e303ae A |
3043 | error = SYSCTL_IN(req, &flags, sizeof(flags)); |
3044 | if (error) | |
3045 | break; | |
2d21ac55 | 3046 | |
6601e61a | 3047 | mount_ref(mp, 0); |
2d21ac55 A |
3048 | mount_iterdrop(mp); |
3049 | gotref = 0; | |
6601e61a | 3050 | /* safedounmount consumes a ref */ |
2d21ac55 | 3051 | error = safedounmount(mp, flags, ctx); |
55e303ae A |
3052 | break; |
3053 | case VFS_CTL_STATFS: | |
91447636 A |
3054 | req->newidx = 0; |
3055 | if (is_64_bit) { | |
3056 | req->newptr = user_vc.vc_ptr; | |
3057 | req->newlen = (size_t)user_vc.vc_len; | |
3058 | } | |
3059 | else { | |
3060 | req->newptr = CAST_USER_ADDR_T(vc.vc_ptr); | |
3061 | req->newlen = vc.vc_len; | |
3062 | } | |
55e303ae A |
3063 | error = SYSCTL_IN(req, &flags, sizeof(flags)); |
3064 | if (error) | |
3065 | break; | |
91447636 | 3066 | sp = &mp->mnt_vfsstat; |
55e303ae | 3067 | if (((flags & MNT_NOWAIT) == 0 || (flags & MNT_WAIT)) && |
2d21ac55 A |
3068 | (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT))) |
3069 | goto out; | |
91447636 A |
3070 | if (is_64_bit) { |
3071 | struct user_statfs sfs; | |
3072 | bzero(&sfs, sizeof(sfs)); | |
3073 | sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK; | |
3074 | sfs.f_type = mp->mnt_vtable->vfc_typenum; | |
3075 | sfs.f_bsize = (user_long_t)sp->f_bsize; | |
3076 | sfs.f_iosize = (user_long_t)sp->f_iosize; | |
3077 | sfs.f_blocks = (user_long_t)sp->f_blocks; | |
3078 | sfs.f_bfree = (user_long_t)sp->f_bfree; | |
3079 | sfs.f_bavail = (user_long_t)sp->f_bavail; | |
3080 | sfs.f_files = (user_long_t)sp->f_files; | |
3081 | sfs.f_ffree = (user_long_t)sp->f_ffree; | |
3082 | sfs.f_fsid = sp->f_fsid; | |
3083 | sfs.f_owner = sp->f_owner; | |
3084 | ||
2d21ac55 A |
3085 | strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN); |
3086 | strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN); | |
3087 | strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN); | |
91447636 A |
3088 | |
3089 | error = SYSCTL_OUT(req, &sfs, sizeof(sfs)); | |
3090 | } | |
3091 | else { | |
3092 | struct statfs sfs; | |
3093 | bzero(&sfs, sizeof(struct statfs)); | |
3094 | sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK; | |
3095 | sfs.f_type = mp->mnt_vtable->vfc_typenum; | |
3096 | ||
3097 | /* | |
3098 | * It's possible for there to be more than 2^31 blocks in the filesystem, so we |
3099 | * have to fudge the numbers here in that case. We inflate the blocksize in order | |
3100 | * to reflect the filesystem size as best we can. | |
3101 | */ | |
3102 | if (sp->f_blocks > LONG_MAX) { | |
3103 | int shift; | |
3104 | ||
3105 | /* | |
3106 | * Work out how far we have to shift the block count down to make it fit. | |
3107 | * Note that it's possible to have to shift so far that the resulting | |
3108 | * blocksize would be unreportably large. At that point, we will clip | |
3109 | * any values that don't fit. | |
3110 | * | |
3111 | * For safety's sake, we also ensure that f_iosize is never reported as | |
3112 | * being smaller than f_bsize. | |
3113 | */ | |
3114 | for (shift = 0; shift < 32; shift++) { | |
3115 | if ((sp->f_blocks >> shift) <= LONG_MAX) | |
3116 | break; | |
3117 | if ((sp->f_bsize << (shift + 1)) > LONG_MAX) | |
3118 | break; | |
3119 | } | |
3120 | #define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > LONG_MAX) ? LONG_MAX : ((x) >> (s))) | |
3121 | sfs.f_blocks = (long)__SHIFT_OR_CLIP(sp->f_blocks, shift); | |
3122 | sfs.f_bfree = (long)__SHIFT_OR_CLIP(sp->f_bfree, shift); | |
3123 | sfs.f_bavail = (long)__SHIFT_OR_CLIP(sp->f_bavail, shift); | |
3124 | #undef __SHIFT_OR_CLIP | |
3125 | sfs.f_bsize = (long)(sp->f_bsize << shift); | |
3126 | sfs.f_iosize = lmax(sp->f_iosize, sp->f_bsize); | |
3127 | } else { | |
3128 | sfs.f_bsize = (long)sp->f_bsize; | |
3129 | sfs.f_iosize = (long)sp->f_iosize; | |
3130 | sfs.f_blocks = (long)sp->f_blocks; | |
3131 | sfs.f_bfree = (long)sp->f_bfree; | |
3132 | sfs.f_bavail = (long)sp->f_bavail; | |
3133 | } | |
3134 | sfs.f_files = (long)sp->f_files; | |
3135 | sfs.f_ffree = (long)sp->f_ffree; | |
3136 | sfs.f_fsid = sp->f_fsid; | |
3137 | sfs.f_owner = sp->f_owner; | |
3138 | ||
2d21ac55 A |
3139 | strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN); |
3140 | strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN); | |
3141 | strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN); | |
91447636 A |
3142 | |
3143 | error = SYSCTL_OUT(req, &sfs, sizeof(sfs)); | |
3144 | } | |
55e303ae A |
3145 | break; |
3146 | default: | |
2d21ac55 A |
3147 | error = ENOTSUP; |
3148 | goto out; | |
55e303ae | 3149 | } |
2d21ac55 A |
3150 | out: |
3151 | if(gotref != 0) | |
3152 | mount_iterdrop(mp); | |
0b4e3aa0 A |
3153 | return (error); |
3154 | } | |
3155 | ||
55e303ae A |
3156 | static int filt_fsattach(struct knote *kn); |
3157 | static void filt_fsdetach(struct knote *kn); | |
3158 | static int filt_fsevent(struct knote *kn, long hint); | |
3159 | ||
3160 | struct filterops fs_filtops = | |
3161 | { 0, filt_fsattach, filt_fsdetach, filt_fsevent }; | |
3162 | ||
3163 | static int | |
3164 | filt_fsattach(struct knote *kn) | |
3165 | { | |
3166 | ||
4a3eedf9 | 3167 | lck_mtx_lock(fs_klist_lock); |
55e303ae A |
3168 | kn->kn_flags |= EV_CLEAR; |
3169 | KNOTE_ATTACH(&fs_klist, kn); | |
4a3eedf9 | 3170 | lck_mtx_unlock(fs_klist_lock); |
55e303ae A |
3171 | return (0); |
3172 | } | |
3173 | ||
3174 | static void | |
3175 | filt_fsdetach(struct knote *kn) | |
3176 | { | |
4a3eedf9 | 3177 | lck_mtx_lock(fs_klist_lock); |
55e303ae | 3178 | KNOTE_DETACH(&fs_klist, kn); |
4a3eedf9 | 3179 | lck_mtx_unlock(fs_klist_lock); |
55e303ae A |
3180 | } |
3181 | ||
3182 | static int | |
3183 | filt_fsevent(struct knote *kn, long hint) | |
3184 | { | |
2d21ac55 A |
3185 | /* |
3186 | * Backwards compatibility: | |
3187 | * Other filters would do nothing if kn->kn_sfflags == 0 | |
3188 | */ | |
3189 | ||
3190 | if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) { | |
3191 | kn->kn_fflags |= hint; | |
3192 | } | |
55e303ae | 3193 | |
55e303ae A |
3194 | return (kn->kn_fflags != 0); |
3195 | } | |
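/*
 * Editorial sketch (not part of the original source): a minimal userspace
 * consumer of the knote list serviced by fs_filtops above.  It assumes the
 * filter is exposed to userspace as EVFILT_FS and that the returned fflags
 * carry the mount-event hints posted to fs_klist; both are assumptions made
 * for illustration only.
 */
#if 0	/* illustrative userspace code, not kernel code */
#include <sys/event.h>
#include <unistd.h>

static int
example_wait_for_fs_event(void)
{
	struct kevent kev, out;
	int kq, n;

	if ((kq = kqueue()) < 0)
		return (-1);
	/* ident is ignored for this filter; EV_CLEAR mirrors filt_fsattach() */
	EV_SET(&kev, 0, EVFILT_FS, EV_ADD | EV_CLEAR, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0) {
		close(kq);
		return (-1);
	}
	n = kevent(kq, NULL, 0, &out, 1, NULL);	/* blocks until an fs event */
	close(kq);
	return (n > 0 ? (int)out.fflags : -1);
}
#endif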
3196 | ||
3197 | static int | |
2d21ac55 A |
3198 | sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp, |
3199 | __unused void *arg1, __unused int arg2, struct sysctl_req *req) | |
55e303ae A |
3200 | { |
3201 | int out, error; | |
3202 | pid_t pid; | |
2d21ac55 | 3203 | proc_t p; |
55e303ae A |
3204 | |
3205 | /* We need a pid. */ | |
91447636 | 3206 | if (req->newptr == USER_ADDR_NULL) |
55e303ae A |
3207 | return (EINVAL); |
3208 | ||
3209 | error = SYSCTL_IN(req, &pid, sizeof(pid)); | |
3210 | if (error) | |
3211 | return (error); | |
3212 | ||
2d21ac55 | 3213 | p = proc_find(pid < 0 ? -pid : pid); |
55e303ae A |
3214 | if (p == NULL) |
3215 | return (ESRCH); | |
3216 | ||
3217 | /* | |
3218 | * Fetching the value is ok, but we only fetch if the old | |
3219 | * pointer is given. | |
3220 | */ | |
91447636 | 3221 | if (req->oldptr != USER_ADDR_NULL) { |
55e303ae | 3222 | out = !((p->p_flag & P_NOREMOTEHANG) == 0); |
2d21ac55 | 3223 | proc_rele(p); |
55e303ae A |
3224 | error = SYSCTL_OUT(req, &out, sizeof(out)); |
3225 | return (error); | |
3226 | } | |
3227 | ||
3228 | /* cansignal offers us enough security. */ | |
2d21ac55 A |
3229 | if (p != req->p && proc_suser(req->p) != 0) { |
3230 | proc_rele(p); | |
55e303ae | 3231 | return (EPERM); |
2d21ac55 | 3232 | } |
55e303ae A |
3233 | |
3234 | if (pid < 0) | |
2d21ac55 | 3235 | OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), (UInt32 *)&p->p_flag); |
55e303ae | 3236 | else |
2d21ac55 A |
3237 | OSBitOrAtomic(P_NOREMOTEHANG, (UInt32 *)&p->p_flag); |
3238 | proc_rele(p); | |
55e303ae A |
3239 | |
3240 | return (0); | |
3241 | } | |
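/*
 * Editorial sketch (not part of the original source): userspace use of the
 * handler above through the vfs.generic.noremotehang OID declared below.
 * A positive pid sets P_NOREMOTEHANG on that process, a negative pid clears
 * it, and supplying an "old" buffer as well merely reads the current value.
 */
#if 0	/* illustrative userspace code, not kernel code */
#include <sys/sysctl.h>

static int
example_set_noremotehang(pid_t pid, int enable)
{
	pid_t arg = enable ? pid : -pid;

	/* no old buffer: the handler treats this as a pure set/clear */
	return (sysctlbyname("vfs.generic.noremotehang", NULL, NULL,
	    &arg, sizeof(arg)));
}
#endif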
2d21ac55 | 3242 | |
55e303ae | 3243 | /* the vfs.generic. branch. */ |
2d21ac55 | 3244 | SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RW|CTLFLAG_LOCKED, NULL, "vfs generic hinge"); |
55e303ae A |
3245 | /* retrieve a list of mounted filesystem fsid_t values */ |
3246 | SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist, CTLFLAG_RD, | |
2d21ac55 | 3247 | NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids"); |
55e303ae | 3248 | /* perform operations on filesystem via fsid_t */ |
2d21ac55 | 3249 | SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW|CTLFLAG_LOCKED, |
55e303ae | 3250 | sysctl_vfs_ctlbyfsid, "ctlbyfsid"); |
2d21ac55 A |
3251 | SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW|CTLFLAG_ANYBODY, |
3252 | NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang"); | |
91447636 A |
3253 | |
3254 | ||
2d21ac55 | 3255 | long num_reusedvnodes = 0; /* long for OSAddAtomic */ |
55e303ae | 3256 | |
91447636 A |
3257 | static int |
3258 | new_vnode(vnode_t *vpp) | |
3259 | { | |
3260 | vnode_t vp; | |
3261 | int retries = 0; /* retry in case the table is full */ | |
2d21ac55 | 3262 | int force_alloc = 0, walk_count = 0; |
91447636 A |
3263 | int vpid; |
3264 | struct timespec ts; | |
2d21ac55 A |
3265 | struct timeval current_tv; |
3266 | struct unsafe_fsnode *l_unsafefs = 0; | |
3267 | proc_t curproc = current_proc(); | |
3268 | pid_t current_pid = proc_pid(curproc); | |
91447636 A |
3269 | |
3270 | retry: | |
2d21ac55 A |
3271 | microuptime(¤t_tv); |
3272 | ||
3273 | vp = NULLVP; | |
3274 | ||
91447636 A |
3275 | vnode_list_lock(); |
3276 | ||
2d21ac55 | 3277 | if ( !TAILQ_EMPTY(&vnode_dead_list)) { |
91447636 | 3278 | /* |
2d21ac55 | 3279 | * Can always reuse a dead one |
91447636 | 3280 | */ |
2d21ac55 A |
3281 | vp = TAILQ_FIRST(&vnode_dead_list); |
3282 | goto steal_this_vp; | |
3283 | } | |
91447636 | 3284 | /* |
2d21ac55 A |
3285 | * no dead vnodes available... if we're under |
3286 | * the limit, we'll create a new vnode | |
91447636 | 3287 | */ |
2d21ac55 | 3288 | if (numvnodes < desiredvnodes || force_alloc) { |
91447636 A |
3289 | numvnodes++; |
3290 | vnode_list_unlock(); | |
2d21ac55 A |
3291 | MALLOC_ZONE(vp, struct vnode *, sizeof(*vp), M_VNODE, M_WAITOK); |
3292 | bzero((char *)vp, sizeof(*vp)); | |
91447636 A |
3293 | VLISTNONE(vp); /* avoid double queue removal */ |
3294 | lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr); | |
3295 | ||
3296 | nanouptime(&ts); | |
3297 | vp->v_id = ts.tv_nsec; | |
3298 | vp->v_flag = VSTANDARD; | |
3299 | ||
2d21ac55 A |
3300 | #if CONFIG_MACF |
3301 | mac_vnode_label_init(vp); | |
3302 | #endif /* MAC */ | |
3303 | ||
cf7d32b8 | 3304 | vp->v_iocount = 1; |
91447636 A |
3305 | goto done; |
3306 | } | |
2d21ac55 A |
3307 | |
3308 | #define MAX_WALK_COUNT 1000 | |
3309 | ||
3310 | if ( !TAILQ_EMPTY(&vnode_rage_list) && | |
3311 | (ragevnodes >= rage_limit || | |
3312 | (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) { | |
3313 | ||
3314 | TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) { | |
3315 | if ( !(vp->v_listflag & VLIST_RAGE) || !(vp->v_flag & VRAGE)) | |
3316 | panic("new_vnode: vp on RAGE list not marked both VLIST_RAGE and VRAGE"); | |
3317 | ||
3318 | // skip vnodes which have a dependency on this process | |
3319 | // (i.e. they're vnodes in a disk image and this process | |
3320 | // is diskimages-helper) | |
3321 | // | |
3322 | if (vp->v_mount && vp->v_mount->mnt_dependent_pid != current_pid && vp->v_mount->mnt_dependent_process != curproc) { | |
3323 | break; | |
3324 | } | |
3325 | ||
3326 | // don't iterate more than MAX_WALK_COUNT vnodes to | |
3327 | // avoid keeping the vnode list lock held for too long. | |
3328 | if (walk_count++ > MAX_WALK_COUNT) { | |
3329 | vp = NULL; | |
3330 | break; | |
3331 | } | |
3332 | } | |
3333 | ||
3334 | } | |
3335 | ||
3336 | if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) { | |
3337 | /* | |
3338 | * Pick the first vp for possible reuse | |
3339 | */ | |
3340 | walk_count = 0; | |
3341 | TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) { | |
3342 | // skip vnodes which have a dependency on this process | |
3343 | // (i.e. they're vnodes in a disk image and this process | |
3344 | // is diskimages-helper) | |
3345 | // | |
3346 | if (vp->v_mount && vp->v_mount->mnt_dependent_pid != current_pid && vp->v_mount->mnt_dependent_process != curproc) { | |
3347 | break; | |
3348 | } | |
3349 | ||
3350 | // don't iterate more than MAX_WALK_COUNT vnodes to | |
3351 | // avoid keeping the vnode list lock held for too long. | |
3352 | if (walk_count++ > MAX_WALK_COUNT) { | |
3353 | vp = NULL; | |
3354 | break; | |
3355 | } | |
3356 | } | |
3357 | ||
3358 | } | |
3359 | ||
3360 | // | |
3361 | // if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT | |
3362 | // then we're trying to create a vnode on behalf of a | |
3363 | // process like diskimages-helper that has file systems | |
3364 | // mounted on top of itself (and thus we can't reclaim | |
3365 | // vnodes in the file systems on top of us). if we can't | |
3366 | // find a vnode to reclaim then we'll just have to force | |
3367 | // the allocation. | |
3368 | // | |
3369 | if (vp == NULL && walk_count >= MAX_WALK_COUNT) { | |
3370 | force_alloc = 1; | |
3371 | vnode_list_unlock(); | |
3372 | goto retry; | |
3373 | } | |
3374 | ||
3375 | if (vp == NULL) { | |
3376 | /* | |
91447636 A |
3377 | * we've reached the system imposed maximum number of vnodes |
3378 | * but there isn't a single one available | |
3379 | * wait a bit and then retry... if we can't get a vnode | |
3380 | * after 100 retries, then log a complaint | |
3381 | */ | |
3382 | if (++retries <= 100) { | |
3383 | vnode_list_unlock(); | |
2d21ac55 | 3384 | delay_for_interval(1, 1000 * 1000); |
91447636 A |
3385 | goto retry; |
3386 | } | |
3387 | ||
3388 | vnode_list_unlock(); | |
3389 | tablefull("vnode"); | |
3390 | log(LOG_EMERG, "%d desired, %d numvnodes, " | |
2d21ac55 A |
3391 | "%d free, %d dead, %d rage\n", |
3392 | desiredvnodes, numvnodes, freevnodes, deadvnodes, ragevnodes); | |
3393 | *vpp = NULL; | |
91447636 A |
3394 | return (ENFILE); |
3395 | } | |
3396 | steal_this_vp: | |
3397 | vpid = vp->v_id; | |
3398 | ||
cf7d32b8 | 3399 | vnode_list_remove_locked(vp); |
91447636 A |
3400 | |
3401 | vnode_list_unlock(); | |
2d21ac55 | 3402 | vnode_lock_spin(vp); |
91447636 A |
3403 | |
3404 | /* | |
3405 | * We could wait for the vnode_lock after removing the vp from the freelist | |
3406 | * and the vid is bumped only at the very end of reclaim. So it is possible | |
3407 | * that we are looking at a vnode that is being terminated. If so skip it. | |
3408 | */ | |
3409 | if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || | |
3410 | VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) { | |
3411 | /* | |
3412 | * we lost the race between dropping the list lock | |
3413 | * and picking up the vnode_lock... someone else | |
3414 | * used this vnode and it is now in a new state | |
3415 | * so we need to go back and try again | |
3416 | */ | |
3417 | vnode_unlock(vp); | |
3418 | goto retry; | |
3419 | } | |
3420 | if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) { | |
3421 | /* | |
3422 | * we did a vnode_rele_ext that asked for | |
3423 | * us not to reenter the filesystem during | |
3424 | * the release even though VL_NEEDINACTIVE was | |
3425 | * set... we'll do it here by doing a | |
3426 | * vnode_get/vnode_put | |
3427 | * | |
3428 | * pick up an iocount so that we can call | |
3429 | * vnode_put and drive the VNOP_INACTIVE... | |
3430 | * vnode_put will either leave us off | |
3431 | * the freelist if a new ref comes in, | |
3432 | * or put us back on the end of the freelist | |
3433 | * or recycle us if we were marked for termination... | |
3434 | * so we'll just go grab a new candidate | |
3435 | */ | |
3436 | vp->v_iocount++; | |
3437 | #ifdef JOE_DEBUG | |
3438 | record_vp(vp, 1); | |
3439 | #endif | |
3440 | vnode_put_locked(vp); | |
3441 | vnode_unlock(vp); | |
3442 | goto retry; | |
3443 | } | |
3444 | OSAddAtomic(1, &num_reusedvnodes); | |
3445 | ||
3446 | /* Checks for anyone racing us for recycle */ | |
3447 | if (vp->v_type != VBAD) { | |
3448 | if (vp->v_lflag & VL_DEAD) | |
3449 | panic("new_vnode: the vnode is VL_DEAD but not VBAD"); | |
2d21ac55 | 3450 | vnode_lock_convert(vp); |
2d21ac55 | 3451 | (void)vnode_reclaim_internal(vp, 1, 1, 0); |
91447636 A |
3452 | |
3453 | if ((VONLIST(vp))) | |
3454 | panic("new_vnode: vp on list "); | |
3455 | if (vp->v_usecount || vp->v_iocount || vp->v_kusecount || | |
3456 | (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH))) | |
3457 | panic("new_vnode: free vnode still referenced\n"); | |
3458 | if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0)) | |
3459 | panic("new_vnode: vnode seems to be on mount list "); | |
3460 | if ( !LIST_EMPTY(&vp->v_nclinks) || !LIST_EMPTY(&vp->v_ncchildren)) | |
3461 | panic("new_vnode: vnode still hooked into the name cache"); | |
3462 | } | |
3463 | if (vp->v_unsafefs) { | |
2d21ac55 | 3464 | l_unsafefs = vp->v_unsafefs; |
91447636 A |
3465 | vp->v_unsafefs = (struct unsafe_fsnode *)NULL; |
3466 | } | |
2d21ac55 A |
3467 | |
3468 | #if CONFIG_MACF | |
3469 | /* | |
3470 | * We should never see VL_LABELWAIT or VL_LABEL here. | |
3471 | * as those operations hold a reference. | |
3472 | */ | |
3473 | assert ((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT); | |
3474 | assert ((vp->v_lflag & VL_LABEL) != VL_LABEL); | |
3475 | if (vp->v_lflag & VL_LABELED) { | |
3476 | vnode_lock_convert(vp); | |
3477 | mac_vnode_label_recycle(vp); | |
3478 | } | |
3479 | #endif /* MAC */ | |
3480 | ||
cf7d32b8 | 3481 | vp->v_iocount = 1; |
91447636 A |
3482 | vp->v_lflag = 0; |
3483 | vp->v_writecount = 0; | |
3484 | vp->v_references = 0; | |
3485 | vp->v_iterblkflags = 0; | |
3486 | vp->v_flag = VSTANDARD; | |
3487 | /* vbad vnodes can point to dead_mountp */ | |
2d21ac55 | 3488 | vp->v_mount = NULL; |
91447636 A |
3489 | vp->v_defer_reclaimlist = (vnode_t)0; |
3490 | ||
3491 | vnode_unlock(vp); | |
2d21ac55 A |
3492 | |
3493 | if (l_unsafefs) { | |
3494 | lck_mtx_destroy(&l_unsafefs->fsnodelock, vnode_lck_grp); | |
3495 | FREE_ZONE((void *)l_unsafefs, sizeof(struct unsafe_fsnode), M_UNSAFEFS); | |
3496 | } | |
91447636 A |
3497 | done: |
3498 | *vpp = vp; | |
3499 | ||
3500 | return (0); | |
3501 | } | |
3502 | ||
3503 | void | |
3504 | vnode_lock(vnode_t vp) | |
3505 | { | |
3506 | lck_mtx_lock(&vp->v_lock); | |
3507 | } | |
3508 | ||
2d21ac55 A |
3509 | void |
3510 | vnode_lock_spin(vnode_t vp) | |
3511 | { | |
3512 | lck_mtx_lock_spin(&vp->v_lock); | |
3513 | } | |
3514 | ||
91447636 A |
3515 | void |
3516 | vnode_unlock(vnode_t vp) | |
3517 | { | |
3518 | lck_mtx_unlock(&vp->v_lock); | |
3519 | } | |
3520 | ||
3521 | ||
3522 | ||
3523 | int | |
3524 | vnode_get(struct vnode *vp) | |
3525 | { | |
2d21ac55 | 3526 | int retval; |
91447636 | 3527 | |
2d21ac55 A |
3528 | vnode_lock_spin(vp); |
3529 | retval = vnode_get_locked(vp); | |
3530 | vnode_unlock(vp); | |
3531 | ||
3532 | return(retval); | |
3533 | } | |
3534 | ||
3535 | int | |
3536 | vnode_get_locked(struct vnode *vp) | |
3537 | { | |
3538 | ||
3539 | if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) { | |
91447636 A |
3540 | return(ENOENT); |
3541 | } | |
3542 | vp->v_iocount++; | |
3543 | #ifdef JOE_DEBUG | |
3544 | record_vp(vp, 1); | |
3545 | #endif | |
2d21ac55 | 3546 | return (0); |
91447636 A |
3547 | } |
3548 | ||
3549 | int | |
3550 | vnode_getwithvid(vnode_t vp, int vid) | |
3551 | { | |
3552 | return(vget_internal(vp, vid, ( VNODE_NODEAD| VNODE_WITHID))); | |
3553 | } | |
3554 | ||
3555 | int | |
3556 | vnode_getwithref(vnode_t vp) | |
3557 | { | |
3558 | return(vget_internal(vp, 0, 0)); | |
3559 | } | |
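/*
 * Editorial sketch (not part of the original source): the usual
 * revalidation pattern for a cached vnode pointer.  The caller records
 * vp->v_id while it last held an iocount; vnode_getwithvid() above then
 * fails with ENOENT if the vnode has since been recycled (v_id bumped),
 * which is the vid check performed in vnode_getiocount() below.
 */
#if 0	/* illustrative sketch */
static int
example_reuse_cached_vnode(vnode_t vp, int saved_vid)
{
	int error;

	if ((error = vnode_getwithvid(vp, saved_vid)) != 0)
		return (error);		/* stale: vnode was reclaimed or reused */
	/* ... safe to issue VNOPs here while the iocount is held ... */
	vnode_put(vp);
	return (0);
}
#endif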
3560 | ||
3561 | ||
3562 | int | |
3563 | vnode_put(vnode_t vp) | |
3564 | { | |
3565 | int retval; | |
3566 | ||
2d21ac55 | 3567 | vnode_lock_spin(vp); |
91447636 A |
3568 | retval = vnode_put_locked(vp); |
3569 | vnode_unlock(vp); | |
3570 | ||
3571 | return(retval); | |
3572 | } | |
3573 | ||
3574 | int | |
3575 | vnode_put_locked(vnode_t vp) | |
3576 | { | |
2d21ac55 | 3577 | vfs_context_t ctx = vfs_context_current(); /* hoist outside loop */ |
91447636 A |
3578 | |
3579 | retry: | |
3580 | if (vp->v_iocount < 1) | |
2d21ac55 | 3581 | panic("vnode_put(%p): iocount < 1", vp); |
91447636 A |
3582 | |
3583 | if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) { | |
2d21ac55 | 3584 | vnode_dropiocount(vp); |
91447636 A |
3585 | return(0); |
3586 | } | |
3587 | if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE) { | |
3588 | ||
3589 | vp->v_lflag &= ~VL_NEEDINACTIVE; | |
3590 | vnode_unlock(vp); | |
3591 | ||
2d21ac55 | 3592 | VNOP_INACTIVE(vp, ctx); |
91447636 | 3593 | |
2d21ac55 | 3594 | vnode_lock_spin(vp); |
91447636 A |
3595 | /* |
3596 | * because we had to drop the vnode lock before calling | |
3597 | * VNOP_INACTIVE, the state of this vnode may have changed... | |
3598 | * we may pick up both VL_MARKTERM and either | |
3599 | * an iocount or a usecount while in the VNOP_INACTIVE call | |
3600 | * we don't want to call vnode_reclaim_internal on a vnode | |
3601 | * that has active references on it... so loop back around | |
3602 | * and reevaluate the state | |
3603 | */ | |
3604 | goto retry; | |
3605 | } | |
3606 | vp->v_lflag &= ~VL_NEEDINACTIVE; | |
3607 | ||
2d21ac55 A |
3608 | if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) { |
3609 | vnode_lock_convert(vp); | |
cf7d32b8 | 3610 | vnode_reclaim_internal(vp, 1, 1, 0); |
2d21ac55 A |
3611 | } |
3612 | vnode_dropiocount(vp); | |
91447636 A |
3613 | vnode_list_add(vp); |
3614 | ||
3615 | return(0); | |
3616 | } | |
3617 | ||
3618 | /* is vnode_t in use by others? */ | |
3619 | int | |
3620 | vnode_isinuse(vnode_t vp, int refcnt) | |
3621 | { | |
3622 | return(vnode_isinuse_locked(vp, refcnt, 0)); | |
3623 | } | |
3624 | ||
3625 | ||
3626 | static int | |
3627 | vnode_isinuse_locked(vnode_t vp, int refcnt, int locked) | |
3628 | { | |
3629 | int retval = 0; | |
3630 | ||
3631 | if (!locked) | |
2d21ac55 A |
3632 | vnode_lock_spin(vp); |
3633 | if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) { | |
91447636 A |
3634 | retval = 1; |
3635 | goto out; | |
3636 | } | |
3637 | if (vp->v_type == VREG) { | |
3638 | retval = ubc_isinuse_locked(vp, refcnt, 1); | |
3639 | } | |
3640 | ||
3641 | out: | |
3642 | if (!locked) | |
3643 | vnode_unlock(vp); | |
3644 | return(retval); | |
3645 | } | |
3646 | ||
3647 | ||
3648 | /* resume vnode_t */ | |
3649 | errno_t | |
3650 | vnode_resume(vnode_t vp) | |
3651 | { | |
3652 | ||
2d21ac55 | 3653 | vnode_lock_spin(vp); |
91447636 A |
3654 | |
3655 | if (vp->v_owner == current_thread()) { | |
3656 | vp->v_lflag &= ~VL_SUSPENDED; | |
2d21ac55 | 3657 | vp->v_owner = NULL; |
91447636 A |
3658 | vnode_unlock(vp); |
3659 | wakeup(&vp->v_iocount); | |
3660 | } else | |
3661 | vnode_unlock(vp); | |
3662 | ||
3663 | return(0); | |
3664 | } | |
3665 | ||
2d21ac55 A |
3666 | /* suspend vnode_t |
3667 | * Please do not use on more than one vnode at a time as it may | |
3668 | * cause deadlocks. | |
3669 | * xxx should we explicitly prevent this from happening? | |
3670 | */ | |
3671 | ||
3672 | errno_t | |
3673 | vnode_suspend(vnode_t vp) | |
3674 | { | |
3675 | if (vp->v_lflag & VL_SUSPENDED) { | |
3676 | return(EBUSY); | |
3677 | } | |
3678 | ||
3679 | vnode_lock_spin(vp); | |
3680 | ||
3681 | /* | |
3682 | * xxx is this sufficient to check if a vnode_drain is | |
3683 | * in progress? | |
3684 | */ | |
3685 | ||
3686 | if (vp->v_owner == NULL) { | |
3687 | vp->v_lflag |= VL_SUSPENDED; | |
3688 | vp->v_owner = current_thread(); | |
3689 | } | |
3690 | vnode_unlock(vp); | |
3691 | ||
3692 | return(0); | |
3693 | } | |
3694 | ||
3695 | ||
3696 | ||
91447636 A |
3697 | static errno_t |
3698 | vnode_drain(vnode_t vp) | |
3699 | { | |
3700 | ||
3701 | if (vp->v_lflag & VL_DRAIN) { | |
3702 | panic("vnode_drain: recursive drain"); | |
3703 | return(ENOENT); | |
3704 | } | |
3705 | vp->v_lflag |= VL_DRAIN; | |
3706 | vp->v_owner = current_thread(); | |
3707 | ||
3708 | while (vp->v_iocount > 1) | |
2d21ac55 | 3709 | msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL); |
91447636 A |
3710 | return(0); |
3711 | } | |
3712 | ||
3713 | ||
3714 | /* | |
3715 | * if the number of recent references via vnode_getwithvid or vnode_getwithref | |
3716 | * exceeds this threshold, then 'UN-AGE' the vnode by removing it from | |
3717 | * the LRU list if it's currently on it... once the iocount and usecount both drop | |
3718 | * to 0, it will get put back on the end of the list, effectively making it younger. | |
3719 | * This allows us to keep actively referenced vnodes in the list without having | |
3720 | * to constantly remove and re-add them each time a vnode without a usecount is | |
3721 | * referenced, which would cost us taking and dropping a global lock twice. | |
3722 | */ | |
2d21ac55 | 3723 | #define UNAGE_THRESHHOLD 25 |
91447636 | 3724 | |
2d21ac55 A |
3725 | static errno_t |
3726 | vnode_getiocount(vnode_t vp, int vid, int vflags) | |
91447636 A |
3727 | { |
3728 | int nodead = vflags & VNODE_NODEAD; | |
3729 | int nosusp = vflags & VNODE_NOSUSPEND; | |
3730 | ||
91447636 A |
3731 | for (;;) { |
3732 | /* | |
3733 | * if it is a dead vnode with deadfs | |
3734 | */ | |
2d21ac55 | 3735 | if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) { |
91447636 A |
3736 | return(ENOENT); |
3737 | } | |
3738 | /* | |
3739 | * will return VL_DEAD ones | |
3740 | */ | |
3741 | if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0 ) { | |
3742 | break; | |
3743 | } | |
3744 | /* | |
3745 | * if suspended vnodes are to be failed | |
3746 | */ | |
3747 | if (nosusp && (vp->v_lflag & VL_SUSPENDED)) { | |
91447636 A |
3748 | return(ENOENT); |
3749 | } | |
3750 | /* | |
3751 | * if you are the owner of the drain/suspend/termination, you can acquire the iocount; | |
3752 | * check for VL_TERMINATE; it does not set owner | |
3753 | */ | |
3754 | if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) && | |
3755 | (vp->v_owner == current_thread())) { | |
3756 | break; | |
3757 | } | |
2d21ac55 A |
3758 | vnode_lock_convert(vp); |
3759 | ||
91447636 A |
3760 | if (vp->v_lflag & VL_TERMINATE) { |
3761 | vp->v_lflag |= VL_TERMWANT; | |
3762 | ||
2d21ac55 | 3763 | msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vnode getiocount", NULL); |
91447636 | 3764 | } else |
2d21ac55 | 3765 | msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL); |
91447636 A |
3766 | } |
3767 | if (vid != vp->v_id) { | |
91447636 A |
3768 | return(ENOENT); |
3769 | } | |
3770 | if (++vp->v_references >= UNAGE_THRESHHOLD) { | |
3771 | vp->v_references = 0; | |
3772 | vnode_list_remove(vp); | |
3773 | } | |
3774 | vp->v_iocount++; | |
3775 | #ifdef JOE_DEBUG | |
3776 | record_vp(vp, 1); | |
3777 | #endif | |
91447636 A |
3778 | return(0); |
3779 | } | |
3780 | ||
3781 | static void | |
2d21ac55 | 3782 | vnode_dropiocount (vnode_t vp) |
91447636 | 3783 | { |
91447636 | 3784 | if (vp->v_iocount < 1) |
2d21ac55 | 3785 | panic("vnode_dropiocount(%p): v_iocount < 1", vp); |
91447636 A |
3786 | |
3787 | vp->v_iocount--; | |
3788 | #ifdef JOE_DEBUG | |
3789 | record_vp(vp, -1); | |
3790 | #endif | |
2d21ac55 A |
3791 | if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1)) { |
3792 | vnode_lock_convert(vp); | |
91447636 | 3793 | wakeup(&vp->v_iocount); |
2d21ac55 | 3794 | } |
91447636 A |
3795 | } |
3796 | ||
3797 | ||
3798 | void | |
3799 | vnode_reclaim(struct vnode * vp) | |
3800 | { | |
2d21ac55 | 3801 | vnode_reclaim_internal(vp, 0, 0, 0); |
91447636 A |
3802 | } |
3803 | ||
3804 | __private_extern__ | |
3805 | void | |
2d21ac55 | 3806 | vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags) |
91447636 A |
3807 | { |
3808 | int isfifo = 0; | |
3809 | ||
3810 | if (!locked) | |
3811 | vnode_lock(vp); | |
3812 | ||
3813 | if (vp->v_lflag & VL_TERMINATE) { | |
3814 | panic("vnode reclaim in progress"); | |
3815 | } | |
3816 | vp->v_lflag |= VL_TERMINATE; | |
3817 | ||
2d21ac55 A |
3818 | vn_clearunionwait(vp, 1); |
3819 | ||
91447636 A |
3820 | if (vnode_drain(vp)) { |
3821 | panic("vnode drain failed"); | |
3822 | vnode_unlock(vp); | |
3823 | return; | |
3824 | } | |
3825 | isfifo = (vp->v_type == VFIFO); | |
3826 | ||
3827 | if (vp->v_type != VBAD) | |
2d21ac55 | 3828 | vgone(vp, flags); /* clean and reclaim the vnode */ |
91447636 A |
3829 | |
3830 | /* | |
4a3eedf9 A |
3831 | * give the vnode a new identity so that vnode_getwithvid will fail |
3832 | * on any stale cache accesses... | |
3833 | * grab the list_lock so that if we're in "new_vnode" | |
3834 | * behind the list_lock trying to steal this vnode, the v_id is stable... | |
3835 | * once new_vnode drops the list_lock, it will block trying to take | |
3836 | * the vnode lock until we release it... at that point it will evaluate | |
3837 | * whether the v_id has changed | |
cf7d32b8 A |
3838 | * also need to make sure that the vnode isn't on a list where "new_vnode" |
3839 | * can find it after the v_id has been bumped until we are completely done | |
3840 | * with the vnode (i.e. putting it back on a list has to be the very last | |
3841 | * thing we do to this vnode... many of the callers of vnode_reclaim_internal | |
3842 | * are holding an io_count on the vnode... they need to drop the io_count | |
3843 | * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until | |
3844 | * they are completely done with the vnode | |
91447636 | 3845 | */ |
4a3eedf9 | 3846 | vnode_list_lock(); |
cf7d32b8 A |
3847 | |
3848 | vnode_list_remove_locked(vp); | |
91447636 | 3849 | vp->v_id++; |
cf7d32b8 | 3850 | |
4a3eedf9 A |
3851 | vnode_list_unlock(); |
3852 | ||
91447636 A |
3853 | if (isfifo) { |
3854 | struct fifoinfo * fip; | |
3855 | ||
3856 | fip = vp->v_fifoinfo; | |
3857 | vp->v_fifoinfo = NULL; | |
3858 | FREE(fip, M_TEMP); | |
3859 | } | |
3860 | ||
3861 | vp->v_type = VBAD; | |
3862 | ||
3863 | if (vp->v_data) | |
3864 | panic("vnode_reclaim_internal: cleaned vnode isn't"); | |
3865 | if (vp->v_numoutput) | |
cf7d32b8 | 3866 | panic("vnode_reclaim_internal: clean vnode has pending I/O's"); |
91447636 A |
3867 | if (UBCINFOEXISTS(vp)) |
3868 | panic("vnode_reclaim_internal: ubcinfo not cleaned"); | |
3869 | if (vp->v_parent) | |
3870 | panic("vnode_reclaim_internal: vparent not removed"); | |
3871 | if (vp->v_name) | |
3872 | panic("vnode_reclaim_internal: vname not removed"); | |
3873 | ||
2d21ac55 | 3874 | vp->v_socket = NULL; |
91447636 A |
3875 | |
3876 | vp->v_lflag &= ~VL_TERMINATE; | |
3877 | vp->v_lflag &= ~VL_DRAIN; | |
2d21ac55 | 3878 | vp->v_owner = NULL; |
91447636 A |
3879 | |
3880 | if (vp->v_lflag & VL_TERMWANT) { | |
3881 | vp->v_lflag &= ~VL_TERMWANT; | |
3882 | wakeup(&vp->v_lflag); | |
3883 | } | |
cf7d32b8 | 3884 | if (!reuse) { |
2d21ac55 A |
3885 | /* |
3886 | * make sure we get on the | |
cf7d32b8 | 3887 | * dead list if appropriate |
2d21ac55 | 3888 | */ |
91447636 | 3889 | vnode_list_add(vp); |
2d21ac55 | 3890 | } |
91447636 A |
3891 | if (!locked) |
3892 | vnode_unlock(vp); | |
3893 | } | |
3894 | ||
3895 | /* USAGE: | |
91447636 A |
3896 | * vnode_create(int flavor, size_t size, void * param, vnode_t *vp) |
3897 | */ | |
3898 | int | |
3899 | vnode_create(int flavor, size_t size, void *data, vnode_t *vpp) | |
3900 | { | |
3901 | int error; | |
3902 | int insert = 1; | |
3903 | vnode_t vp; | |
3904 | vnode_t nvp; | |
3905 | vnode_t dvp; | |
2d21ac55 | 3906 | struct uthread *ut; |
91447636 A |
3907 | struct componentname *cnp; |
3908 | struct vnode_fsparam *param = (struct vnode_fsparam *)data; | |
3909 | ||
3910 | if (flavor == VNCREATE_FLAVOR && (size == VCREATESIZE) && param) { | |
3911 | if ( (error = new_vnode(&vp)) ) { | |
3912 | return(error); | |
3913 | } else { | |
3914 | dvp = param->vnfs_dvp; | |
3915 | cnp = param->vnfs_cnp; | |
3916 | ||
3917 | vp->v_op = param->vnfs_vops; | |
3918 | vp->v_type = param->vnfs_vtype; | |
3919 | vp->v_data = param->vnfs_fsnode; | |
91447636 A |
3920 | |
3921 | if (param->vnfs_markroot) | |
3922 | vp->v_flag |= VROOT; | |
3923 | if (param->vnfs_marksystem) | |
3924 | vp->v_flag |= VSYSTEM; | |
2d21ac55 | 3925 | if (vp->v_type == VREG) { |
91447636 A |
3926 | error = ubc_info_init_withsize(vp, param->vnfs_filesize); |
3927 | if (error) { | |
3928 | #ifdef JOE_DEBUG | |
3929 | record_vp(vp, 1); | |
3930 | #endif | |
2d21ac55 | 3931 | vp->v_mount = NULL; |
91447636 A |
3932 | vp->v_op = dead_vnodeop_p; |
3933 | vp->v_tag = VT_NON; | |
3934 | vp->v_data = NULL; | |
3935 | vp->v_type = VBAD; | |
3936 | vp->v_lflag |= VL_DEAD; | |
3937 | ||
3938 | vnode_put(vp); | |
3939 | return(error); | |
3940 | } | |
3941 | } | |
3942 | #ifdef JOE_DEBUG | |
3943 | record_vp(vp, 1); | |
3944 | #endif | |
3945 | if (vp->v_type == VCHR || vp->v_type == VBLK) { | |
3946 | ||
2d21ac55 A |
3947 | vp->v_tag = VT_DEVFS; /* callers will reset if needed (bdevvp) */ |
3948 | ||
91447636 A |
3949 | if ( (nvp = checkalias(vp, param->vnfs_rdev)) ) { |
3950 | /* | |
3951 | * if checkalias returns a vnode, it will be locked | |
3952 | * | |
3953 | * first get rid of the unneeded vnode we acquired | |
3954 | */ | |
3955 | vp->v_data = NULL; | |
3956 | vp->v_op = spec_vnodeop_p; | |
3957 | vp->v_type = VBAD; | |
3958 | vp->v_lflag = VL_DEAD; | |
3959 | vp->v_data = NULL; | |
3960 | vp->v_tag = VT_NON; | |
3961 | vnode_put(vp); | |
3962 | ||
3963 | /* | |
3964 | * switch to aliased vnode and finish | |
3965 | * preparing it | |
3966 | */ | |
3967 | vp = nvp; | |
3968 | ||
2d21ac55 | 3969 | vclean(vp, 0); |
91447636 A |
3970 | vp->v_op = param->vnfs_vops; |
3971 | vp->v_type = param->vnfs_vtype; | |
3972 | vp->v_data = param->vnfs_fsnode; | |
3973 | vp->v_lflag = 0; | |
3974 | vp->v_mount = NULL; | |
3975 | insmntque(vp, param->vnfs_mp); | |
3976 | insert = 0; | |
3977 | vnode_unlock(vp); | |
3978 | } | |
3979 | } | |
3980 | ||
3981 | if (vp->v_type == VFIFO) { | |
3982 | struct fifoinfo *fip; | |
3983 | ||
3984 | MALLOC(fip, struct fifoinfo *, | |
3985 | sizeof(*fip), M_TEMP, M_WAITOK); | |
3986 | bzero(fip, sizeof(struct fifoinfo )); | |
3987 | vp->v_fifoinfo = fip; | |
3988 | } | |
3989 | /* The file systems usually pass the address of the location where | |
3990 | * they store the vnode pointer. Once we add the vnode to the mount | |
3991 | * point and the name cache it becomes discoverable, so the file system | |
3992 | * node will have its connection to the vnode set up by then. | |
3993 | */ | |
3994 | *vpp = vp; | |
3995 | ||
2d21ac55 A |
3996 | /* Add fs named reference. */ |
3997 | if (param->vnfs_flags & VNFS_ADDFSREF) { | |
3998 | vp->v_lflag |= VNAMED_FSHASH; | |
3999 | } | |
91447636 A |
4000 | if (param->vnfs_mp) { |
4001 | if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL) | |
4002 | vp->v_flag |= VLOCKLOCAL; | |
4003 | if (insert) { | |
4004 | /* | |
4005 | * enter in mount vnode list | |
4006 | */ | |
4007 | insmntque(vp, param->vnfs_mp); | |
4008 | } | |
4009 | #ifdef INTERIM_FSNODE_LOCK | |
4010 | if (param->vnfs_mp->mnt_vtable->vfc_threadsafe == 0) { | |
4011 | MALLOC_ZONE(vp->v_unsafefs, struct unsafe_fsnode *, | |
4012 | sizeof(struct unsafe_fsnode), M_UNSAFEFS, M_WAITOK); | |
4013 | vp->v_unsafefs->fsnode_count = 0; | |
4014 | vp->v_unsafefs->fsnodeowner = (void *)NULL; | |
4015 | lck_mtx_init(&vp->v_unsafefs->fsnodelock, vnode_lck_grp, vnode_lck_attr); | |
4016 | } | |
4017 | #endif /* INTERIM_FSNODE_LOCK */ | |
4018 | } | |
4019 | if (dvp && vnode_ref(dvp) == 0) { | |
4020 | vp->v_parent = dvp; | |
4021 | } | |
4022 | if (cnp) { | |
4023 | if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) { | |
4024 | /* | |
4025 | * enter into name cache | |
4026 | * we've got the info to enter it into the name cache now | |
4027 | */ | |
4028 | cache_enter(dvp, vp, cnp); | |
4029 | } | |
4030 | vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0); | |
2d21ac55 A |
4031 | if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED) |
4032 | vp->v_flag |= VISUNION; | |
91447636 A |
4033 | } |
4034 | if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) { | |
4035 | /* | |
4036 | * this vnode is being created as cacheable in the name cache | |
4037 | * this allows us to re-enter it in the cache | |
4038 | */ | |
4039 | vp->v_flag |= VNCACHEABLE; | |
4040 | } | |
2d21ac55 A |
4041 | ut = get_bsdthread_info(current_thread()); |
4042 | ||
4043 | if ((current_proc()->p_lflag & P_LRAGE_VNODES) || | |
4044 | (ut->uu_flag & UT_RAGE_VNODES)) { | |
4045 | /* | |
4046 | * process has indicated that it wants any | |
4047 | * vnodes created on its behalf to be rapidly | |
4048 | * aged to reduce the impact on the cached set | |
4049 | * of vnodes | |
4050 | */ | |
4051 | vp->v_flag |= VRAGE; | |
4052 | } | |
91447636 A |
4053 | return(0); |
4054 | } | |
4055 | } | |
4056 | return (EINVAL); | |
4057 | } | |
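/*
 * Editorial sketch (not part of the original source): how a filesystem
 * typically fills a vnode_fsparam and calls vnode_create() above when
 * materializing a regular file.  'my_node', 'my_vnodeops' and the other
 * arguments are hypothetical caller state, named here for illustration.
 */
#if 0	/* illustrative sketch */
static int
example_make_file_vnode(mount_t mp, vnode_t dvp, struct componentname *cnp,
    void *my_node, int (**my_vnodeops)(void *), off_t size, vnode_t *vpp)
{
	struct vnode_fsparam param;

	bzero(&param, sizeof(param));
	param.vnfs_mp = mp;			/* enter on this mount's vnode list */
	param.vnfs_vtype = VREG;
	param.vnfs_fsnode = my_node;		/* becomes vp->v_data */
	param.vnfs_vops = my_vnodeops;		/* becomes vp->v_op */
	param.vnfs_dvp = dvp;			/* parent; also used for cache_enter */
	param.vnfs_cnp = cnp;			/* name entered into the name cache */
	param.vnfs_filesize = size;		/* used by ubc_info_init_withsize */
	param.vnfs_flags = VNFS_ADDFSREF;	/* take a named fs reference */

	return (vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &param, vpp));
}
#endif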
4058 | ||
4059 | int | |
4060 | vnode_addfsref(vnode_t vp) | |
4061 | { | |
2d21ac55 | 4062 | vnode_lock_spin(vp); |
91447636 A |
4063 | if (vp->v_lflag & VNAMED_FSHASH) |
4064 | panic("add_fsref: vp already has named reference"); | |
4065 | if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) | |
4066 | panic("addfsref: vp on the free list\n"); | |
4067 | vp->v_lflag |= VNAMED_FSHASH; | |
4068 | vnode_unlock(vp); | |
4069 | return(0); | |
4070 | ||
4071 | } | |
4072 | int | |
4073 | vnode_removefsref(vnode_t vp) | |
4074 | { | |
2d21ac55 | 4075 | vnode_lock_spin(vp); |
91447636 A |
4076 | if ((vp->v_lflag & VNAMED_FSHASH) == 0) |
4077 | panic("remove_fsref: no named reference"); | |
4078 | vp->v_lflag &= ~VNAMED_FSHASH; | |
4079 | vnode_unlock(vp); | |
4080 | return(0); | |
4081 | ||
4082 | } | |
4083 | ||
4084 | ||
4085 | int | |
4086 | vfs_iterate(__unused int flags, int (*callout)(mount_t, void *), void *arg) | |
4087 | { | |
4088 | mount_t mp; | |
4089 | int ret = 0; | |
4090 | fsid_t * fsid_list; | |
4091 | int count, actualcount, i; | |
4092 | void * allocmem; | |
4093 | ||
4094 | count = mount_getvfscnt(); | |
4095 | count += 10; | |
4096 | ||
4097 | fsid_list = (fsid_t *)kalloc(count * sizeof(fsid_t)); | |
4098 | allocmem = (void *)fsid_list; | |
4099 | ||
4100 | actualcount = mount_fillfsids(fsid_list, count); | |
4101 | ||
4102 | for (i=0; i< actualcount; i++) { | |
4103 | ||
4104 | /* obtain the mount point with iteration reference */ | |
4105 | mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1); | |
4106 | ||
4107 | if(mp == (struct mount *)0) | |
4108 | continue; | |
4109 | mount_lock(mp); | |
4110 | if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) { | |
4111 | mount_unlock(mp); | |
4112 | mount_iterdrop(mp); | |
4113 | continue; | |
4114 | ||
4115 | } | |
4116 | mount_unlock(mp); | |
4117 | ||
4118 | /* iterate over all the vnodes */ | |
4119 | ret = callout(mp, arg); | |
4120 | ||
4121 | mount_iterdrop(mp); | |
4122 | ||
4123 | switch (ret) { | |
4124 | case VFS_RETURNED: | |
4125 | case VFS_RETURNED_DONE: | |
4126 | if (ret == VFS_RETURNED_DONE) { | |
4127 | ret = 0; | |
4128 | goto out; | |
4129 | } | |
4130 | break; | |
4131 | ||
4132 | case VFS_CLAIMED_DONE: | |
4133 | ret = 0; | |
4134 | goto out; | |
4135 | case VFS_CLAIMED: | |
4136 | default: | |
4137 | break; | |
4138 | } | |
4139 | ret = 0; | |
4140 | } | |
4141 | ||
4142 | out: | |
4143 | kfree(allocmem, (count * sizeof(fsid_t))); | |
4144 | return (ret); | |
4145 | } | |
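/*
 * Editorial sketch (not part of the original source): a minimal callout for
 * vfs_iterate() above.  Returning VFS_RETURNED continues the walk; returning
 * VFS_RETURNED_DONE or VFS_CLAIMED_DONE stops it, as handled by the switch
 * at the end of vfs_iterate().
 */
#if 0	/* illustrative sketch */
static int
example_count_local_mounts(mount_t mp, void *arg)
{
	int *countp = (int *)arg;

	if (mp->mnt_flag & MNT_LOCAL)
		(*countp)++;
	return (VFS_RETURNED);
}
/* caller:  int n = 0;  (void)vfs_iterate(0, example_count_local_mounts, &n); */
#endif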
4146 | ||
4147 | /* | |
4148 | * Update the vfsstatfs structure in the mountpoint. | |
2d21ac55 A |
4149 | * MAC: Parameter eventtype added, indicating whether the event that |
4150 | * triggered this update came from user space, via a system call | |
4151 | * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT). | |
91447636 A |
4152 | */ |
4153 | int | |
2d21ac55 | 4154 | vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype) |
91447636 A |
4155 | { |
4156 | struct vfs_attr va; | |
4157 | int error; | |
4158 | ||
4159 | /* | |
4160 | * Request the attributes we want to propagate into | |
4161 | * the per-mount vfsstat structure. | |
4162 | */ | |
4163 | VFSATTR_INIT(&va); | |
4164 | VFSATTR_WANTED(&va, f_iosize); | |
4165 | VFSATTR_WANTED(&va, f_blocks); | |
4166 | VFSATTR_WANTED(&va, f_bfree); | |
4167 | VFSATTR_WANTED(&va, f_bavail); | |
4168 | VFSATTR_WANTED(&va, f_bused); | |
4169 | VFSATTR_WANTED(&va, f_files); | |
4170 | VFSATTR_WANTED(&va, f_ffree); | |
4171 | VFSATTR_WANTED(&va, f_bsize); | |
4172 | VFSATTR_WANTED(&va, f_fssubtype); | |
2d21ac55 A |
4173 | #if CONFIG_MACF |
4174 | if (eventtype == VFS_USER_EVENT) { | |
4175 | error = mac_mount_check_getattr(ctx, mp, &va); | |
4176 | if (error != 0) | |
4177 | return (error); | |
4178 | } | |
4179 | #endif | |
4180 | ||
91447636 A |
4181 | if ((error = vfs_getattr(mp, &va, ctx)) != 0) { |
4182 | KAUTH_DEBUG("STAT - filesystem returned error %d", error); | |
4183 | return(error); | |
4184 | } | |
4185 | ||
4186 | /* | |
4187 | * Unpack into the per-mount structure. | |
4188 | * | |
4189 | * We only overwrite these fields, which are likely to change: | |
4190 | * f_blocks | |
4191 | * f_bfree | |
4192 | * f_bavail | |
4193 | * f_bused | |
4194 | * f_files | |
4195 | * f_ffree | |
4196 | * | |
4197 | * And these which are not, but which the FS has no other way | |
4198 | * of providing to us: | |
4199 | * f_bsize | |
4200 | * f_iosize | |
4201 | * f_fssubtype | |
4202 | * | |
4203 | */ | |
4204 | if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) { | |
2d21ac55 A |
4205 | /* 4822056 - protect against malformed server mount */ |
4206 | mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512); | |
91447636 A |
4207 | } else { |
4208 | mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */ | |
4209 | } | |
4210 | if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) { | |
4211 | mp->mnt_vfsstat.f_iosize = va.f_iosize; | |
4212 | } else { | |
4213 | mp->mnt_vfsstat.f_iosize = 1024 * 1024; /* 1MB sensible I/O size */ | |
4214 | } | |
4215 | if (VFSATTR_IS_SUPPORTED(&va, f_blocks)) | |
4216 | mp->mnt_vfsstat.f_blocks = va.f_blocks; | |
4217 | if (VFSATTR_IS_SUPPORTED(&va, f_bfree)) | |
4218 | mp->mnt_vfsstat.f_bfree = va.f_bfree; | |
4219 | if (VFSATTR_IS_SUPPORTED(&va, f_bavail)) | |
4220 | mp->mnt_vfsstat.f_bavail = va.f_bavail; | |
4221 | if (VFSATTR_IS_SUPPORTED(&va, f_bused)) | |
4222 | mp->mnt_vfsstat.f_bused = va.f_bused; | |
4223 | if (VFSATTR_IS_SUPPORTED(&va, f_files)) | |
4224 | mp->mnt_vfsstat.f_files = va.f_files; | |
4225 | if (VFSATTR_IS_SUPPORTED(&va, f_ffree)) | |
4226 | mp->mnt_vfsstat.f_ffree = va.f_ffree; | |
4227 | ||
4228 | /* this is unlikely to change, but has to be queried for */ | |
4229 | if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype)) | |
4230 | mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype; | |
4231 | ||
4232 | return(0); | |
4233 | } | |
4234 | ||
4235 | void | |
4236 | mount_list_add(mount_t mp) | |
4237 | { | |
4238 | mount_list_lock(); | |
4239 | TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list); | |
4240 | nummounts++; | |
4241 | mount_list_unlock(); | |
4242 | } | |
4243 | ||
4244 | void | |
4245 | mount_list_remove(mount_t mp) | |
4246 | { | |
4247 | mount_list_lock(); | |
4248 | TAILQ_REMOVE(&mountlist, mp, mnt_list); | |
4249 | nummounts--; | |
2d21ac55 A |
4250 | mp->mnt_list.tqe_next = NULL; |
4251 | mp->mnt_list.tqe_prev = NULL; | |
91447636 A |
4252 | mount_list_unlock(); |
4253 | } | |
4254 | ||
2d21ac55 | 4255 | #if CONFIG_VOLFS |
91447636 A |
4256 | mount_t |
4257 | mount_lookupby_volfsid(int volfs_id, int withref) | |
4258 | { | |
4259 | mount_t cur_mount = (mount_t)0; | |
2d21ac55 | 4260 | mount_t mp; |
91447636 A |
4261 | |
4262 | mount_list_lock(); | |
2d21ac55 A |
4263 | TAILQ_FOREACH(mp, &mountlist, mnt_list) { |
4264 | if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) && | |
4265 | (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) && | |
4266 | (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) { | |
4267 | cur_mount = mp; | |
91447636 A |
4268 | if (withref) { |
4269 | if (mount_iterref(cur_mount, 1)) { | |
4270 | cur_mount = (mount_t)0; | |
4271 | mount_list_unlock(); | |
4272 | goto out; | |
4273 | } | |
4274 | } | |
2d21ac55 A |
4275 | break; |
4276 | } | |
91447636 A |
4277 | } |
4278 | mount_list_unlock(); | |
4279 | if (withref && (cur_mount != (mount_t)0)) { | |
4280 | mp = cur_mount; | |
4281 | if (vfs_busy(mp, LK_NOWAIT) != 0) { | |
4282 | cur_mount = (mount_t)0; | |
2d21ac55 | 4283 | } |
91447636 A |
4284 | mount_iterdrop(mp); |
4285 | } | |
4286 | out: | |
4287 | return(cur_mount); | |
4288 | } | |
2d21ac55 | 4289 | #endif |
91447636 A |
4290 | |
4291 | ||
4292 | mount_t | |
2d21ac55 | 4293 | mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref) |
91447636 A |
4294 | { |
4295 | mount_t retmp = (mount_t)0; | |
4296 | mount_t mp; | |
4297 | ||
4298 | if (!locked) | |
4299 | mount_list_lock(); | |
4300 | TAILQ_FOREACH(mp, &mountlist, mnt_list) | |
4301 | if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] && | |
4302 | mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) { | |
4303 | retmp = mp; | |
4304 | if (withref) { | |
4305 | if (mount_iterref(retmp, 1)) | |
4306 | retmp = (mount_t)0; | |
4307 | } | |
4308 | goto out; | |
4309 | } | |
4310 | out: | |
4311 | if (!locked) | |
4312 | mount_list_unlock(); | |
4313 | return (retmp); | |
4314 | } | |
4315 | ||
4316 | errno_t | |
2d21ac55 | 4317 | vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx) |
91447636 A |
4318 | { |
4319 | struct nameidata nd; | |
4320 | int error; | |
91447636 A |
4321 | u_long ndflags = 0; |
4322 | ||
2d21ac55 A |
4323 | if (ctx == NULL) { /* XXX technically an error */ |
4324 | ctx = vfs_context_current(); | |
91447636 A |
4325 | } |
4326 | ||
4327 | if (flags & VNODE_LOOKUP_NOFOLLOW) | |
4328 | ndflags = NOFOLLOW; | |
4329 | else | |
4330 | ndflags = FOLLOW; | |
4331 | ||
4332 | if (flags & VNODE_LOOKUP_NOCROSSMOUNT) | |
4333 | ndflags |= NOCROSSMOUNT; | |
4334 | if (flags & VNODE_LOOKUP_DOWHITEOUT) | |
4335 | ndflags |= DOWHITEOUT; | |
4336 | ||
4337 | /* XXX AUDITVNPATH1 needed ? */ | |
4338 | NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx); | |
4339 | ||
4340 | if ((error = namei(&nd))) | |
4341 | return (error); | |
4342 | *vpp = nd.ni_vp; | |
4343 | nameidone(&nd); | |
4344 | ||
4345 | return (0); | |
4346 | } | |
4347 | ||
4348 | errno_t | |
2d21ac55 | 4349 | vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx) |
91447636 A |
4350 | { |
4351 | struct nameidata nd; | |
4352 | int error; | |
91447636 | 4353 | u_long ndflags = 0; |
3a60a9f5 | 4354 | int lflags = flags; |
91447636 | 4355 | |
2d21ac55 A |
4356 | if (ctx == NULL) { /* XXX technically an error */ |
4357 | ctx = vfs_context_current(); | |
91447636 A |
4358 | } |
4359 | ||
3a60a9f5 A |
4360 | if (fmode & O_NOFOLLOW) |
4361 | lflags |= VNODE_LOOKUP_NOFOLLOW; | |
4362 | ||
4363 | if (lflags & VNODE_LOOKUP_NOFOLLOW) | |
91447636 A |
4364 | ndflags = NOFOLLOW; |
4365 | else | |
4366 | ndflags = FOLLOW; | |
4367 | ||
3a60a9f5 | 4368 | if (lflags & VNODE_LOOKUP_NOCROSSMOUNT) |
91447636 | 4369 | ndflags |= NOCROSSMOUNT; |
3a60a9f5 | 4370 | if (lflags & VNODE_LOOKUP_DOWHITEOUT) |
91447636 A |
4371 | ndflags |= DOWHITEOUT; |
4372 | ||
4373 | /* XXX AUDITVNPATH1 needed ? */ | |
4374 | NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx); | |
4375 | ||
4376 | if ((error = vn_open(&nd, fmode, cmode))) | |
4377 | *vpp = NULL; | |
4378 | else | |
4379 | *vpp = nd.ni_vp; | |
4380 | ||
4381 | return (error); | |
4382 | } | |
4383 | ||
4384 | errno_t | |
2d21ac55 | 4385 | vnode_close(vnode_t vp, int flags, vfs_context_t ctx) |
91447636 | 4386 | { |
91447636 A |
4387 | int error; |
4388 | ||
2d21ac55 A |
4389 | if (ctx == NULL) { |
4390 | ctx = vfs_context_current(); | |
91447636 A |
4391 | } |
4392 | ||
2d21ac55 | 4393 | error = vn_close(vp, flags, ctx); |
91447636 A |
4394 | vnode_put(vp); |
4395 | return (error); | |
4396 | } | |
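/*
 * Editorial sketch (not part of the original source): vnode_open() above
 * returns with the vnode opened and an iocount held, and vnode_close()
 * both closes it and drops that iocount, so KPI callers pair them directly.
 * The path is hypothetical and FREAD is assumed as the open/close mode.
 */
#if 0	/* illustrative sketch */
static int
example_open_then_close(vfs_context_t ctx)
{
	vnode_t vp = NULLVP;
	int error;

	error = vnode_open("/tmp/example", FREAD, 0, 0, &vp, ctx);
	if (error)
		return (error);
	/* ... vnode_getattr()/vn_rdwr() etc. while the vnode is open ... */
	return (vnode_close(vp, FREAD, ctx));
}
#endif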
4397 | ||
2d21ac55 A |
4398 | /* |
4399 | * Returns: 0 Success | |
4400 | * vnode_getattr:??? | |
4401 | */ | |
91447636 A |
4402 | errno_t |
4403 | vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx) | |
4404 | { | |
4405 | struct vnode_attr va; | |
4406 | int error; | |
4407 | ||
4408 | VATTR_INIT(&va); | |
4409 | VATTR_WANTED(&va, va_data_size); | |
4410 | error = vnode_getattr(vp, &va, ctx); | |
4411 | if (!error) | |
4412 | *sizep = va.va_data_size; | |
4413 | return(error); | |
4414 | } | |
4415 | ||
4416 | errno_t | |
4417 | vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx) | |
4418 | { | |
4419 | struct vnode_attr va; | |
4420 | ||
4421 | VATTR_INIT(&va); | |
4422 | VATTR_SET(&va, va_data_size, size); | |
4423 | va.va_vaflags = ioflag & 0xffff; | |
4424 | return(vnode_setattr(vp, &va, ctx)); | |
4425 | } | |
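/*
 * Editorial sketch (not part of the original source): vnode_size() and
 * vnode_setsize() above are thin wrappers over vnode_getattr()/vnode_setattr();
 * a caller truncating a file and confirming the result would pair them as
 * follows (ioflag 0 is passed for simplicity).
 */
#if 0	/* illustrative sketch */
static int
example_truncate_and_check(vnode_t vp, vfs_context_t ctx)
{
	off_t cursize = 0;
	int error;

	if ((error = vnode_setsize(vp, 0, 0, ctx)) != 0)
		return (error);
	return (vnode_size(vp, &cursize, ctx));	/* cursize should now be 0 */
}
#endif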
4426 | ||
0c530ab8 A |
4427 | /* |
4428 | * Create a filesystem object of arbitrary type with arbitrary attributes in | |
4429 | * the specified directory with the specified name. | |
4430 | * | |
4431 | * Parameters: dvp Pointer to the vnode of the directory | |
4432 | * in which to create the object. | |
4433 | * vpp Pointer to the area into which to | |
4434 | * return the vnode of the created object. | |
4435 | * cnp Component name pointer from the namei | |
4436 | * data structure, containing the name to | |
4437 | * use for the create object. | |
4438 | * vap Pointer to the vnode_attr structure | |
4439 | * describing the object to be created, | |
4440 | * including the type of object. | |
4441 | * flags VN_* flags controlling ACL inheritance | |
4442 | * and whether or not authorization is to | |
4443 | * be required for the operation. | |
4444 | * | |
4445 | * Returns: 0 Success | |
4446 | * !0 errno value | |
4447 | * | |
4448 | * Implicit: *vpp Contains the vnode of the object that | |
4449 | * was created, if successful. | |
4450 | * *cnp May be modified by the underlying VFS. | |
4451 | * *vap May be modified by the underlying VFS. | |
4452 | * In particular, it may be modified by either | |
4453 | * ACL inheritance or the attribute defaulting | |
4454 | * performed below, and it may | |
4455 | * be modified even if the operation is | |
4456 | * unsuccessful. | |
4457 | * | |
4458 | * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order. | |
4459 | * | |
4460 | * Modification of '*cnp' and '*vap' by the underlying VFS is | |
4461 | * strongly discouraged. | |
4462 | * | |
4463 | * XXX: This function is a 'vn_*' function; it belongs in vfs_vnops.c | |
4464 | * | |
4465 | * XXX: We should enumerate the possible errno values here, and where | |
4466 | * in the code they originated. | |
4467 | */ | |
91447636 A |
4468 | errno_t |
4469 | vn_create(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, struct vnode_attr *vap, int flags, vfs_context_t ctx) | |
4470 | { | |
4471 | kauth_acl_t oacl, nacl; | |
4472 | int initial_acl; | |
4473 | errno_t error; | |
4474 | vnode_t vp = (vnode_t)0; | |
4475 | ||
4476 | error = 0; | |
4477 | oacl = nacl = NULL; | |
4478 | initial_acl = 0; | |
4479 | ||
4480 | KAUTH_DEBUG("%p CREATE - '%s'", dvp, cnp->cn_nameptr); | |
4481 | ||
4482 | /* | |
4483 | * Handle ACL inheritance. | |
4484 | */ | |
4485 | if (!(flags & VN_CREATE_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) { | |
4486 | /* save the original filesec */ | |
4487 | if (VATTR_IS_ACTIVE(vap, va_acl)) { | |
4488 | initial_acl = 1; | |
4489 | oacl = vap->va_acl; | |
4490 | } | |
4491 | ||
4492 | vap->va_acl = NULL; | |
4493 | if ((error = kauth_acl_inherit(dvp, | |
4494 | oacl, | |
4495 | &nacl, | |
4496 | vap->va_type == VDIR, | |
4497 | ctx)) != 0) { | |
4498 | KAUTH_DEBUG("%p CREATE - error %d processing inheritance", dvp, error); | |
4499 | return(error); | |
4500 | } | |
4501 | ||
4502 | /* | |
4503 | * If the generated ACL is NULL, then we can save ourselves some effort | |
4504 | * by clearing the active bit. | |
4505 | */ | |
4506 | if (nacl == NULL) { | |
4507 | VATTR_CLEAR_ACTIVE(vap, va_acl); | |
4508 | } else { | |
4509 | VATTR_SET(vap, va_acl, nacl); | |
4510 | } | |
4511 | } | |
4512 | ||
4513 | /* | |
4514 | * Check and default new attributes. | |
4515 | * This will set va_uid, va_gid, va_mode and va_create_time at least, if the caller | |
4516 | * hasn't supplied them. | |
4517 | */ | |
4518 | if ((error = vnode_authattr_new(dvp, vap, flags & VN_CREATE_NOAUTH, ctx)) != 0) { | |
4519 | KAUTH_DEBUG("%p CREATE - error %d handing/defaulting attributes", dvp, error); | |
4520 | goto out; | |
4521 | } | |
4522 | ||
4523 | ||
4524 | /* | |
4525 | * Create the requested node. | |
4526 | */ | |
4527 | switch(vap->va_type) { | |
4528 | case VREG: | |
4529 | error = VNOP_CREATE(dvp, vpp, cnp, vap, ctx); | |
4530 | break; | |
4531 | case VDIR: | |
4532 | error = VNOP_MKDIR(dvp, vpp, cnp, vap, ctx); | |
4533 | break; | |
4534 | case VSOCK: | |
4535 | case VFIFO: | |
4536 | case VBLK: | |
4537 | case VCHR: | |
4538 | error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx); | |
4539 | break; | |
4540 | default: | |
4541 | panic("vnode_create: unknown vtype %d", vap->va_type); | |
4542 | } | |
4543 | if (error != 0) { | |
4544 | KAUTH_DEBUG("%p CREATE - error %d returned by filesystem", dvp, error); | |
4545 | goto out; | |
4546 | } | |
4547 | ||
4548 | vp = *vpp; | |
2d21ac55 A |
4549 | #if CONFIG_MACF |
4550 | if (!(flags & VN_CREATE_NOLABEL)) { | |
4551 | error = vnode_label(vnode_mount(vp), dvp, vp, cnp, | |
4552 | VNODE_LABEL_CREATE|VNODE_LABEL_NEEDREF, ctx); | |
4553 | if (error) | |
4554 | goto error; | |
4555 | } | |
4556 | #endif | |
4557 | ||
91447636 A |
4558 | /* |
4559 | * If some of the requested attributes weren't handled by the VNOP, | |
4560 | * use our fallback code. | |
4561 | */ | |
4562 | if (!VATTR_ALL_SUPPORTED(vap) && *vpp) { | |
4563 | KAUTH_DEBUG(" CREATE - doing fallback with ACL %p", vap->va_acl); | |
4564 | error = vnode_setattr_fallback(*vpp, vap, ctx); | |
4565 | } | |
2d21ac55 A |
4566 | #if CONFIG_MACF |
4567 | error: | |
4568 | #endif | |
91447636 A |
4569 | if ((error != 0 ) && (vp != (vnode_t)0)) { |
4570 | *vpp = (vnode_t) 0; | |
4571 | vnode_put(vp); | |
4572 | } | |
4573 | ||
4574 | out: | |
4575 | /* | |
4576 | * If the caller supplied a filesec in vap, it has been replaced | |
4577 | * now by the post-inheritance copy. We need to put the original back | |
4578 | * and free the inherited product. | |
4579 | */ | |
4580 | if (initial_acl) { | |
4581 | VATTR_SET(vap, va_acl, oacl); | |
4582 | } else { | |
4583 | VATTR_CLEAR_ACTIVE(vap, va_acl); | |
4584 | } | |
4585 | if (nacl != NULL) | |
4586 | kauth_acl_free(nacl); | |
4587 | ||
4588 | return(error); | |
4589 | } | |
4590 | ||
4591 | static kauth_scope_t vnode_scope; | |
2d21ac55 A |
4592 | static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action, |
4593 | uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); | |
4594 | static int vnode_authorize_callback_int(__unused kauth_cred_t credential, __unused void *idata, kauth_action_t action, | |
91447636 A |
4595 | uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); |
4596 | ||
4597 | typedef struct _vnode_authorize_context { | |
4598 | vnode_t vp; | |
4599 | struct vnode_attr *vap; | |
4600 | vnode_t dvp; | |
4601 | struct vnode_attr *dvap; | |
4602 | vfs_context_t ctx; | |
4603 | int flags; | |
4604 | int flags_valid; | |
4605 | #define _VAC_IS_OWNER (1<<0) | |
4606 | #define _VAC_IN_GROUP (1<<1) | |
4607 | #define _VAC_IS_DIR_OWNER (1<<2) | |
4608 | #define _VAC_IN_DIR_GROUP (1<<3) | |
4609 | } *vauth_ctx; | |
4610 | ||
4611 | void | |
4612 | vnode_authorize_init(void) | |
4613 | { | |
4614 | vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL); | |
4615 | } | |
4616 | ||
4617 | /* | |
4618 | * Authorize an operation on a vnode. | |
4619 | * | |
4620 | * This is KPI, but here because it needs vnode_scope. | |
2d21ac55 A |
4621 | * |
4622 | * Returns: 0 Success | |
4623 | * kauth_authorize_action:EPERM ... | |
4624 | * xlate => EACCES Permission denied | |
4625 | * kauth_authorize_action:0 Success | |
4626 | * kauth_authorize_action: Depends on callback return; this is | |
4627 | * usually only vnode_authorize_callback(), | |
4628 | * but may include other listeners, if any | |
4629 | * exist. | |
4630 | * EROFS | |
4631 | * EACCES | |
4632 | * EPERM | |
4633 | * ??? | |
91447636 A |
4634 | */ |
4635 | int | |
2d21ac55 | 4636 | vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx) |
91447636 A |
4637 | { |
4638 | int error, result; | |
4639 | ||
4640 | /* | |
4641 | * We can't authorize against a dead vnode; allow all operations through so that | |
4642 | * the correct error can be returned. | |
4643 | */ | |
4644 | if (vp->v_type == VBAD) | |
4645 | return(0); | |
4646 | ||
4647 | error = 0; | |
2d21ac55 A |
4648 | result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action, |
4649 | (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error); | |
91447636 A |
4650 | if (result == EPERM) /* traditional behaviour */ |
4651 | result = EACCES; | |
4652 | /* did the lower layers give a better error return? */ | |
4653 | if ((result != 0) && (error != 0)) | |
2d21ac55 | 4654 | return(error); |
91447636 A |
4655 | return(result); |
4656 | } | |
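/*
 * Editorial sketch (not part of the original source): a typical caller of
 * vnode_authorize() above.  KAUTH_VNODE_READ_DATA is assumed as the action
 * bit and NULLVP is passed for dvp since no directory context is needed
 * for a plain read check; both choices are illustrative assumptions.
 */
#if 0	/* illustrative sketch */
static int
example_can_read(vnode_t vp, vfs_context_t ctx)
{
	/* returns 0 on success, or EACCES/EROFS/... on denial */
	return (vnode_authorize(vp, NULLVP, KAUTH_VNODE_READ_DATA, ctx));
}
#endif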
4657 | ||
4658 | /* | |
4659 | * Test for vnode immutability. | |
4660 | * | |
4661 | * The 'append' flag is set when the authorization request is constrained | |
4662 | * to operations which only request the right to append to a file. | |
4663 | * | |
4664 | * The 'ignore' flag is set when an operation modifying the immutability flags | |
4665 | * is being authorized. We check the system securelevel to determine which | |
4666 | * immutability flags we can ignore. | |
4667 | */ | |
4668 | static int | |
4669 | vnode_immutable(struct vnode_attr *vap, int append, int ignore) | |
4670 | { | |
4671 | int mask; | |
4672 | ||
4673 | /* start with all bits precluding the operation */ | |
4674 | mask = IMMUTABLE | APPEND; | |
4675 | ||
4676 | /* if appending only, remove the append-only bits */ | |
4677 | if (append) | |
4678 | mask &= ~APPEND; | |
4679 | ||
4680 | /* ignore only set when authorizing flags changes */ | |
4681 | if (ignore) { | |
4682 | if (securelevel <= 0) { | |
4683 | /* in insecure state, flags do not inhibit changes */ | |
4684 | mask = 0; | |
4685 | } else { | |
4686 | /* in secure state, user flags don't inhibit */ | |
4687 | mask &= ~(UF_IMMUTABLE | UF_APPEND); | |
4688 | } | |
4689 | } | |
4690 | KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore); | |
4691 | if ((vap->va_flags & mask) != 0) | |
4692 | return(EPERM); | |
4693 | return(0); | |
4694 | } | |
4695 | ||
4696 | static int | |
4697 | vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred) | |
4698 | { | |
4699 | int result; | |
4700 | ||
4701 | /* default assumption is not-owner */ | |
4702 | result = 0; | |
4703 | ||
4704 | /* | |
4705 | * If the filesystem has given us a UID, we treat this as authoritative. | |
4706 | */ | |
4707 | if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) { | |
4708 | result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0; | |
4709 | } | |
4710 | /* we could test the owner UUID here if we had a policy for it */ | |
4711 | ||
4712 | return(result); | |
4713 | } | |
4714 | ||
4715 | static int | |
4716 | vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember) | |
4717 | { | |
4718 | int error; | |
4719 | int result; | |
4720 | ||
4721 | error = 0; | |
4722 | result = 0; | |
4723 | ||
4724 | /* the caller is expected to have asked the filesystem for a group at some point */ | |
4725 | if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) { | |
4726 | error = kauth_cred_ismember_gid(cred, vap->va_gid, &result); | |
4727 | } | |
4728 | /* we could test the group UUID here if we had a policy for it */ | |
4729 | ||
4730 | if (!error) | |
4731 | *ismember = result; | |
4732 | return(error); | |
4733 | } | |
4734 | ||
4735 | static int | |
4736 | vauth_file_owner(vauth_ctx vcp) | |
4737 | { | |
4738 | int result; | |
4739 | ||
4740 | if (vcp->flags_valid & _VAC_IS_OWNER) { | |
4741 | result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0; | |
4742 | } else { | |
4743 | result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred); | |
4744 | ||
4745 | /* cache our result */ | |
4746 | vcp->flags_valid |= _VAC_IS_OWNER; | |
4747 | if (result) { | |
4748 | vcp->flags |= _VAC_IS_OWNER; | |
4749 | } else { | |
4750 | vcp->flags &= ~_VAC_IS_OWNER; | |
4751 | } | |
4752 | } | |
4753 | return(result); | |
4754 | } | |
4755 | ||
4756 | static int | |
4757 | vauth_file_ingroup(vauth_ctx vcp, int *ismember) | |
4758 | { | |
4759 | int error; | |
4760 | ||
4761 | if (vcp->flags_valid & _VAC_IN_GROUP) { | |
4762 | *ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0; | |
4763 | error = 0; | |
4764 | } else { | |
4765 | error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember); | |
4766 | ||
4767 | if (!error) { | |
4768 | /* cache our result */ | |
4769 | vcp->flags_valid |= _VAC_IN_GROUP; | |
4770 | if (*ismember) { | |
4771 | vcp->flags |= _VAC_IN_GROUP; | |
4772 | } else { | |
4773 | vcp->flags &= ~_VAC_IN_GROUP; | |
4774 | } | |
4775 | } | |
4776 | ||
4777 | } | |
4778 | return(error); | |
4779 | } | |
4780 | ||
4781 | static int | |
4782 | vauth_dir_owner(vauth_ctx vcp) | |
4783 | { | |
4784 | int result; | |
4785 | ||
4786 | if (vcp->flags_valid & _VAC_IS_DIR_OWNER) { | |
4787 | result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0; | |
4788 | } else { | |
4789 | result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred); | |
4790 | ||
4791 | /* cache our result */ | |
4792 | vcp->flags_valid |= _VAC_IS_DIR_OWNER; | |
4793 | if (result) { | |
4794 | vcp->flags |= _VAC_IS_DIR_OWNER; | |
4795 | } else { | |
4796 | vcp->flags &= ~_VAC_IS_DIR_OWNER; | |
4797 | } | |
4798 | } | |
4799 | return(result); | |
4800 | } | |
4801 | ||
4802 | static int | |
4803 | vauth_dir_ingroup(vauth_ctx vcp, int *ismember) | |
4804 | { | |
4805 | int error; | |
4806 | ||
4807 | if (vcp->flags_valid & _VAC_IN_DIR_GROUP) { | |
4808 | *ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0; | |
4809 | error = 0; | |
4810 | } else { | |
4811 | error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember); | |
4812 | ||
4813 | if (!error) { | |
4814 | /* cache our result */ | |
4815 | vcp->flags_valid |= _VAC_IN_DIR_GROUP; | |
4816 | if (*ismember) { | |
4817 | vcp->flags |= _VAC_IN_DIR_GROUP; | |
4818 | } else { | |
4819 | vcp->flags &= ~_VAC_IN_DIR_GROUP; | |
4820 | } | |
4821 | } | |
4822 | } | |
4823 | return(error); | |
4824 | } | |
4825 | ||
4826 | /* | |
4827 | * Test the posix permissions in (vap) to determine whether (credential) | |
4828 | * may perform (action) | |
4829 | */ | |
4830 | static int | |
4831 | vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir) | |
4832 | { | |
4833 | struct vnode_attr *vap; | |
4834 | int needed, error, owner_ok, group_ok, world_ok, ismember; | |
4835 | #ifdef KAUTH_DEBUG_ENABLE | |
2d21ac55 | 4836 | const char *where = "uninitialized"; |
91447636 A |
4837 | # define _SETWHERE(c) where = c; |
4838 | #else | |
4839 | # define _SETWHERE(c) | |
4840 | #endif | |
4841 | ||
4842 | /* checking file or directory? */ | |
4843 | if (on_dir) { | |
4844 | vap = vcp->dvap; | |
4845 | } else { | |
4846 | vap = vcp->vap; | |
4847 | } | |
4848 | ||
4849 | error = 0; | |
4850 | ||
4851 | /* | |
4852 | * We want to do as little work here as possible. So first we check | |
4853 | * which sets of permissions grant us the access we need, and avoid checking | |
4854 | * whether specific permissions grant access when more generic ones would. | |
4855 | */ | |
4856 | ||
4857 | /* owner permissions */ | |
4858 | needed = 0; | |
4859 | if (action & VREAD) | |
4860 | needed |= S_IRUSR; | |
4861 | if (action & VWRITE) | |
4862 | needed |= S_IWUSR; | |
4863 | if (action & VEXEC) | |
4864 | needed |= S_IXUSR; | |
4865 | owner_ok = (needed & vap->va_mode) == needed; | |
4866 | ||
4867 | /* group permissions */ | |
4868 | needed = 0; | |
4869 | if (action & VREAD) | |
4870 | needed |= S_IRGRP; | |
4871 | if (action & VWRITE) | |
4872 | needed |= S_IWGRP; | |
4873 | if (action & VEXEC) | |
4874 | needed |= S_IXGRP; | |
4875 | group_ok = (needed & vap->va_mode) == needed; | |
4876 | ||
4877 | /* world permissions */ | |
4878 | needed = 0; | |
4879 | if (action & VREAD) | |
4880 | needed |= S_IROTH; | |
4881 | if (action & VWRITE) | |
4882 | needed |= S_IWOTH; | |
4883 | if (action & VEXEC) | |
4884 | needed |= S_IXOTH; | |
4885 | world_ok = (needed & vap->va_mode) == needed; | |
4886 | ||
4887 | /* If granted/denied by all three, we're done */ | |
4888 | if (owner_ok && group_ok && world_ok) { | |
4889 | _SETWHERE("all"); | |
4890 | goto out; | |
4891 | } | |
4892 | if (!owner_ok && !group_ok && !world_ok) { | |
4893 | _SETWHERE("all"); | |
4894 | error = EACCES; | |
4895 | goto out; | |
4896 | } | |
4897 | ||
4898 | /* Check ownership (relatively cheap) */ | |
4899 | if ((on_dir && vauth_dir_owner(vcp)) || | |
4900 | (!on_dir && vauth_file_owner(vcp))) { | |
4901 | _SETWHERE("user"); | |
4902 | if (!owner_ok) | |
4903 | error = EACCES; | |
4904 | goto out; | |
4905 | } | |
4906 | ||
4907 | /* Not owner; if group and world both grant it we're done */ | |
4908 | if (group_ok && world_ok) { | |
4909 | _SETWHERE("group/world"); | |
4910 | goto out; | |
4911 | } | |
4912 | if (!group_ok && !world_ok) { | |
4913 | _SETWHERE("group/world"); | |
4914 | error = EACCES; | |
4915 | goto out; | |
4916 | } | |
4917 | ||
4918 | /* Check group membership (most expensive) */ | |
4919 | ismember = 0; | |
4920 | if (on_dir) { | |
4921 | error = vauth_dir_ingroup(vcp, &ismember); | |
4922 | } else { | |
4923 | error = vauth_file_ingroup(vcp, &ismember); | |
4924 | } | |
4925 | if (error) | |
4926 | goto out; | |
4927 | if (ismember) { | |
4928 | _SETWHERE("group"); | |
4929 | if (!group_ok) | |
4930 | error = EACCES; | |
4931 | goto out; | |
4932 | } | |
4933 | ||
4934 | /* Not owner, not in group, use world result */ | |
4935 | _SETWHERE("world"); | |
4936 | if (!world_ok) | |
4937 | error = EACCES; | |
4938 | ||
4939 | /* FALLTHROUGH */ | |
4940 | ||
4941 | out: | |
4942 | KAUTH_DEBUG("%p %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d", | |
4943 | vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where, | |
4944 | (action & VREAD) ? "r" : "-", | |
4945 | (action & VWRITE) ? "w" : "-", | |
4946 | (action & VEXEC) ? "x" : "-", | |
4947 | needed, | |
4948 | (vap->va_mode & S_IRUSR) ? "r" : "-", | |
4949 | (vap->va_mode & S_IWUSR) ? "w" : "-", | |
4950 | (vap->va_mode & S_IXUSR) ? "x" : "-", | |
4951 | (vap->va_mode & S_IRGRP) ? "r" : "-", | |
4952 | (vap->va_mode & S_IWGRP) ? "w" : "-", | |
4953 | (vap->va_mode & S_IXGRP) ? "x" : "-", | |
4954 | (vap->va_mode & S_IROTH) ? "r" : "-", | |
4955 | (vap->va_mode & S_IWOTH) ? "w" : "-", | |
4956 | (vap->va_mode & S_IXOTH) ? "x" : "-", | |
4957 | kauth_cred_getuid(vcp->ctx->vc_ucred), | |
4958 | on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid, | |
4959 | on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid); | |
4960 | return(error); | |
4961 | } | |
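/*
 * Editorial sketch (not part of the original source): the same
 * owner/group/world short-circuit used by vnode_authorize_posix(), reduced
 * to plain uid/gid arithmetic with the <sys/stat.h> mode bits.  The single
 * gid comparison is a stand-in for the kauth_cred_ismember_gid() membership
 * test, and all names here are hypothetical.
 */
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>

static int
example_posix_access(mode_t mode, uid_t file_uid, gid_t file_gid,
    uid_t uid, gid_t gid, int want_r, int want_w, int want_x)
{
	mode_t needed;
	int owner_ok, group_ok, world_ok;

	/* which of the three permission sets would satisfy the request? */
	needed = (want_r ? S_IRUSR : 0) | (want_w ? S_IWUSR : 0) | (want_x ? S_IXUSR : 0);
	owner_ok = (mode & needed) == needed;
	needed = (want_r ? S_IRGRP : 0) | (want_w ? S_IWGRP : 0) | (want_x ? S_IXGRP : 0);
	group_ok = (mode & needed) == needed;
	needed = (want_r ? S_IROTH : 0) | (want_w ? S_IWOTH : 0) | (want_x ? S_IXOTH : 0);
	world_ok = (mode & needed) == needed;

	/* unanimous grant or unanimous denial needs no identity checks at all */
	if (owner_ok && group_ok && world_ok)
		return 0;
	if (!owner_ok && !group_ok && !world_ok)
		return EACCES;

	/* ownership is cheap to test; group membership is the expensive step */
	if (uid == file_uid)
		return owner_ok ? 0 : EACCES;
	if (group_ok && world_ok)
		return 0;
	if (!group_ok && !world_ok)
		return EACCES;
	if (gid == file_gid)
		return group_ok ? 0 : EACCES;
	return world_ok ? 0 : EACCES;
}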
4962 | ||
4963 | /* | |
4964 | * Authorize the deletion of the node vp from the directory dvp. | |
4965 | * | |
4966 | * We assume that: | |
4967 | * - Neither the node nor the directory are immutable. | |
4968 | * - The user is not the superuser. | |
4969 | * | |
4970 | * Deletion is not permitted if the directory is sticky and the caller is not owner of the | |
4971 | * node or directory. | |
4972 | * | |
4973 | * If either the node grants DELETE, or the directory grants DELETE_CHILD, the node may be | |
4974 | * deleted. If neither denies the permission, and the caller has Posix write access to the | |
4975 | * directory, then the node may be deleted. | |
4976 | */ | |
4977 | static int | |
4978 | vnode_authorize_delete(vauth_ctx vcp) | |
4979 | { | |
4980 | struct vnode_attr *vap = vcp->vap; | |
4981 | struct vnode_attr *dvap = vcp->dvap; | |
4982 | kauth_cred_t cred = vcp->ctx->vc_ucred; | |
4983 | struct kauth_acl_eval eval; | |
4984 | int error, delete_denied, delete_child_denied, ismember; | |
4985 | ||
4986 | /* check the ACL on the directory */ | |
4987 | delete_child_denied = 0; | |
4988 | if (VATTR_IS_NOT(dvap, va_acl, NULL)) { | |
4989 | eval.ae_requested = KAUTH_VNODE_DELETE_CHILD; | |
4990 | eval.ae_acl = &dvap->va_acl->acl_ace[0]; | |
4991 | eval.ae_count = dvap->va_acl->acl_entrycount; | |
4992 | eval.ae_options = 0; | |
4993 | if (vauth_dir_owner(vcp)) | |
4994 | eval.ae_options |= KAUTH_AEVAL_IS_OWNER; | |
4995 | if ((error = vauth_dir_ingroup(vcp, &ismember)) != 0) | |
4996 | return(error); | |
4997 | if (ismember) | |
4998 | eval.ae_options |= KAUTH_AEVAL_IN_GROUP; | |
4999 | eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS; | |
5000 | eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS; | |
5001 | eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS; | |
5002 | eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS; | |
5003 | ||
5004 | error = kauth_acl_evaluate(cred, &eval); | |
5005 | ||
5006 | if (error != 0) { | |
5007 | KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error); | |
5008 | return(error); | |
5009 | } | |
5010 | if (eval.ae_result == KAUTH_RESULT_DENY) | |
5011 | delete_child_denied = 1; | |
5012 | if (eval.ae_result == KAUTH_RESULT_ALLOW) { | |
5013 | KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp); | |
5014 | return(0); | |
5015 | } | |
5016 | } | |
5017 | ||
5018 | /* check the ACL on the node */ | |
5019 | delete_denied = 0; | |
5020 | if (VATTR_IS_NOT(vap, va_acl, NULL)) { | |
5021 | eval.ae_requested = KAUTH_VNODE_DELETE; | |
5022 | eval.ae_acl = &vap->va_acl->acl_ace[0]; | |
5023 | eval.ae_count = vap->va_acl->acl_entrycount; | |
5024 | eval.ae_options = 0; | |
5025 | if (vauth_file_owner(vcp)) | |
5026 | eval.ae_options |= KAUTH_AEVAL_IS_OWNER; | |
5027 | if ((error = vauth_file_ingroup(vcp, &ismember)) != 0) | |
5028 | return(error); | |
5029 | if (ismember) | |
5030 | eval.ae_options |= KAUTH_AEVAL_IN_GROUP; | |
5031 | eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS; | |
5032 | eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS; | |
5033 | eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS; | |
5034 | eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS; | |
5035 | ||
5036 | if ((error = kauth_acl_evaluate(cred, &eval)) != 0) { | |
5037 | KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error); | |
5038 | return(error); | |
5039 | } | |
5040 | if (eval.ae_result == KAUTH_RESULT_DENY) | |
5041 | delete_denied = 1; | |
5042 | if (eval.ae_result == KAUTH_RESULT_ALLOW) { | |
5043 | KAUTH_DEBUG("%p ALLOWED - granted by file ACL", vcp->vp); | |
5044 | return(0); | |
5045 | } | |
5046 | } | |
5047 | ||
5048 | /* if denied by ACL on directory or node, return denial */ | |
5049 | if (delete_denied || delete_child_denied) { | |
5050 | KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp); | |
5051 | return(EACCES); | |
5052 | } | |
5053 | ||
5054 | /* enforce sticky bit behaviour */ | |
5055 | if ((dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) { | |
5056 | KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)", | |
5057 | vcp->vp, cred->cr_uid, vap->va_uid, dvap->va_uid); | |
5058 | return(EACCES); | |
5059 | } | |
5060 | ||
5061 | /* check the directory */ | |
5062 | if ((error = vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */)) != 0) { | |
5063 | KAUTH_DEBUG("%p DENIED - denied by posix permissions", vcp->vp); | |
5064 | return(error); | |
5065 | } | |
5066 | ||
5067 | /* not denied, must be OK */ | |
5068 | return(0); | |
5069 | } | |
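/*
 * Editorial sketch (not part of the original source): the sticky-directory
 * rule applied above, in isolation.  S_ISVTX is the standard spelling of the
 * kernel's S_ISTXT; the function name is hypothetical.  The superuser and
 * immutability cases are assumed to have been handled already, as the comment
 * block before vnode_authorize_delete() states.
 */
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>

static int
example_sticky_delete(mode_t dir_mode, uid_t dir_uid, uid_t file_uid, uid_t uid)
{
	/* sticky directory: only the owner of the node or of the directory may delete */
	if ((dir_mode & S_ISVTX) && uid != file_uid && uid != dir_uid)
		return EACCES;

	/* otherwise deletion falls back to write access on the directory */
	return 0;
}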
5070 | ||
5071 | ||
5072 | /* | |
5073 | * Authorize an operation based on the node's attributes. | |
5074 | */ | |
5075 | static int | |
2d21ac55 | 5076 | vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny) |
91447636 A |
5077 | { |
5078 | struct vnode_attr *vap = vcp->vap; | |
5079 | kauth_cred_t cred = vcp->ctx->vc_ucred; | |
5080 | struct kauth_acl_eval eval; | |
5081 | int error, ismember; | |
5082 | mode_t posix_action; | |
5083 | ||
5084 | /* | |
5085 | * If we are the file owner, we automatically have some rights. | |
5086 | * | |
5087 | * Do we need to expand this to support group ownership? | |
5088 | */ | |
5089 | if (vauth_file_owner(vcp)) | |
5090 | acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY); | |
5091 | ||
5092 | /* | |
5093 | * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can | |
5094 | * mask the latter. If TAKE_OWNERSHIP is requested the caller is about to | |
5095 | * change ownership to themselves, and WRITE_SECURITY is implicitly | |
5096 | * granted to the owner. We need to do this because at this point | |
5097 | * WRITE_SECURITY may not be granted as the caller is not currently | |
5098 | * the owner. | |
5099 | */ | |
5100 | if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) && | |
5101 | (acl_rights & KAUTH_VNODE_WRITE_SECURITY)) | |
5102 | acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY; | |
5103 | ||
5104 | if (acl_rights == 0) { | |
5105 | KAUTH_DEBUG("%p ALLOWED - implicit or no rights required", vcp->vp); | |
5106 | return(0); | |
5107 | } | |
5108 | ||
5109 | /* if we have an ACL, evaluate it */ | |
5110 | if (VATTR_IS_NOT(vap, va_acl, NULL)) { | |
5111 | eval.ae_requested = acl_rights; | |
5112 | eval.ae_acl = &vap->va_acl->acl_ace[0]; | |
5113 | eval.ae_count = vap->va_acl->acl_entrycount; | |
5114 | eval.ae_options = 0; | |
5115 | if (vauth_file_owner(vcp)) | |
5116 | eval.ae_options |= KAUTH_AEVAL_IS_OWNER; | |
5117 | if ((error = vauth_file_ingroup(vcp, &ismember)) != 0) | |
5118 | return(error); | |
5119 | if (ismember) | |
5120 | eval.ae_options |= KAUTH_AEVAL_IN_GROUP; | |
5121 | eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS; | |
5122 | eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS; | |
5123 | eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS; | |
5124 | eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS; | |
5125 | ||
5126 | if ((error = kauth_acl_evaluate(cred, &eval)) != 0) { | |
5127 | KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error); | |
5128 | return(error); | |
5129 | } | |
5130 | ||
5131 | if (eval.ae_result == KAUTH_RESULT_DENY) { | |
5132 | KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp); | |
5133 | return(EACCES); /* deny, deny, counter-allege */ | |
5134 | } | |
5135 | if (eval.ae_result == KAUTH_RESULT_ALLOW) { | |
5136 | KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp); | |
5137 | return(0); | |
5138 | } | |
2d21ac55 A |
5139 | *found_deny = eval.ae_found_deny; |
5140 | ||
91447636 A |
5141 | /* fall through and evaluate residual rights */ |
5142 | } else { | |
5143 | /* no ACL, everything is residual */ | |
5144 | eval.ae_residual = acl_rights; | |
5145 | } | |
5146 | ||
5147 | /* | |
5148 | * Grant residual rights that have been pre-authorized. | |
5149 | */ | |
5150 | eval.ae_residual &= ~preauth_rights; | |
5151 | ||
5152 | /* | |
5153 | * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied. | |
5154 | */ | |
5155 | if (vauth_file_owner(vcp)) | |
5156 | eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES; | |
5157 | ||
5158 | if (eval.ae_residual == 0) { | |
5159 | KAUTH_DEBUG("%p ALLOWED - rights already authorized", vcp->vp); | |
5160 | return(0); | |
5161 | } | |
5162 | ||
5163 | /* | |
5164 | * Bail if we have residual rights that can't be granted by posix permissions, | |
5165 | * or aren't presumed granted at this point. | |
5166 | * | |
5167 | * XXX these can be collapsed for performance | |
5168 | */ | |
5169 | if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) { | |
5170 | KAUTH_DEBUG("%p DENIED - CHANGE_OWNER not permitted", vcp->vp); | |
5171 | return(EACCES); | |
5172 | } | |
5173 | if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) { | |
5174 | KAUTH_DEBUG("%p DENIED - WRITE_SECURITY not permitted", vcp->vp); | |
5175 | return(EACCES); | |
5176 | } | |
5177 | ||
5178 | #if DIAGNOSTIC | |
5179 | if (eval.ae_residual & KAUTH_VNODE_DELETE) | |
5180 | panic("vnode_authorize: can't be checking delete permission here"); | |
5181 | #endif | |
5182 | ||
5183 | /* | |
5184 | * Compute the fallback posix permissions that will satisfy the remaining | |
5185 | * rights. | |
5186 | */ | |
5187 | posix_action = 0; | |
5188 | if (eval.ae_residual & (KAUTH_VNODE_READ_DATA | | |
5189 | KAUTH_VNODE_LIST_DIRECTORY | | |
5190 | KAUTH_VNODE_READ_EXTATTRIBUTES)) | |
5191 | posix_action |= VREAD; | |
5192 | if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA | | |
5193 | KAUTH_VNODE_ADD_FILE | | |
5194 | KAUTH_VNODE_ADD_SUBDIRECTORY | | |
5195 | KAUTH_VNODE_DELETE_CHILD | | |
5196 | KAUTH_VNODE_WRITE_ATTRIBUTES | | |
5197 | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) | |
5198 | posix_action |= VWRITE; | |
5199 | if (eval.ae_residual & (KAUTH_VNODE_EXECUTE | | |
5200 | KAUTH_VNODE_SEARCH)) | |
5201 | posix_action |= VEXEC; | |
5202 | ||
5203 | if (posix_action != 0) { | |
5204 | return(vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */)); | |
5205 | } else { | |
5206 | KAUTH_DEBUG("%p ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping", | |
5207 | vcp->vp, | |
5208 | (eval.ae_residual & KAUTH_VNODE_READ_DATA) | |
5209 | ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "", | |
5210 | (eval.ae_residual & KAUTH_VNODE_WRITE_DATA) | |
5211 | ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "", | |
5212 | (eval.ae_residual & KAUTH_VNODE_EXECUTE) | |
5213 | ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "", | |
5214 | (eval.ae_residual & KAUTH_VNODE_DELETE) | |
5215 | ? " DELETE" : "", | |
5216 | (eval.ae_residual & KAUTH_VNODE_APPEND_DATA) | |
5217 | ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "", | |
5218 | (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD) | |
5219 | ? " DELETE_CHILD" : "", | |
5220 | (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES) | |
5221 | ? " READ_ATTRIBUTES" : "", | |
5222 | (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES) | |
5223 | ? " WRITE_ATTRIBUTES" : "", | |
5224 | (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES) | |
5225 | ? " READ_EXTATTRIBUTES" : "", | |
5226 | (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES) | |
5227 | ? " WRITE_EXTATTRIBUTES" : "", | |
5228 | (eval.ae_residual & KAUTH_VNODE_READ_SECURITY) | |
5229 | ? " READ_SECURITY" : "", | |
5230 | (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) | |
5231 | ? " WRITE_SECURITY" : "", | |
5232 | (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE) | |
5233 | ? " CHECKIMMUTABLE" : "", | |
5234 | (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) | |
5235 | ? " CHANGE_OWNER" : ""); | |
5236 | } | |
5237 | ||
5238 | /* | |
5239 | * No residual right maps onto a Posix permission here, so there is no reason to deny access. | |
5240 | */ | |
5241 | return(0); | |
5242 | } | |
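/*
 * Editorial sketch (not part of the original source): how the residual
 * rights collapse onto the three posix actions at the end of
 * vnode_authorize_simple().  The EX_* values are hypothetical stand-ins,
 * not the real KAUTH_VNODE_* or VREAD/VWRITE/VEXEC definitions.
 */
enum {	/* stand-in rights bits, for exposition only */
	EX_READ_DATA       = 0x001, EX_LIST_DIRECTORY = 0x002, EX_READ_EXTATTR  = 0x004,
	EX_WRITE_DATA      = 0x008, EX_ADD_FILE       = 0x010, EX_ADD_SUBDIR    = 0x020,
	EX_DELETE_CHILD    = 0x040, EX_WRITE_ATTR     = 0x080, EX_WRITE_EXTATTR = 0x100,
	EX_EXECUTE         = 0x200, EX_SEARCH         = 0x400
};
enum { EX_VEXEC = 01, EX_VWRITE = 02, EX_VREAD = 04 };	/* stand-ins for VEXEC/VWRITE/VREAD */

static int
example_residual_to_posix(unsigned int residual)
{
	int posix_action = 0;

	if (residual & (EX_READ_DATA | EX_LIST_DIRECTORY | EX_READ_EXTATTR))
		posix_action |= EX_VREAD;
	if (residual & (EX_WRITE_DATA | EX_ADD_FILE | EX_ADD_SUBDIR |
	    EX_DELETE_CHILD | EX_WRITE_ATTR | EX_WRITE_EXTATTR))
		posix_action |= EX_VWRITE;
	if (residual & (EX_EXECUTE | EX_SEARCH))
		posix_action |= EX_VEXEC;

	/* any residual right that maps to no posix bit at all is simply granted */
	return posix_action;
}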
5243 | ||
5244 | /* | |
5245 | * Check for file immutability. | |
5246 | */ | |
5247 | static int | |
5248 | vnode_authorize_checkimmutable(vnode_t vp, struct vnode_attr *vap, int rights, int ignore) | |
5249 | { | |
5250 | mount_t mp; | |
5251 | int error; | |
5252 | int append; | |
5253 | ||
5254 | /* | |
5255 | * Perform immutability checks for operations that change data. | |
5256 | * | |
5257 | * Sockets, fifos and devices require special handling. | |
5258 | */ | |
5259 | switch(vp->v_type) { | |
5260 | case VSOCK: | |
5261 | case VFIFO: | |
5262 | case VBLK: | |
5263 | case VCHR: | |
5264 | /* | |
5265 | * Writing to these nodes does not change the filesystem data, | |
5266 | * so forget that it's being tried. | |
5267 | */ | |
5268 | rights &= ~KAUTH_VNODE_WRITE_DATA; | |
5269 | break; | |
5270 | default: | |
5271 | break; | |
5272 | } | |
5273 | ||
5274 | error = 0; | |
5275 | if (rights & KAUTH_VNODE_WRITE_RIGHTS) { | |
5276 | ||
5277 | /* check per-filesystem options if possible */ | |
2d21ac55 | 5278 | mp = vp->v_mount; |
91447636 A |
5279 | if (mp != NULL) { |
5280 | ||
5281 | /* check for no-EA filesystems */ | |
5282 | if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) && | |
5283 | (vfs_flags(mp) & MNT_NOUSERXATTR)) { | |
5284 | KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vp); | |
5285 | error = EACCES; /* User attributes disabled */ | |
5286 | goto out; | |
5287 | } | |
5288 | } | |
5289 | ||
5290 | /* check for file immutability */ | |
5291 | append = 0; | |
5292 | if (vp->v_type == VDIR) { | |
5293 | if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY)) == rights) | |
5294 | append = 1; | |
5295 | } else { | |
5296 | if ((rights & KAUTH_VNODE_APPEND_DATA) == rights) | |
5297 | append = 1; | |
5298 | } | |
5299 | if ((error = vnode_immutable(vap, append, ignore)) != 0) { | |
5300 | KAUTH_DEBUG("%p DENIED - file is immutable", vp); | |
5301 | goto out; | |
5302 | } | |
5303 | } | |
5304 | out: | |
5305 | return(error); | |
5306 | } | |
5307 | ||
5308 | /* | |
2d21ac55 A |
5309 | * Handle authorization actions for filesystems that advertise that the |
5310 | * server will be enforcing. | |
5311 | * | |
5312 | * Returns: 0 Authorization should be handled locally | |
5313 | * 1 Authorization was handled by the FS | |
5314 | * | |
5315 | * Note: Imputed returns will only occur if the authorization request | |
5316 | * was handled by the FS. | |
5317 | * | |
5318 | * Imputed: *resultp, modified Return code from FS when the request is | |
5319 | * handled by the FS. | |
5320 | * VNOP_ACCESS:??? | |
5321 | * VNOP_OPEN:??? | |
91447636 A |
5322 | */ |
5323 | static int | |
5324 | vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx) | |
5325 | { | |
5326 | int error; | |
5327 | ||
5328 | /* | |
5329 | * If the vp is a device node, socket or FIFO it actually represents a local | |
5330 | * endpoint, so we need to handle it locally. | |
5331 | */ | |
5332 | switch(vp->v_type) { | |
5333 | case VBLK: | |
5334 | case VCHR: | |
5335 | case VSOCK: | |
5336 | case VFIFO: | |
5337 | return(0); | |
5338 | default: | |
5339 | break; | |
5340 | } | |
5341 | ||
5342 | /* | |
5343 | * In the advisory request case, if the filesystem doesn't think it's reliable | |
5344 | * we will attempt to formulate a result ourselves based on VNOP_GETATTR data. | |
5345 | */ | |
2d21ac55 | 5346 | if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount)) |
91447636 A |
5347 | return(0); |
5348 | ||
5349 | /* | |
5350 | * Let the filesystem have a say in the matter. It's OK for it to not implement | |
5351 | * VNOP_ACCESS, as most will authorise inline with the actual request. | |
5352 | */ | |
5353 | if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) { | |
5354 | *resultp = error; | |
5355 | KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp); | |
5356 | return(1); | |
5357 | } | |
5358 | ||
5359 | /* | |
5360 | * Typically opaque filesystems do authorisation in-line, but exec is a special case. In | |
5361 | * order to be reasonably sure that exec will be permitted, we try a bit harder here. | |
5362 | */ | |
2d21ac55 | 5363 | if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) { |
91447636 A |
5364 | /* try a VNOP_OPEN for readonly access */ |
5365 | if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) { | |
5366 | *resultp = error; | |
5367 | KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp); | |
5368 | return(1); | |
5369 | } | |
5370 | VNOP_CLOSE(vp, FREAD, ctx); | |
5371 | } | |
5372 | ||
5373 | /* | |
5374 | * We don't have any reason to believe that the request has to be denied at this point, | |
5375 | * so go ahead and allow it. | |
5376 | */ | |
5377 | *resultp = 0; | |
5378 | KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp); | |
5379 | return(1); | |
5380 | } | |
5381 | ||
2d21ac55 A |
5382 | |
5383 | ||
5384 | ||
5385 | /* | |
5386 | * Returns: KAUTH_RESULT_ALLOW | |
5387 | * KAUTH_RESULT_DENY | |
5388 | * | |
5389 | * Imputed: *arg3, modified Error code in the deny case | |
5390 | * EROFS Read-only file system | |
5391 | * EACCES Permission denied | |
5392 | * EPERM Operation not permitted [no execute] | |
5393 | * vnode_getattr:ENOMEM Not enough space [only if has filesec] | |
5394 | * vnode_getattr:??? | |
5395 | * vnode_authorize_opaque:*arg2 ??? | |
5396 | * vnode_authorize_checkimmutable:??? | |
5397 | * vnode_authorize_delete:??? | |
5398 | * vnode_authorize_simple:??? | |
5399 | */ | |
5400 | ||
5401 | ||
5402 | static int | |
5403 | vnode_authorize_callback(kauth_cred_t cred, void *idata, kauth_action_t action, | |
5404 | uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3) | |
5405 | { | |
5406 | vfs_context_t ctx; | |
5407 | vnode_t cvp = NULLVP; | |
5408 | vnode_t vp, dvp; | |
5409 | int result; | |
5410 | ||
5411 | ctx = (vfs_context_t)arg0; | |
5412 | vp = (vnode_t)arg1; | |
5413 | dvp = (vnode_t)arg2; | |
5414 | ||
5415 | /* | |
5416 | * If two vnodes are passed in, we don't yet know which | |
5417 | * rights apply to which vnode for the combined action | |
5418 | * being passed in, so defer until later. Otherwise, | |
5419 | * check the kauth 'rights' cache hung off the vnode | |
5420 | * we're interested in; if we've already been granted | |
5421 | * the right we're currently interested in, we can just | |
5422 | * return success. Otherwise we go through the process | |
5423 | * of authorizing the requested right(s) and, if that | |
5424 | * succeeds, add the right(s) to the cache. | |
5425 | * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache. | |
5426 | */ | |
5427 | if (dvp && vp) | |
5428 | goto defer; | |
5429 | if (dvp) | |
5430 | cvp = dvp; | |
5431 | else | |
5432 | cvp = vp; | |
5433 | ||
5434 | if (vnode_cache_is_authorized(cvp, ctx, action) == TRUE) | |
5435 | return KAUTH_RESULT_ALLOW; | |
5436 | defer: | |
5437 | result = vnode_authorize_callback_int(cred, idata, action, arg0, arg1, arg2, arg3); | |
5438 | ||
5439 | if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP) | |
5440 | vnode_cache_authorized_action(cvp, ctx, action); | |
5441 | ||
5442 | return result; | |
5443 | } | |
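/*
 * Editorial sketch (not part of the original source): the
 * check-cache / authorize / populate-cache pattern that
 * vnode_authorize_callback() implements.  ex_node, example_authorize_slow()
 * and the other names are hypothetical stand-ins for the vnode rights cache
 * and for vnode_authorize_callback_int().
 */
typedef unsigned int ex_action_t;

struct ex_node {
	ex_action_t cached_rights;	/* rights already granted for this identity */
};

/* stub standing in for the full (slow) authorization path */
static int
example_authorize_slow(struct ex_node *n, ex_action_t action)
{
	(void)n;
	(void)action;
	return 1;
}

static int
example_authorize(struct ex_node *n, ex_action_t action)
{
	/* fast path: every requested right is already in the cache */
	if ((n->cached_rights & action) == action)
		return 1;

	/* slow path: run the full check, then remember a successful grant */
	if (example_authorize_slow(n, action)) {
		n->cached_rights |= action;
		return 1;
	}
	return 0;
}
/*
 * As in the kernel code, the cache is only populated on success, and any
 * operation that changes attributes or ACLs must invalidate it.
 */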
5444 | ||
5445 | ||
91447636 | 5446 | static int |
2d21ac55 | 5447 | vnode_authorize_callback_int(__unused kauth_cred_t unused_cred, __unused void *idata, kauth_action_t action, |
91447636 A |
5448 | uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3) |
5449 | { | |
5450 | struct _vnode_authorize_context auth_context; | |
5451 | vauth_ctx vcp; | |
5452 | vfs_context_t ctx; | |
5453 | vnode_t vp, dvp; | |
5454 | kauth_cred_t cred; | |
5455 | kauth_ace_rights_t rights; | |
5456 | struct vnode_attr va, dva; | |
5457 | int result; | |
5458 | int *errorp; | |
5459 | int noimmutable; | |
2d21ac55 A |
5460 | boolean_t parent_authorized_for_delete = FALSE; |
5461 | boolean_t found_deny = FALSE; | |
5462 | boolean_t parent_ref= FALSE; | |
91447636 A |
5463 | |
5464 | vcp = &auth_context; | |
5465 | ctx = vcp->ctx = (vfs_context_t)arg0; | |
5466 | vp = vcp->vp = (vnode_t)arg1; | |
5467 | dvp = vcp->dvp = (vnode_t)arg2; | |
5468 | errorp = (int *)arg3; | |
2d21ac55 A |
5469 | /* |
5470 | * Note that we authorize against the context, not the passed cred | |
5471 | * (the same thing anyway) | |
5472 | */ | |
91447636 A |
5473 | cred = ctx->vc_ucred; |
5474 | ||
5475 | VATTR_INIT(&va); | |
5476 | vcp->vap = &va; | |
5477 | VATTR_INIT(&dva); | |
5478 | vcp->dvap = &dva; | |
5479 | ||
5480 | vcp->flags = vcp->flags_valid = 0; | |
5481 | ||
5482 | #if DIAGNOSTIC | |
5483 | if ((ctx == NULL) || (vp == NULL) || (cred == NULL)) | |
5484 | panic("vnode_authorize: bad arguments (context %p vp %p cred %p)", ctx, vp, cred); | |
5485 | #endif | |
5486 | ||
5487 | KAUTH_DEBUG("%p AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)", | |
5488 | vp, vfs_context_proc(ctx)->p_comm, | |
5489 | (action & KAUTH_VNODE_ACCESS) ? "access" : "auth", | |
5490 | (action & KAUTH_VNODE_READ_DATA) ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "", | |
5491 | (action & KAUTH_VNODE_WRITE_DATA) ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "", | |
5492 | (action & KAUTH_VNODE_EXECUTE) ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "", | |
5493 | (action & KAUTH_VNODE_DELETE) ? " DELETE" : "", | |
5494 | (action & KAUTH_VNODE_APPEND_DATA) ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "", | |
5495 | (action & KAUTH_VNODE_DELETE_CHILD) ? " DELETE_CHILD" : "", | |
5496 | (action & KAUTH_VNODE_READ_ATTRIBUTES) ? " READ_ATTRIBUTES" : "", | |
5497 | (action & KAUTH_VNODE_WRITE_ATTRIBUTES) ? " WRITE_ATTRIBUTES" : "", | |
5498 | (action & KAUTH_VNODE_READ_EXTATTRIBUTES) ? " READ_EXTATTRIBUTES" : "", | |
5499 | (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES) ? " WRITE_EXTATTRIBUTES" : "", | |
5500 | (action & KAUTH_VNODE_READ_SECURITY) ? " READ_SECURITY" : "", | |
5501 | (action & KAUTH_VNODE_WRITE_SECURITY) ? " WRITE_SECURITY" : "", | |
5502 | (action & KAUTH_VNODE_CHANGE_OWNER) ? " CHANGE_OWNER" : "", | |
5503 | (action & KAUTH_VNODE_NOIMMUTABLE) ? " (noimmutable)" : "", | |
5504 | vnode_isdir(vp) ? "directory" : "file", | |
5505 | vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp); | |
5506 | ||
5507 | /* | |
5508 | * Extract the control bits from the action, everything else is | |
5509 | * requested rights. | |
5510 | */ | |
5511 | noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0; | |
5512 | rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE); | |
5513 | ||
5514 | if (rights & KAUTH_VNODE_DELETE) { | |
5515 | #if DIAGNOSTIC | |
5516 | if (dvp == NULL) | |
5517 | panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory"); | |
5518 | #endif | |
2d21ac55 A |
5519 | /* |
5520 | * Check whether we've already authorized the parent | |
5521 | * directory for deletion of its children. If so, we can | |
5522 | * skip a good deal of work; we still have to authorize | |
5523 | * removal of this specific child. | |
5524 | */ | |
5525 | if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE) == TRUE) | |
5526 | parent_authorized_for_delete = TRUE; | |
91447636 A |
5527 | } else { |
5528 | dvp = NULL; | |
5529 | } | |
5530 | ||
5531 | /* | |
5532 | * Check for read-only filesystems. | |
5533 | */ | |
5534 | if ((rights & KAUTH_VNODE_WRITE_RIGHTS) && | |
5535 | (vp->v_mount->mnt_flag & MNT_RDONLY) && | |
5536 | ((vp->v_type == VREG) || (vp->v_type == VDIR) || | |
5537 | (vp->v_type == VLNK) || (vp->v_type == VCPLX) || | |
5538 | (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) { | |
5539 | result = EROFS; | |
5540 | goto out; | |
5541 | } | |
5542 | ||
5543 | /* | |
5544 | * Check for noexec filesystems. | |
5545 | */ | |
2d21ac55 | 5546 | if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) { |
91447636 A |
5547 | result = EACCES; |
5548 | goto out; | |
5549 | } | |
5550 | ||
5551 | /* | |
5552 | * Handle cases related to filesystems with non-local enforcement. | |
5553 | * This call can return 0, in which case we will fall through to perform a | |
5554 | * check based on VNOP_GETATTR data. Otherwise it returns 1 and sets | |
5555 | * an appropriate result, at which point we can return immediately. | |
5556 | */ | |
2d21ac55 | 5557 | if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx)) |
91447636 A |
5558 | goto out; |
5559 | ||
5560 | /* | |
5561 | * Get vnode attributes and extended security information for the vnode | |
5562 | * and directory if required. | |
5563 | */ | |
5564 | VATTR_WANTED(&va, va_mode); | |
5565 | VATTR_WANTED(&va, va_uid); | |
5566 | VATTR_WANTED(&va, va_gid); | |
5567 | VATTR_WANTED(&va, va_flags); | |
5568 | VATTR_WANTED(&va, va_acl); | |
5569 | if ((result = vnode_getattr(vp, &va, ctx)) != 0) { | |
5570 | KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result); | |
5571 | goto out; | |
5572 | } | |
2d21ac55 | 5573 | if (dvp && parent_authorized_for_delete == FALSE) { |
91447636 A |
5574 | VATTR_WANTED(&dva, va_mode); |
5575 | VATTR_WANTED(&dva, va_uid); | |
5576 | VATTR_WANTED(&dva, va_gid); | |
5577 | VATTR_WANTED(&dva, va_flags); | |
5578 | VATTR_WANTED(&dva, va_acl); | |
5579 | if ((result = vnode_getattr(dvp, &dva, ctx)) != 0) { | |
5580 | KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp, result); | |
5581 | goto out; | |
5582 | } | |
5583 | } | |
5584 | ||
5585 | /* | |
5586 | * If the vnode is an extended attribute data vnode (e.g. a resource fork), *_DATA becomes | |
5587 | * *_EXTATTRIBUTES. | |
5588 | */ | |
2d21ac55 | 5589 | if (S_ISXATTR(va.va_mode) || vnode_isnamedstream(vp)) { |
91447636 A |
5590 | if (rights & KAUTH_VNODE_READ_DATA) { |
5591 | rights &= ~KAUTH_VNODE_READ_DATA; | |
5592 | rights |= KAUTH_VNODE_READ_EXTATTRIBUTES; | |
5593 | } | |
5594 | if (rights & KAUTH_VNODE_WRITE_DATA) { | |
5595 | rights &= ~KAUTH_VNODE_WRITE_DATA; | |
5596 | rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES; | |
5597 | } | |
5598 | } | |
2d21ac55 A |
5599 | |
5600 | /* | |
5601 | * Point 'vp' to the resource fork's parent for ACL checking | |
5602 | */ | |
5603 | if (vnode_isnamedstream(vp) && | |
5604 | (vp->v_parent != NULL) && | |
5605 | (vget_internal(vp->v_parent, 0, VNODE_NODEAD) == 0)) { | |
5606 | parent_ref = TRUE; | |
5607 | vcp->vp = vp = vp->v_parent; | |
5608 | if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) | |
5609 | kauth_acl_free(va.va_acl); | |
5610 | VATTR_INIT(&va); | |
5611 | VATTR_WANTED(&va, va_mode); | |
5612 | VATTR_WANTED(&va, va_uid); | |
5613 | VATTR_WANTED(&va, va_gid); | |
5614 | VATTR_WANTED(&va, va_flags); | |
5615 | VATTR_WANTED(&va, va_acl); | |
5616 | if ((result = vnode_getattr(vp, &va, ctx)) != 0) | |
5617 | goto out; | |
5618 | } | |
5619 | ||
91447636 A |
5620 | /* |
5621 | * Check for immutability. | |
5622 | * | |
5623 | * In the deletion case, parent directory immutability vetoes specific | |
5624 | * file rights. | |
5625 | */ | |
5626 | if ((result = vnode_authorize_checkimmutable(vp, &va, rights, noimmutable)) != 0) | |
5627 | goto out; | |
5628 | if ((rights & KAUTH_VNODE_DELETE) && | |
2d21ac55 | 5629 | parent_authorized_for_delete == FALSE && |
91447636 A |
5630 | ((result = vnode_authorize_checkimmutable(dvp, &dva, KAUTH_VNODE_DELETE_CHILD, 0)) != 0)) |
5631 | goto out; | |
5632 | ||
5633 | /* | |
5634 | * Clear rights that have been authorized by reaching this point, bail if nothing left to | |
5635 | * check. | |
5636 | */ | |
5637 | rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE); | |
5638 | if (rights == 0) | |
5639 | goto out; | |
5640 | ||
5641 | /* | |
5642 | * If we're not the superuser, authorize based on file properties. | |
5643 | */ | |
5644 | if (!vfs_context_issuser(ctx)) { | |
5645 | /* process delete rights */ | |
5646 | if ((rights & KAUTH_VNODE_DELETE) && | |
2d21ac55 | 5647 | parent_authorized_for_delete == FALSE && |
91447636 A |
5648 | ((result = vnode_authorize_delete(vcp)) != 0)) |
5649 | goto out; | |
5650 | ||
5651 | /* process remaining rights */ | |
5652 | if ((rights & ~KAUTH_VNODE_DELETE) && | |
2d21ac55 | 5653 | (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, &found_deny)) != 0) |
91447636 A |
5654 | goto out; |
5655 | } else { | |
5656 | ||
5657 | /* | |
5658 | * Execute is only granted to root if one of the x bits is set. This check only | |
5659 | * makes sense if the posix mode bits are actually supported. | |
5660 | */ | |
5661 | if ((rights & KAUTH_VNODE_EXECUTE) && | |
5662 | (vp->v_type == VREG) && | |
5663 | VATTR_IS_SUPPORTED(&va, va_mode) && | |
5664 | !(va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) { | |
5665 | result = EPERM; | |
5666 | KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vp, va.va_mode); | |
5667 | goto out; | |
5668 | } | |
5669 | ||
5670 | KAUTH_DEBUG("%p ALLOWED - caller is superuser", vp); | |
5671 | } | |
91447636 A |
5672 | out: |
5673 | if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) | |
5674 | kauth_acl_free(va.va_acl); | |
5675 | if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL)) | |
5676 | kauth_acl_free(dva.va_acl); | |
2d21ac55 | 5677 | |
91447636 | 5678 | if (result) { |
2d21ac55 A |
5679 | if (parent_ref) |
5680 | vnode_put(vp); | |
91447636 A |
5681 | *errorp = result; |
5682 | KAUTH_DEBUG("%p DENIED - auth denied", vp); | |
5683 | return(KAUTH_RESULT_DENY); | |
5684 | } | |
2d21ac55 A |
5685 | if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) { |
5686 | /* | |
5687 | * if we were successfully granted the right to search this directory | |
5688 | * and there were NO ACL DENYs for search and the posix permissions also don't | |
5689 | * deny execute, we can synthesize a global right that allows anyone to | |
5690 | * traverse this directory during a pathname lookup without having to | |
5691 | * match the credential associated with this cache of rights. | |
5692 | */ | |
5693 | if (!VATTR_IS_SUPPORTED(&va, va_mode) || | |
5694 | ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == | |
5695 | (S_IXUSR | S_IXGRP | S_IXOTH))) { | |
5696 | vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE); | |
5697 | } | |
5698 | } | |
5699 | if ((rights & KAUTH_VNODE_DELETE) && parent_authorized_for_delete == FALSE) { | |
5700 | /* | |
5701 | * The parent was successfully and newly authorized for | |
5702 | * deletions; add it to the cache. | |
5703 | */ | |
5704 | vnode_cache_authorized_action(dvp, ctx, KAUTH_VNODE_DELETE); | |
5705 | } | |
5706 | if (parent_ref) | |
5707 | vnode_put(vp); | |
91447636 A |
5708 | /* |
5709 | * Note that this implies that we will allow requests for no rights, as well as | |
5710 | * for rights that we do not recognise. There should be none of these. | |
5711 | */ | |
5712 | KAUTH_DEBUG("%p ALLOWED - auth granted", vp); | |
5713 | return(KAUTH_RESULT_ALLOW); | |
5714 | } | |
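/*
 * Editorial sketch (not part of the original source): the world-searchable
 * test used above before caching KAUTH_VNODE_SEARCHBYANYONE on a directory.
 * The function name is hypothetical; the mode bits come from <sys/stat.h>.
 */
#include <sys/stat.h>
#include <sys/types.h>

static int
example_searchable_by_anyone(mode_t dir_mode)
{
	/* all three execute bits set: any identity may traverse the directory */
	return (dir_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == (S_IXUSR | S_IXGRP | S_IXOTH);
}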
5715 | ||
5716 | /* | |
5717 | * Check that the attribute information in vattr can be legally applied to | |
5718 | * a new file by the context. | |
5719 | */ | |
5720 | int | |
5721 | vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx) | |
5722 | { | |
5723 | int error; | |
2d21ac55 | 5724 | int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode; |
91447636 A |
5725 | kauth_cred_t cred; |
5726 | guid_t changer; | |
5727 | mount_t dmp; | |
5728 | ||
5729 | error = 0; | |
5730 | defaulted_owner = defaulted_group = defaulted_mode = 0; | |
5731 | ||
5732 | /* | |
5733 | * Require that the filesystem support extended security to apply any. | |
5734 | */ | |
5735 | if (!vfs_extendedsecurity(dvp->v_mount) && | |
5736 | (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) { | |
5737 | error = EINVAL; | |
5738 | goto out; | |
5739 | } | |
5740 | ||
5741 | /* | |
5742 | * Default some fields. | |
5743 | */ | |
5744 | dmp = dvp->v_mount; | |
5745 | ||
5746 | /* | |
5747 | * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that | |
5748 | * owner takes ownership of all new files. | |
5749 | */ | |
5750 | if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) { | |
5751 | VATTR_SET(vap, va_uid, dmp->mnt_fsowner); | |
5752 | defaulted_owner = 1; | |
5753 | } else { | |
5754 | if (!VATTR_IS_ACTIVE(vap, va_uid)) { | |
5755 | /* default owner is current user */ | |
5756 | VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx))); | |
5757 | defaulted_owner = 1; | |
5758 | } | |
5759 | } | |
5760 | ||
5761 | /* | |
5762 | * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that | |
5763 | * group takes ownership of all new files. | |
5764 | */ | |
5765 | if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) { | |
5766 | VATTR_SET(vap, va_gid, dmp->mnt_fsgroup); | |
5767 | defaulted_group = 1; | |
5768 | } else { | |
5769 | if (!VATTR_IS_ACTIVE(vap, va_gid)) { | |
5770 | /* default group comes from parent object, fallback to current user */ | |
5771 | struct vnode_attr dva; | |
5772 | VATTR_INIT(&dva); | |
5773 | VATTR_WANTED(&dva, va_gid); | |
5774 | if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) | |
5775 | goto out; | |
5776 | if (VATTR_IS_SUPPORTED(&dva, va_gid)) { | |
5777 | VATTR_SET(vap, va_gid, dva.va_gid); | |
5778 | } else { | |
5779 | VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx))); | |
5780 | } | |
5781 | defaulted_group = 1; | |
5782 | } | |
5783 | } | |
5784 | ||
5785 | if (!VATTR_IS_ACTIVE(vap, va_flags)) | |
5786 | VATTR_SET(vap, va_flags, 0); | |
5787 | ||
5788 | /* default mode is everything, masked with current umask */ | |
5789 | if (!VATTR_IS_ACTIVE(vap, va_mode)) { | |
5790 | VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd->fd_cmask); | |
5791 | KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o", vap->va_mode, vfs_context_proc(ctx)->p_fd->fd_cmask); | |
5792 | defaulted_mode = 1; | |
5793 | } | |
5794 | /* set timestamps to now */ | |
5795 | if (!VATTR_IS_ACTIVE(vap, va_create_time)) { | |
5796 | nanotime(&vap->va_create_time); | |
5797 | VATTR_SET_ACTIVE(vap, va_create_time); | |
5798 | } | |
5799 | ||
5800 | /* | |
5801 | * Check for attempts to set nonsensical fields. | |
5802 | */ | |
5803 | if (vap->va_active & ~VNODE_ATTR_NEWOBJ) { | |
5804 | error = EINVAL; | |
5805 | KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx", | |
5806 | vap->va_active & ~VNODE_ATTR_NEWOBJ); | |
5807 | goto out; | |
5808 | } | |
5809 | ||
5810 | /* | |
5811 | * Quickly check for the applicability of any enforcement here. | |
5812 | * Tests below maintain the integrity of the local security model. | |
5813 | */ | |
2d21ac55 | 5814 | if (vfs_authopaque(dvp->v_mount)) |
91447636 A |
5815 | goto out; |
5816 | ||
5817 | /* | |
5818 | * We need to know if the caller is the superuser, or if the work is | |
5819 | * otherwise already authorised. | |
5820 | */ | |
5821 | cred = vfs_context_ucred(ctx); | |
5822 | if (noauth) { | |
5823 | /* doing work for the kernel */ | |
2d21ac55 | 5824 | has_priv_suser = 1; |
91447636 | 5825 | } else { |
2d21ac55 | 5826 | has_priv_suser = vfs_context_issuser(ctx); |
91447636 A |
5827 | } |
5828 | ||
5829 | ||
5830 | if (VATTR_IS_ACTIVE(vap, va_flags)) { | |
2d21ac55 | 5831 | if (has_priv_suser) { |
91447636 A |
5832 | if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) { |
5833 | error = EPERM; | |
5834 | KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)"); | |
5835 | goto out; | |
5836 | } | |
5837 | } else { | |
5838 | if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) { | |
5839 | error = EPERM; | |
5840 | KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)"); | |
5841 | goto out; | |
5842 | } | |
5843 | } | |
5844 | } | |
5845 | ||
5846 | /* if not superuser, validate legality of new-item attributes */ | |
2d21ac55 | 5847 | if (!has_priv_suser) { |
91447636 A |
5848 | if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) { |
5849 | /* setgid? */ | |
5850 | if (vap->va_mode & S_ISGID) { | |
5851 | if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) { | |
5852 | KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid); | |
5853 | goto out; | |
5854 | } | |
5855 | if (!ismember) { | |
5856 | KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", vap->va_gid); | |
5857 | error = EPERM; | |
5858 | goto out; | |
5859 | } | |
5860 | } | |
5861 | ||
5862 | /* setuid? */ | |
5863 | if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) { | |
5864 | KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit"); | |
5865 | error = EPERM; | |
5866 | goto out; | |
5867 | } | |
5868 | } | |
5869 | if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) { | |
5870 | KAUTH_DEBUG(" DENIED - cannot create new item owned by %d", vap->va_uid); | |
5871 | error = EPERM; | |
5872 | goto out; | |
5873 | } | |
5874 | if (!defaulted_group) { | |
5875 | if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) { | |
5876 | KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid); | |
5877 | goto out; | |
5878 | } | |
5879 | if (!ismember) { | |
5880 | KAUTH_DEBUG(" DENIED - cannot create new item with group %d - not a member", vap->va_gid); | |
5881 | error = EPERM; | |
5882 | goto out; | |
5883 | } | |
5884 | } | |
5885 | ||
5886 | /* initialising owner/group UUID */ | |
5887 | if (VATTR_IS_ACTIVE(vap, va_uuuid)) { | |
5888 | if ((error = kauth_cred_getguid(cred, &changer)) != 0) { | |
5889 | KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error); | |
5890 | /* XXX ENOENT here - no GUID - should perhaps become EPERM */ | |
5891 | goto out; | |
5892 | } | |
5893 | if (!kauth_guid_equal(&vap->va_uuuid, &changer)) { | |
5894 | KAUTH_DEBUG(" ERROR - cannot create item with supplied owner UUID - not us"); | |
5895 | error = EPERM; | |
5896 | goto out; | |
5897 | } | |
5898 | } | |
5899 | if (VATTR_IS_ACTIVE(vap, va_guuid)) { | |
5900 | if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) { | |
5901 | KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error); | |
5902 | goto out; | |
5903 | } | |
5904 | if (!ismember) { | |
5905 | KAUTH_DEBUG(" ERROR - cannot create item with supplied group UUID - not a member"); | |
5906 | error = EPERM; | |
5907 | goto out; | |
5908 | } | |
5909 | } | |
5910 | } | |
5911 | out: | |
5912 | return(error); | |
5913 | } | |
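/*
 * Editorial sketch (not part of the original source): the default-mode
 * computation used by vnode_authattr_new() when the caller supplies no
 * va_mode.  ACCESSPERMS (0777) comes from the BSD <sys/stat.h>; the
 * function name is hypothetical.
 */
#include <sys/stat.h>
#include <sys/types.h>

static mode_t
example_default_mode(mode_t cmask)
{
	/* e.g. 0777 & ~022 == 0755 for the common umask of 022 */
	return ACCESSPERMS & ~cmask;
}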
5914 | ||
5915 | /* | |
2d21ac55 A |
5916 | * Check that the attribute information in vap can be legally written by the |
5917 | * context. | |
91447636 | 5918 | * |
2d21ac55 A |
5919 | * Call this when you're not sure about the vnode_attr; either its contents |
5920 | * have come from an unknown source, or they are variable. | |
91447636 A |
5921 | * |
5922 | * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that | |
5923 | * must be authorized to be permitted to write the vattr. | |
5924 | */ | |
5925 | int | |
5926 | vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx) | |
5927 | { | |
5928 | struct vnode_attr ova; | |
5929 | kauth_action_t required_action; | |
2d21ac55 | 5930 | int error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid; |
91447636 A |
5931 | guid_t changer; |
5932 | gid_t group; | |
5933 | uid_t owner; | |
5934 | mode_t newmode; | |
5935 | kauth_cred_t cred; | |
5936 | uint32_t fdelta; | |
5937 | ||
5938 | VATTR_INIT(&ova); | |
5939 | required_action = 0; | |
5940 | error = 0; | |
5941 | ||
5942 | /* | |
5943 | * Quickly check for enforcement applicability. | |
5944 | */ | |
2d21ac55 | 5945 | if (vfs_authopaque(vp->v_mount)) |
91447636 A |
5946 | goto out; |
5947 | ||
5948 | /* | |
5949 | * Check for attempts to set nonsensical fields. | |
5950 | */ | |
5951 | if (vap->va_active & VNODE_ATTR_RDONLY) { | |
5952 | KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)"); | |
5953 | error = EINVAL; | |
5954 | goto out; | |
5955 | } | |
5956 | ||
5957 | /* | |
5958 | * We need to know if the caller is the superuser. | |
5959 | */ | |
5960 | cred = vfs_context_ucred(ctx); | |
2d21ac55 | 5961 | has_priv_suser = kauth_cred_issuser(cred); |
91447636 A |
5962 | |
5963 | /* | |
5964 | * If any of the following are changing, we need information from the old file: | |
5965 | * va_uid | |
5966 | * va_gid | |
5967 | * va_mode | |
5968 | * va_uuuid | |
5969 | * va_guuid | |
5970 | */ | |
5971 | if (VATTR_IS_ACTIVE(vap, va_uid) || | |
5972 | VATTR_IS_ACTIVE(vap, va_gid) || | |
5973 | VATTR_IS_ACTIVE(vap, va_mode) || | |
5974 | VATTR_IS_ACTIVE(vap, va_uuuid) || | |
5975 | VATTR_IS_ACTIVE(vap, va_guuid)) { | |
5976 | VATTR_WANTED(&ova, va_mode); | |
5977 | VATTR_WANTED(&ova, va_uid); | |
5978 | VATTR_WANTED(&ova, va_gid); | |
5979 | VATTR_WANTED(&ova, va_uuuid); | |
5980 | VATTR_WANTED(&ova, va_guuid); | |
5981 | KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes"); | |
5982 | } | |
5983 | ||
5984 | /* | |
5985 | * If timestamps are being changed, we need to know who the file is owned | |
5986 | * by. | |
5987 | */ | |
5988 | if (VATTR_IS_ACTIVE(vap, va_create_time) || | |
5989 | VATTR_IS_ACTIVE(vap, va_change_time) || | |
5990 | VATTR_IS_ACTIVE(vap, va_modify_time) || | |
5991 | VATTR_IS_ACTIVE(vap, va_access_time) || | |
5992 | VATTR_IS_ACTIVE(vap, va_backup_time)) { | |
5993 | ||
5994 | VATTR_WANTED(&ova, va_uid); | |
5995 | #if 0 /* enable this when we support UUIDs as official owners */ | |
5996 | VATTR_WANTED(&ova, va_uuuid); | |
5997 | #endif | |
5998 | KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID"); | |
5999 | } | |
6000 | ||
6001 | /* | |
6002 | * If flags are being changed, we need the old flags. | |
6003 | */ | |
6004 | if (VATTR_IS_ACTIVE(vap, va_flags)) { | |
6005 | KAUTH_DEBUG("ATTR - flags changing, fetching old flags"); | |
6006 | VATTR_WANTED(&ova, va_flags); | |
6007 | } | |
6008 | ||
6009 | /* | |
6010 | * If the size is being set, make sure it's not a directory. | |
6011 | */ | |
6012 | if (VATTR_IS_ACTIVE(vap, va_data_size)) { | |
6013 | /* size is meaningless on a directory, don't permit this */ | |
6014 | if (vnode_isdir(vp)) { | |
6015 | KAUTH_DEBUG("ATTR - ERROR: size change requested on a directory"); | |
6016 | error = EISDIR; | |
6017 | goto out; | |
6018 | } | |
6019 | } | |
6020 | ||
6021 | /* | |
6022 | * Get old data. | |
6023 | */ | |
6024 | KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active); | |
6025 | if ((error = vnode_getattr(vp, &ova, ctx)) != 0) { | |
6026 | KAUTH_DEBUG(" ERROR - got %d trying to get attributes", error); | |
6027 | goto out; | |
6028 | } | |
6029 | ||
6030 | /* | |
6031 | * Size changes require write access to the file data. | |
6032 | */ | |
6033 | if (VATTR_IS_ACTIVE(vap, va_data_size)) { | |
6034 | /* if we can't get the size, or it's different, we need write access */ | |
6035 | KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA"); | |
6036 | required_action |= KAUTH_VNODE_WRITE_DATA; | |
6037 | } | |
6038 | ||
6039 | /* | |
6040 | * Changing timestamps? | |
6041 | * | |
6042 | * Note that we are only called to authorize user-requested time changes; | |
6043 | * side-effect time changes are not authorized. Authorisation is only | |
6044 | * required for existing files. | |
6045 | * | |
6046 | * Non-owners are not permitted to change the time on an existing | |
6047 | * file to anything other than the current time. | |
6048 | */ | |
6049 | if (VATTR_IS_ACTIVE(vap, va_create_time) || | |
6050 | VATTR_IS_ACTIVE(vap, va_change_time) || | |
6051 | VATTR_IS_ACTIVE(vap, va_modify_time) || | |
6052 | VATTR_IS_ACTIVE(vap, va_access_time) || | |
6053 | VATTR_IS_ACTIVE(vap, va_backup_time)) { | |
6054 | /* | |
6055 | * The owner and root may set any timestamps they like, | |
6056 | * provided that the file is not immutable. The owner still needs | |
6057 | * WRITE_ATTRIBUTES (implied by ownership but still deniable). | |
6058 | */ | |
2d21ac55 | 6059 | if (has_priv_suser || vauth_node_owner(&ova, cred)) { |
91447636 A |
6060 | KAUTH_DEBUG("ATTR - root or owner changing timestamps"); |
6061 | required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES; | |
6062 | } else { | |
6063 | /* just setting the current time? */ | |
6064 | if (vap->va_vaflags & VA_UTIMES_NULL) { | |
6065 | KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES"); | |
6066 | required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES; | |
6067 | } else { | |
6068 | KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted"); | |
6069 | error = EACCES; | |
6070 | goto out; | |
6071 | } | |
6072 | } | |
6073 | } | |
6074 | ||
6075 | /* | |
6076 | * Changing file mode? | |
6077 | */ | |
6078 | if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) { | |
6079 | KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode); | |
6080 | ||
6081 | /* | |
6082 | * Mode changes always have the same basic auth requirements. | |
6083 | */ | |
2d21ac55 | 6084 | if (has_priv_suser) { |
91447636 A |
6085 | KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check"); |
6086 | required_action |= KAUTH_VNODE_CHECKIMMUTABLE; | |
6087 | } else { | |
6088 | /* need WRITE_SECURITY */ | |
6089 | KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY"); | |
6090 | required_action |= KAUTH_VNODE_WRITE_SECURITY; | |
6091 | } | |
6092 | ||
6093 | /* | |
6094 | * Can't set the setgid bit if you're not in the group and not root. Have to have | |
6095 | * existing group information in the case we're not setting it right now. | |
6096 | */ | |
6097 | if (vap->va_mode & S_ISGID) { | |
6098 | required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */ | |
2d21ac55 | 6099 | if (!has_priv_suser) { |
91447636 A |
6100 | if (VATTR_IS_ACTIVE(vap, va_gid)) { |
6101 | group = vap->va_gid; | |
6102 | } else if (VATTR_IS_SUPPORTED(&ova, va_gid)) { | |
6103 | group = ova.va_gid; | |
6104 | } else { | |
6105 | KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available"); | |
6106 | error = EINVAL; | |
6107 | goto out; | |
6108 | } | |
6109 | /* | |
6110 | * This might be too restrictive; WRITE_SECURITY might be implied by | |
6111 | * membership in this case, rather than being an additional requirement. | |
6112 | */ | |
6113 | if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) { | |
6114 | KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid); | |
6115 | goto out; | |
6116 | } | |
6117 | if (!ismember) { | |
6118 | KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", group); | |
6119 | error = EPERM; | |
6120 | goto out; | |
6121 | } | |
6122 | } | |
6123 | } | |
6124 | ||
6125 | /* | |
6126 | * Can't set the setuid bit unless you're root or the file's owner. | |
6127 | */ | |
6128 | if (vap->va_mode & S_ISUID) { | |
6129 | required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */ | |
2d21ac55 | 6130 | if (!has_priv_suser) { |
91447636 A |
6131 | if (VATTR_IS_ACTIVE(vap, va_uid)) { |
6132 | owner = vap->va_uid; | |
6133 | } else if (VATTR_IS_SUPPORTED(&ova, va_uid)) { | |
6134 | owner = ova.va_uid; | |
6135 | } else { | |
6136 | KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available"); | |
6137 | error = EINVAL; | |
6138 | goto out; | |
6139 | } | |
6140 | if (owner != kauth_cred_getuid(cred)) { | |
6141 | /* | |
6142 | * We could allow this if WRITE_SECURITY is permitted, perhaps. | |
6143 | */ | |
6144 | KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit"); | |
6145 | error = EPERM; | |
6146 | goto out; | |
6147 | } | |
6148 | } | |
6149 | } | |
6150 | } | |
6151 | ||
6152 | /* | |
6153 | * Validate/mask flags changes. This checks that only the flags in | |
6154 | * the UF_SETTABLE mask are being set, and preserves the flags in | |
6155 | * the SF_SETTABLE case. | |
6156 | * | |
6157 | * Since flags changes may be made in conjunction with other changes, | |
6158 | * we will ask the auth code to ignore immutability in the case that | |
6159 | * the SF_* flags are not set and we are only manipulating the file flags. | |
6160 | * | |
6161 | */ | |
6162 | if (VATTR_IS_ACTIVE(vap, va_flags)) { | |
6163 | /* compute changing flags bits */ | |
6164 | if (VATTR_IS_SUPPORTED(&ova, va_flags)) { | |
6165 | fdelta = vap->va_flags ^ ova.va_flags; | |
6166 | } else { | |
6167 | fdelta = vap->va_flags; | |
6168 | } | |
6169 | ||
6170 | if (fdelta != 0) { | |
6171 | KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY"); | |
6172 | required_action |= KAUTH_VNODE_WRITE_SECURITY; | |
6173 | ||
6174 | /* check that changing bits are legal */ | |
2d21ac55 | 6175 | if (has_priv_suser) { |
91447636 A |
6176 | /* |
6177 | * The immutability check will prevent us from clearing the SF_* | |
6178 | * flags unless the system securelevel permits it, so just check | |
6179 | * for legal flags here. | |
6180 | */ | |
6181 | if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) { | |
6182 | error = EPERM; | |
6183 | KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)"); | |
6184 | goto out; | |
6185 | } | |
6186 | } else { | |
6187 | if (fdelta & ~UF_SETTABLE) { | |
6188 | error = EPERM; | |
6189 | KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)"); | |
6190 | goto out; | |
6191 | } | |
6192 | } | |
6193 | /* | |
6194 | * If the caller has the ability to manipulate file flags, | |
6195 | * security is not reduced by ignoring them for this operation. | |
6196 | * | |
6197 | * A more complete test here would consider the 'after' states of the flags | |
6198 | * to determine whether it would permit the operation, but this becomes | |
6199 | * very complex. | |
6200 | * | |
6201 | * Ignoring immutability is conditional on securelevel; this does not bypass | |
6202 | * the SF_* flags if securelevel > 0. | |
6203 | */ | |
6204 | required_action |= KAUTH_VNODE_NOIMMUTABLE; | |
6205 | } | |
6206 | } | |
6207 | ||
6208 | /* | |
6209 | * Validate ownership information. | |
6210 | */ | |
6211 | chowner = 0; | |
6212 | chgroup = 0; | |
2d21ac55 A |
6213 | clear_suid = 0; |
6214 | clear_sgid = 0; | |
91447636 A |
6215 | |
6216 | /* | |
6217 | * uid changing | |
6218 | * Note that if the filesystem didn't give us a UID, we expect that it doesn't | |
6219 | * support them in general, and will ignore it if/when we try to set it. | |
6220 | * We might want to clear the uid out of vap completely here. | |
6221 | */ | |
2d21ac55 A |
6222 | if (VATTR_IS_ACTIVE(vap, va_uid)) { |
6223 | if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) { | |
6224 | if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) { | |
91447636 A |
6225 | KAUTH_DEBUG(" DENIED - non-superuser cannot change ownership to a third party"); | |
6226 | error = EPERM; | |
6227 | goto out; | |
6228 | } | |
6229 | chowner = 1; | |
6230 | } | |
2d21ac55 A |
6231 | clear_suid = 1; |
6232 | } | |
91447636 A |
6233 | |
6234 | /* | |
6235 | * gid changing | |
6236 | * Note that if the filesystem didn't give us a GID, we expect that it doesn't | |
6237 | * support GIDs in general, and will ignore the value if/when we try to set it. | |
6238 | * We might want to clear the gid out of vap completely here. | |
6239 | */ | |
6240 | if (VATTR_IS_ACTIVE(vap, va_gid)) { |
6241 | if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) { | |
6242 | if (!has_priv_suser) { | |
6243 | if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) { |
6244 | KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid); | |
6245 | goto out; | |
6246 | } | |
6247 | if (!ismember) { | |
6248 | KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group", | |
6249 | ova.va_gid, vap->va_gid); | |
6250 | error = EPERM; | |
6251 | goto out; | |
6252 | } | |
6253 | } | |
6254 | chgroup = 1; | |
6255 | } | |
6256 | clear_sgid = 1; |
6257 | } | |
6258 | |
6259 | /* | |
6260 | * Owner UUID being set or changed. | |
6261 | */ | |
6262 | if (VATTR_IS_ACTIVE(vap, va_uuuid)) { | |
6263 | /* if the owner UUID is not actually changing ... */ | |
6264 | if (VATTR_IS_SUPPORTED(&ova, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid)) | |
6265 | goto no_uuuid_change; | |
6266 | ||
6267 | /* | |
6268 | * The owner UUID cannot be set by a non-superuser to anything other than | |
6269 | * their own. | |
6270 | */ | |
2d21ac55 | 6271 | if (!has_priv_suser) { |
6272 | if ((error = kauth_cred_getguid(cred, &changer)) != 0) { |
6273 | KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error); | |
6274 | /* XXX ENOENT here - no UUID - should perhaps become EPERM */ | |
6275 | goto out; | |
6276 | } | |
6277 | if (!kauth_guid_equal(&vap->va_uuuid, &changer)) { | |
6278 | KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us"); | |
6279 | error = EPERM; | |
6280 | goto out; | |
6281 | } | |
6282 | } | |
6283 | chowner = 1; | |
2d21ac55 | 6284 | clear_suid = 1; |
6285 | } |
6286 | no_uuuid_change: | |
6287 | /* | |
6288 | * Group UUID being set or changed. | |
6289 | */ | |
6290 | if (VATTR_IS_ACTIVE(vap, va_guuid)) { | |
6291 | /* if the group UUID is not actually changing ... */ | |
6292 | if (VATTR_IS_SUPPORTED(&ova, va_guuid) && kauth_guid_equal(&vap->va_guuid, &ova.va_guuid)) | |
6293 | goto no_guuid_change; | |
6294 | ||
6295 | /* | |
6296 | * The group UUID cannot be set by a non-superuser to anything other than | |
6297 | * one of which they are a member. | |
6298 | */ | |
2d21ac55 | 6299 | if (!has_priv_suser) { |
6300 | if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) { |
6301 | KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error); | |
6302 | goto out; | |
6303 | } | |
6304 | if (!ismember) { | |
6305 | KAUTH_DEBUG(" ERROR - cannot create item with supplied group UUID - not a member"); | |
6306 | error = EPERM; | |
6307 | goto out; | |
6308 | } | |
6309 | } | |
6310 | chgroup = 1; | |
6311 | } | |
6312 | no_guuid_change: | |
6313 | ||
6314 | /* | |
6315 | * Compute authorisation for group/ownership changes. | |
6316 | */ | |
6317 | if (chowner || chgroup || clear_suid || clear_sgid) { |
6318 | if (has_priv_suser) { | |
6319 | KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check"); |
6320 | required_action |= KAUTH_VNODE_CHECKIMMUTABLE; | |
6321 | } else { | |
6322 | if (chowner) { | |
6323 | KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP"); | |
6324 | required_action |= KAUTH_VNODE_TAKE_OWNERSHIP; | |
6325 | } | |
6326 | if (chgroup && !chowner) { | |
6327 | KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY"); | |
6328 | required_action |= KAUTH_VNODE_WRITE_SECURITY; | |
6329 | } | |
6330 | ||
6331 | /* clear set-uid and set-gid bits as required by POSIX (see the user-space sketch after this block) */ | |
6332 | if (VATTR_IS_ACTIVE(vap, va_mode)) { | |
6333 | newmode = vap->va_mode; | |
6334 | } else if (VATTR_IS_SUPPORTED(&ova, va_mode)) { | |
6335 | newmode = ova.va_mode; | |
6336 | } else { | |
6337 | KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits"); | |
6338 | newmode = 0; | |
6339 | } | |
6340 | if (newmode & (S_ISUID | S_ISGID)) { | |
6341 | VATTR_SET(vap, va_mode, newmode & ~(S_ISUID | S_ISGID)); | |
6342 | KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o", newmode, vap->va_mode); | |
6343 | } | |
6344 | } | |
6345 | } | |
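
/*
 * Minimal user-space sketch (an assumption for illustration, not part of
 * the original source) of the POSIX rule applied above: when a process
 * without appropriate privilege changes a file's owner or group, the
 * set-uid and set-gid bits are cleared.  The path and group id are
 * hypothetical.
 */
#if 0	/* user-space example */
#include <sys/stat.h>
#include <unistd.h>

static void
chgrp_clears_setugid(const char *path, gid_t new_group)
{
	struct stat sb;

	(void)chmod(path, 02755);			/* turn the set-gid bit on */
	(void)chown(path, (uid_t)-1, new_group);	/* owner moves the file to another group ... */
	(void)stat(path, &sb);
	/* ... and (sb.st_mode & (S_ISUID | S_ISGID)) is now 0 */
}
#endif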
6346 | ||
6347 | /* | |
6348 | * Authorise changes in the ACL. | |
6349 | */ | |
6350 | if (VATTR_IS_ACTIVE(vap, va_acl)) { | |
6351 | ||
6352 | /* no existing ACL */ | |
6353 | if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) { | |
6354 | ||
6355 | /* adding an ACL */ | |
6356 | if (vap->va_acl != NULL) { | |
6357 | required_action |= KAUTH_VNODE_WRITE_SECURITY; | |
6358 | KAUTH_DEBUG("CHMOD - adding ACL"); | |
6359 | } | |
6360 | ||
6361 | /* removing an existing ACL */ | |
6362 | } else if (vap->va_acl == NULL) { | |
6363 | required_action |= KAUTH_VNODE_WRITE_SECURITY; | |
6364 | KAUTH_DEBUG("CHMOD - removing ACL"); | |
6365 | ||
6366 | /* updating an existing ACL */ | |
6367 | } else { | |
6368 | if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) { | |
6369 | /* entry count changed, must be different */ | |
6370 | required_action |= KAUTH_VNODE_WRITE_SECURITY; | |
6371 | KAUTH_DEBUG("CHMOD - adding/removing ACL entries"); | |
6372 | } else if (vap->va_acl->acl_entrycount > 0) { | |
6373 | /* both ACLs have the same, nonzero ACE count; compare the ACEs bytewise (see the sketch after this block) */ | |
6374 | if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0], | |
6375 | sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount) != 0) { | |
6376 | required_action |= KAUTH_VNODE_WRITE_SECURITY; | |
6377 | KAUTH_DEBUG("CHMOD - changing ACL entries"); | |
6378 | } | |
6379 | } | |
6380 | } | |
6381 | } | |
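
/*
 * Minimal sketch (a hypothetical helper, not in the original source) of
 * the comparison rule used above: two ACLs differ when their entry counts
 * differ, or when any ACE differs bytewise.
 */
#if 0
static int
acls_differ(const struct kauth_acl *a, const struct kauth_acl *b)
{
	if (a->acl_entrycount != b->acl_entrycount)
		return (1);
	if (a->acl_entrycount == 0)
		return (0);
	return (memcmp(&a->acl_ace[0], &b->acl_ace[0],
	    sizeof(struct kauth_ace) * a->acl_entrycount) != 0);
}
#endif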
6382 | ||
6383 | /* | |
6384 | * Other attributes that require authorisation. | |
6385 | */ | |
6386 | if (VATTR_IS_ACTIVE(vap, va_encoding)) | |
6387 | required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES; | |
6388 | ||
6389 | out: | |
6390 | if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL)) | |
6391 | kauth_acl_free(ova.va_acl); | |
6392 | if (error == 0) | |
6393 | *actionp = required_action; | |
6394 | return(error); | |
6395 | } | |
6396 | ||
6397 | ||
6398 | void | |
6399 | vfs_setlocklocal(mount_t mp) | |
6400 | { | |
6401 | vnode_t vp; | |
6402 | ||
6403 | mount_lock(mp); | |
6404 | mp->mnt_kern_flag |= MNTK_LOCK_LOCAL; | |
6405 | ||
6406 | /* | |
6407 | * We do not expect anyone to be using any vnodes at the | |
6408 | * time this routine is called, so there is no need for vnode locking. | |
6409 | */ | |
6410 | TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) { | |
6411 | vp->v_flag |= VLOCKLOCAL; | |
6412 | } | |
6413 | TAILQ_FOREACH(vp, &mp->mnt_workerqueue, v_mntvnodes) { | |
6414 | vp->v_flag |= VLOCKLOCAL; | |
6415 | } | |
6416 | TAILQ_FOREACH(vp, &mp->mnt_newvnodes, v_mntvnodes) { | |
6417 | vp->v_flag |= VLOCKLOCAL; | |
6418 | } | |
6419 | mount_unlock(mp); | |
6420 | } | |
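
/*
 * Illustrative sketch (hypothetical filesystem, not part of the original
 * source): a filesystem that services advisory locks itself calls
 * vfs_setlocklocal() from its mount entry point, before any vnodes for
 * the mount are handed out; that is why the loop above can walk the
 * vnode lists without per-vnode locking.
 */
#if 0
static int
examplefs_mount(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
{
	/* ... device setup and private mount state initialisation ... */
	vfs_setlocklocal(mp);	/* VNOP_ADVLOCK is handled by this filesystem */
	return (0);
}
#endif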
6421 | ||
6422 | void |
6423 | vn_setunionwait(vnode_t vp) | |
6424 | { | |
6425 | vnode_lock_spin(vp); | |
6426 | vp->v_flag |= VISUNION; | |
6427 | vnode_unlock(vp); | |
6428 | } | |
6429 | ||
6430 | ||
6431 | void | |
6432 | vn_checkunionwait(vnode_t vp) | |
6433 | { | |
6434 | vnode_lock(vp); | |
6435 | while ((vp->v_flag & VISUNION) == VISUNION) | |
6436 | msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0); | |
6437 | vnode_unlock(vp); | |
6438 | } | |
6439 | ||
6440 | void | |
6441 | vn_clearunionwait(vnode_t vp, int locked) | |
6442 | { | |
6443 | if (!locked) | |
6444 | vnode_lock(vp); | |
6445 | if ((vp->v_flag & VISUNION) == VISUNION) { | |
6446 | vp->v_flag &= ~VISUNION; | |
6447 | wakeup((caddr_t)&vp->v_flag); | |
6448 | } | |
6449 | if (!locked) | |
6450 | vnode_unlock(vp); | |
6451 | } | |
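
/*
 * Sketch of the intended usage of the three helpers above (an assumption
 * drawn from their implementation, not a copy of a real call site): one
 * thread marks a vnode busy for union-mount work, then clears the flag
 * and wakes anyone parked in vn_checkunionwait().
 */
#if 0
	vn_setunionwait(vp);		/* set VISUNION under the vnode lock */
	/* ... perform the union-mount directory manipulation ... */
	vn_clearunionwait(vp, 0);	/* clear VISUNION and wakeup() any waiters */

	/* a thread that must not race with the work above: */
	vn_checkunionwait(vp);		/* msleep()s while VISUNION remains set */
#endif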
6452 | ||
6453 | /* | |
6454 | * XXX - get "don't trigger mounts" flag for thread; used by autofs. | |
6455 | */ | |
6456 | extern int thread_notrigger(void); | |
6457 | ||
6458 | int | |
6459 | thread_notrigger(void) | |
6460 | { | |
6461 | struct uthread *uth = (struct uthread *)get_bsdthread_info(current_thread()); | |
6462 | return (uth->uu_notrigger); | |
6463 | } | |
6464 | ||
6465 | /* | |
6466 | * Removes orphaned AppleDouble files during an rmdir. | |
6467 | * Works by: | |
6468 | * 1. vnode_suspend(). | |
6469 | * 2. Call VNOP_READDIR() until the end of the directory is reached. | |
6470 | * 3. Check that the directory entries returned are regular files whose names start with "._". If not, return ENOTEMPTY. | |
6471 | * 4. Continue (2) and (3) until the end of the directory is reached. | |
6472 | * 5. If all the entries in the directory were "._" files, delete them all. | |
6473 | * 6. vnode_resume(). | |
6474 | * 7. If deletion of all the files succeeded, call VNOP_RMDIR() again. | |
6475 | */ | |
6476 | ||
6477 | errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int *restart_flag) | |
6478 | { | |
6479 | ||
6480 | #define UIO_BUFF_SIZE 2048 | |
6481 | uio_t auio = NULL; | |
6482 | int eofflag, siz = UIO_BUFF_SIZE, nentries = 0; | |
6483 | int open_flag = 0, full_erase_flag = 0; | |
6484 | char uio_buf[ UIO_SIZEOF(1) ]; | |
6485 | char *rbuf = NULL, *cpos, *cend; | |
6486 | struct nameidata nd_temp; | |
6487 | struct dirent *dp; | |
6488 | errno_t error; | |
6489 | ||
6490 | error = vnode_suspend(vp); | |
6491 | ||
6492 | /* | |
6493 | * restart_flag is set so that the calling rmdir sleeps and retries (see the sketch after this function) | |
6494 | */ | |
6495 | if (error == EBUSY) | |
6496 | *restart_flag = 1; | |
6497 | if (error != 0) | |
6498 | goto outsc; | |
6499 | ||
6500 | /* | |
6501 | * set up UIO | |
6502 | */ | |
6503 | MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK); | |
6504 | if (rbuf) | |
6505 | auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, | |
6506 | &uio_buf[0], sizeof(uio_buf)); | |
6507 | if (!rbuf || !auio) { | |
6508 | error = ENOMEM; | |
6509 | goto outsc; | |
6510 | } | |
6511 | ||
6512 | uio_setoffset(auio,0); | |
6513 | ||
6514 | eofflag = 0; | |
6515 | ||
6516 | if ((error = VNOP_OPEN(vp, FREAD, ctx))) | |
6517 | goto outsc; | |
6518 | else | |
6519 | open_flag = 1; | |
6520 | ||
6521 | /* | |
6522 | * First pass checks if all files are appleDouble files. | |
6523 | */ | |
6524 | ||
6525 | do { | |
6526 | siz = UIO_BUFF_SIZE; | |
6527 | uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ); | |
6528 | uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE); | |
6529 | ||
6530 | if((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx))) | |
6531 | goto outsc; | |
6532 | ||
6533 | if (uio_resid(auio) != 0) | |
6534 | siz -= uio_resid(auio); | |
6535 | ||
6536 | /* | |
6537 | * Iterate through directory | |
6538 | */ | |
6539 | cpos = rbuf; | |
6540 | cend = rbuf + siz; | |
6541 | dp = (struct dirent*) cpos; | |
6542 | ||
6543 | if (cpos == cend) | |
6544 | eofflag = 1; | |
6545 | ||
6546 | while ((cpos < cend)) { | |
6547 | /* | |
6548 | * Check for . and .. as well as directories | |
6549 | */ | |
6550 | if (dp->d_ino != 0 && | |
6551 | !((dp->d_namlen == 1 && dp->d_name[0] == '.') || | |
6552 | (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) { | |
6553 | /* | |
6554 | * Check for irregular files and ._ files | |
6555 | * If there is a ._._ file abort the op | |
6556 | */ | |
6557 | if ( dp->d_namlen < 2 || | |
6558 | strncmp(dp->d_name,"._",2) || | |
6559 | (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._",2))) { | |
6560 | error = ENOTEMPTY; | |
6561 | goto outsc; | |
6562 | } | |
6563 | } | |
6564 | cpos += dp->d_reclen; | |
6565 | dp = (struct dirent*)cpos; | |
6566 | } | |
6567 | |
6568 | /* | |
6569 | * workaround for HFS/NFS setting eofflag before end of file | |
6570 | */ | |
6571 | if (vp->v_tag == VT_HFS && nentries > 2) | |
6572 | eofflag=0; | |
6573 | ||
6574 | if (vp->v_tag == VT_NFS) { | |
6575 | if (eofflag && !full_erase_flag) { | |
6576 | full_erase_flag = 1; | |
6577 | eofflag = 0; | |
6578 | uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ); | |
6579 | } | |
6580 | else if (!eofflag && full_erase_flag) | |
6581 | full_erase_flag = 0; | |
6582 | } | |
6583 | |
6584 | } while (!eofflag); | |
6585 | /* | |
6586 | * If we've made it here, all the files in the dir are AppleDouble files. | |
6587 | * We can delete them even though the vnode is suspended, | |
6588 | * because we are the owner of the files. | |
6589 | */ | |
6590 | ||
6591 | uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ); | |
6592 | eofflag = 0; | |
cf7d32b8 | 6593 | full_erase_flag = 0; |
6594 | |
6595 | do { | |
6596 | siz = UIO_BUFF_SIZE; | |
6597 | uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ); | |
6598 | uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE); | |
6599 | ||
6600 | error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx); | |
6601 | ||
6602 | if (error != 0) | |
6603 | goto outsc; | |
6604 | ||
6605 | if (uio_resid(auio) != 0) | |
6606 | siz -= uio_resid(auio); | |
6607 | ||
6608 | /* | |
6609 | * Iterate through directory | |
6610 | */ | |
6611 | cpos = rbuf; | |
6612 | cend = rbuf + siz; | |
6613 | dp = (struct dirent*) cpos; | |
6614 | ||
6615 | if (cpos == cend) | |
6616 | eofflag = 1; | |
6617 | ||
6618 | while ((cpos < cend)) { | |
6619 | /* | |
6620 | * Check for . and .. as well as directories | |
6621 | */ | |
6622 | if (dp->d_ino != 0 && | |
6623 | !((dp->d_namlen == 1 && dp->d_name[0] == '.') || | |
6624 | (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.')) | |
6625 | ) { | |
6626 | NDINIT(&nd_temp, DELETE, USEDVP, UIO_SYSSPACE, CAST_USER_ADDR_T(dp->d_name), ctx); | |
6627 | nd_temp.ni_dvp = vp; | |
6628 | error = unlink1(ctx, &nd_temp, 0); | |
6629 | if(error && error != ENOENT) | |
6630 | goto outsc; | |
6631 | } | |
6632 | cpos += dp->d_reclen; | |
6633 | dp = (struct dirent*)cpos; | |
6634 | } | |
6635 | ||
6636 | /* | |
6637 | * workaround for HFS/NFS setting eofflag before end of file | |
6638 | */ | |
6639 | if (vp->v_tag == VT_HFS && nentries > 2) | |
6640 | eofflag=0; | |
6641 | ||
6642 | if (vp->v_tag == VT_NFS) { | |
6643 | if (eofflag && !full_erase_flag) { | |
6644 | full_erase_flag = 1; | |
6645 | eofflag = 0; | |
6646 | uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ); | |
6647 | } | |
6648 | else if (!eofflag && full_erase_flag) | |
6649 | full_erase_flag = 0; | |
6650 | } | |
6651 | ||
6652 | } while (!eofflag); | |
6653 | ||
6654 | ||
6655 | error = 0; | |
6656 | ||
6657 | outsc: | |
6658 | if (open_flag) | |
6659 | VNOP_CLOSE(vp, FREAD, ctx); | |
6660 | ||
6661 | uio_free(auio); | |
6662 | FREE(rbuf, M_TEMP); | |
6663 | ||
6664 | vnode_resume(vp); | |
6665 | ||
6666 | ||
6667 | return(error); | |
6668 | ||
6669 | } | |
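
/*
 * Sketch of how a caller is expected to drive the helper above (an
 * assumption based on the restart_flag contract, not a copy of the real
 * rmdir call site): when vnode_suspend() fails with EBUSY the helper sets
 * *restart_flag, and the caller backs off briefly and retries.  The wait
 * channel and message are hypothetical.
 */
#if 0
	int restart_flag;

	do {
		restart_flag = 0;
		error = rmdir_remove_orphaned_appleDouble(vp, ctx, &restart_flag);
		if (restart_flag)
			tsleep(vp, PVFS, "rmdir_ad", 1);	/* brief back-off before retrying */
	} while (restart_flag);
#endif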
6670 | ||
6671 | |
6672 | #ifdef JOE_DEBUG | |
6673 | ||
6674 | void record_vp(vnode_t vp, int count) { | |
6675 | struct uthread *ut; | |
6676 | int i; | |
6677 | ||
6678 | if ((vp->v_flag & VSYSTEM)) | |
6679 | return; | |
6680 | ||
6681 | ut = get_bsdthread_info(current_thread()); | |
6682 | ut->uu_iocount += count; | |
6683 | ||
6684 | if (ut->uu_vpindex < 32) { | |
6685 | for (i = 0; i < ut->uu_vpindex; i++) { | |
6686 | if (ut->uu_vps[i] == vp) | |
6687 | return; | |
6688 | } | |
6689 | ut->uu_vps[ut->uu_vpindex] = vp; | |
6690 | ut->uu_vpindex++; | |
6691 | } | |
6692 | } | |
6693 | #endif |