/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993, 1995
 *    The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 *      @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <miscfs/specfs/specdev.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/paths.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a hash value
 * obtained from (vp, name) where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */
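
/*
 * Illustrative sketch only (not a caller in this file): a hit on a
 * "negative" entry yields a namecache entry whose nc_vp is NULLVP,
 * which lets a lookup fail with ENOENT without consulting the
 * file system:
 *
 *      struct namecache *ncp;          // found via the (vp, name) hash
 *      ...
 *      if (ncp->nc_vp == NULLVP)
 *              return (ENOENT);        // name known not to exist
 */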

/*
 * Structures associated with name caching.
 */

LIST_HEAD(nchashhead, namecache) *nchashtbl;    /* Hash Table */
u_long nchashmask;
u_long nchash;                  /* size of hash table - 1 */
long numcache;                  /* number of cache entries allocated */
int desiredNodes;
int desiredNegNodes;
int ncs_negtotal;
int nc_disabled = 0;
TAILQ_HEAD(, namecache) nchead;     /* chain of all name cache entries */
TAILQ_HEAD(, namecache) neghead;    /* chain of only negative cache entries */


#if COLLECT_STATS

struct nchstats nchstats;       /* cache effectiveness statistics */

#define NCHSTAT(v) {    \
    nchstats.v++;       \
}
#define NAME_CACHE_LOCK()           name_cache_lock()
#define NAME_CACHE_UNLOCK()         name_cache_unlock()
#define NAME_CACHE_LOCK_SHARED()    name_cache_lock()

#else

#define NCHSTAT(v)
#define NAME_CACHE_LOCK()           name_cache_lock()
#define NAME_CACHE_UNLOCK()         name_cache_unlock()
#define NAME_CACHE_LOCK_SHARED()    name_cache_lock_shared()

#endif


/* vars for name cache list lock */
lck_grp_t * namecache_lck_grp;
lck_grp_attr_t * namecache_lck_grp_attr;
lck_attr_t * namecache_lck_attr;

lck_grp_t * strcache_lck_grp;
lck_grp_attr_t * strcache_lck_grp_attr;
lck_attr_t * strcache_lck_attr;

lck_rw_t * namecache_rw_lock;
lck_rw_t * strtable_rw_lock;

#define NUM_STRCACHE_LOCKS 1024

lck_mtx_t strcache_mtx_locks[NUM_STRCACHE_LOCKS];


static vnode_t cache_lookup_locked(vnode_t dvp, struct componentname *cnp);
static const char *add_name_internal(const char *, uint32_t, u_int, boolean_t, u_int);
static void init_string_table(void);
static void cache_delete(struct namecache *, int);
static void cache_enter_locked(vnode_t dvp, vnode_t vp, struct componentname *cnp, const char *strname);

#ifdef DUMP_STRING_TABLE
/*
 * Internal dump function used for debugging
 */
void dump_string_table(void);
#endif  /* DUMP_STRING_TABLE */

static void init_crc32(void);
static unsigned int crc32tab[256];


#define NCHHASH(dvp, hash_val) \
    (&nchashtbl[(dvp->v_id ^ (hash_val)) & nchashmask])

/*
 * This function tries to check if a directory vp is a subdirectory of dvp
 * using only valid v_parent pointers.  It is called with the name cache lock
 * held and does not drop the lock at any point inside the function.
 *
 * It returns a boolean that indicates whether or not it was able to
 * successfully infer the parent/descendant relationship via the v_parent
 * pointers, or if it could not infer such a relationship and the decision
 * must be delegated to the owning filesystem.
 *
 * If it does not defer the decision, i.e. it was successfully able to determine
 * the parent/descendant relationship, *is_subdir tells the caller if vp is a
 * subdirectory of dvp.
 *
 * If the decision is deferred, *next_vp is where it stopped, i.e. *next_vp
 * is the vnode whose parent is to be determined from the filesystem.
 * *is_subdir, in this case, is not indicative of anything and should be
 * ignored.
 *
 * The return value and output args should be used as follows:
 *
 * defer = cache_check_vnode_issubdir(vp, dvp, is_subdir, next_vp);
 * if (!defer) {
 *      if (*is_subdir)
 *              vp is subdirectory;
 *      else
 *              vp is not a subdirectory;
 * } else {
 *      if (*next_vp)
 *              check this vnode's parent from the filesystem
 *      else
 *              error (likely because of forced unmount).
 * }
 *
 */
static boolean_t
cache_check_vnode_issubdir(vnode_t vp, vnode_t dvp, boolean_t *is_subdir,
    vnode_t *next_vp)
{
    vnode_t tvp = vp;
    int defer = FALSE;

    *is_subdir = FALSE;
    *next_vp = NULLVP;
    while (1) {
        mount_t tmp;

        if (tvp == dvp) {
            *is_subdir = TRUE;
            break;
        } else if (tvp == rootvnode) {
            /* *is_subdir = FALSE */
            break;
        }

        tmp = tvp->v_mount;
        while ((tvp->v_flag & VROOT) && tmp && tmp->mnt_vnodecovered &&
            tvp != dvp && tvp != rootvnode) {
            tvp = tmp->mnt_vnodecovered;
            tmp = tvp->v_mount;
        }

        /*
         * If dvp is not at the top of a mount "stack" then
         * vp is not a subdirectory of dvp either.
         */
        if (tvp == dvp || tvp == rootvnode) {
            /* *is_subdir = FALSE */
            break;
        }

        if (!tmp) {
            defer = TRUE;
            *next_vp = NULLVP;
            break;
        }

        if ((tvp->v_flag & VISHARDLINK) || !(tvp->v_parent)) {
            defer = TRUE;
            *next_vp = tvp;
            break;
        }

        tvp = tvp->v_parent;
    }

    return (defer);
}

/* maximum number of retries for potentially transient errors in vnode_issubdir */
#define MAX_ERROR_RETRY 3

/*
 * This function checks if a given directory (vp) is a subdirectory of dvp.
 * It walks backwards from vp and if it hits dvp in its parent chain,
 * it is a subdirectory.  If it encounters the root directory, it is not
 * a subdirectory.
 *
 * This function returns an error if it is unsuccessful and 0 on success.
 *
 * On entry (and exit) vp has an iocount and if this function has to take
 * any iocounts on other vnodes in the parent chain traversal, it releases them.
 */
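
/*
 * Hypothetical usage sketch (this caller does not appear in this file):
 * a rename implementation holding iocounts on both vnodes could use this
 * to refuse moving a directory into its own subtree:
 *
 *      int is_subdir = 0;
 *
 *      error = vnode_issubdir(tdvp, fvp, &is_subdir, ctx);
 *      if (error == 0 && is_subdir)
 *              error = EINVAL;     // would create a cycle
 */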
int
vnode_issubdir(vnode_t vp, vnode_t dvp, int *is_subdir, vfs_context_t ctx)
{
    vnode_t start_vp, tvp;
    vnode_t vp_with_iocount;
    int error = 0;
    char dotdotbuf[] = "..";
    int error_retry_count = 0; /* retry count for potentially transient
                                  errors */

    *is_subdir = FALSE;
    tvp = start_vp = vp;
    /*
     * Anytime we acquire an iocount in this function, we save the vnode
     * in this variable and release it before exiting.
     */
    vp_with_iocount = NULLVP;

    while (1) {
        boolean_t defer;
        vnode_t pvp;
        uint32_t vid;
        struct componentname cn;
        boolean_t is_subdir_locked = FALSE;

        if (tvp == dvp) {
            *is_subdir = TRUE;
            break;
        } else if (tvp == rootvnode) {
            /* *is_subdir = FALSE */
            break;
        }

        NAME_CACHE_LOCK_SHARED();

        defer = cache_check_vnode_issubdir(tvp, dvp, &is_subdir_locked,
            &tvp);

        if (defer && tvp)
            vid = vnode_vid(tvp);

        NAME_CACHE_UNLOCK();

        if (!defer) {
            *is_subdir = is_subdir_locked;
            break;
        }

        if (!tvp) {
            if (error_retry_count++ < MAX_ERROR_RETRY) {
                tvp = vp;
                continue;
            }
            error = ENOENT;
            break;
        }

        if (tvp != start_vp) {
            if (vp_with_iocount) {
                vnode_put(vp_with_iocount);
                vp_with_iocount = NULLVP;
            }

            error = vnode_getwithvid(tvp, vid);
            if (error) {
                if (error_retry_count++ < MAX_ERROR_RETRY) {
                    tvp = vp;
                    error = 0;
                    continue;
                }
                break;
            }

            vp_with_iocount = tvp;
        }

        bzero(&cn, sizeof(cn));
        cn.cn_nameiop = LOOKUP;
        cn.cn_flags = ISLASTCN | ISDOTDOT;
        cn.cn_context = ctx;
        cn.cn_pnbuf = &dotdotbuf[0];
        cn.cn_pnlen = sizeof(dotdotbuf);
        cn.cn_nameptr = cn.cn_pnbuf;
        cn.cn_namelen = 2;

        pvp = NULLVP;
        if ((error = VNOP_LOOKUP(tvp, &pvp, &cn, ctx)))
            break;

        if (!(tvp->v_flag & VISHARDLINK) && tvp->v_parent != pvp) {
            (void)vnode_update_identity(tvp, pvp, NULL, 0, 0,
                VNODE_UPDATE_PARENT);
        }

        if (vp_with_iocount)
            vnode_put(vp_with_iocount);

        vp_with_iocount = tvp = pvp;
    }

    if (vp_with_iocount)
        vnode_put(vp_with_iocount);

    return (error);
}

/*
 * This function builds the path to a filename in "buff".  The
 * length of the path *INCLUDING* the trailing zero byte is
 * returned in outlen.  NOTE: the length includes the trailing
 * zero byte and thus the length is one greater than what strlen
 * would return.  This is important and lots of code elsewhere
 * in the kernel assumes this behavior.
 *
 * This function can call vnops in the file system if the parent vnode
 * does not exist or when called for hardlinks via the volfs path.
 * If BUILDPATH_NO_FS_ENTER is set in flags, it only uses values present
 * in the name cache and does not enter the file system.
 *
 * If BUILDPATH_CHECK_MOVED is set in flags, we return EAGAIN when
 * we encounter ENOENT during path reconstruction.  ENOENT means that
 * one of the parents moved while we were building the path.  The
 * caller can special-case this by calling build_path again.
 *
 * If BUILDPATH_VOLUME_RELATIVE is set in flags, we return a path
 * that is relative to the nearest mount point, i.e. we do not
 * cross over mount points while building the path.
 *
 * The passed-in vp must have a valid io_count reference.
 */
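
/*
 * Illustrative sketch only (a typical caller shape, not code from this
 * file): callers usually hand in a MAXPATHLEN buffer and remember that
 * *outlen counts the trailing NUL:
 *
 *      char *path;
 *      int pathlen;
 *
 *      MALLOC_ZONE(path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
 *      if (build_path(vp, path, MAXPATHLEN, &pathlen, 0, ctx) == 0) {
 *              // strlen(path) == pathlen - 1
 *      }
 *      FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
 */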
int
build_path(vnode_t first_vp, char *buff, int buflen, int *outlen, int flags, vfs_context_t ctx)
{
    vnode_t vp, tvp;
    vnode_t vp_with_iocount;
    vnode_t proc_root_dir_vp;
    char *end;
    const char *str;
    int len;
    int ret = 0;
    int fixhardlink;

    if (first_vp == NULLVP)
        return (EINVAL);

    if (buflen <= 1)
        return (ENOSPC);

    /*
     * Grab the process fd so we can evaluate fd_rdir.
     */
    if (vfs_context_proc(ctx)->p_fd)
        proc_root_dir_vp = vfs_context_proc(ctx)->p_fd->fd_rdir;
    else
        proc_root_dir_vp = NULL;

    vp_with_iocount = NULLVP;
again:
    vp = first_vp;

    end = &buff[buflen-1];
    *end = '\0';

    /*
     * Holding the NAME_CACHE_LOCK in shared mode is
     * sufficient to stabilize both the vp->v_parent chain
     * and the 'vp->v_mount->mnt_vnodecovered' chain.
     *
     * If we need to drop this lock, we must first grab the v_id
     * from the vnode we're currently working with... if that
     * vnode doesn't already have an io_count reference (the vp
     * passed in comes with one), we must grab a reference
     * after we drop the NAME_CACHE_LOCK via vnode_getwithvid...
     * deadlocks may result if you call vnode_get while holding
     * the NAME_CACHE_LOCK... we lazily release the reference
     * we pick up the next time we encounter a need to drop
     * the NAME_CACHE_LOCK or before we return from this routine.
     */
    NAME_CACHE_LOCK_SHARED();

    /*
     * Check if this is the root of a file system.
     */
    while (vp && vp->v_flag & VROOT) {
        if (vp->v_mount == NULL) {
            ret = EINVAL;
            goto out_unlock;
        }
        if ((vp->v_mount->mnt_flag & MNT_ROOTFS) || (vp == proc_root_dir_vp)) {
            /*
             * It's the root of the root file system, so it's
             * just "/".
             */
            *--end = '/';

            goto out_unlock;
        } else {
            /*
             * This is the root of the volume and the caller does not
             * want to cross mount points.  Therefore just return
             * '/' as the relative path.
             */
            if (flags & BUILDPATH_VOLUME_RELATIVE) {
                *--end = '/';
                goto out_unlock;
            } else {
                vp = vp->v_mount->mnt_vnodecovered;
            }
        }
    }

    while ((vp != NULLVP) && (vp->v_parent != vp)) {
        int vid;

        /*
         * For hardlinks the v_name may be stale, so if it's OK
         * to enter a file system, ask the file system for the
         * name and parent (below).
         */
        fixhardlink = (vp->v_flag & VISHARDLINK) &&
            (vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
            !(flags & BUILDPATH_NO_FS_ENTER);

        if (!fixhardlink) {
            str = vp->v_name;

            if (str == NULL || *str == '\0') {
                if (vp->v_parent != NULL)
                    ret = EINVAL;
                else
                    ret = ENOENT;
                goto out_unlock;
            }
            len = strlen(str);
            /*
             * Check that there's enough space (including space for the '/').
             */
            if ((end - buff) < (len + 1)) {
                ret = ENOSPC;
                goto out_unlock;
            }
            /*
             * Copy the name backwards.
             */
            str += len;

            for (; len > 0; len--)
                *--end = *--str;
            /*
             * Add a path separator.
             */
            *--end = '/';
        }

        /*
         * Walk up the parent chain.
         */
        if (((vp->v_parent != NULLVP) && !fixhardlink) ||
            (flags & BUILDPATH_NO_FS_ENTER)) {

            /*
             * In this if () block we are not allowed to enter the filesystem
             * to conclusively get the most accurate parent identifier.
             * As a result, if 'vp' does not identify '/' and it
             * does not have a valid v_parent, then error out
             * and disallow further path construction.
             */
            if ((vp->v_parent == NULLVP) && (rootvnode != vp)) {
                /*
                 * Only '/' is allowed to have a NULL parent
                 * pointer.  Upper level callers should ideally
                 * re-drive name lookup on receiving an ENOENT.
                 */
                ret = ENOENT;

                /* The code below will exit early if 'tvp = vp' == NULL */
            }
            vp = vp->v_parent;

            /*
             * If the vnode we have in hand isn't a directory and it
             * has a v_parent, then we started with the resource fork
             * so skip up to avoid getting a duplicate copy of the
             * file name in the path.
             */
            if (vp && !vnode_isdir(vp) && vp->v_parent) {
                vp = vp->v_parent;
            }
        } else {
            /*
             * No parent, go get it if supported.
             */
            struct vnode_attr va;
            vnode_t dvp;

            /*
             * Make sure the file system supports obtaining a path from id.
             */
            if (!(vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID)) {
                ret = ENOENT;
                goto out_unlock;
            }
            vid = vp->v_id;

            NAME_CACHE_UNLOCK();

            if (vp != first_vp && vp != vp_with_iocount) {
                if (vp_with_iocount) {
                    vnode_put(vp_with_iocount);
                    vp_with_iocount = NULLVP;
                }
                if (vnode_getwithvid(vp, vid))
                    goto again;
                vp_with_iocount = vp;
            }
            VATTR_INIT(&va);
            VATTR_WANTED(&va, va_parentid);

            if (fixhardlink) {
                VATTR_WANTED(&va, va_name);
                MALLOC_ZONE(va.va_name, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
            } else {
                va.va_name = NULL;
            }
            /*
             * Ask the file system for its parent id and for its name (optional).
             */
            ret = vnode_getattr(vp, &va, ctx);

            if (fixhardlink) {
                if ((ret == 0) && (VATTR_IS_SUPPORTED(&va, va_name))) {
                    str = va.va_name;
                    vnode_update_identity(vp, NULL, str, strlen(str), 0, VNODE_UPDATE_NAME);
                } else if (vp->v_name) {
                    str = vp->v_name;
                    ret = 0;
                } else {
                    ret = ENOENT;
                    goto bad_news;
                }
                len = strlen(str);

                /*
                 * Check that there's enough space.
                 */
                if ((end - buff) < (len + 1)) {
                    ret = ENOSPC;
                } else {
                    /* Copy the name backwards. */
                    str += len;

                    for (; len > 0; len--) {
                        *--end = *--str;
                    }
                    /*
                     * Add a path separator.
                     */
                    *--end = '/';
                }
bad_news:
                FREE_ZONE(va.va_name, MAXPATHLEN, M_NAMEI);
            }
            if (ret || !VATTR_IS_SUPPORTED(&va, va_parentid)) {
                ret = ENOENT;
                goto out;
            }
            /*
             * Ask the file system for the parent vnode.
             */
            if ((ret = VFS_VGET(vp->v_mount, (ino64_t)va.va_parentid, &dvp, ctx)))
                goto out;

            if (!fixhardlink && (vp->v_parent != dvp))
                vnode_update_identity(vp, dvp, NULL, 0, 0, VNODE_UPDATE_PARENT);

            if (vp_with_iocount)
                vnode_put(vp_with_iocount);
            vp = dvp;
            vp_with_iocount = vp;

            NAME_CACHE_LOCK_SHARED();

            /*
             * If the vnode we have in hand isn't a directory and it
             * has a v_parent, then we started with the resource fork
             * so skip up to avoid getting a duplicate copy of the
             * file name in the path.
             */
            if (vp && !vnode_isdir(vp) && vp->v_parent)
                vp = vp->v_parent;
        }

        if (vp && (flags & BUILDPATH_CHECKACCESS)) {
            vid = vp->v_id;

            NAME_CACHE_UNLOCK();

            if (vp != first_vp && vp != vp_with_iocount) {
                if (vp_with_iocount) {
                    vnode_put(vp_with_iocount);
                    vp_with_iocount = NULLVP;
                }
                if (vnode_getwithvid(vp, vid))
                    goto again;
                vp_with_iocount = vp;
            }
            if ((ret = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx)))
                goto out;   /* no peeking */

            NAME_CACHE_LOCK_SHARED();
        }

        /*
         * When a mount point is crossed switch the vp.
         * Continue until we find the root or we find
         * a vnode that's not the root of a mounted
         * file system.
         */
        tvp = vp;

        while (tvp) {
            if (tvp == proc_root_dir_vp)
                goto out_unlock;    /* encountered the root */

            if (!(tvp->v_flag & VROOT) || !tvp->v_mount)
                break;              /* not the root of a mounted FS */

            if (flags & BUILDPATH_VOLUME_RELATIVE) {
                /* Do not cross over mount points */
                tvp = NULL;
            } else {
                tvp = tvp->v_mount->mnt_vnodecovered;
            }
        }
        if (tvp == NULLVP)
            goto out_unlock;
        vp = tvp;
    }
out_unlock:
    NAME_CACHE_UNLOCK();
out:
    if (vp_with_iocount)
        vnode_put(vp_with_iocount);
    /*
     * Slide the name down to the beginning of the buffer.
     */
    memmove(buff, end, &buff[buflen] - end);

    /*
     * length includes the trailing zero byte
     */
    *outlen = &buff[buflen] - end;

    /*
     * One of the parents was moved during path reconstruction.
     * The caller is interested in knowing whether any of the
     * parents moved via BUILDPATH_CHECK_MOVED, so return EAGAIN.
     */
    if ((ret == ENOENT) && (flags & BUILDPATH_CHECK_MOVED)) {
        ret = EAGAIN;
    }

    return (ret);
}


/*
 * Return NULLVP if vp's parent doesn't
 * exist, or we can't get a valid iocount...
 * else return the parent of vp.
 */
vnode_t
vnode_getparent(vnode_t vp)
{
    vnode_t pvp = NULLVP;
    int pvid;

    NAME_CACHE_LOCK_SHARED();
    /*
     * v_parent is stable behind the name_cache lock...
     * however, the only thing we can really guarantee
     * is that we've grabbed a valid iocount on the
     * parent of 'vp' at the time we took the name_cache lock...
     * once we drop the lock, vp could get re-parented.
     */
    if ( (pvp = vp->v_parent) != NULLVP ) {
        pvid = pvp->v_id;

        NAME_CACHE_UNLOCK();

        if (vnode_getwithvid(pvp, pvid) != 0)
            pvp = NULL;
    } else
        NAME_CACHE_UNLOCK();
    return (pvp);
}

const char *
vnode_getname(vnode_t vp)
{
    const char *name = NULL;

    NAME_CACHE_LOCK_SHARED();

    if (vp->v_name)
        name = vfs_addname(vp->v_name, strlen(vp->v_name), 0, 0);
    NAME_CACHE_UNLOCK();

    return (name);
}

void
vnode_putname(const char *name)
{
    vfs_removename(name);
}
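
/*
 * Sketch of the expected pairing (illustrative; not a caller from this
 * file): a name obtained via vnode_getname() holds a string-table
 * reference and must be released with vnode_putname():
 *
 *      const char *name;
 *
 *      if ((name = vnode_getname(vp)) != NULL) {
 *              printf("last component: %s\n", name);
 *              vnode_putname(name);
 *      }
 */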

static const char unknown_vnodename[] = "(unknown vnode name)";

const char *
vnode_getname_printable(vnode_t vp)
{
    const char *name = vnode_getname(vp);
    if (name != NULL)
        return name;

    switch (vp->v_type) {
    case VCHR:
    case VBLK:
        {
        /*
         * Create an artificial dev name from
         * major and minor device number
         */
        char dev_name[64];
        (void) snprintf(dev_name, sizeof(dev_name),
            "%c(%u, %u)", VCHR == vp->v_type ? 'c':'b',
            major(vp->v_rdev), minor(vp->v_rdev));
        /*
         * Add the newly created dev name to the name
         * cache to allow easier cleanup.  Also,
         * vfs_addname allocates memory for the new name
         * and returns it.
         */
        NAME_CACHE_LOCK_SHARED();
        name = vfs_addname(dev_name, strlen(dev_name), 0, 0);
        NAME_CACHE_UNLOCK();
        return name;
        }
    default:
        return unknown_vnodename;
    }
}

void
vnode_putname_printable(const char *name)
{
    if (name == unknown_vnodename)
        return;
    vnode_putname(name);
}


/*
 * If VNODE_UPDATE_PARENT is set, and we can take
 * a reference on dvp, then update vp with
 * its new parent... if vp already has a parent,
 * then drop the reference vp held on it.
 *
 * If VNODE_UPDATE_NAME is set,
 * then drop the string ref on v_name if it exists, and if name is non-NULL
 * then pick up a string reference on name and record it in v_name...
 * optionally pass in the length and hashval of name if known.
 *
 * If VNODE_UPDATE_CACHE is set, flush the name cache entries associated with vp.
 */
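
/*
 * For example (hypothetical caller; this exact call site is not in this
 * file), a file system that just renamed vp into new_dvp under new_name
 * might refresh everything at once:
 *
 *      vnode_update_identity(vp, new_dvp, new_name, strlen(new_name), 0,
 *          VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE);
 */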
void
vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, uint32_t name_hashval, int flags)
{
    struct namecache *ncp;
    vnode_t old_parentvp = NULLVP;
#if NAMEDSTREAMS
    int isstream = (vp->v_flag & VISNAMEDSTREAM);
    int kusecountbumped = 0;
#endif
    kauth_cred_t tcred = NULL;
    const char *vname = NULL;
    const char *tname = NULL;

    if (flags & VNODE_UPDATE_PARENT) {
        if (dvp && vnode_ref(dvp) != 0) {
            dvp = NULLVP;
        }
#if NAMEDSTREAMS
        /* Don't count a stream's parent ref during unmounts */
        if (isstream && dvp && (dvp != vp) && (dvp != vp->v_parent) && (dvp->v_type == VREG)) {
            vnode_lock_spin(dvp);
            ++dvp->v_kusecount;
            kusecountbumped = 1;
            vnode_unlock(dvp);
        }
#endif
    } else {
        dvp = NULLVP;
    }
    if ( (flags & VNODE_UPDATE_NAME) ) {
        if (name != vp->v_name) {
            if (name && *name) {
                if (name_len == 0)
                    name_len = strlen(name);
                tname = vfs_addname(name, name_len, name_hashval, 0);
            }
        } else
            flags &= ~VNODE_UPDATE_NAME;
    }
    if ( (flags & (VNODE_UPDATE_PURGE | VNODE_UPDATE_PARENT | VNODE_UPDATE_CACHE | VNODE_UPDATE_NAME)) ) {

        NAME_CACHE_LOCK();

        if ( (flags & VNODE_UPDATE_PURGE) ) {

            if (vp->v_parent)
                vp->v_parent->v_nc_generation++;

            while ( (ncp = LIST_FIRST(&vp->v_nclinks)) )
                cache_delete(ncp, 1);

            while ( (ncp = TAILQ_FIRST(&vp->v_ncchildren)) )
                cache_delete(ncp, 1);

            /*
             * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
             */
            tcred = vp->v_cred;
            vp->v_cred = NOCRED;
            vp->v_authorized_actions = 0;
            vp->v_cred_timestamp = 0;
        }
        if ( (flags & VNODE_UPDATE_NAME) ) {
            vname = vp->v_name;
            vp->v_name = tname;
        }
        if (flags & VNODE_UPDATE_PARENT) {
            if (dvp != vp && dvp != vp->v_parent) {
                old_parentvp = vp->v_parent;
                vp->v_parent = dvp;
                dvp = NULLVP;

                if (old_parentvp)
                    flags |= VNODE_UPDATE_CACHE;
            }
        }
        if (flags & VNODE_UPDATE_CACHE) {
            while ( (ncp = LIST_FIRST(&vp->v_nclinks)) )
                cache_delete(ncp, 1);
        }
        NAME_CACHE_UNLOCK();

        if (vname != NULL)
            vfs_removename(vname);

        if (IS_VALID_CRED(tcred))
            kauth_cred_unref(&tcred);
    }
    if (dvp != NULLVP) {
#if NAMEDSTREAMS
        /* Back-out the ref we took if we lost a race for vp->v_parent. */
        if (kusecountbumped) {
            vnode_lock_spin(dvp);
            if (dvp->v_kusecount > 0)
                --dvp->v_kusecount;
            vnode_unlock(dvp);
        }
#endif
        vnode_rele(dvp);
    }
    if (old_parentvp) {
        struct uthread *ut;

#if NAMEDSTREAMS
        if (isstream) {
            vnode_lock_spin(old_parentvp);
            if ((old_parentvp->v_type != VDIR) && (old_parentvp->v_kusecount > 0))
                --old_parentvp->v_kusecount;
            vnode_unlock(old_parentvp);
        }
#endif
        ut = get_bsdthread_info(current_thread());

        /*
         * Indicate to vnode_rele that it shouldn't do a
         * vnode_reclaim at this time... instead it will
         * chain the vnode to the uu_vreclaims list...
         * we'll be responsible for calling vnode_reclaim
         * on each of the vnodes in this list.
         */
        ut->uu_defer_reclaims = 1;
        ut->uu_vreclaims = NULLVP;

        while ( (vp = old_parentvp) != NULLVP ) {

            vnode_lock_spin(vp);
            vnode_rele_internal(vp, 0, 0, 1);

            /*
             * Check to see if the vnode is now in the state
             * that would have triggered a vnode_reclaim in vnode_rele...
             * if it is, we save its parent pointer and then NULL
             * out the v_parent field... we'll drop the reference
             * that was held on the next iteration of this loop...
             * this short circuits a potential deep recursion if we
             * have a long chain of parents in this state...
             * we'll sit in this loop until we run into
             * a parent in this chain that is not in this state.
             *
             * Make our check and the vnode_rele atomic
             * with respect to the current vnode we're working on
             * by holding the vnode lock...
             * if vnode_rele deferred the vnode_reclaim and has put
             * this vnode on the list to be reaped by us, then
             * it has left this vnode with an iocount == 1.
             */
            if ( (vp->v_iocount == 1) && (vp->v_usecount == 0) &&
                ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
                /*
                 * vnode_rele wanted to do a vnode_reclaim on this vnode...
                 * it should be sitting on the head of the uu_vreclaims chain...
                 * pull the parent pointer now so that when we do the
                 * vnode_reclaim for each of the vnodes in the uu_vreclaims
                 * list, we won't recurse back through here.
                 *
                 * We need to do a convert here in case vnode_rele_internal
                 * returns with the lock held in the spin mode... it
                 * can drop and retake the lock under certain circumstances.
                 */
                vnode_lock_convert(vp);

                NAME_CACHE_LOCK();
                old_parentvp = vp->v_parent;
                vp->v_parent = NULLVP;
                NAME_CACHE_UNLOCK();
            } else {
                /*
                 * We're done... we ran into a vnode that isn't
                 * being terminated.
                 */
                old_parentvp = NULLVP;
            }
            vnode_unlock(vp);
        }
        ut->uu_defer_reclaims = 0;

        while ( (vp = ut->uu_vreclaims) != NULLVP) {
            ut->uu_vreclaims = vp->v_defer_reclaimlist;

            /*
             * vnode_put will drive the vnode_reclaim if
             * we are still the only reference on this vnode.
             */
            vnode_put(vp);
        }
    }
}


/*
 * Mark a vnode as having multiple hard links.  HFS makes use of this
 * because it keeps track of each link separately, and wants to know
 * which link was actually used.
 *
 * This will cause the name cache to force a VNOP_LOOKUP on the vnode
 * so that HFS can post-process the lookup.  Also, volfs will call
 * VNOP_GETATTR2 to determine the parent, instead of using v_parent.
 */
void vnode_setmultipath(vnode_t vp)
{
    vnode_lock_spin(vp);

    /*
     * In theory, we're changing the vnode's identity as far as the
     * name cache is concerned, so we ought to grab the name cache lock
     * here.  However, there is already a race, and grabbing the name
     * cache lock only makes the race window slightly smaller.
     *
     * The race happens because the vnode already exists in the name
     * cache, and could be found by one thread before another thread
     * can set the hard link flag.
     */

    vp->v_flag |= VISHARDLINK;

    vnode_unlock(vp);
}



/*
 * backwards compatibility
 */
void vnode_uncache_credentials(vnode_t vp)
{
    vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
}


/*
 * We use the exclusive form of NAME_CACHE_LOCK to protect the update of the
 * following fields in the vnode: v_cred_timestamp, v_cred, v_authorized_actions.
 * We use this lock so that we can look at the v_cred and v_authorized_actions
 * atomically while behind the NAME_CACHE_LOCK in shared mode in 'cache_lookup_path',
 * which is the super-hot path... if we are updating the authorized actions for this
 * vnode, we are already in the super-slow and far less frequented path, so it's not
 * that bad that we take the lock exclusive for this case... of course we strive
 * to hold it for the minimum amount of time possible.
 */

void vnode_uncache_authorized_action(vnode_t vp, kauth_action_t action)
{
    kauth_cred_t tcred = NOCRED;

    NAME_CACHE_LOCK();

    vp->v_authorized_actions &= ~action;

    if (action == KAUTH_INVALIDATE_CACHED_RIGHTS &&
        IS_VALID_CRED(vp->v_cred)) {
        /*
         * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
         */
        tcred = vp->v_cred;
        vp->v_cred = NOCRED;
    }
    NAME_CACHE_UNLOCK();

    if (tcred != NOCRED)
        kauth_cred_unref(&tcred);
}


extern int bootarg_vnode_cache_defeat;  /* default = 0, from bsd_init.c */

boolean_t
vnode_cache_is_authorized(vnode_t vp, vfs_context_t ctx, kauth_action_t action)
{
    kauth_cred_t ucred;
    boolean_t retval = FALSE;

    /* Boot argument to defeat rights caching */
    if (bootarg_vnode_cache_defeat)
        return FALSE;

    if ( (vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) ) {
        /*
         * A TTL is enabled on the rights cache... handle it here.
         * A TTL of 0 indicates that no rights should be cached.
         */
        if (vp->v_mount->mnt_authcache_ttl) {
            if ( !(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL) ) {
                /*
                 * For filesystems marked only MNTK_AUTH_OPAQUE (generally network ones),
                 * we will only allow a SEARCH right on a directory to be cached...
                 * that cached right always has a default TTL associated with it.
                 */
                if (action != KAUTH_VNODE_SEARCH || vp->v_type != VDIR)
                    vp = NULLVP;
            }
            if (vp != NULLVP && vnode_cache_is_stale(vp) == TRUE) {
                vnode_uncache_authorized_action(vp, vp->v_authorized_actions);
                vp = NULLVP;
            }
        } else
            vp = NULLVP;
    }
    if (vp != NULLVP) {
        ucred = vfs_context_ucred(ctx);

        NAME_CACHE_LOCK_SHARED();

        if (vp->v_cred == ucred && (vp->v_authorized_actions & action) == action)
            retval = TRUE;

        NAME_CACHE_UNLOCK();
    }
    return retval;
}


void vnode_cache_authorized_action(vnode_t vp, vfs_context_t ctx, kauth_action_t action)
{
    kauth_cred_t tcred = NOCRED;
    kauth_cred_t ucred;
    struct timeval tv;
    boolean_t ttl_active = FALSE;

    ucred = vfs_context_ucred(ctx);

    if (!IS_VALID_CRED(ucred) || action == 0)
        return;

    if ( (vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) ) {
        /*
         * A TTL is enabled on the rights cache... handle it here.
         * A TTL of 0 indicates that no rights should be cached.
         */
        if (vp->v_mount->mnt_authcache_ttl == 0)
            return;

        if ( !(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL) ) {
            /*
             * Only cache the SEARCH action for filesystems marked
             * MNTK_AUTH_OPAQUE on VDIRs...
             * the lookup_path code will time these out.
             */
            if ( (action & ~KAUTH_VNODE_SEARCH) || vp->v_type != VDIR )
                return;
        }
        ttl_active = TRUE;

        microuptime(&tv);
    }
    NAME_CACHE_LOCK();

    if (vp->v_cred != ucred) {
        kauth_cred_ref(ucred);
        /*
         * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
         */
        tcred = vp->v_cred;
        vp->v_cred = ucred;
        vp->v_authorized_actions = 0;
    }
    if (ttl_active == TRUE && vp->v_authorized_actions == 0) {
        /*
         * Only reset the timestamp on the
         * first authorization cached after the previous
         * timer has expired or we're switching creds...
         * 'vnode_cache_is_authorized' will clear the
         * authorized actions if the TTL is active and
         * it has expired.
         */
        vp->v_cred_timestamp = tv.tv_sec;
    }
    vp->v_authorized_actions |= action;

    NAME_CACHE_UNLOCK();

    if (IS_VALID_CRED(tcred))
        kauth_cred_unref(&tcred);
}


boolean_t vnode_cache_is_stale(vnode_t vp)
{
    struct timeval tv;
    boolean_t retval;

    microuptime(&tv);

    if ((tv.tv_sec - vp->v_cred_timestamp) > vp->v_mount->mnt_authcache_ttl)
        retval = TRUE;
    else
        retval = FALSE;

    return retval;
}



/*
 * Returns:     0           Success
 *              ERECYCLE    vnode was recycled from underneath us.  Force lookup to be re-driven from namei.
 *                          This errno value should not be seen by anyone outside of the kernel.
 */
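
/*
 * Hypothetical caller sketch (the real caller lives in the lookup code,
 * not in this file): ERECYCLE is meant to re-drive the lookup from
 * namei rather than surface as an error:
 *
 *      error = cache_lookup_path(ndp, cnp, dp, ctx, &dp_authorized, last_dp);
 *      if (error == ERECYCLE) {
 *              ... restart the whole lookup from the original pathname ...
 *      }
 */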
1256 | int | |
1257 | cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, | |
1258 | vfs_context_t ctx, int *dp_authorized, vnode_t last_dp) | |
1259 | { | |
1260 | char *cp; /* pointer into pathname argument */ | |
1261 | int vid; | |
1262 | int vvid = 0; /* protected by vp != NULLVP */ | |
1263 | vnode_t vp = NULLVP; | |
1264 | vnode_t tdp = NULLVP; | |
1265 | kauth_cred_t ucred; | |
1266 | boolean_t ttl_enabled = FALSE; | |
1267 | struct timeval tv; | |
1268 | mount_t mp; | |
1269 | unsigned int hash; | |
1270 | int error = 0; | |
1271 | boolean_t dotdotchecked = FALSE; | |
1272 | ||
1273 | #if CONFIG_TRIGGERS | |
1274 | vnode_t trigger_vp; | |
1275 | #endif /* CONFIG_TRIGGERS */ | |
1276 | ||
1277 | ucred = vfs_context_ucred(ctx); | |
1278 | ndp->ni_flag &= ~(NAMEI_TRAILINGSLASH); | |
1279 | ||
1280 | NAME_CACHE_LOCK_SHARED(); | |
1281 | ||
1282 | if ( dp->v_mount && (dp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) ) { | |
1283 | ttl_enabled = TRUE; | |
1284 | microuptime(&tv); | |
1285 | } | |
1286 | for (;;) { | |
1287 | /* | |
1288 | * Search a directory. | |
1289 | * | |
1290 | * The cn_hash value is for use by cache_lookup | |
1291 | * The last component of the filename is left accessible via | |
1292 | * cnp->cn_nameptr for callers that need the name. | |
1293 | */ | |
1294 | hash = 0; | |
1295 | cp = cnp->cn_nameptr; | |
1296 | ||
1297 | while (*cp && (*cp != '/')) { | |
1298 | hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8; | |
1299 | } | |
1300 | /* | |
1301 | * the crc generator can legitimately generate | |
1302 | * a 0... however, 0 for us means that we | |
1303 | * haven't computed a hash, so use 1 instead | |
1304 | */ | |
1305 | if (hash == 0) | |
1306 | hash = 1; | |
1307 | cnp->cn_hash = hash; | |
1308 | cnp->cn_namelen = cp - cnp->cn_nameptr; | |
1309 | ||
1310 | ndp->ni_pathlen -= cnp->cn_namelen; | |
1311 | ndp->ni_next = cp; | |
1312 | ||
1313 | /* | |
1314 | * Replace multiple slashes by a single slash and trailing slashes | |
1315 | * by a null. This must be done before VNOP_LOOKUP() because some | |
1316 | * fs's don't know about trailing slashes. Remember if there were | |
1317 | * trailing slashes to handle symlinks, existing non-directories | |
1318 | * and non-existing files that won't be directories specially later. | |
1319 | */ | |
1320 | while (*cp == '/' && (cp[1] == '/' || cp[1] == '\0')) { | |
1321 | cp++; | |
1322 | ndp->ni_pathlen--; | |
1323 | ||
1324 | if (*cp == '\0') { | |
1325 | ndp->ni_flag |= NAMEI_TRAILINGSLASH; | |
1326 | *ndp->ni_next = '\0'; | |
1327 | } | |
1328 | } | |
1329 | ndp->ni_next = cp; | |
1330 | ||
1331 | cnp->cn_flags &= ~(MAKEENTRY | ISLASTCN | ISDOTDOT); | |
1332 | ||
1333 | if (*cp == '\0') | |
1334 | cnp->cn_flags |= ISLASTCN; | |
1335 | ||
1336 | if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.') | |
1337 | cnp->cn_flags |= ISDOTDOT; | |
1338 | ||
1339 | *dp_authorized = 0; | |
1340 | #if NAMEDRSRCFORK | |
1341 | /* | |
1342 | * Process a request for a file's resource fork. | |
1343 | * | |
1344 | * Consume the _PATH_RSRCFORKSPEC suffix and tag the path. | |
1345 | */ | |
1346 | if ((ndp->ni_pathlen == sizeof(_PATH_RSRCFORKSPEC)) && | |
1347 | (cp[1] == '.' && cp[2] == '.') && | |
1348 | bcmp(cp, _PATH_RSRCFORKSPEC, sizeof(_PATH_RSRCFORKSPEC)) == 0) { | |
1349 | /* Skip volfs file systems that don't support native streams. */ | |
1350 | if ((dp->v_mount != NULL) && | |
1351 | (dp->v_mount->mnt_flag & MNT_DOVOLFS) && | |
1352 | (dp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) { | |
1353 | goto skiprsrcfork; | |
1354 | } | |
1355 | cnp->cn_flags |= CN_WANTSRSRCFORK; | |
1356 | cnp->cn_flags |= ISLASTCN; | |
1357 | ndp->ni_next[0] = '\0'; | |
1358 | ndp->ni_pathlen = 1; | |
1359 | } | |
1360 | skiprsrcfork: | |
1361 | #endif | |
1362 | ||
1363 | #if CONFIG_MACF | |
1364 | ||
1365 | /* | |
1366 | * Name cache provides authorization caching (see below) | |
1367 | * that will short circuit MAC checks in lookup(). | |
1368 | * We must perform MAC check here. On denial | |
1369 | * dp_authorized will remain 0 and second check will | |
1370 | * be perfomed in lookup(). | |
1371 | */ | |
1372 | if (!(cnp->cn_flags & DONOTAUTH)) { | |
1373 | error = mac_vnode_check_lookup(ctx, dp, cnp); | |
1374 | if (error) { | |
1375 | NAME_CACHE_UNLOCK(); | |
1376 | goto errorout; | |
1377 | } | |
1378 | } | |
1379 | #endif /* MAC */ | |
1380 | if (ttl_enabled && | |
1381 | (dp->v_mount->mnt_authcache_ttl == 0 || | |
1382 | ((tv.tv_sec - dp->v_cred_timestamp) > dp->v_mount->mnt_authcache_ttl))) { | |
1383 | break; | |
1384 | } | |
1385 | ||
1386 | /* | |
1387 | * NAME_CACHE_LOCK holds these fields stable | |
1388 | * | |
1389 | * We can't cache KAUTH_VNODE_SEARCHBYANYONE for root correctly | |
1390 | * so we make an ugly check for root here. root is always | |
1391 | * allowed and breaking out of here only to find out that is | |
1392 | * authorized by virtue of being root is very very expensive. | |
1393 | * However, the check for not root is valid only for filesystems | |
1394 | * which use local authorization. | |
1395 | * | |
1396 | * XXX: Remove the check for root when we can reliably set | |
1397 | * KAUTH_VNODE_SEARCHBYANYONE as root. | |
1398 | */ | |
1399 | if ((dp->v_cred != ucred || !(dp->v_authorized_actions & KAUTH_VNODE_SEARCH)) && | |
1400 | !(dp->v_authorized_actions & KAUTH_VNODE_SEARCHBYANYONE) && | |
1401 | (ttl_enabled || !vfs_context_issuser(ctx))) { | |
1402 | break; | |
1403 | } | |
1404 | ||
1405 | /* | |
1406 | * indicate that we're allowed to traverse this directory... | |
1407 | * even if we fail the cache lookup or decide to bail for | |
1408 | * some other reason, this information is valid and is used | |
1409 | * to avoid doing a vnode_authorize before the call to VNOP_LOOKUP | |
1410 | */ | |
1411 | *dp_authorized = 1; | |
1412 | ||
1413 | if ( (cnp->cn_flags & (ISLASTCN | ISDOTDOT)) ) { | |
1414 | if (cnp->cn_nameiop != LOOKUP) | |
1415 | break; | |
1416 | if (cnp->cn_flags & LOCKPARENT) | |
1417 | break; | |
1418 | if (cnp->cn_flags & NOCACHE) | |
1419 | break; | |
1420 | if (cnp->cn_flags & ISDOTDOT) { | |
1421 | /* | |
1422 | * Force directory hardlinks to go to | |
1423 | * file system for ".." requests. | |
1424 | */ | |
1425 | if (dp && (dp->v_flag & VISHARDLINK)) { | |
1426 | break; | |
1427 | } | |
1428 | /* | |
1429 | * Quit here only if we can't use | |
1430 | * the parent directory pointer or | |
1431 | * don't have one. Otherwise, we'll | |
1432 | * use it below. | |
1433 | */ | |
1434 | if ((dp->v_flag & VROOT) || | |
1435 | dp == ndp->ni_rootdir || | |
1436 | dp->v_parent == NULLVP) | |
1437 | break; | |
1438 | } | |
1439 | } | |
1440 | ||
1441 | if ((cnp->cn_flags & CN_SKIPNAMECACHE)) { | |
1442 | /* | |
1443 | * Force lookup to go to the filesystem with | |
1444 | * all cnp fields set up. | |
1445 | */ | |
1446 | break; | |
1447 | } | |
1448 | ||
1449 | /* | |
1450 | * "." and ".." aren't supposed to be cached, so check | |
1451 | * for them before checking the cache. | |
1452 | */ | |
1453 | if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') | |
1454 | vp = dp; | |
1455 | else if ( (cnp->cn_flags & ISDOTDOT) ) { | |
1456 | /* | |
1457 | * If this is a chrooted process, we need to check if | |
1458 | * the process is trying to break out of its chrooted | |
1459 | * jail. We do that by trying to determine if dp is | |
1460 | * a subdirectory of ndp->ni_rootdir. If we aren't | |
1461 | * able to determine that by the v_parent pointers, we | |
1462 | * will leave the fast path. | |
1463 | * | |
1464 | * Since this function may see dotdot components | |
1465 | * many times and it has the name cache lock held for | |
1466 | * the entire duration, we optimise this by doing this | |
1467 | * check only once per cache_lookup_path call. | |
1468 | * If dotdotchecked is set, it means we've done this | |
1469 | * check once already and don't need to do it again. | |
1470 | */ | |
1471 | if (!dotdotchecked && (ndp->ni_rootdir != rootvnode)) { | |
1472 | vnode_t tvp = dp; | |
1473 | boolean_t defer = FALSE; | |
1474 | boolean_t is_subdir = FALSE; | |
1475 | ||
1476 | defer = cache_check_vnode_issubdir(tvp, | |
1477 | ndp->ni_rootdir, &is_subdir, &tvp); | |
1478 | ||
1479 | if (defer) { | |
1480 | /* defer to Filesystem */ | |
1481 | break; | |
1482 | } else if (!is_subdir) { | |
1483 | /* | |
1484 | * This process is trying to break out | |
1485 | * of its chrooted jail, so all its | |
1486 | * dotdot accesses will be translated to | |
1487 | * its root directory. | |
1488 | */ | |
1489 | vp = ndp->ni_rootdir; | |
1490 | } else { | |
1491 | /* | |
1492 | * All good, let this dotdot access | |
1493 | * proceed normally | |
1494 | */ | |
1495 | vp = dp->v_parent; | |
1496 | } | |
1497 | dotdotchecked = TRUE; | |
1498 | } else { | |
1499 | vp = dp->v_parent; | |
1500 | } | |
1501 | } else { | |
1502 | if ( (vp = cache_lookup_locked(dp, cnp)) == NULLVP) | |
1503 | break; | |
1504 | ||
1505 | if ( (vp->v_flag & VISHARDLINK) ) { | |
1506 | /* | |
1507 | * The file system wants a VNOP_LOOKUP on this vnode | |
1508 | */ | |
1509 | vp = NULL; | |
1510 | break; | |
1511 | } | |
1512 | } | |
1513 | if ( (cnp->cn_flags & ISLASTCN) ) | |
1514 | break; | |
1515 | ||
1516 | if (vp->v_type != VDIR) { | |
1517 | if (vp->v_type != VLNK) | |
1518 | vp = NULL; | |
1519 | break; | |
1520 | } | |
1521 | ||
1522 | if ( (mp = vp->v_mountedhere) && ((cnp->cn_flags & NOCROSSMOUNT) == 0)) { | |
1523 | vnode_t tmp_vp = mp->mnt_realrootvp; | |
1524 | if (tmp_vp == NULLVP || mp->mnt_generation != mount_generation || | |
1525 | mp->mnt_realrootvp_vid != tmp_vp->v_id) | |
1526 | break; | |
1527 | vp = tmp_vp; | |
1528 | } | |
1529 | ||
1530 | #if CONFIG_TRIGGERS | |
1531 | /* | |
1532 | * After traversing all mountpoints stacked here, if we have a | |
1533 | * trigger in hand, resolve it. Note that we don't need to | |
1534 | * leave the fast path if the mount has already happened. | |
1535 | */ | |
1536 | if (vp->v_resolve) | |
1537 | break; | |
1538 | #endif /* CONFIG_TRIGGERS */ | |
1539 | ||
1540 | ||
1541 | dp = vp; | |
1542 | vp = NULLVP; | |
1543 | ||
1544 | cnp->cn_nameptr = ndp->ni_next + 1; | |
1545 | ndp->ni_pathlen--; | |
1546 | while (*cnp->cn_nameptr == '/') { | |
1547 | cnp->cn_nameptr++; | |
1548 | ndp->ni_pathlen--; | |
1549 | } | |
1550 | } | |
1551 | if (vp != NULLVP) | |
1552 | vvid = vp->v_id; | |
1553 | vid = dp->v_id; | |
1554 | ||
1555 | NAME_CACHE_UNLOCK(); | |
1556 | ||
1557 | if ((vp != NULLVP) && (vp->v_type != VLNK) && | |
1558 | ((cnp->cn_flags & (ISLASTCN | LOCKPARENT | WANTPARENT | SAVESTART)) == ISLASTCN)) { | |
1559 | /* | |
1560 | * if we've got a child and it's the last component, and | |
1561 | * the lookup doesn't need to return the parent then we | |
1562 | * can skip grabbing an iocount on the parent, since all | |
1563 | * we're going to do with it is a vnode_put just before | |
1564 | * we return from 'lookup'. If it's a symbolic link, | |
1565 | * we need the parent in case the link happens to be | |
1566 | * a relative pathname. | |
1567 | */ | |
1568 | tdp = dp; | |
1569 | dp = NULLVP; | |
1570 | } else { | |
1571 | need_dp: | |
1572 | /* | |
1573 | * return the last directory we looked at | |
1574 | * with an io reference held. If it was the one passed | |
1575 | * in as a result of the last iteration of VNOP_LOOKUP, | |
1576 | * it should already hold an io ref. No need to increase ref. | |
1577 | */ | |
		if (last_dp != dp) {

			if (dp == ndp->ni_usedvp) {
				/*
				 * if this vnode matches the one passed in via USEDVP
				 * then this context already holds an io_count... just
				 * use vnode_get to get an extra ref for lookup to play
				 * with... can't use the getwithvid variant here because
				 * it will block behind a vnode_drain which would result
				 * in a deadlock (since we already own an io_count that the
				 * vnode_drain is waiting on)... vnode_get grabs the io_count
				 * immediately w/o waiting... it always succeeds
				 */
				vnode_get(dp);
			} else if ((error = vnode_getwithvid_drainok(dp, vid))) {
				/*
				 * failure indicates the vnode
				 * changed identity or is being
				 * TERMINATED... in either case
				 * punt this lookup.
				 *
				 * don't necessarily return ENOENT, though, because
				 * we really want to go back to disk and make sure it's
				 * there or not if someone else is changing this
				 * vnode.  That being said, the one case where we do want
				 * to return ENOENT is when the vnode's mount point is
				 * in the process of unmounting and we might cause a deadlock
				 * in our attempt to take an iocount.  An ENODEV return
				 * from vnode_get* is an indication of this, but we convert
				 * it to ENOENT for the upper layers.
				 */
				if (error == ENODEV) {
					error = ENOENT;
				} else {
					error = ERECYCLE;
				}
				goto errorout;
			}
		}
	}
	if (vp != NULLVP) {
		if (vnode_getwithvid_drainok(vp, vvid)) {
			vp = NULLVP;

			/*
			 * can't get reference on the vp we'd like
			 * to return... if we didn't grab a reference
			 * on the directory (due to fast path bypass),
			 * then we need to do it now... we can't return
			 * with both ni_dvp and ni_vp NULL, and no
			 * error condition
			 */
			if (dp == NULLVP) {
				dp = tdp;
				goto need_dp;
			}
		}
	}

	ndp->ni_dvp = dp;
	ndp->ni_vp = vp;

#if CONFIG_TRIGGERS
	trigger_vp = vp ? vp : dp;
	if ((error == 0) && (trigger_vp != NULLVP) && vnode_isdir(trigger_vp)) {
		error = vnode_trigger_resolve(trigger_vp, ndp, ctx);
		if (error) {
			if (vp)
				vnode_put(vp);
			if (dp)
				vnode_put(dp);
			goto errorout;
		}
	}
#endif /* CONFIG_TRIGGERS */

errorout:
	/*
	 * If we came into cache_lookup_path after an iteration of the lookup loop that
	 * resulted in a call to VNOP_LOOKUP, then VNOP_LOOKUP returned a vnode with an io ref
	 * on it.  It is now the job of cache_lookup_path to drop the ref on this vnode
	 * when it is no longer needed.  If we get to this point, and last_dp is not NULL
	 * and it is ALSO not the dvp we want to return to the caller of this function, it MUST
	 * be the case that we got to a subsequent path component and this previous vnode is
	 * no longer needed.  We can then drop the io ref on it.
	 */
	if ((last_dp != NULLVP) && (last_dp != ndp->ni_dvp)) {
		vnode_put(last_dp);
	}

	// error was initialized to 0, so it is unchanged if no error cases occurred.
	return error;
}
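
/*
 * Illustrative sketch (not part of the build): the vid-revalidation idiom
 * used above.  A vnode's v_id is captured while the name cache lock pins
 * its identity; after the lock is dropped, vnode_getwithvid() both takes
 * an iocount and fails if the vnode was recycled in the window.  The
 * helper name below is hypothetical.
 */
#if 0
static int
example_take_iocount(vnode_t vp, uint32_t vid_captured_under_lock)
{
	/* fails (nonzero) if vp changed identity after the vid was sampled */
	if (vnode_getwithvid(vp, vid_captured_under_lock))
		return ERECYCLE;
	/* ... use vp ... */
	vnode_put(vp);
	return 0;
}
#endif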


static vnode_t
cache_lookup_locked(vnode_t dvp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	long namelen = cnp->cn_namelen;
	unsigned int hashval = cnp->cn_hash;

	if (nc_disabled) {
		return NULL;
	}

	ncpp = NCHHASH(dvp, cnp->cn_hash);
	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
			if (memcmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0)
				break;
		}
	}
	if (ncp == 0) {
		/*
		 * We failed to find an entry
		 */
		NCHSTAT(ncs_miss);
		return (NULL);
	}
	NCHSTAT(ncs_goodhits);

	return (ncp->nc_vp);
}


unsigned int hash_string(const char *cp, int len);
//
// Have to take a len argument because we may only need to
// hash part of a componentname.
//
unsigned int
hash_string(const char *cp, int len)
{
	unsigned hash = 0;

	if (len) {
		while (len--) {
			hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
		}
	} else {
		while (*cp != '\0') {
			hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
		}
	}
	/*
	 * the crc generator can legitimately generate
	 * a 0... however, 0 for us means that we
	 * haven't computed a hash, so use 1 instead
	 */
	if (hash == 0)
		hash = 1;
	return hash;
}
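
/*
 * Illustrative sketch (not part of the build): hash_string() has two modes.
 * With len > 0 it hashes exactly len bytes (the name need not be
 * NUL-terminated, e.g. a component in the middle of a path); with len == 0
 * it hashes up to the terminating NUL.  Either way a computed hash of 0 is
 * remapped to 1, since 0 is reserved to mean "not yet hashed".
 */
#if 0
static void
example_hash_usage(struct componentname *cnp)
{
	/* hash just the current component of a longer path */
	unsigned int h1 = hash_string(cnp->cn_nameptr, cnp->cn_namelen);

	/* hash a complete NUL-terminated string */
	unsigned int h2 = hash_string("vfs_cache.c", 0);

	(void)h1; (void)h2;
}
#endif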


/*
 * Lookup an entry in the cache
 *
 * We don't do this if the segment name is long, simply so the cache
 * can avoid holding long names (which would either waste space, or
 * add greatly to the complexity).
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought.  If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned.  If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned.  If the lookup
 * fails, a status of zero is returned.
 */

int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	long namelen = cnp->cn_namelen;
	unsigned int hashval;
	boolean_t have_exclusive = FALSE;
	uint32_t vid;
	vnode_t vp;

	if (cnp->cn_hash == 0)
		cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
	hashval = cnp->cn_hash;

	if (nc_disabled) {
		return 0;
	}

	NAME_CACHE_LOCK_SHARED();

relook:
	ncpp = NCHHASH(dvp, cnp->cn_hash);
	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
			if (memcmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0)
				break;
		}
	}
	/* We failed to find an entry */
	if (ncp == 0) {
		NCHSTAT(ncs_miss);
		NAME_CACHE_UNLOCK();
		return (0);
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		if (have_exclusive == TRUE) {
			NCHSTAT(ncs_badhits);
			cache_delete(ncp, 1);
			NAME_CACHE_UNLOCK();
			return (0);
		}
		NAME_CACHE_UNLOCK();
		NAME_CACHE_LOCK();
		have_exclusive = TRUE;
		goto relook;
	}
	vp = ncp->nc_vp;

	/* We found a "positive" match, return the vnode */
	if (vp) {
		NCHSTAT(ncs_goodhits);

		vid = vp->v_id;
		NAME_CACHE_UNLOCK();

		if (vnode_getwithvid(vp, vid)) {
#if COLLECT_STATS
			NAME_CACHE_LOCK();
			NCHSTAT(ncs_badvid);
			NAME_CACHE_UNLOCK();
#endif
			return (0);
		}
		*vpp = vp;
		return (-1);
	}

	/* We found a negative match, but want to create the name, so purge it */
	if (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) {
		if (have_exclusive == TRUE) {
			NCHSTAT(ncs_badhits);
			cache_delete(ncp, 1);
			NAME_CACHE_UNLOCK();
			return (0);
		}
		NAME_CACHE_UNLOCK();
		NAME_CACHE_LOCK();
		have_exclusive = TRUE;
		goto relook;
	}

	/*
	 * We found a "negative" match; return ENOENT to notify the client of it.
	 */
	NCHSTAT(ncs_neghits);

	NAME_CACHE_UNLOCK();
	return (ENOENT);
}
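
/*
 * Illustrative sketch (not part of the build): how a caller interprets
 * cache_lookup()'s three-way return convention.  The function name is
 * hypothetical.
 */
#if 0
static void
example_cache_lookup_caller(vnode_t dvp, struct componentname *cnp)
{
	vnode_t vp = NULLVP;
	int status = cache_lookup(dvp, &vp, cnp);

	if (status == -1) {
		/* positive hit: vp was returned with an iocount held */
		vnode_put(vp);
	} else if (status == ENOENT) {
		/* negative hit: the name is known not to exist in dvp */
	} else {
		/* status == 0: cache miss... caller falls back to VNOP_LOOKUP */
	}
}
#endif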

const char *
cache_enter_create(vnode_t dvp, vnode_t vp, struct componentname *cnp)
{
	const char *strname;

	if (cnp->cn_hash == 0)
		cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);

	/*
	 * grab 2 references on the string entered:
	 * one for cache_enter_locked to consume, and
	 * the second to be consumed by v_name (at the vnode_create call point)
	 */
	strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, TRUE, 0);

	NAME_CACHE_LOCK();

	cache_enter_locked(dvp, vp, cnp, strname);

	NAME_CACHE_UNLOCK();

	return (strname);
}


/*
 * Add an entry to the cache...
 * but first check to see if the directory
 * that this entry is to be associated with has
 * had any cache_purges applied since we took
 * our identity snapshot... this check needs to
 * be done behind the name cache lock
 */
void
cache_enter_with_gen(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int gen)
{

	if (cnp->cn_hash == 0)
		cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);

	NAME_CACHE_LOCK();

	if (dvp->v_nc_generation == gen)
		(void)cache_enter_locked(dvp, vp, cnp, NULL);

	NAME_CACHE_UNLOCK();
}


/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	const char *strname;

	if (cnp->cn_hash == 0)
		cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);

	/*
	 * grab 1 reference on the string entered
	 * for cache_enter_locked to consume
	 */
	strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0);

	NAME_CACHE_LOCK();

	cache_enter_locked(dvp, vp, cnp, strname);

	NAME_CACHE_UNLOCK();
}


static void
cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, const char *strname)
{
	struct namecache *ncp, *negp;
	struct nchashhead *ncpp;

	if (nc_disabled)
		return;

	/*
	 * if the entry is for negative caching, vp is null
	 */
	if ((vp != NULLVP) && (LIST_FIRST(&vp->v_nclinks))) {
		/*
		 * someone beat us to the punch...
		 * this vnode is already in the cache
		 */
		if (strname != NULL)
			vfs_removename(strname);
		return;
	}
	/*
	 * We allocate a new entry if we are less than the maximum
	 * allowed and the one at the front of the list is in use.
	 * Otherwise we use the one at the front of the list.
	 */
	if (numcache < desiredNodes &&
	    ((ncp = nchead.tqh_first) == NULL ||
	     ncp->nc_hash.le_prev != 0)) {
		/*
		 * Allocate one more entry
		 */
		ncp = (struct namecache *)_MALLOC_ZONE(sizeof(*ncp), M_CACHE, M_WAITOK);
		numcache++;
	} else {
		/*
		 * reuse an old entry
		 */
		ncp = TAILQ_FIRST(&nchead);
		TAILQ_REMOVE(&nchead, ncp, nc_entry);

		if (ncp->nc_hash.le_prev != 0) {
			/*
			 * still in use... we need to
			 * delete it before re-using it
			 */
			NCHSTAT(ncs_stolen);
			cache_delete(ncp, 0);
		}
	}
	NCHSTAT(ncs_enters);

	/*
	 * Fill in cache info.  If vp is NULL this is a "negative" cache entry.
	 */
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	ncp->nc_hashval = cnp->cn_hash;

	if (strname == NULL)
		ncp->nc_name = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0);
	else
		ncp->nc_name = strname;

	//
	// If the bytes of the name associated with the vnode differ,
	// use the name associated with the vnode since the file system
	// may have set that explicitly in the case of a lookup on a
	// case-insensitive file system where the case of the looked up
	// name differs from what is on disk.  For more details, see:
	//   <rdar://problem/8044697> FSEvents doesn't always decompose diacritical unicode chars in the paths of the changed directories
	//
	const char *vn_name = vp ? vp->v_name : NULL;
	unsigned int len = vn_name ? strlen(vn_name) : 0;
	if (vn_name && ncp && ncp->nc_name && strncmp(ncp->nc_name, vn_name, len) != 0) {
		unsigned int hash = hash_string(vn_name, len);

		vfs_removename(ncp->nc_name);
		ncp->nc_name = add_name_internal(vn_name, len, hash, FALSE, 0);
		ncp->nc_hashval = hash;
	}

	/*
	 * make us the newest entry in the cache
	 * i.e. we'll be the last to be stolen
	 */
	TAILQ_INSERT_TAIL(&nchead, ncp, nc_entry);

	ncpp = NCHHASH(dvp, cnp->cn_hash);
#if DIAGNOSTIC
	{
		struct namecache *p;

		for (p = ncpp->lh_first; p != 0; p = p->nc_hash.le_next)
			if (p == ncp)
				panic("cache_enter: duplicate");
	}
#endif
	/*
	 * make us available to be found via lookup
	 */
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);

	if (vp) {
		/*
		 * add to the list of name cache entries
		 * that point at vp
		 */
		LIST_INSERT_HEAD(&vp->v_nclinks, ncp, nc_un.nc_link);
	} else {
		/*
		 * this is a negative cache entry (vp == NULL)...
		 * stick it on the negative cache list.
		 */
		TAILQ_INSERT_TAIL(&neghead, ncp, nc_un.nc_negentry);

		ncs_negtotal++;

		if (ncs_negtotal > desiredNegNodes) {
			/*
			 * if we've reached our desired limit
			 * of negative cache entries, delete
			 * the oldest
			 */
			negp = TAILQ_FIRST(&neghead);
			cache_delete(negp, 1);
		}
	}
	/*
	 * add us to the list of name cache entries that
	 * are children of dvp... negative entries go at
	 * the head so that cache_purge_negatives() can
	 * stop its scan at the first positive entry
	 */
	if (vp)
		TAILQ_INSERT_TAIL(&dvp->v_ncchildren, ncp, nc_child);
	else
		TAILQ_INSERT_HEAD(&dvp->v_ncchildren, ncp, nc_child);
}
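
/*
 * Illustrative sketch (not part of the build): a file system would typically
 * call cache_enter() after a successful on-disk lookup so the next lookup of
 * the same name can be satisfied from the cache, or with vp == NULLVP after
 * ENOENT to seed a negative entry.  The function name is hypothetical, and a
 * real caller would also honor cnp->cn_flags (e.g. MAKEENTRY).
 */
#if 0
static void
example_post_lookup(vnode_t dvp, vnode_t found_vp, struct componentname *cnp, int lookup_error)
{
	if (lookup_error == 0)
		cache_enter(dvp, found_vp, cnp);	/* positive entry */
	else if (lookup_error == ENOENT)
		cache_enter(dvp, NULLVP, cnp);		/* negative entry */
}
#endif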


/*
 * Initialize CRC-32 remainder table.
 */
static void
init_crc32(void)
{
	/*
	 * the CRC-32 generator polynomial is:
	 * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^10
	 *	+ x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
	 */
	unsigned int crc32_polynomial = 0x04c11db7;
	unsigned int i, j;

	/*
	 * pre-calculate the CRC-32 remainder for each possible octet encoding
	 */
	for (i = 0; i < 256; i++) {
		unsigned int crc_rem = i << 24;

		for (j = 0; j < 8; j++) {
			if (crc_rem & 0x80000000)
				crc_rem = (crc_rem << 1) ^ crc32_polynomial;
			else
				crc_rem = (crc_rem << 1);
		}
		crc32tab[i] = crc_rem;
	}
}
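
/*
 * Illustrative sketch (not part of the build): the table lets hash_string()
 * consume one input byte per step.  Each table slot holds the remainder of
 * feeding that byte through 8 iterations of the bitwise MSB-first CRC above,
 * so the table-driven update below is equivalent to the bit-at-a-time loop.
 */
#if 0
static unsigned int
example_crc32_step(unsigned int hash, unsigned char byte)
{
	/* identical to the update step used in hash_string() */
	return crc32tab[(hash >> 24) ^ byte] ^ (hash << 8);
}
#endif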


/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
nchinit(void)
{
	int i;

	desiredNegNodes = (desiredvnodes / 10);
	desiredNodes = desiredvnodes + desiredNegNodes;

	TAILQ_INIT(&nchead);
	TAILQ_INIT(&neghead);

	init_crc32();

	/*
	 * hashinit() returns the hash mask via its last argument;
	 * nchashmask keeps the mask, nchash the table size (mask + 1)
	 */
	nchashtbl = hashinit(MAX(CONFIG_NC_HASH, (2 * desiredNodes)), M_CACHE, &nchash);
	nchashmask = nchash;
	nchash++;

	init_string_table();

	/* Allocate name cache lock group attribute and group */
	namecache_lck_grp_attr = lck_grp_attr_alloc_init();

	namecache_lck_grp = lck_grp_alloc_init("Name Cache", namecache_lck_grp_attr);

	/* Allocate name cache lock attribute */
	namecache_lck_attr = lck_attr_alloc_init();

	/* Allocate name cache lock */
	namecache_rw_lock = lck_rw_alloc_init(namecache_lck_grp, namecache_lck_attr);


	/* Allocate string cache lock group attribute and group */
	strcache_lck_grp_attr = lck_grp_attr_alloc_init();

	strcache_lck_grp = lck_grp_alloc_init("String Cache", strcache_lck_grp_attr);

	/* Allocate string cache lock attribute */
	strcache_lck_attr = lck_attr_alloc_init();

	/* Allocate string cache lock */
	strtable_rw_lock = lck_rw_alloc_init(strcache_lck_grp, strcache_lck_attr);

	for (i = 0; i < NUM_STRCACHE_LOCKS; i++)
		lck_mtx_init(&strcache_mtx_locks[i], strcache_lck_grp, strcache_lck_attr);
}
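
/*
 * Worked sizing example (the numbers are illustrative; desiredvnodes is
 * tuned per system): with desiredvnodes = 10000, nchinit() plans for
 * desiredNegNodes = 1000 negative entries and desiredNodes = 11000 total
 * entries, then asks hashinit() for at least 2 * 11000 buckets, which
 * hashinit() rounds to a power of two.
 */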

void
name_cache_lock_shared(void)
{
	lck_rw_lock_shared(namecache_rw_lock);
}

void
name_cache_lock(void)
{
	lck_rw_lock_exclusive(namecache_rw_lock);
}

void
name_cache_unlock(void)
{
	lck_rw_done(namecache_rw_lock);
}


int
resize_namecache(u_int newsize)
{
	struct nchashhead *new_table;
	struct nchashhead *old_table;
	struct nchashhead *old_head, *head;
	struct namecache *entry, *next;
	uint32_t i, hashval;
	int dNodes, dNegNodes;
	u_long new_size, old_size;

	dNegNodes = (newsize / 10);
	dNodes = newsize + dNegNodes;

	// we don't support shrinking yet
	if (dNodes <= desiredNodes) {
		return 0;
	}
	new_table = hashinit(2 * dNodes, M_CACHE, &nchashmask);
	new_size = nchashmask + 1;

	if (new_table == NULL) {
		return ENOMEM;
	}

	NAME_CACHE_LOCK();
	// do the switch!
	old_table = nchashtbl;
	nchashtbl = new_table;
	old_size = nchash;
	nchash = new_size;

	// walk the old table and insert all the entries into
	// the new table
	//
	for (i = 0; i < old_size; i++) {
		old_head = &old_table[i];
		for (entry = old_head->lh_first; entry != NULL; entry = next) {
			//
			// XXXdbg - Beware: this assumes that hash_string() does
			//          the same thing as what happens in
			//          lookup() over in vfs_lookup.c
			hashval = hash_string(entry->nc_name, 0);
			entry->nc_hashval = hashval;
			head = NCHHASH(entry->nc_dvp, hashval);

			next = entry->nc_hash.le_next;
			LIST_INSERT_HEAD(head, entry, nc_hash);
		}
	}
	desiredNodes = dNodes;
	desiredNegNodes = dNegNodes;

	NAME_CACHE_UNLOCK();
	FREE(old_table, M_CACHE);

	return 0;
}
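
/*
 * Illustrative sketch (hypothetical caller, not part of the build):
 * resize_namecache() takes the new desired vnode count (not a bucket
 * count), derives dNodes/dNegNodes the same way nchinit() does, and
 * rehashes every entry into the new table.  It returns 0 on success or
 * when no growth is needed, and ENOMEM if the new table can't be allocated.
 */
#if 0
static void
example_grow_namecache(u_int new_desiredvnodes)
{
	if (resize_namecache(new_desiredvnodes) != 0)
		printf("name cache resize failed\n");
}
#endif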

static void
cache_delete(struct namecache *ncp, int age_entry)
{
	NCHSTAT(ncs_deletes);

	if (ncp->nc_vp) {
		LIST_REMOVE(ncp, nc_un.nc_link);
	} else {
		TAILQ_REMOVE(&neghead, ncp, nc_un.nc_negentry);
		ncs_negtotal--;
	}
	TAILQ_REMOVE(&(ncp->nc_dvp->v_ncchildren), ncp, nc_child);

	LIST_REMOVE(ncp, nc_hash);
	/*
	 * this field is used to indicate
	 * that the entry is in use and
	 * must be deleted before it can
	 * be reused...
	 */
	ncp->nc_hash.le_prev = NULL;

	if (age_entry) {
		/*
		 * make it the next one available
		 * for cache_enter's use
		 */
		TAILQ_REMOVE(&nchead, ncp, nc_entry);
		TAILQ_INSERT_HEAD(&nchead, ncp, nc_entry);
	}
	vfs_removename(ncp->nc_name);
	ncp->nc_name = NULL;
}


/*
 * purge the entry associated with the
 * specified vnode from the name cache
 */
void
cache_purge(vnode_t vp)
{
	struct namecache *ncp;
	kauth_cred_t tcred = NULL;

	if ((LIST_FIRST(&vp->v_nclinks) == NULL) &&
	    (TAILQ_FIRST(&vp->v_ncchildren) == NULL) &&
	    (vp->v_cred == NOCRED) &&
	    (vp->v_parent == NULLVP))
		return;

	NAME_CACHE_LOCK();

	if (vp->v_parent)
		vp->v_parent->v_nc_generation++;

	while ((ncp = LIST_FIRST(&vp->v_nclinks)))
		cache_delete(ncp, 1);

	while ((ncp = TAILQ_FIRST(&vp->v_ncchildren)))
		cache_delete(ncp, 1);

	/*
	 * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
	 */
	tcred = vp->v_cred;
	vp->v_cred = NOCRED;
	vp->v_authorized_actions = 0;

	NAME_CACHE_UNLOCK();

	if (IS_VALID_CRED(tcred))
		kauth_cred_unref(&tcred);
}

/*
 * Purge all negative cache entries that are children of the
 * given vnode.  A case-insensitive file system (or any file
 * system that has multiple equivalent names for the same
 * directory entry) can use this when creating or renaming
 * to remove negative entries that may no longer apply.
 */
void
cache_purge_negatives(vnode_t vp)
{
	struct namecache *ncp, *next_ncp;

	NAME_CACHE_LOCK();

	TAILQ_FOREACH_SAFE(ncp, &vp->v_ncchildren, nc_child, next_ncp) {
		/*
		 * negative entries are kept at the head of v_ncchildren,
		 * so the scan can stop at the first positive entry
		 */
		if (ncp->nc_vp)
			break;

		cache_delete(ncp, 1);
	}

	NAME_CACHE_UNLOCK();
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;

	NAME_CACHE_LOCK();
	/* Scan hash tables for applicable entries */
	for (ncpp = &nchashtbl[nchash - 1]; ncpp >= nchashtbl; ncpp--) {
restart:
		for (ncp = ncpp->lh_first; ncp != 0; ncp = ncp->nc_hash.le_next) {
			if (ncp->nc_dvp->v_mount == mp) {
				/*
				 * cache_delete() unlinks ncp from this hash
				 * chain, invalidating the iterator, so restart
				 * the scan of the current bucket
				 */
				cache_delete(ncp, 0);
				goto restart;
			}
		}
	}
	NAME_CACHE_UNLOCK();
}



//
// String ref routines
//
static LIST_HEAD(stringhead, string_t) *string_ref_table;
static u_long string_table_mask;
static uint32_t filled_buckets = 0;


typedef struct string_t {
	LIST_ENTRY(string_t) hash_chain;
	const char *str;
	uint32_t refcount;
} string_t;


static void
resize_string_ref_table(void)
{
	struct stringhead *new_table;
	struct stringhead *old_table;
	struct stringhead *old_head, *head;
	string_t *entry, *next;
	uint32_t i, hashval;
	u_long new_mask, old_mask;

	/*
	 * need to hold the table lock exclusively
	 * in order to grow the table... need to recheck
	 * the need to resize again after we've taken
	 * the lock exclusively in case some other thread
	 * beat us to the punch
	 */
	lck_rw_lock_exclusive(strtable_rw_lock);

	if (4 * filled_buckets < ((string_table_mask + 1) * 3)) {
		lck_rw_done(strtable_rw_lock);
		return;
	}
	new_table = hashinit((string_table_mask + 1) * 2, M_CACHE, &new_mask);

	if (new_table == NULL) {
		printf("failed to resize the hash table.\n");
		lck_rw_done(strtable_rw_lock);
		return;
	}

	// do the switch!
	old_table = string_ref_table;
	string_ref_table = new_table;
	old_mask = string_table_mask;
	string_table_mask = new_mask;
	filled_buckets = 0;

	// walk the old table and insert all the entries into
	// the new table
	//
	for (i = 0; i <= old_mask; i++) {
		old_head = &old_table[i];
		for (entry = old_head->lh_first; entry != NULL; entry = next) {
			hashval = hash_string((const char *)entry->str, 0);
			head = &string_ref_table[hashval & string_table_mask];
			if (head->lh_first == NULL) {
				filled_buckets++;
			}
			next = entry->hash_chain.le_next;
			LIST_INSERT_HEAD(head, entry, hash_chain);
		}
	}
	lck_rw_done(strtable_rw_lock);

	FREE(old_table, M_CACHE);
}
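
/*
 * Note on the resize trigger (restating the checks above): the table is
 * grown when filled_buckets reaches 3/4 of the bucket count, i.e. when
 * 4 * filled_buckets >= 3 * (string_table_mask + 1).  add_name_internal()
 * tests the same condition under the shared lock, and the exclusive-lock
 * recheck here handles the race where another thread already grew the table.
 */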


static void
init_string_table(void)
{
	string_ref_table = hashinit(CONFIG_VFS_NAMES, M_CACHE, &string_table_mask);
}


const char *
vfs_addname(const char *name, uint32_t len, u_int hashval, u_int flags)
{
	return (add_name_internal(name, len, hashval, FALSE, flags));
}


static const char *
add_name_internal(const char *name, uint32_t len, u_int hashval, boolean_t need_extra_ref, __unused u_int flags)
{
	struct stringhead *head;
	string_t *entry;
	uint32_t chain_len = 0;
	uint32_t hash_index;
	uint32_t lock_index;
	char *ptr;

	if (len > MAXPATHLEN)
		len = MAXPATHLEN;

	/*
	 * if the length already accounts for the null-byte, then
	 * subtract one so later on we don't index past the end
	 * of the string.
	 */
	if (len > 0 && name[len-1] == '\0') {
		len--;
	}
	if (hashval == 0) {
		hashval = hash_string(name, len);
	}

	/*
	 * take this lock 'shared' to keep the hash stable
	 * if someone else decides to grow the pool they
	 * will take this lock exclusively
	 */
	lck_rw_lock_shared(strtable_rw_lock);

	/*
	 * If the table gets more than 3/4 full, resize it
	 */
	if (4 * filled_buckets >= ((string_table_mask + 1) * 3)) {
		lck_rw_done(strtable_rw_lock);

		resize_string_ref_table();

		lck_rw_lock_shared(strtable_rw_lock);
	}
	hash_index = hashval & string_table_mask;
	lock_index = hash_index % NUM_STRCACHE_LOCKS;

	head = &string_ref_table[hash_index];

	lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]);

	for (entry = head->lh_first; entry != NULL; chain_len++, entry = entry->hash_chain.le_next) {
		if (memcmp(entry->str, name, len) == 0 && entry->str[len] == 0) {
			entry->refcount++;
			break;
		}
	}
	if (entry == NULL) {
		lck_mtx_convert_spin(&strcache_mtx_locks[lock_index]);
		/*
		 * it wasn't already there so add it.
		 */
		MALLOC(entry, string_t *, sizeof(string_t) + len + 1, M_TEMP, M_WAITOK);

		if (head->lh_first == NULL) {
			OSAddAtomic(1, &filled_buckets);
		}
		ptr = (char *)((char *)entry + sizeof(string_t));
		strncpy(ptr, name, len);
		ptr[len] = '\0';
		entry->str = ptr;
		entry->refcount = 1;
		LIST_INSERT_HEAD(head, entry, hash_chain);
	}
	if (need_extra_ref == TRUE)
		entry->refcount++;

	lck_mtx_unlock(&strcache_mtx_locks[lock_index]);
	lck_rw_done(strtable_rw_lock);

	return (const char *)entry->str;
}
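
/*
 * Illustrative sketch (not part of the build): vfs_addname() interns a name
 * and returns a refcounted shared copy; each successful vfs_addname() must
 * eventually be balanced by one vfs_removename() on the returned pointer,
 * which frees the string when its refcount drops to zero.  The function
 * name below is hypothetical.
 */
#if 0
static void
example_name_interning(void)
{
	const char *n1 = vfs_addname("example", strlen("example"), 0, 0);
	const char *n2 = vfs_addname("example", strlen("example"), 0, 0);

	/* the same interned string is returned for equal names */
	assert(n1 == n2);

	vfs_removename(n1);	/* refcount 2 -> 1 */
	vfs_removename(n2);	/* refcount 1 -> 0, entry freed */
}
#endif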


int
vfs_removename(const char *nameref)
{
	struct stringhead *head;
	string_t *entry;
	uint32_t hashval;
	uint32_t hash_index;
	uint32_t lock_index;
	int retval = ENOENT;

	hashval = hash_string(nameref, 0);

	/*
	 * take this lock 'shared' to keep the hash stable
	 * if someone else decides to grow the pool they
	 * will take this lock exclusively
	 */
	lck_rw_lock_shared(strtable_rw_lock);
	/*
	 * must compute the head behind the table lock
	 * since the size and location of the table
	 * can change on the fly
	 */
	hash_index = hashval & string_table_mask;
	lock_index = hash_index % NUM_STRCACHE_LOCKS;

	head = &string_ref_table[hash_index];

	lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]);

	for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) {
		if (entry->str == nameref) {
			entry->refcount--;

			if (entry->refcount == 0) {
				LIST_REMOVE(entry, hash_chain);

				if (head->lh_first == NULL) {
					OSAddAtomic(-1, &filled_buckets);
				}
			} else {
				entry = NULL;
			}
			retval = 0;
			break;
		}
	}
	lck_mtx_unlock(&strcache_mtx_locks[lock_index]);
	lck_rw_done(strtable_rw_lock);

	if (entry != NULL)
		FREE(entry, M_TEMP);

	return retval;
}


#ifdef DUMP_STRING_TABLE
void
dump_string_table(void)
{
	struct stringhead *head;
	string_t *entry;
	u_long i;

	lck_rw_lock_shared(strtable_rw_lock);

	for (i = 0; i <= string_table_mask; i++) {
		head = &string_ref_table[i];
		for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) {
			printf("%6d - %s\n", entry->refcount, entry->str);
		}
	}
	lck_rw_done(strtable_rw_lock);
}
#endif /* DUMP_STRING_TABLE */