[apple/xnu.git] / bsd / vfs / vfs_cache.c -- blame view (xnu-6153.11.26)
1c79356b 1/*
c18c124e 2 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*
30 * Copyright (c) 1989, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Poul-Henning Kamp of the FreeBSD Project.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 *
65 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
66 */
2d21ac55
A
67/*
68 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69 * support for mandatory and extensible security protections. This notice
70 * is included in support of clause 2.2 (b) of the Apple Public License,
71 * Version 2.0.
72 */
1c79356b
A
73#include <sys/param.h>
74#include <sys/systm.h>
75#include <sys/time.h>
91447636
A
76#include <sys/mount_internal.h>
77#include <sys/vnode_internal.h>
39236c6e 78#include <miscfs/specfs/specdev.h>
1c79356b
A
79#include <sys/namei.h>
80#include <sys/errno.h>
81#include <sys/malloc.h>
91447636
A
82#include <sys/kauth.h>
83#include <sys/user.h>
2d21ac55 84#include <sys/paths.h>
d9a64523 85#include <os/overflow.h>
2d21ac55
A
86
87#if CONFIG_MACF
88#include <security/mac_framework.h>
89#endif
1c79356b
A
90
91/*
92 * Name caching works as follows:
93 *
94 * Names found by directory scans are retained in a cache
95 * for future reference. It is managed LRU, so frequently
96 * used names will hang around. Cache is indexed by hash value
97 * obtained from (vp, name) where vp refers to the directory
98 * containing name.
99 *
100 * If it is a "negative" entry, (i.e. for a name that is known NOT to
101 * exist) the vnode pointer will be NULL.
102 *
1c79356b
A
103 * Upon reaching the last segment of a path, if the reference
104 * is for DELETE, or NOCACHE is set (rewrite), and the
105 * name is located in the cache, it will be dropped.
106 */
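/*
 * Illustrative sketch (not part of the original source): how a caller of the
 * cache_lookup() KPI would interpret the three outcomes described above.
 * The return-value convention assumed here is -1 for a positive hit,
 * ENOENT for a negative hit, and 0 for a miss.
 */
#if 0   /* example only, never compiled */
static int
example_name_cache_probe(vnode_t dvp, struct componentname *cnp, vnode_t *vpp)
{
	int rv = cache_lookup(dvp, vpp, cnp);

	if (rv == -1) {
		/* positive entry: *vpp is the cached vnode for (dvp, name) */
		return 0;
	}
	if (rv == ENOENT) {
		/* negative entry: the name is known NOT to exist in dvp */
		return ENOENT;
	}
	/* rv == 0: nothing cached, the caller must ask the file system */
	return EJUSTRETURN;
}
#endif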
107
108/*
109 * Structures associated with name caching.
110 */
91447636 111
0a7de745
A
112LIST_HEAD(nchashhead, namecache) * nchashtbl; /* Hash Table */
113u_long nchashmask;
114u_long nchash; /* size of hash table - 1 */
115long numcache; /* number of cache entries allocated */
116int desiredNodes;
117int desiredNegNodes;
118int ncs_negtotal;
119int nc_disabled = 0;
120TAILQ_HEAD(, namecache) nchead; /* chain of all name cache entries */
121TAILQ_HEAD(, namecache) neghead; /* chain of only negative cache entries */
0c530ab8
A
122
123
124#if COLLECT_STATS
125
0a7de745 126struct nchstats nchstats; /* cache effectiveness statistics */
91447636 127
0a7de745
A
128#define NCHSTAT(v) { \
129 nchstats.v++; \
0c530ab8 130}
0a7de745
A
131#define NAME_CACHE_LOCK() name_cache_lock()
132#define NAME_CACHE_UNLOCK() name_cache_unlock()
133#define NAME_CACHE_LOCK_SHARED() name_cache_lock()
0c530ab8
A
134
135#else
136
137#define NCHSTAT(v)
0a7de745
A
138#define NAME_CACHE_LOCK() name_cache_lock()
139#define NAME_CACHE_UNLOCK() name_cache_unlock()
140#define NAME_CACHE_LOCK_SHARED() name_cache_lock_shared()
0c530ab8
A
141
142#endif
143
144
91447636
A
145/* vars for name cache list lock */
146lck_grp_t * namecache_lck_grp;
147lck_grp_attr_t * namecache_lck_grp_attr;
148lck_attr_t * namecache_lck_attr;
b0d623f7
A
149
150lck_grp_t * strcache_lck_grp;
151lck_grp_attr_t * strcache_lck_grp_attr;
152lck_attr_t * strcache_lck_attr;
153
154lck_rw_t * namecache_rw_lock;
155lck_rw_t * strtable_rw_lock;
156
157#define NUM_STRCACHE_LOCKS 1024
158
159lck_mtx_t strcache_mtx_locks[NUM_STRCACHE_LOCKS];
160
91447636
A
161
162static vnode_t cache_lookup_locked(vnode_t dvp, struct componentname *cnp);
b0d623f7 163static const char *add_name_internal(const char *, uint32_t, u_int, boolean_t, u_int);
39236c6e 164static void init_string_table(void);
91447636 165static void cache_delete(struct namecache *, int);
b0d623f7 166static void cache_enter_locked(vnode_t dvp, vnode_t vp, struct componentname *cnp, const char *strname);
cb323159 167static void cache_purge_locked(vnode_t vp, kauth_cred_t *credp);
2d21ac55
A
168
169#ifdef DUMP_STRING_TABLE
170/*
171 * Internal dump function used for debugging
172 */
173void dump_string_table(void);
0a7de745 174#endif /* DUMP_STRING_TABLE */
91447636 175
39236c6e 176static void init_crc32(void);
91447636
A
177static unsigned int crc32tab[256];
178
179
180#define NCHHASH(dvp, hash_val) \
181 (&nchashtbl[(dvp->v_id ^ (hash_val)) & nchashmask])
182
39037602
A
183/*
184 * This function tries to check if a directory vp is a subdirectory of dvp
185 * only from valid v_parent pointers. It is called with the name cache lock
186 * held and does not drop the lock anytime inside the function.
187 *
188 * It returns a boolean that indicates whether or not it was able to
189 * successfully infer the parent/descendent relationship via the v_parent
190 * pointers, or if it could not infer such relationship and that the decision
191 * must be delegated to the owning filesystem.
192 *
193 * If it does not defer the decision, i.e. it was successfully able to determine
194 * the parent/descendent relationship, *is_subdir tells the caller if vp is a
195 * subdirectory of dvp.
196 *
197 * If the decision is deferred, *next_vp is where it stopped i.e. *next_vp
198 * is the vnode whose parent is to be determined from the filesystem.
199 * *is_subdir, in this case, is not indicative of anything and should be
200 * ignored.
201 *
202 * The return value and output args should be used as follows :
203 *
204 * defer = cache_check_vnode_issubdir(vp, dvp, is_subdir, next_vp);
205 * if (!defer) {
0a7de745
A
206 * if (*is_subdir)
207 * vp is subdirectory;
208 * else
209 * vp is not a subdirectory;
39037602 210 * } else {
0a7de745
A
211 * if (*next_vp)
212 * check this vnode's parent from the filesystem
213 * else
214 * error (likely because of forced unmount).
39037602
A
215 * }
216 *
217 */
218static boolean_t
219cache_check_vnode_issubdir(vnode_t vp, vnode_t dvp, boolean_t *is_subdir,
220 vnode_t *next_vp)
221{
222 vnode_t tvp = vp;
223 int defer = FALSE;
224
225 *is_subdir = FALSE;
226 *next_vp = NULLVP;
227 while (1) {
228 mount_t tmp;
229
230 if (tvp == dvp) {
231 *is_subdir = TRUE;
232 break;
233 } else if (tvp == rootvnode) {
234 /* *is_subdir = FALSE */
235 break;
236 }
237
238 tmp = tvp->v_mount;
239 while ((tvp->v_flag & VROOT) && tmp && tmp->mnt_vnodecovered &&
240 tvp != dvp && tvp != rootvnode) {
241 tvp = tmp->mnt_vnodecovered;
242 tmp = tvp->v_mount;
243 }
244
245 /*
246 * If dvp is not at the top of a mount "stack" then
247 * vp is not a subdirectory of dvp either.
248 */
249 if (tvp == dvp || tvp == rootvnode) {
250 /* *is_subdir = FALSE */
251 break;
252 }
253
254 if (!tmp) {
255 defer = TRUE;
256 *next_vp = NULLVP;
257 break;
258 }
259
260 if ((tvp->v_flag & VISHARDLINK) || !(tvp->v_parent)) {
261 defer = TRUE;
262 *next_vp = tvp;
263 break;
264 }
91447636 265
39037602
A
266 tvp = tvp->v_parent;
267 }
268
0a7de745 269 return defer;
39037602
A
270}
271
272/* maximum times retry from potentially transient errors in vnode_issubdir */
273#define MAX_ERROR_RETRY 3
274
275/*
276 * This function checks if a given directory (vp) is a subdirectory of dvp.
277 * It walks backwards from vp and if it hits dvp in its parent chain,
278 * it is a subdirectory. If it encounters the root directory, it is not
279 * a subdirectory.
280 *
281 * This function returns an error if it is unsuccessful and 0 on success.
282 *
283 * On entry (and exit) vp has an iocount and if this function has to take
284 * any iocounts on other vnodes in the parent chain traversal, it releases them.
285 */
286int
287vnode_issubdir(vnode_t vp, vnode_t dvp, int *is_subdir, vfs_context_t ctx)
288{
289 vnode_t start_vp, tvp;
290 vnode_t vp_with_iocount;
291 int error = 0;
292 char dotdotbuf[] = "..";
293 int error_retry_count = 0; /* retry count for potentially transient
0a7de745 294 * errors */
39037602
A
295
296 *is_subdir = FALSE;
297 tvp = start_vp = vp;
298 /*
299 * Anytime we acquire an iocount in this function, we save the vnode
300 * in this variable and release it before exiting.
301 */
302 vp_with_iocount = NULLVP;
303
304 while (1) {
305 boolean_t defer;
306 vnode_t pvp;
307 uint32_t vid;
308 struct componentname cn;
309 boolean_t is_subdir_locked = FALSE;
310
311 if (tvp == dvp) {
312 *is_subdir = TRUE;
313 break;
314 } else if (tvp == rootvnode) {
315 /* *is_subdir = FALSE */
316 break;
317 }
318
319 NAME_CACHE_LOCK_SHARED();
320
321 defer = cache_check_vnode_issubdir(tvp, dvp, &is_subdir_locked,
322 &tvp);
323
0a7de745 324 if (defer && tvp) {
39037602 325 vid = vnode_vid(tvp);
0a7de745 326 }
39037602
A
327
328 NAME_CACHE_UNLOCK();
329
330 if (!defer) {
331 *is_subdir = is_subdir_locked;
332 break;
333 }
334
335 if (!tvp) {
336 if (error_retry_count++ < MAX_ERROR_RETRY) {
337 tvp = vp;
338 continue;
339 }
340 error = ENOENT;
341 break;
342 }
343
344 if (tvp != start_vp) {
345 if (vp_with_iocount) {
346 vnode_put(vp_with_iocount);
347 vp_with_iocount = NULLVP;
348 }
349
350 error = vnode_getwithvid(tvp, vid);
351 if (error) {
352 if (error_retry_count++ < MAX_ERROR_RETRY) {
353 tvp = vp;
354 error = 0;
355 continue;
356 }
357 break;
358 }
359
360 vp_with_iocount = tvp;
361 }
362
363 bzero(&cn, sizeof(cn));
364 cn.cn_nameiop = LOOKUP;
365 cn.cn_flags = ISLASTCN | ISDOTDOT;
366 cn.cn_context = ctx;
367 cn.cn_pnbuf = &dotdotbuf[0];
368 cn.cn_pnlen = sizeof(dotdotbuf);
369 cn.cn_nameptr = cn.cn_pnbuf;
370 cn.cn_namelen = 2;
371
372 pvp = NULLVP;
0a7de745 373 if ((error = VNOP_LOOKUP(tvp, &pvp, &cn, ctx))) {
39037602 374 break;
0a7de745 375 }
39037602
A
376
377 if (!(tvp->v_flag & VISHARDLINK) && tvp->v_parent != pvp) {
378 (void)vnode_update_identity(tvp, pvp, NULL, 0, 0,
379 VNODE_UPDATE_PARENT);
380 }
381
0a7de745 382 if (vp_with_iocount) {
39037602 383 vnode_put(vp_with_iocount);
0a7de745 384 }
39037602
A
385
386 vp_with_iocount = tvp = pvp;
387 }
388
0a7de745 389 if (vp_with_iocount) {
39037602 390 vnode_put(vp_with_iocount);
0a7de745 391 }
39037602 392
0a7de745 393 return error;
39037602 394}
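/*
 * Illustrative sketch (not part of the original source): a typical use of
 * vnode_issubdir() is rejecting an operation that would move a directory
 * underneath one of its own descendants.  Both vnodes are assumed to hold
 * iocounts, as the function requires.
 */
#if 0   /* example only, never compiled */
static int
example_reject_dir_cycle(vnode_t moving_dvp, vnode_t target_dvp, vfs_context_t ctx)
{
	int is_subdir = 0;
	int error;

	error = vnode_issubdir(target_dvp, moving_dvp, &is_subdir, ctx);
	if (error) {
		return error;            /* e.g. ENOENT after too many retries */
	}
	return is_subdir ? EINVAL : 0;   /* target lies under the directory being moved */
}
#endif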
91447636 395
b0d623f7 396/*
5ba3f43e
A
397 * This function builds the path in "buff" from the supplied vnode.
398 * The length of the buffer *INCLUDING* the trailing zero byte is
399 * returned in outlen. NOTE: the length includes the trailing zero
400 * byte and thus the length is one greater than what strlen would
401 * return. This is important and lots of code elsewhere in the kernel
402 * assumes this behavior.
0a7de745
A
403 *
404 * This function can call a VNOP into the file system if the parent vnode
405 * does not exist, or when called for hardlinks via a volfs path.
b0d623f7
A
406 * If BUILDPATH_NO_FS_ENTER is set in flags, it only uses values present
407 * in the name cache and does not enter the file system.
408 *
0a7de745
A
409 * If BUILDPATH_CHECK_MOVED is set in flags, we return EAGAIN when
410 * we encounter ENOENT during path reconstruction. ENOENT means that
411 * one of the parents moved while we were building the path. The
316670eb
A
412 * caller can handle this case specially by calling build_path again.
413 *
0a7de745
A
414 * If BUILDPATH_VOLUME_RELATIVE is set in flags, we return a path
415 * that is relative to the nearest mount point, i.e. we do not
416 * cross over mount points while building the path.
39236c6e 417 *
b0d623f7 418 * passed in vp must have a valid io_count reference
5ba3f43e
A
419 *
420 * If parent vnode is non-NULL it also must have an io count. This
421 * allows build_path_with_parent to be safely called for operations
422 * unlink, rmdir and rename that already have io counts on the target
423 * and the directory. In this way build_path_with_parent does not have
424 * to try and obtain an additional io count on the parent. Taking an
425 * io count on the parent can lead to deadlock if a forced unmount
426 * occurs at the right moment. For a fuller explanation of how this
427 * can occur see the comment for vn_getpath_with_parent.
428 *
b0d623f7 429 */
91447636 430int
5ba3f43e 431build_path_with_parent(vnode_t first_vp, vnode_t parent_vp, char *buff, int buflen, int *outlen, int flags, vfs_context_t ctx)
91447636 432{
0a7de745 433 vnode_t vp, tvp;
b0d623f7 434 vnode_t vp_with_iocount;
0a7de745 435 vnode_t proc_root_dir_vp;
2d21ac55
A
436 char *end;
437 const char *str;
438 int len;
439 int ret = 0;
440 int fixhardlink;
441
0a7de745
A
442 if (first_vp == NULLVP) {
443 return EINVAL;
444 }
445
446 if (buflen <= 1) {
447 return ENOSPC;
448 }
b0d623f7
A
449
450 /*
451 * Grab the process fd so we can evaluate fd_rdir.
452 */
0a7de745 453 if (vfs_context_proc(ctx)->p_fd) {
b0d623f7 454 proc_root_dir_vp = vfs_context_proc(ctx)->p_fd->fd_rdir;
0a7de745 455 } else {
b0d623f7 456 proc_root_dir_vp = NULL;
0a7de745 457 }
b0d623f7
A
458
459 vp_with_iocount = NULLVP;
2d21ac55
A
460again:
461 vp = first_vp;
b0d623f7 462
0a7de745 463 end = &buff[buflen - 1];
91447636
A
464 *end = '\0';
465
b0d623f7
A
466 /*
467 * holding the NAME_CACHE_LOCK in shared mode is
468 * sufficient to stabilize both the vp->v_parent chain
469 * and the 'vp->v_mount->mnt_vnodecovered' chain
470 *
471 * if we need to drop this lock, we must first grab the v_id
472 * from the vnode we're currently working with... if that
473 * vnode doesn't already have an io_count reference (the vp
474 * passed in comes with one), we must grab a reference
475 * after we drop the NAME_CACHE_LOCK via vnode_getwithvid...
476 * deadlocks may result if you call vnode_get while holding
477 * the NAME_CACHE_LOCK... we lazily release the reference
0a7de745 478 * we pick up the next time we encounter a need to drop
b0d623f7
A
479 * the NAME_CACHE_LOCK or before we return from this routine
480 */
481 NAME_CACHE_LOCK_SHARED();
482
cb323159
A
483#if CONFIG_FIRMLINKS
484 if (!(flags & BUILDPATH_NO_FIRMLINK) &&
485 (vp->v_flag & VFMLINKTARGET) && vp->v_fmlink) {
486 vp = vp->v_fmlink;
487 }
488#endif
489
b0d623f7
A
490 /*
491 * Check if this is the root of a file system.
492 */
2d21ac55
A
493 while (vp && vp->v_flag & VROOT) {
494 if (vp->v_mount == NULL) {
b0d623f7
A
495 ret = EINVAL;
496 goto out_unlock;
2d21ac55 497 }
0a7de745 498 if ((vp->v_mount->mnt_flag & MNT_ROOTFS) || (vp == proc_root_dir_vp)) {
2d21ac55
A
499 /*
500 * It's the root of the root file system, so it's
501 * just "/".
502 */
0a7de745 503 *--end = '/';
b0d623f7
A
504
505 goto out_unlock;
91447636 506 } else {
0a7de745
A
507 /*
508 * This is the root of the volume and the caller does not
509 * want to cross mount points. Therefore just return
510 * '/' as the relative path.
39236c6e 511 */
cb323159
A
512#if CONFIG_FIRMLINKS
513 if (!(flags & BUILDPATH_NO_FIRMLINK) &&
514 (vp->v_flag & VFMLINKTARGET) && vp->v_fmlink) {
515 vp = vp->v_fmlink;
516 } else
517#endif
39236c6e
A
518 if (flags & BUILDPATH_VOLUME_RELATIVE) {
519 *--end = '/';
520 goto out_unlock;
521 } else {
522 vp = vp->v_mount->mnt_vnodecovered;
523 }
91447636
A
524 }
525 }
91447636 526
2d21ac55 527 while ((vp != NULLVP) && (vp->v_parent != vp)) {
b0d623f7
A
528 int vid;
529
91447636 530 /*
2d21ac55
A
531 * For hardlinks the v_name may be stale, so if its OK
532 * to enter a file system, ask the file system for the
533 * name and parent (below).
91447636 534 */
2d21ac55 535 fixhardlink = (vp->v_flag & VISHARDLINK) &&
0a7de745
A
536 (vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
537 !(flags & BUILDPATH_NO_FS_ENTER);
b0d623f7 538
2d21ac55
A
539 if (!fixhardlink) {
540 str = vp->v_name;
b0d623f7 541
2d21ac55 542 if (str == NULL || *str == '\0') {
0a7de745 543 if (vp->v_parent != NULL) {
2d21ac55 544 ret = EINVAL;
0a7de745 545 } else {
2d21ac55 546 ret = ENOENT;
0a7de745 547 }
b0d623f7 548 goto out_unlock;
2d21ac55
A
549 }
550 len = strlen(str);
551 /*
552 * Check that there's enough space (including space for the '/')
553 */
554 if ((end - buff) < (len + 1)) {
555 ret = ENOSPC;
b0d623f7 556 goto out_unlock;
2d21ac55 557 }
b0d623f7
A
558 /*
559 * Copy the name backwards.
560 */
2d21ac55 561 str += len;
0a7de745
A
562
563 for (; len > 0; len--) {
564 *--end = *--str;
565 }
b0d623f7
A
566 /*
567 * Add a path separator.
568 */
2d21ac55 569 *--end = '/';
91447636 570 }
91447636 571
91447636 572 /*
2d21ac55 573 * Walk up the parent chain.
91447636 574 */
2d21ac55 575 if (((vp->v_parent != NULLVP) && !fixhardlink) ||
0a7de745 576 (flags & BUILDPATH_NO_FS_ENTER)) {
6d2010ae
A
577 /*
578 * In this if () block we are not allowed to enter the filesystem
579 * to conclusively get the most accurate parent identifier.
580 * As a result, if 'vp' does not identify '/' and it
581 * does not have a valid v_parent, then error out
582 * and disallow further path construction
583 */
584 if ((vp->v_parent == NULLVP) && (rootvnode != vp)) {
c18c124e
A
585 /*
586 * Only '/' is allowed to have a NULL parent
587 * pointer. Upper level callers should ideally
588 * re-drive name lookup on receiving a ENOENT.
589 */
590 ret = ENOENT;
6d2010ae
A
591
592 /* The code below will exit early if 'tvp = vp' == NULL */
593 }
6d2010ae 594 vp = vp->v_parent;
316670eb 595
b0d623f7
A
596 /*
597 * if the vnode we have in hand isn't a directory and it
598 * has a v_parent, then we started with the resource fork
599 * so skip up to avoid getting a duplicate copy of the
600 * file name in the path.
601 */
316670eb 602 if (vp && !vnode_isdir(vp) && vp->v_parent) {
b0d623f7 603 vp = vp->v_parent;
316670eb 604 }
b0d623f7
A
605 } else {
606 /*
607 * No parent, go get it if supported.
608 */
2d21ac55
A
609 struct vnode_attr va;
610 vnode_t dvp;
2d21ac55 611
b0d623f7
A
612 /*
613 * Make sure file system supports obtaining a path from id.
614 */
2d21ac55
A
615 if (!(vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID)) {
616 ret = ENOENT;
b0d623f7 617 goto out_unlock;
2d21ac55 618 }
b0d623f7
A
619 vid = vp->v_id;
620
2d21ac55 621 NAME_CACHE_UNLOCK();
91447636 622
5ba3f43e 623 if (vp != first_vp && vp != parent_vp && vp != vp_with_iocount) {
b0d623f7
A
624 if (vp_with_iocount) {
625 vnode_put(vp_with_iocount);
626 vp_with_iocount = NULLVP;
627 }
0a7de745 628 if (vnode_getwithvid(vp, vid)) {
b0d623f7 629 goto again;
0a7de745 630 }
b0d623f7
A
631 vp_with_iocount = vp;
632 }
2d21ac55
A
633 VATTR_INIT(&va);
634 VATTR_WANTED(&va, va_parentid);
b0d623f7 635
2d21ac55
A
636 if (fixhardlink) {
637 VATTR_WANTED(&va, va_name);
638 MALLOC_ZONE(va.va_name, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
91447636 639 } else {
2d21ac55
A
640 va.va_name = NULL;
641 }
b0d623f7
A
642 /*
643 * Ask the file system for its parent id and for its name (optional).
644 */
2d21ac55 645 ret = vnode_getattr(vp, &va, ctx);
935ed37a 646
2d21ac55 647 if (fixhardlink) {
935ed37a
A
648 if ((ret == 0) && (VATTR_IS_SUPPORTED(&va, va_name))) {
649 str = va.va_name;
b0d623f7 650 vnode_update_identity(vp, NULL, str, strlen(str), 0, VNODE_UPDATE_NAME);
935ed37a
A
651 } else if (vp->v_name) {
652 str = vp->v_name;
653 ret = 0;
654 } else {
655 ret = ENOENT;
656 goto bad_news;
657 }
658 len = strlen(str);
2d21ac55 659
b0d623f7
A
660 /*
661 * Check that there's enough space.
662 */
935ed37a
A
663 if ((end - buff) < (len + 1)) {
664 ret = ENOSPC;
665 } else {
666 /* Copy the name backwards. */
667 str += len;
b0d623f7 668
935ed37a
A
669 for (; len > 0; len--) {
670 *--end = *--str;
2d21ac55 671 }
b0d623f7
A
672 /*
673 * Add a path separator.
674 */
935ed37a 675 *--end = '/';
2d21ac55 676 }
b0d623f7 677bad_news:
2d21ac55
A
678 FREE_ZONE(va.va_name, MAXPATHLEN, M_NAMEI);
679 }
680 if (ret || !VATTR_IS_SUPPORTED(&va, va_parentid)) {
2d21ac55
A
681 ret = ENOENT;
682 goto out;
683 }
b0d623f7
A
684 /*
685 * Ask the file system for the parent vnode.
686 */
0a7de745 687 if ((ret = VFS_VGET(vp->v_mount, (ino64_t)va.va_parentid, &dvp, ctx))) {
2d21ac55 688 goto out;
0a7de745 689 }
b0d623f7 690
0a7de745 691 if (!fixhardlink && (vp->v_parent != dvp)) {
2d21ac55 692 vnode_update_identity(vp, dvp, NULL, 0, 0, VNODE_UPDATE_PARENT);
0a7de745 693 }
b0d623f7 694
0a7de745 695 if (vp_with_iocount) {
b0d623f7 696 vnode_put(vp_with_iocount);
0a7de745 697 }
2d21ac55 698 vp = dvp;
b0d623f7 699 vp_with_iocount = vp;
2d21ac55 700
b0d623f7 701 NAME_CACHE_LOCK_SHARED();
2d21ac55 702
b0d623f7
A
703 /*
704 * if the vnode we have in hand isn't a directory and it
705 * has a v_parent, then we started with the resource fork
706 * so skip up to avoid getting a duplicate copy of the
707 * file name in the path.
708 */
0a7de745 709 if (vp && !vnode_isdir(vp) && vp->v_parent) {
b0d623f7 710 vp = vp->v_parent;
0a7de745 711 }
91447636 712 }
39236c6e 713
39037602
A
714 if (vp && (flags & BUILDPATH_CHECKACCESS)) {
715 vid = vp->v_id;
716
717 NAME_CACHE_UNLOCK();
718
5ba3f43e 719 if (vp != first_vp && vp != parent_vp && vp != vp_with_iocount) {
39037602
A
720 if (vp_with_iocount) {
721 vnode_put(vp_with_iocount);
722 vp_with_iocount = NULLVP;
723 }
0a7de745 724 if (vnode_getwithvid(vp, vid)) {
39037602 725 goto again;
0a7de745 726 }
39037602
A
727 vp_with_iocount = vp;
728 }
0a7de745
A
729 if ((ret = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx))) {
730 goto out; /* no peeking */
731 }
39037602
A
732 NAME_CACHE_LOCK_SHARED();
733 }
734
91447636 735 /*
2d21ac55
A
736 * When a mount point is crossed switch the vp.
737 * Continue until we find the root or we find
738 * a vnode that's not the root of a mounted
739 * file system.
91447636 740 */
b0d623f7
A
741 tvp = vp;
742
743 while (tvp) {
0a7de745
A
744 if (tvp == proc_root_dir_vp) {
745 goto out_unlock; /* encountered the root */
746 }
cb323159
A
747
748#if CONFIG_FIRMLINKS
749 if (!(flags & BUILDPATH_NO_FIRMLINK) &&
750 (tvp->v_flag & VFMLINKTARGET) && tvp->v_fmlink) {
751 tvp = tvp->v_fmlink;
752 break;
753 }
754#endif
755
0a7de745
A
756 if (!(tvp->v_flag & VROOT) || !tvp->v_mount) {
757 break; /* not the root of a mounted FS */
758 }
39236c6e
A
759 if (flags & BUILDPATH_VOLUME_RELATIVE) {
760 /* Do not cross over mount points */
761 tvp = NULL;
762 } else {
763 tvp = tvp->v_mount->mnt_vnodecovered;
764 }
b0d623f7 765 }
0a7de745 766 if (tvp == NULLVP) {
b0d623f7 767 goto out_unlock;
0a7de745 768 }
b0d623f7 769 vp = tvp;
91447636 770 }
b0d623f7 771out_unlock:
0c530ab8 772 NAME_CACHE_UNLOCK();
91447636 773out:
0a7de745 774 if (vp_with_iocount) {
b0d623f7 775 vnode_put(vp_with_iocount);
0a7de745 776 }
b0d623f7
A
777 /*
778 * Slide the name down to the beginning of the buffer.
779 */
91447636 780 memmove(buff, end, &buff[buflen] - end);
b0d623f7
A
781
782 /*
783 * length includes the trailing zero byte
784 */
785 *outlen = &buff[buflen] - end;
0a7de745
A
786
787 /* One of the parents was moved during path reconstruction.
788 * The caller is interested in knowing whether any of the
316670eb
A
789 * parents moved via BUILDPATH_CHECK_MOVED, so return EAGAIN.
790 */
791 if ((ret == ENOENT) && (flags & BUILDPATH_CHECK_MOVED)) {
792 ret = EAGAIN;
793 }
794
0a7de745 795 return ret;
91447636
A
796}
797
5ba3f43e
A
798int
799build_path(vnode_t first_vp, char *buff, int buflen, int *outlen, int flags, vfs_context_t ctx)
800{
0a7de745 801 return build_path_with_parent(first_vp, NULL, buff, buflen, outlen, flags, ctx);
5ba3f43e 802}
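/*
 * Illustrative sketch (not part of the original source): calling build_path()
 * with a MAXPATHLEN buffer.  Remember that *outlen counts the trailing NUL,
 * so the strlen() of the result is (outlen - 1).  BUILDPATH_NO_FS_ENTER keeps
 * the reconstruction inside the name cache.
 */
#if 0   /* example only, never compiled */
static int
example_log_vnode_path(vnode_t vp, vfs_context_t ctx)
{
	char path[MAXPATHLEN];
	int  len = 0;
	int  error;

	error = build_path(vp, path, MAXPATHLEN, &len, BUILDPATH_NO_FS_ENTER, ctx);
	if (error == 0) {
		printf("vp %p -> %s (outlen %d, strlen %d)\n", vp, path, len, len - 1);
	}
	return error;
}
#endif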
1c79356b
A
803
804/*
91447636
A
805 * return NULLVP if vp's parent doesn't
806 * exist, or we can't get a valid iocount
807 * else return the parent of vp
1c79356b 808 */
91447636
A
809vnode_t
810vnode_getparent(vnode_t vp)
811{
0a7de745
A
812 vnode_t pvp = NULLVP;
813 int pvid;
91447636 814
0c530ab8 815 NAME_CACHE_LOCK_SHARED();
cb323159
A
816
817 pvp = vp->v_parent;
818
91447636
A
819 /*
820 * v_parent is stable behind the name_cache lock
821 * however, the only thing we can really guarantee
822 * is that we've grabbed a valid iocount on the
823 * parent of 'vp' at the time we took the name_cache lock...
824 * once we drop the lock, vp could get re-parented
825 */
cb323159 826 if (pvp != NULLVP) {
0a7de745 827 pvid = pvp->v_id;
91447636 828
0c530ab8 829 NAME_CACHE_UNLOCK();
91447636 830
0a7de745
A
831 if (vnode_getwithvid(pvp, pvid) != 0) {
832 pvp = NULL;
833 }
834 } else {
835 NAME_CACHE_UNLOCK();
836 }
837 return pvp;
91447636
A
838}
839
2d21ac55 840const char *
91447636
A
841vnode_getname(vnode_t vp)
842{
0a7de745
A
843 const char *name = NULL;
844
b0d623f7 845 NAME_CACHE_LOCK_SHARED();
0a7de745
A
846
847 if (vp->v_name) {
848 name = vfs_addname(vp->v_name, strlen(vp->v_name), 0, 0);
849 }
0c530ab8 850 NAME_CACHE_UNLOCK();
91447636 851
0a7de745 852 return name;
1c79356b 853}
91447636
A
854
855void
2d21ac55 856vnode_putname(const char *name)
91447636 857{
b0d623f7 858 vfs_removename(name);
91447636
A
859}
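/*
 * Illustrative sketch (not part of the original source): vnode_getparent()
 * returns its result with an iocount that must be dropped with vnode_put(),
 * and vnode_getname() returns a string reference that must be released with
 * vnode_putname().
 */
#if 0   /* example only, never compiled */
static void
example_log_parent_and_name(vnode_t vp)
{
	vnode_t     pvp  = vnode_getparent(vp);
	const char *name = vnode_getname(vp);

	printf("vp %p name %s parent %p\n", vp, name ? name : "(unnamed)", pvp);

	if (name) {
		vnode_putname(name);
	}
	if (pvp != NULLVP) {
		vnode_put(pvp);
	}
}
#endif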
860
39236c6e
A
861static const char unknown_vnodename[] = "(unknown vnode name)";
862
863const char *
864vnode_getname_printable(vnode_t vp)
865{
866 const char *name = vnode_getname(vp);
0a7de745 867 if (name != NULL) {
39236c6e 868 return name;
0a7de745
A
869 }
870
39236c6e 871 switch (vp->v_type) {
0a7de745
A
872 case VCHR:
873 case VBLK:
874 {
875 /*
876 * Create an artificial dev name from
877 * major and minor device number
878 */
879 char dev_name[64];
880 (void) snprintf(dev_name, sizeof(dev_name),
881 "%c(%u, %u)", VCHR == vp->v_type ? 'c':'b',
882 major(vp->v_rdev), minor(vp->v_rdev));
883 /*
884 * Add the newly created dev name to the name
885 * cache to allow easier cleanup. Also,
886 * vfs_addname allocates memory for the new name
887 * and returns it.
888 */
889 NAME_CACHE_LOCK_SHARED();
890 name = vfs_addname(dev_name, strlen(dev_name), 0, 0);
891 NAME_CACHE_UNLOCK();
892 return name;
893 }
894 default:
895 return unknown_vnodename;
39236c6e
A
896 }
897}
898
0a7de745 899void
39236c6e
A
900vnode_putname_printable(const char *name)
901{
0a7de745 902 if (name == unknown_vnodename) {
39236c6e 903 return;
0a7de745 904 }
39236c6e
A
905 vnode_putname(name);
906}
0a7de745 907
91447636
A
908
909/*
910 * if VNODE_UPDATE_PARENT, and we can take
911 * a reference on dvp, then update vp with
912 * its new parent... if vp already has a parent,
913 * then drop the reference vp held on it
914 *
915 * if VNODE_UPDATE_NAME,
916 * then drop string ref on v_name if it exists, and if name is non-NULL
917 * then pick up a string reference on name and record it in v_name...
918 * optionally pass in the length and hashval of name if known
919 *
920 * if VNODE_UPDATE_CACHE, flush the name cache entries associated with vp
921 */
922void
b0d623f7 923vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, uint32_t name_hashval, int flags)
91447636 924{
0a7de745
A
925 struct namecache *ncp;
926 vnode_t old_parentvp = NULLVP;
2d21ac55
A
927 int isstream = (vp->v_flag & VISNAMEDSTREAM);
928 int kusecountbumped = 0;
b0d623f7
A
929 kauth_cred_t tcred = NULL;
930 const char *vname = NULL;
931 const char *tname = NULL;
91447636
A
932
933 if (flags & VNODE_UPDATE_PARENT) {
0a7de745 934 if (dvp && vnode_ref(dvp) != 0) {
2d21ac55
A
935 dvp = NULLVP;
936 }
2d21ac55
A
937 /* Don't count a stream's parent ref during unmounts */
938 if (isstream && dvp && (dvp != vp) && (dvp != vp->v_parent) && (dvp->v_type == VREG)) {
939 vnode_lock_spin(dvp);
940 ++dvp->v_kusecount;
941 kusecountbumped = 1;
942 vnode_unlock(dvp);
943 }
2d21ac55 944 } else {
0a7de745 945 dvp = NULLVP;
2d21ac55 946 }
0a7de745 947 if ((flags & VNODE_UPDATE_NAME)) {
b0d623f7
A
948 if (name != vp->v_name) {
949 if (name && *name) {
0a7de745 950 if (name_len == 0) {
b0d623f7 951 name_len = strlen(name);
0a7de745
A
952 }
953 tname = vfs_addname(name, name_len, name_hashval, 0);
b0d623f7 954 }
0a7de745 955 } else {
b0d623f7 956 flags &= ~VNODE_UPDATE_NAME;
0a7de745 957 }
b0d623f7 958 }
cb323159 959 if ((flags & (VNODE_UPDATE_PURGE | VNODE_UPDATE_PARENT | VNODE_UPDATE_CACHE | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGEFIRMLINK))) {
b0d623f7
A
960 NAME_CACHE_LOCK();
961
cb323159
A
962#if CONFIG_FIRMLINKS
963 if (flags & VNODE_UPDATE_PURGEFIRMLINK) {
964 vnode_t old_fvp = vp->v_fmlink;
965 if (old_fvp) {
966 vnode_lock_spin(vp);
967 vp->v_flag &= ~VFMLINKTARGET;
968 vp->v_fmlink = NULLVP;
969 vnode_unlock(vp);
970 NAME_CACHE_UNLOCK();
971
972 /*
973 * vnode_rele can result in cascading series of
974 * usecount releases. The combination of calling
975 * vnode_recycle and dont_reenter (3rd arg to
976 * vnode_rele_internal) ensures we don't have
977 * that issue.
978 */
979 vnode_recycle(old_fvp);
980 vnode_rele_internal(old_fvp, O_EVTONLY, 1, 0);
981
982 NAME_CACHE_LOCK();
983 }
984 }
985#endif
986
0a7de745
A
987 if ((flags & VNODE_UPDATE_PURGE)) {
988 if (vp->v_parent) {
b0d623f7 989 vp->v_parent->v_nc_generation++;
0a7de745 990 }
b0d623f7 991
0a7de745 992 while ((ncp = LIST_FIRST(&vp->v_nclinks))) {
b0d623f7 993 cache_delete(ncp, 1);
0a7de745 994 }
b0d623f7 995
0a7de745 996 while ((ncp = TAILQ_FIRST(&vp->v_ncchildren))) {
b0d623f7 997 cache_delete(ncp, 1);
0a7de745 998 }
b0d623f7
A
999
1000 /*
1001 * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
1002 */
1003 tcred = vp->v_cred;
1004 vp->v_cred = NOCRED;
1005 vp->v_authorized_actions = 0;
813fb2f6 1006 vp->v_cred_timestamp = 0;
91447636 1007 }
0a7de745 1008 if ((flags & VNODE_UPDATE_NAME)) {
b0d623f7
A
1009 vname = vp->v_name;
1010 vp->v_name = tname;
91447636 1011 }
b0d623f7
A
1012 if (flags & VNODE_UPDATE_PARENT) {
1013 if (dvp != vp && dvp != vp->v_parent) {
1014 old_parentvp = vp->v_parent;
1015 vp->v_parent = dvp;
1016 dvp = NULLVP;
91447636 1017
0a7de745 1018 if (old_parentvp) {
b0d623f7 1019 flags |= VNODE_UPDATE_CACHE;
0a7de745 1020 }
b0d623f7 1021 }
91447636 1022 }
b0d623f7 1023 if (flags & VNODE_UPDATE_CACHE) {
0a7de745 1024 while ((ncp = LIST_FIRST(&vp->v_nclinks))) {
b0d623f7 1025 cache_delete(ncp, 1);
0a7de745 1026 }
b0d623f7
A
1027 }
1028 NAME_CACHE_UNLOCK();
0a7de745
A
1029
1030 if (vname != NULL) {
b0d623f7 1031 vfs_removename(vname);
0a7de745 1032 }
b0d623f7 1033
0a7de745 1034 if (IS_VALID_CRED(tcred)) {
b0d623f7 1035 kauth_cred_unref(&tcred);
0a7de745 1036 }
b0d623f7 1037 }
2d21ac55 1038 if (dvp != NULLVP) {
2d21ac55
A
1039 /* Back-out the ref we took if we lost a race for vp->v_parent. */
1040 if (kusecountbumped) {
1041 vnode_lock_spin(dvp);
0a7de745
A
1042 if (dvp->v_kusecount > 0) {
1043 --dvp->v_kusecount;
1044 }
2d21ac55
A
1045 vnode_unlock(dvp);
1046 }
0a7de745 1047 vnode_rele(dvp);
2d21ac55 1048 }
91447636 1049 if (old_parentvp) {
0a7de745 1050 struct uthread *ut;
91447636 1051
2d21ac55 1052 if (isstream) {
0a7de745
A
1053 vnode_lock_spin(old_parentvp);
1054 if ((old_parentvp->v_type != VDIR) && (old_parentvp->v_kusecount > 0)) {
2d21ac55 1055 --old_parentvp->v_kusecount;
0a7de745 1056 }
2d21ac55
A
1057 vnode_unlock(old_parentvp);
1058 }
0a7de745 1059 ut = get_bsdthread_info(current_thread());
91447636
A
1060
1061 /*
1062 * indicate to vnode_rele that it shouldn't do a
1063 * vnode_reclaim at this time... instead it will
1064 * chain the vnode to the uu_vreclaims list...
1065 * we'll be responsible for calling vnode_reclaim
1066 * on each of the vnodes in this list...
1067 */
1068 ut->uu_defer_reclaims = 1;
1069 ut->uu_vreclaims = NULLVP;
1070
0a7de745
A
1071 while ((vp = old_parentvp) != NULLVP) {
1072 vnode_lock_spin(vp);
91447636
A
1073 vnode_rele_internal(vp, 0, 0, 1);
1074
1075 /*
1076 * check to see if the vnode is now in the state
1077 * that would have triggered a vnode_reclaim in vnode_rele
1078 * if it is, we save its parent pointer and then NULL
1079 * out the v_parent field... we'll drop the reference
1080 * that was held on the next iteration of this loop...
1081 * this short circuits a potential deep recursion if we
0a7de745 1082 * have a long chain of parents in this state...
91447636
A
1083 * we'll sit in this loop until we run into
1084 * a parent in this chain that is not in this state
1085 *
b0d623f7 1086 * make our check and the vnode_rele atomic
91447636
A
1087 * with respect to the current vnode we're working on
1088 * by holding the vnode lock
1089 * if vnode_rele deferred the vnode_reclaim and has put
1090 * this vnode on the list to be reaped by us, then
1091 * it has left this vnode with an iocount == 1
1092 */
0a7de745
A
1093 if ((vp->v_iocount == 1) && (vp->v_usecount == 0) &&
1094 ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
1095 /*
91447636
A
1096 * vnode_rele wanted to do a vnode_reclaim on this vnode
1097 * it should be sitting on the head of the uu_vreclaims chain
1098 * pull the parent pointer now so that when we do the
1099 * vnode_reclaim for each of the vnodes in the uu_vreclaims
1100 * list, we won't recurse back through here
2d21ac55
A
1101 *
1102 * need to do a convert here in case vnode_rele_internal
0a7de745 1103 * returns with the lock held in the spin mode... it
2d21ac55 1104 * can drop and retake the lock under certain circumstances
91447636 1105 */
0a7de745 1106 vnode_lock_convert(vp);
2d21ac55 1107
0a7de745 1108 NAME_CACHE_LOCK();
91447636
A
1109 old_parentvp = vp->v_parent;
1110 vp->v_parent = NULLVP;
0c530ab8 1111 NAME_CACHE_UNLOCK();
91447636 1112 } else {
0a7de745 1113 /*
91447636
A
1114 * we're done... we ran into a vnode that isn't
1115 * being terminated
1116 */
0a7de745 1117 old_parentvp = NULLVP;
91447636
A
1118 }
1119 vnode_unlock(vp);
1120 }
1121 ut->uu_defer_reclaims = 0;
1122
0a7de745
A
1123 while ((vp = ut->uu_vreclaims) != NULLVP) {
1124 ut->uu_vreclaims = vp->v_defer_reclaimlist;
1125
91447636
A
1126 /*
1127 * vnode_put will drive the vnode_reclaim if
1128 * we are still the only reference on this vnode
1129 */
1130 vnode_put(vp);
1131 }
1132 }
1c79356b 1133}
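/*
 * Illustrative sketch (not part of the original source): a file system that
 * has just learned a vnode's true name and parent (for example after
 * resolving a hard link) could publish both to the name layer in one call.
 * The exact flag combination shown is an assumption for the example.
 */
#if 0   /* example only, never compiled */
static void
example_publish_identity(vnode_t vp, vnode_t dvp, const char *realname)
{
	vnode_update_identity(vp, dvp, realname, (int)strlen(realname), 0,
	    VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE);
}
#endif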
91447636 1134
cb323159
A
1135#if CONFIG_FIRMLINKS
1136errno_t
1137vnode_setasfirmlink(vnode_t vp, vnode_t target_vp)
1138{
1139 int error = 0;
1140 vnode_t old_target_vp = NULLVP;
1141 vnode_t old_target_vp_v_fmlink = NULLVP;
1142 kauth_cred_t target_vp_cred = NULL;
1143 kauth_cred_t old_target_vp_cred = NULL;
1144
1145 if (!vp) {
1146 return EINVAL;
1147 }
1148
1149 if (target_vp) {
1150 if (vp->v_fmlink == target_vp) { /* Will be checked again under the name cache lock */
1151 return 0;
1152 }
1153
1154 /*
1155 * Firmlink source and target will take both a usecount
1156 * and kusecount on each other.
1157 */
1158 if ((error = vnode_ref_ext(target_vp, O_EVTONLY, VNODE_REF_FORCE))) {
1159 return error;
1160 }
1161
1162 if ((error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE))) {
1163 vnode_rele_ext(target_vp, O_EVTONLY, 1);
1164 return error;
1165 }
1166 }
1167
1168 NAME_CACHE_LOCK();
1169
1170 old_target_vp = vp->v_fmlink;
1171 if (target_vp && (target_vp == old_target_vp)) {
1172 NAME_CACHE_UNLOCK();
1173 return 0;
1174 }
1175 vp->v_fmlink = target_vp;
1176
1177 vnode_lock_spin(vp);
1178 vp->v_flag &= ~VFMLINKTARGET;
1179 vnode_unlock(vp);
1180
1181 if (target_vp) {
1182 target_vp->v_fmlink = vp;
1183 vnode_lock_spin(target_vp);
1184 target_vp->v_flag |= VFMLINKTARGET;
1185 vnode_unlock(target_vp);
1186 cache_purge_locked(vp, &target_vp_cred);
1187 }
1188
1189 if (old_target_vp) {
1190 old_target_vp_v_fmlink = old_target_vp->v_fmlink;
1191 old_target_vp->v_fmlink = NULLVP;
1192 vnode_lock_spin(old_target_vp);
1193 old_target_vp->v_flag &= ~VFMLINKTARGET;
1194 vnode_unlock(old_target_vp);
1195 cache_purge_locked(vp, &old_target_vp_cred);
1196 }
1197
1198 NAME_CACHE_UNLOCK();
1199
1200 if (target_vp_cred && IS_VALID_CRED(target_vp_cred)) {
1201 kauth_cred_unref(&target_vp_cred);
1202 }
1203
1204 if (old_target_vp) {
1205 if (old_target_vp_cred && IS_VALID_CRED(old_target_vp_cred)) {
1206 kauth_cred_unref(&old_target_vp_cred);
1207 }
1208
1209 vnode_rele_ext(old_target_vp, O_EVTONLY, 1);
1210 if (old_target_vp_v_fmlink) {
1211 vnode_rele_ext(old_target_vp_v_fmlink, O_EVTONLY, 1);
1212 }
1213 }
1214
1215 return 0;
1216}
1217
1218errno_t
1219vnode_getfirmlink(vnode_t vp, vnode_t *target_vp)
1220{
1221 int error;
1222
1223 if (!vp->v_fmlink) {
1224 return ENODEV;
1225 }
1226
1227 NAME_CACHE_LOCK_SHARED();
1228 if (vp->v_fmlink && !(vp->v_flag & VFMLINKTARGET) &&
1229 (vnode_get(vp->v_fmlink) == 0)) {
1230 vnode_t tvp = vp->v_fmlink;
1231
1232 vnode_lock_spin(tvp);
1233 if (tvp->v_lflag & (VL_TERMINATE | VL_DEAD)) {
1234 vnode_unlock(tvp);
1235 NAME_CACHE_UNLOCK();
1236 vnode_put(tvp);
1237 return ENOENT;
1238 }
1239 if (!(tvp->v_flag & VFMLINKTARGET)) {
1240 panic("firmlink target for vnode %p does not have flag set", vp);
1241 }
1242 vnode_unlock(tvp);
1243 *target_vp = tvp;
1244 error = 0;
1245 } else {
1246 *target_vp = NULLVP;
1247 error = ENODEV;
1248 }
1249 NAME_CACHE_UNLOCK();
1250 return error;
1251}
1252
1253#else /* CONFIG_FIRMLINKS */
1254
1255errno_t
1256vnode_setasfirmlink(__unused vnode_t vp, __unused vnode_t src_vp)
1257{
1258 return ENOTSUP;
1259}
1260
1261errno_t
1262vnode_getfirmlink(__unused vnode_t vp, __unused vnode_t *target_vp)
1263{
1264 return ENOTSUP;
1265}
1266
1267#endif
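/*
 * Illustrative sketch (not part of the original source): on success
 * vnode_getfirmlink() hands back the firmlink target with the reference
 * taken inside the CONFIG_FIRMLINKS implementation above, which the caller
 * is assumed to drop with vnode_put() once it is done with the target.
 */
#if 0   /* example only, never compiled */
static void
example_follow_firmlink(vnode_t vp)
{
	vnode_t target = NULLVP;

	if (vnode_getfirmlink(vp, &target) == 0) {
		/* ... operate on 'target' in place of 'vp' ... */
		vnode_put(target);
	}
}
#endif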
1c79356b
A
1268
1269/*
91447636
A
1270 * Mark a vnode as having multiple hard links. HFS makes use of this
1271 * because it keeps track of each link separately, and wants to know
1272 * which link was actually used.
1273 *
1274 * This will cause the name cache to force a VNOP_LOOKUP on the vnode
1275 * so that HFS can post-process the lookup. Also, volfs will call
1276 * VNOP_GETATTR2 to determine the parent, instead of using v_parent.
1c79356b 1277 */
0a7de745
A
1278void
1279vnode_setmultipath(vnode_t vp)
91447636 1280{
2d21ac55 1281 vnode_lock_spin(vp);
91447636
A
1282
1283 /*
1284 * In theory, we're changing the vnode's identity as far as the
1285 * name cache is concerned, so we ought to grab the name cache lock
1286 * here. However, there is already a race, and grabbing the name
1287 * cache lock only makes the race window slightly smaller.
1288 *
1289 * The race happens because the vnode already exists in the name
1290 * cache, and could be found by one thread before another thread
1291 * can set the hard link flag.
1292 */
1293
1294 vp->v_flag |= VISHARDLINK;
1295
1296 vnode_unlock(vp);
1297}
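/*
 * Illustrative sketch (not part of the original source): a file system would
 * typically mark a vnode this way when it notices, e.g. at lookup time, that
 * the underlying object carries more than one hard link, so that the name
 * layer stops trusting v_parent/v_name for it.  The trigger condition shown
 * is an assumption for the example.
 */
#if 0   /* example only, never compiled */
static void
example_mark_if_hardlinked(vnode_t vp, uint32_t linkcount, enum vtype type)
{
	if (type == VREG && linkcount > 1) {
		vnode_setmultipath(vp);
	}
}
#endif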
1298
1299
2d21ac55
A
1300
1301/*
1302 * backwards compatibility
1303 */
0a7de745
A
1304void
1305vnode_uncache_credentials(vnode_t vp)
91447636 1306{
0a7de745 1307 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
91447636
A
1308}
1309
1310
2d21ac55
A
1311/*
1312 * use the exclusive form of NAME_CACHE_LOCK to protect the update of the
1313 * following fields in the vnode: v_cred_timestamp, v_cred, v_authorized_actions
1314 * we use this lock so that we can look at the v_cred and v_authorized_actions
1315 * atomically while behind the NAME_CACHE_LOCK in shared mode in 'cache_lookup_path',
1316 * which is the super-hot path... if we are updating the authorized actions for this
1317 * vnode, we are already in the super-slow and far less frequented path so its not
1318 * that bad that we take the lock exclusive for this case... of course we strive
1319 * to hold it for the minimum amount of time possible
1320 */
91447636 1321
0a7de745
A
1322void
1323vnode_uncache_authorized_action(vnode_t vp, kauth_action_t action)
2d21ac55 1324{
0a7de745 1325 kauth_cred_t tcred = NOCRED;
91447636 1326
2d21ac55 1327 NAME_CACHE_LOCK();
91447636 1328
2d21ac55 1329 vp->v_authorized_actions &= ~action;
91447636 1330
2d21ac55
A
1331 if (action == KAUTH_INVALIDATE_CACHED_RIGHTS &&
1332 IS_VALID_CRED(vp->v_cred)) {
0a7de745 1333 /*
2d21ac55
A
1334 * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
1335 */
0a7de745 1336 tcred = vp->v_cred;
2d21ac55 1337 vp->v_cred = NOCRED;
91447636 1338 }
2d21ac55 1339 NAME_CACHE_UNLOCK();
0c530ab8 1340
0a7de745 1341 if (tcred != NOCRED) {
0c530ab8 1342 kauth_cred_unref(&tcred);
0a7de745 1343 }
91447636
A
1344}
1345
2d21ac55 1346
0a7de745 1347extern int bootarg_vnode_cache_defeat; /* default = 0, from bsd_init.c */
6d2010ae
A
1348
1349boolean_t
1350vnode_cache_is_authorized(vnode_t vp, vfs_context_t ctx, kauth_action_t action)
91447636 1351{
0a7de745
A
1352 kauth_cred_t ucred;
1353 boolean_t retval = FALSE;
91447636 1354
6d2010ae 1355 /* Boot argument to defeat rights caching */
0a7de745 1356 if (bootarg_vnode_cache_defeat) {
6d2010ae 1357 return FALSE;
0a7de745 1358 }
6d2010ae 1359
0a7de745
A
1360 if ((vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
1361 /*
2d21ac55
A
1362 * a TTL is enabled on the rights cache... handle it here
1363 * a TTL of 0 indicates that no rights should be cached
1364 */
0a7de745
A
1365 if (vp->v_mount->mnt_authcache_ttl) {
1366 if (!(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL)) {
1367 /*
2d21ac55
A
1368 * For filesystems marked only MNTK_AUTH_OPAQUE (generally network ones),
1369 * we will only allow a SEARCH right on a directory to be cached...
1370 * that cached right always has a default TTL associated with it
1371 */
0a7de745
A
1372 if (action != KAUTH_VNODE_SEARCH || vp->v_type != VDIR) {
1373 vp = NULLVP;
1374 }
2d21ac55
A
1375 }
1376 if (vp != NULLVP && vnode_cache_is_stale(vp) == TRUE) {
0a7de745 1377 vnode_uncache_authorized_action(vp, vp->v_authorized_actions);
2d21ac55
A
1378 vp = NULLVP;
1379 }
0a7de745
A
1380 } else {
1381 vp = NULLVP;
1382 }
2d21ac55
A
1383 }
1384 if (vp != NULLVP) {
0a7de745 1385 ucred = vfs_context_ucred(ctx);
91447636 1386
2d21ac55 1387 NAME_CACHE_LOCK_SHARED();
91447636 1388
0a7de745
A
1389 if (vp->v_cred == ucred && (vp->v_authorized_actions & action) == action) {
1390 retval = TRUE;
1391 }
1392
2d21ac55 1393 NAME_CACHE_UNLOCK();
91447636 1394 }
2d21ac55
A
1395 return retval;
1396}
91447636 1397
2d21ac55 1398
0a7de745
A
1399void
1400vnode_cache_authorized_action(vnode_t vp, vfs_context_t ctx, kauth_action_t action)
2d21ac55
A
1401{
1402 kauth_cred_t tcred = NOCRED;
1403 kauth_cred_t ucred;
1404 struct timeval tv;
1405 boolean_t ttl_active = FALSE;
1406
1407 ucred = vfs_context_ucred(ctx);
1408
0a7de745
A
1409 if (!IS_VALID_CRED(ucred) || action == 0) {
1410 return;
1411 }
2d21ac55 1412
0a7de745
A
1413 if ((vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
1414 /*
2d21ac55
A
1415 * a TTL is enabled on the rights cache... handle it here
1416 * a TTL of 0 indicates that no rights should be cached
91447636 1417 */
0a7de745
A
1418 if (vp->v_mount->mnt_authcache_ttl == 0) {
1419 return;
1420 }
91447636 1421
0a7de745
A
1422 if (!(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL)) {
1423 /*
2d21ac55
A
1424 * only cache SEARCH action for filesystems marked
1425 * MNTK_AUTH_OPAQUE on VDIRs...
1426 * the lookup_path code will time these out
1427 */
0a7de745
A
1428 if ((action & ~KAUTH_VNODE_SEARCH) || vp->v_type != VDIR) {
1429 return;
1430 }
91447636 1431 }
2d21ac55 1432 ttl_active = TRUE;
91447636 1433
2d21ac55
A
1434 microuptime(&tv);
1435 }
1436 NAME_CACHE_LOCK();
91447636 1437
2d21ac55 1438 if (vp->v_cred != ucred) {
0a7de745
A
1439 kauth_cred_ref(ucred);
1440 /*
2d21ac55
A
1441 * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
1442 */
1443 tcred = vp->v_cred;
1444 vp->v_cred = ucred;
1445 vp->v_authorized_actions = 0;
1446 }
1447 if (ttl_active == TRUE && vp->v_authorized_actions == 0) {
0a7de745 1448 /*
2d21ac55
A
1449 * only reset the timestamp on the
1450 * first authorization cached after the previous
1451 * timer has expired or we're switching creds...
0a7de745 1452 * 'vnode_cache_is_authorized' will clear the
2d21ac55
A
1453 * authorized actions if the TTL is active and
1454 * it has expired
1455 */
0a7de745 1456 vp->v_cred_timestamp = tv.tv_sec;
2d21ac55
A
1457 }
1458 vp->v_authorized_actions |= action;
91447636 1459
0c530ab8 1460 NAME_CACHE_UNLOCK();
91447636 1461
0a7de745 1462 if (IS_VALID_CRED(tcred)) {
2d21ac55 1463 kauth_cred_unref(&tcred);
0a7de745 1464 }
91447636
A
1465}
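/*
 * Illustrative sketch (not part of the original source): one way a caller
 * could pair the two routines above, consulting the cache first and
 * recording a granted right afterwards so that the next traversal of this
 * directory can short-circuit the full authorization check.
 */
#if 0   /* example only, never compiled */
static int
example_search_with_cache(vnode_t vp, vfs_context_t ctx)
{
	int error;

	if (vnode_cache_is_authorized(vp, ctx, KAUTH_VNODE_SEARCH)) {
		return 0;                 /* cached grant, nothing more to do */
	}
	error = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx);
	if (error == 0) {
		vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCH);
	}
	return error;
}
#endif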
1466
2d21ac55 1467
0a7de745
A
1468boolean_t
1469vnode_cache_is_stale(vnode_t vp)
2d21ac55 1470{
0a7de745
A
1471 struct timeval tv;
1472 boolean_t retval;
2d21ac55
A
1473
1474 microuptime(&tv);
1475
0a7de745
A
1476 if ((tv.tv_sec - vp->v_cred_timestamp) > vp->v_mount->mnt_authcache_ttl) {
1477 retval = TRUE;
1478 } else {
1479 retval = FALSE;
1480 }
2d21ac55
A
1481
1482 return retval;
1483}
1484
1485
1486
1487/*
1488 * Returns: 0 Success
4a3eedf9 1489 * ERECYCLE vnode was recycled from underneath us. Force lookup to be re-driven from namei.
0a7de745 1490 * This errno value should not be seen by anyone outside of the kernel.
2d21ac55 1491 */
0a7de745
A
1492int
1493cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp,
1494 vfs_context_t ctx, int *dp_authorized, vnode_t last_dp)
91447636 1495{
0a7de745
A
1496 char *cp; /* pointer into pathname argument */
1497 int vid;
1498 int vvid = 0; /* protected by vp != NULLVP */
1499 vnode_t vp = NULLVP;
1500 vnode_t tdp = NULLVP;
1501 kauth_cred_t ucred;
1502 boolean_t ttl_enabled = FALSE;
1503 struct timeval tv;
1504 mount_t mp;
1505 unsigned int hash;
1506 int error = 0;
1507 boolean_t dotdotchecked = FALSE;
91447636 1508
6d2010ae 1509#if CONFIG_TRIGGERS
0a7de745 1510 vnode_t trigger_vp;
6d2010ae
A
1511#endif /* CONFIG_TRIGGERS */
1512
2d21ac55 1513 ucred = vfs_context_ucred(ctx);
6d2010ae 1514 ndp->ni_flag &= ~(NAMEI_TRAILINGSLASH);
91447636 1515
0c530ab8 1516 NAME_CACHE_LOCK_SHARED();
91447636 1517
0a7de745 1518 if (dp->v_mount && (dp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
2d21ac55 1519 ttl_enabled = TRUE;
91447636
A
1520 microuptime(&tv);
1521 }
1522 for (;;) {
39236c6e 1523 /*
91447636
A
1524 * Search a directory.
1525 *
1526 * The cn_hash value is for use by cache_lookup
1527 * The last component of the filename is left accessible via
1528 * cnp->cn_nameptr for callers that need the name.
1529 */
0a7de745 1530 hash = 0;
91447636
A
1531 cp = cnp->cn_nameptr;
1532
1533 while (*cp && (*cp != '/')) {
b0d623f7 1534 hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
91447636
A
1535 }
1536 /*
1537 * the crc generator can legitimately generate
1538 * a 0... however, 0 for us means that we
1539 * haven't computed a hash, so use 1 instead
1540 */
0a7de745
A
1541 if (hash == 0) {
1542 hash = 1;
1543 }
91447636
A
1544 cnp->cn_hash = hash;
1545 cnp->cn_namelen = cp - cnp->cn_nameptr;
1546
1547 ndp->ni_pathlen -= cnp->cn_namelen;
1548 ndp->ni_next = cp;
1549
1550 /*
1551 * Replace multiple slashes by a single slash and trailing slashes
1552 * by a null. This must be done before VNOP_LOOKUP() because some
1553 * fs's don't know about trailing slashes. Remember if there were
1554 * trailing slashes to handle symlinks, existing non-directories
1555 * and non-existing files that won't be directories specially later.
1556 */
1557 while (*cp == '/' && (cp[1] == '/' || cp[1] == '\0')) {
0a7de745 1558 cp++;
91447636
A
1559 ndp->ni_pathlen--;
1560
1561 if (*cp == '\0') {
0a7de745 1562 ndp->ni_flag |= NAMEI_TRAILINGSLASH;
91447636
A
1563 *ndp->ni_next = '\0';
1564 }
1565 }
1566 ndp->ni_next = cp;
1567
1568 cnp->cn_flags &= ~(MAKEENTRY | ISLASTCN | ISDOTDOT);
1569
0a7de745
A
1570 if (*cp == '\0') {
1571 cnp->cn_flags |= ISLASTCN;
1572 }
91447636 1573
0a7de745
A
1574 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.') {
1575 cnp->cn_flags |= ISDOTDOT;
1576 }
91447636
A
1577
1578 *dp_authorized = 0;
2d21ac55
A
1579#if NAMEDRSRCFORK
1580 /*
1581 * Process a request for a file's resource fork.
1582 *
1583 * Consume the _PATH_RSRCFORKSPEC suffix and tag the path.
1584 */
1585 if ((ndp->ni_pathlen == sizeof(_PATH_RSRCFORKSPEC)) &&
1586 (cp[1] == '.' && cp[2] == '.') &&
1587 bcmp(cp, _PATH_RSRCFORKSPEC, sizeof(_PATH_RSRCFORKSPEC)) == 0) {
0a7de745 1588 /* Skip volfs file systems that don't support native streams. */
2d21ac55
A
1589 if ((dp->v_mount != NULL) &&
1590 (dp->v_mount->mnt_flag & MNT_DOVOLFS) &&
1591 (dp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) {
1592 goto skiprsrcfork;
1593 }
1594 cnp->cn_flags |= CN_WANTSRSRCFORK;
1595 cnp->cn_flags |= ISLASTCN;
1596 ndp->ni_next[0] = '\0';
1597 ndp->ni_pathlen = 1;
1598 }
1599skiprsrcfork:
1600#endif
91447636 1601
2d21ac55
A
1602#if CONFIG_MACF
1603
5ba3f43e 1604 /*
2d21ac55
A
1605 * Name cache provides authorization caching (see below)
1606 * that will short circuit MAC checks in lookup().
1607 * We must perform MAC check here. On denial
1608 * dp_authorized will remain 0 and a second check will
1609 * be performed in lookup().
1610 */
1611 if (!(cnp->cn_flags & DONOTAUTH)) {
1612 error = mac_vnode_check_lookup(ctx, dp, cnp);
1613 if (error) {
b0d623f7 1614 NAME_CACHE_UNLOCK();
4a3eedf9 1615 goto errorout;
2d21ac55
A
1616 }
1617 }
1618#endif /* MAC */
813fb2f6
A
1619 if (ttl_enabled &&
1620 (dp->v_mount->mnt_authcache_ttl == 0 ||
1621 ((tv.tv_sec - dp->v_cred_timestamp) > dp->v_mount->mnt_authcache_ttl))) {
0a7de745 1622 break;
813fb2f6 1623 }
91447636 1624
2d21ac55
A
1625 /*
1626 * NAME_CACHE_LOCK holds these fields stable
490019cf
A
1627 *
1628 * We can't cache KAUTH_VNODE_SEARCHBYANYONE for root correctly
39037602 1629 * so we make an ugly check for root here. root is always
490019cf
A
1630 * allowed and breaking out of here only to find out that it is
1631 * authorized by virtue of being root is very very expensive.
813fb2f6
A
1632 * However, the check for not root is valid only for filesystems
1633 * which use local authorization.
1634 *
1635 * XXX: Remove the check for root when we can reliably set
1636 * KAUTH_VNODE_SEARCHBYANYONE as root.
2d21ac55
A
1637 */
1638 if ((dp->v_cred != ucred || !(dp->v_authorized_actions & KAUTH_VNODE_SEARCH)) &&
490019cf 1639 !(dp->v_authorized_actions & KAUTH_VNODE_SEARCHBYANYONE) &&
0a7de745
A
1640 (ttl_enabled || !vfs_context_issuser(ctx))) {
1641 break;
813fb2f6 1642 }
2d21ac55 1643
91447636
A
1644 /*
1645 * indicate that we're allowed to traverse this directory...
1646 * even if we fail the cache lookup or decide to bail for
1647 * some other reason, this information is valid and is used
1648 * to avoid doing a vnode_authorize before the call to VNOP_LOOKUP
1649 */
1650 *dp_authorized = 1;
1651
0a7de745
A
1652 if ((cnp->cn_flags & (ISLASTCN | ISDOTDOT))) {
1653 if (cnp->cn_nameiop != LOOKUP) {
6d2010ae 1654 break;
0a7de745
A
1655 }
1656 if (cnp->cn_flags & LOCKPARENT) {
6d2010ae 1657 break;
0a7de745
A
1658 }
1659 if (cnp->cn_flags & NOCACHE) {
6d2010ae 1660 break;
0a7de745 1661 }
743b1565 1662 if (cnp->cn_flags & ISDOTDOT) {
cb323159
A
1663#if CONFIG_FIRMLINKS
1664 if (dp->v_fmlink && (dp->v_flag & VFMLINKTARGET)) {
1665 dp = dp->v_fmlink;
1666 }
1667#endif
1668
2d21ac55
A
1669 /*
1670 * Force directory hardlinks to go to
1671 * file system for ".." requests.
1672 */
d9a64523 1673 if ((dp->v_flag & VISHARDLINK)) {
2d21ac55
A
1674 break;
1675 }
743b1565
A
1676 /*
1677 * Quit here only if we can't use
1678 * the parent directory pointer or
1679 * don't have one. Otherwise, we'll
1680 * use it below.
1681 */
0a7de745 1682 if ((dp->v_flag & VROOT) ||
6601e61a 1683 dp == ndp->ni_rootdir ||
0a7de745 1684 dp->v_parent == NULLVP) {
743b1565 1685 break;
0a7de745 1686 }
743b1565
A
1687 }
1688 }
1689
39236c6e
A
1690 if ((cnp->cn_flags & CN_SKIPNAMECACHE)) {
1691 /*
1692 * Force lookup to go to the filesystem with
1693 * all cnp fields set up.
1694 */
1695 break;
1696 }
1697
743b1565
A
1698 /*
1699 * "." and ".." aren't supposed to be cached, so check
1700 * for them before checking the cache.
1701 */
0a7de745 1702 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
743b1565 1703 vp = dp;
0a7de745 1704 } else if ((cnp->cn_flags & ISDOTDOT)) {
39037602
A
1705 /*
1706 * If this is a chrooted process, we need to check if
1707 * the process is trying to break out of its chrooted
1708 * jail. We do that by trying to determine if dp is
1709 * a subdirectory of ndp->ni_rootdir. If we aren't
1710 * able to determine that by the v_parent pointers, we
1711 * will leave the fast path.
1712 *
1713 * Since this function may see dotdot components
1714 * many times and it has the name cache lock held for
1715 * the entire duration, we optimise this by doing this
1716 * check only once per cache_lookup_path call.
1717 * If dotdotchecked is set, it means we've done this
1718 * check once already and don't need to do it again.
1719 */
1720 if (!dotdotchecked && (ndp->ni_rootdir != rootvnode)) {
1721 vnode_t tvp = dp;
1722 boolean_t defer = FALSE;
1723 boolean_t is_subdir = FALSE;
1724
1725 defer = cache_check_vnode_issubdir(tvp,
1726 ndp->ni_rootdir, &is_subdir, &tvp);
1727
1728 if (defer) {
1729 /* defer to Filesystem */
1730 break;
1731 } else if (!is_subdir) {
1732 /*
1733 * This process is trying to break out
1734 * of its chrooted jail, so all its
1735 * dotdot accesses will be translated to
1736 * its root directory.
1737 */
1738 vp = ndp->ni_rootdir;
1739 } else {
1740 /*
1741 * All good, let this dotdot access
1742 * proceed normally
1743 */
1744 vp = dp->v_parent;
1745 }
1746 dotdotchecked = TRUE;
1747 } else {
1748 vp = dp->v_parent;
1749 }
1750 } else {
0a7de745 1751 if ((vp = cache_lookup_locked(dp, cnp)) == NULLVP) {
743b1565 1752 break;
0a7de745 1753 }
91447636 1754
0a7de745 1755 if ((vp->v_flag & VISHARDLINK)) {
b0d623f7
A
1756 /*
1757 * The file system wants a VNOP_LOOKUP on this vnode
1758 */
1759 vp = NULL;
1760 break;
1761 }
1762 }
0a7de745
A
1763 if ((cnp->cn_flags & ISLASTCN)) {
1764 break;
1765 }
91447636
A
1766
1767 if (vp->v_type != VDIR) {
0a7de745
A
1768 if (vp->v_type != VLNK) {
1769 vp = NULL;
1770 }
1771 break;
91447636 1772 }
6d2010ae 1773
0a7de745 1774 if ((mp = vp->v_mountedhere) && ((cnp->cn_flags & NOCROSSMOUNT) == 0)) {
3e170ce0
A
1775 vnode_t tmp_vp = mp->mnt_realrootvp;
1776 if (tmp_vp == NULLVP || mp->mnt_generation != mount_generation ||
0a7de745 1777 mp->mnt_realrootvp_vid != tmp_vp->v_id) {
3e170ce0 1778 break;
0a7de745 1779 }
3e170ce0 1780 vp = tmp_vp;
2d21ac55 1781 }
6d2010ae
A
1782
1783#if CONFIG_TRIGGERS
1784 /*
1785 * After traversing all mountpoints stacked here, if we have a
0a7de745 1786 * trigger in hand, resolve it. Note that we don't need to
6d2010ae
A
1787 * leave the fast path if the mount has already happened.
1788 */
0a7de745 1789 if (vp->v_resolve) {
6d2010ae 1790 break;
0a7de745 1791 }
6d2010ae
A
1792#endif /* CONFIG_TRIGGERS */
1793
1794
91447636
A
1795 dp = vp;
1796 vp = NULLVP;
1797
1798 cnp->cn_nameptr = ndp->ni_next + 1;
1799 ndp->ni_pathlen--;
1800 while (*cnp->cn_nameptr == '/') {
0a7de745 1801 cnp->cn_nameptr++;
91447636
A
1802 ndp->ni_pathlen--;
1803 }
1804 }
0a7de745
A
1805 if (vp != NULLVP) {
1806 vvid = vp->v_id;
1807 }
91447636 1808 vid = dp->v_id;
0a7de745 1809
0c530ab8 1810 NAME_CACHE_UNLOCK();
91447636 1811
91447636
A
1812 if ((vp != NULLVP) && (vp->v_type != VLNK) &&
1813 ((cnp->cn_flags & (ISLASTCN | LOCKPARENT | WANTPARENT | SAVESTART)) == ISLASTCN)) {
0a7de745
A
1814 /*
1815 * if we've got a child and it's the last component, and
91447636
A
1816 * the lookup doesn't need to return the parent then we
1817 * can skip grabbing an iocount on the parent, since all
1818 * we're going to do with it is a vnode_put just before
1819 * we return from 'lookup'. If it's a symbolic link,
1820 * we need the parent in case the link happens to be
1821 * a relative pathname.
1822 */
0a7de745
A
1823 tdp = dp;
1824 dp = NULLVP;
91447636
A
1825 } else {
1826need_dp:
4a3eedf9 1827 /*
91447636 1828 * return the last directory we looked at
4a3eedf9
A
1829 * with an io reference held. If it was the one passed
1830 * in as a result of the last iteration of VNOP_LOOKUP,
1831 * it should already hold an io ref. No need to increase ref.
91447636 1832 */
0a7de745 1833 if (last_dp != dp) {
4a3eedf9
A
1834 if (dp == ndp->ni_usedvp) {
1835 /*
1836 * if this vnode matches the one passed in via USEDVP
1837 * then this context already holds an io_count... just
1838 * use vnode_get to get an extra ref for lookup to play
1839 * with... can't use the getwithvid variant here because
1840 * it will block behind a vnode_drain which would result
1841 * in a deadlock (since we already own an io_count that the
1842 * vnode_drain is waiting on)... vnode_get grabs the io_count
1843 * immediately w/o waiting... it always succeeds
1844 */
1845 vnode_get(dp);
39236c6e 1846 } else if ((error = vnode_getwithvid_drainok(dp, vid))) {
4a3eedf9
A
1847 /*
1848 * failure indicates the vnode
1849 * changed identity or is being
1850 * TERMINATED... in either case
1851 * punt this lookup.
0a7de745 1852 *
4a3eedf9
A
1853 * don't necessarily return ENOENT, though, because
1854 * we really want to go back to disk and make sure it's
1855 * there or not if someone else is changing this
39236c6e
A
1856 * vnode. That being said, the one case where we do want
1857 * to return ENOENT is when the vnode's mount point is
1858 * in the process of unmounting and we might cause a deadlock
1859 * in our attempt to take an iocount. An ENODEV error return
1860 * from vnode_get* is an indication of this, but we change it to
1861 * ENOENT for upper layers.
4a3eedf9 1862 */
39236c6e
A
1863 if (error == ENODEV) {
1864 error = ENOENT;
1865 } else {
1866 error = ERECYCLE;
1867 }
4a3eedf9
A
1868 goto errorout;
1869 }
91447636
A
1870 }
1871 }
1872 if (vp != NULLVP) {
0a7de745
A
1873 if ((vnode_getwithvid_drainok(vp, vvid))) {
1874 vp = NULLVP;
91447636 1875
0a7de745 1876 /*
91447636
A
1877 * can't get a reference on the vp we'd like
1878 * to return... if we didn't grab a reference
1879 * on the directory (due to fast path bypass),
1880 * then we need to do it now... we can't return
0a7de745 1881 * with both ni_dvp and ni_vp NULL, and no
91447636
A
1882 * error condition
1883 */
1884 if (dp == NULLVP) {
0a7de745 1885 dp = tdp;
91447636
A
1886 goto need_dp;
1887 }
1888 }
1889 }
6d2010ae 1890
91447636
A
1891 ndp->ni_dvp = dp;
1892 ndp->ni_vp = vp;
1893
6d2010ae
A
1894#if CONFIG_TRIGGERS
1895 trigger_vp = vp ? vp : dp;
1896 if ((error == 0) && (trigger_vp != NULLVP) && vnode_isdir(trigger_vp)) {
1897 error = vnode_trigger_resolve(trigger_vp, ndp, ctx);
1898 if (error) {
0a7de745 1899 if (vp) {
6d2010ae 1900 vnode_put(vp);
0a7de745
A
1901 }
1902 if (dp) {
6d2010ae 1903 vnode_put(dp);
0a7de745 1904 }
6d2010ae
A
1905 goto errorout;
1906 }
0a7de745 1907 }
6d2010ae
A
1908#endif /* CONFIG_TRIGGERS */
1909
4a3eedf9 1910errorout:
0a7de745 1911 /*
4a3eedf9
A
1912 * If we came into cache_lookup_path after an iteration of the lookup loop that
1913 * resulted in a call to VNOP_LOOKUP, then VNOP_LOOKUP returned a vnode with an io ref
0a7de745 1914 * on it. It is now the job of cache_lookup_path to drop the ref on this vnode
4a3eedf9
A
1915 * when it is no longer needed. If we get to this point, and last_dp is not NULL
1916 * and it is ALSO not the dvp we want to return to the caller of this function, it MUST be
0a7de745 1917 * the case that we got to a subsequent path component and this previous vnode is
4a3eedf9
A
1918 * no longer needed. We can then drop the io ref on it.
1919 */
0a7de745 1920 if ((last_dp != NULLVP) && (last_dp != ndp->ni_dvp)) {
4a3eedf9
A
1921 vnode_put(last_dp);
1922 }
0a7de745 1923
4a3eedf9
A
1924 // error was initialized to 0 and should still be 0 if no error cases occurred.
1925 return error;
91447636
A
1926}
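
/*
 * Illustrative sketch (hypothetical helper, not part of the original code):
 * the snapshot-and-revalidate pattern cache_lookup_path() relies on above.
 * The candidate vnode's v_id is captured while the name cache lock is held;
 * once the lock is dropped, vnode_getwithvid() takes an iocount only if the
 * vnode still carries that id, i.e. it was not recycled in the meantime.
 * cnp->cn_hash is assumed to have been seeded, as cache_lookup() does below.
 */
static vnode_t
sketch_lookup_and_promote(vnode_t dvp, struct componentname *cnp)
{
        vnode_t vp;
        uint32_t vid;

        NAME_CACHE_LOCK_SHARED();
        if ((vp = cache_lookup_locked(dvp, cnp)) == NULLVP) {
                NAME_CACHE_UNLOCK();
                return NULLVP;
        }
        vid = vp->v_id;
        NAME_CACHE_UNLOCK();

        if (vnode_getwithvid(vp, vid)) {
                /* identity changed or vnode is being reclaimed... caller falls back to VNOP_LOOKUP */
                return NULLVP;
        }
        return vp;
}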
1927
1928
1929static vnode_t
1930cache_lookup_locked(vnode_t dvp, struct componentname *cnp)
1931{
2d21ac55
A
1932 struct namecache *ncp;
1933 struct nchashhead *ncpp;
1934 long namelen = cnp->cn_namelen;
fe8ab488 1935 unsigned int hashval = cnp->cn_hash;
0a7de745 1936
6d2010ae
A
1937 if (nc_disabled) {
1938 return NULL;
1939 }
1940
91447636
A
1941 ncpp = NCHHASH(dvp, cnp->cn_hash);
1942 LIST_FOREACH(ncp, ncpp, nc_hash) {
0a7de745
A
1943 if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
1944 if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) {
1945 break;
1946 }
91447636
A
1947 }
1948 }
0c530ab8 1949 if (ncp == 0) {
91447636
A
1950 /*
1951 * We failed to find an entry
1952 */
0c530ab8 1953 NCHSTAT(ncs_miss);
0a7de745 1954 return NULL;
0c530ab8
A
1955 }
1956 NCHSTAT(ncs_goodhits);
91447636 1957
0a7de745 1958 return ncp->nc_vp;
1c79356b
A
1959}
1960
55e303ae 1961
39236c6e 1962unsigned int hash_string(const char *cp, int len);
55e303ae
A
1963//
1964// Have to take a len argument because we may only need to
1965// hash part of a componentname.
1966//
39236c6e 1967unsigned int
91447636 1968hash_string(const char *cp, int len)
55e303ae 1969{
0a7de745
A
1970 unsigned hash = 0;
1971
1972 if (len) {
1973 while (len--) {
1974 hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
1975 }
1976 } else {
1977 while (*cp != '\0') {
1978 hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
1979 }
1980 }
1981 /*
1982 * the crc generator can legitimately generate
1983 * a 0... however, 0 for us means that we
1984 * haven't computed a hash, so use 1 instead
1985 */
1986 if (hash == 0) {
1987 hash = 1;
1988 }
1989 return hash;
55e303ae
A
1990}
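
/*
 * Illustrative sketch (hypothetical helper, not part of the original code):
 * the two calling conventions of hash_string().  Component names pass an
 * explicit length (e.g. cache_lookup() below), while the string table code
 * passes len == 0 to hash up to the terminating NUL (e.g. vfs_removename());
 * for a NUL-terminated name of that exact length both forms agree.
 */
static void
sketch_hash_both_ways(const char *name, int len)
{
        unsigned int by_len = hash_string(name, len);   /* hash exactly 'len' bytes */
        unsigned int by_nul = hash_string(name, 0);     /* hash up to the NUL */

        /* when name is NUL-terminated and len == strlen(name), these agree */
        if (by_len != by_nul) {
                printf("hash_string mismatch for %s\n", name);
        }
}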
1991
1992
1c79356b 1993/*
0a7de745 1994 * Lookup an entry in the cache
1c79356b 1995 *
0a7de745 1996 * We don't do this if the segment name is long, simply so the cache
1c79356b
A
1997 * can avoid holding long names (which would either waste space, or
1998 * add greatly to the complexity).
1999 *
2000 * Lookup is called with dvp pointing to the directory to search,
2001 * cnp pointing to the name of the entry being sought. If the lookup
2002 * succeeds, the vnode is returned in *vpp, and a status of -1 is
2003 * returned. If the lookup determines that the name does not exist
2004 * (negative caching), a status of ENOENT is returned. If the lookup
2005 * fails, a status of zero is returned.
2006 */
2007
2008int
2d21ac55 2009cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
1c79356b 2010{
2d21ac55
A
2011 struct namecache *ncp;
2012 struct nchashhead *ncpp;
2013 long namelen = cnp->cn_namelen;
b0d623f7 2014 unsigned int hashval;
0a7de745 2015 boolean_t have_exclusive = FALSE;
91447636 2016 uint32_t vid;
0a7de745 2017 vnode_t vp;
1c79356b 2018
0a7de745 2019 if (cnp->cn_hash == 0) {
b0d623f7 2020 cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
0a7de745 2021 }
fe8ab488 2022 hashval = cnp->cn_hash;
b0d623f7 2023
6d2010ae
A
2024 if (nc_disabled) {
2025 return 0;
2026 }
2027
0c530ab8 2028 NAME_CACHE_LOCK_SHARED();
1c79356b 2029
0c530ab8 2030relook:
b0d623f7 2031 ncpp = NCHHASH(dvp, cnp->cn_hash);
91447636 2032 LIST_FOREACH(ncp, ncpp, nc_hash) {
0a7de745
A
2033 if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
2034 if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) {
2035 break;
2036 }
55e303ae 2037 }
1c79356b 2038 }
1c79356b
A
2039 /* We failed to find an entry */
2040 if (ncp == 0) {
0c530ab8
A
2041 NCHSTAT(ncs_miss);
2042 NAME_CACHE_UNLOCK();
0a7de745 2043 return 0;
1c79356b
A
2044 }
2045
2046 /* We don't want to have an entry, so dump it */
2047 if ((cnp->cn_flags & MAKEENTRY) == 0) {
0a7de745
A
2048 if (have_exclusive == TRUE) {
2049 NCHSTAT(ncs_badhits);
0c530ab8
A
2050 cache_delete(ncp, 1);
2051 NAME_CACHE_UNLOCK();
0a7de745 2052 return 0;
0c530ab8
A
2053 }
2054 NAME_CACHE_UNLOCK();
2055 NAME_CACHE_LOCK();
2056 have_exclusive = TRUE;
2057 goto relook;
0a7de745 2058 }
91447636 2059 vp = ncp->nc_vp;
1c79356b
A
2060
2061 /* We found a "positive" match, return the vnode */
0a7de745 2062 if (vp) {
0c530ab8 2063 NCHSTAT(ncs_goodhits);
91447636
A
2064
2065 vid = vp->v_id;
0c530ab8 2066 NAME_CACHE_UNLOCK();
91447636
A
2067
2068 if (vnode_getwithvid(vp, vid)) {
0c530ab8 2069#if COLLECT_STATS
0a7de745 2070 NAME_CACHE_LOCK();
0c530ab8
A
2071 NCHSTAT(ncs_badvid);
2072 NAME_CACHE_UNLOCK();
2073#endif
0a7de745 2074 return 0;
91447636
A
2075 }
2076 *vpp = vp;
0a7de745 2077 return -1;
1c79356b
A
2078 }
2079
2080 /* We found a negative match, and want to create it, so purge */
91447636 2081 if (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) {
0a7de745
A
2082 if (have_exclusive == TRUE) {
2083 NCHSTAT(ncs_badhits);
0c530ab8
A
2084 cache_delete(ncp, 1);
2085 NAME_CACHE_UNLOCK();
0a7de745 2086 return 0;
0c530ab8
A
2087 }
2088 NAME_CACHE_UNLOCK();
2089 NAME_CACHE_LOCK();
2090 have_exclusive = TRUE;
2091 goto relook;
1c79356b
A
2092 }
2093
2094 /*
2095 * We found a "negative" match; returning ENOENT notifies the client of this match.
1c79356b 2096 */
0c530ab8 2097 NCHSTAT(ncs_neghits);
91447636 2098
0c530ab8 2099 NAME_CACHE_UNLOCK();
0a7de745 2100 return ENOENT;
1c79356b
A
2101}
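
/*
 * Illustrative sketch (hypothetical caller, not part of the original code):
 * consuming the three cache_lookup() return values described above.  A hit
 * returns -1 with *vpp held (iocount), a cached negative entry returns
 * ENOENT, and 0 means the cache has no answer and the caller must fall
 * through to VNOP_LOOKUP.
 */
static int
sketch_try_cache_first(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
        switch (cache_lookup(dvp, vpp, cnp)) {
        case -1:
                /* positive hit... *vpp came back with an iocount held */
                return 0;
        case ENOENT:
                /* negative hit... the name is known not to exist */
                return ENOENT;
        default:
                /* miss... a real caller would invoke VNOP_LOOKUP here */
                *vpp = NULLVP;
                return 0;
        }
}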
2102
b0d623f7
A
2103const char *
2104cache_enter_create(vnode_t dvp, vnode_t vp, struct componentname *cnp)
2105{
2106 const char *strname;
2107
0a7de745
A
2108 if (cnp->cn_hash == 0) {
2109 cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2110 }
b0d623f7
A
2111
2112 /*
2113 * grab 2 references on the string entered
2114 * one for the cache_enter_locked to consume
2115 * and the second to be consumed by v_name (vnode_create call point)
2116 */
2117 strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, TRUE, 0);
2118
2119 NAME_CACHE_LOCK();
2120
2121 cache_enter_locked(dvp, vp, cnp, strname);
2122
2123 NAME_CACHE_UNLOCK();
2124
0a7de745 2125 return strname;
b0d623f7
A
2126}
2127
2d21ac55 2128
1c79356b 2129/*
2d21ac55
A
2130 * Add an entry to the cache...
2131 * but first check to see if the directory
2132 * that this entry is to be associated with has
2133 * had any cache_purges applied since we took
2134 * our identity snapshot... this check needs to
2135 * be done behind the name cache lock
1c79356b
A
2136 */
2137void
2d21ac55 2138cache_enter_with_gen(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int gen)
1c79356b 2139{
0a7de745
A
2140 if (cnp->cn_hash == 0) {
2141 cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2142 }
91447636 2143
0c530ab8 2144 NAME_CACHE_LOCK();
1c79356b 2145
0a7de745
A
2146 if (dvp->v_nc_generation == gen) {
2147 (void)cache_enter_locked(dvp, vp, cnp, NULL);
2148 }
2d21ac55
A
2149
2150 NAME_CACHE_UNLOCK();
2151}
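
/*
 * Illustrative sketch (hypothetical caller, not part of the original code):
 * the generation handshake expected by cache_enter_with_gen().  The caller
 * snapshots dvp->v_nc_generation before its unlocked lookup work; purging a
 * child of dvp bumps that generation (see cache_purge_locked() below), so a
 * stale deferred entry is silently skipped rather than inserted.
 */
static void
sketch_deferred_cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
        int gen = dvp->v_nc_generation;         /* identity snapshot */

        /* ... VNOP_LOOKUP or other work performed without the name cache lock ... */

        cache_enter_with_gen(dvp, vp, cnp, gen);
}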
2152
2153
2154/*
2155 * Add an entry to the cache.
2156 */
2157void
2158cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2159{
b0d623f7
A
2160 const char *strname;
2161
0a7de745
A
2162 if (cnp->cn_hash == 0) {
2163 cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2164 }
2d21ac55 2165
b0d623f7
A
2166 /*
2167 * grab 1 reference on the string entered
2168 * for the cache_enter_locked to consume
2169 */
2170 strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0);
2171
2d21ac55
A
2172 NAME_CACHE_LOCK();
2173
b0d623f7 2174 cache_enter_locked(dvp, vp, cnp, strname);
2d21ac55
A
2175
2176 NAME_CACHE_UNLOCK();
2177}
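
/*
 * Illustrative sketch (hypothetical call site, not part of the original
 * code): how a file system's lookup path would typically feed this cache.
 * A successful lookup enters the child vnode; a failed lookup can enter a
 * negative entry by passing NULLVP, which cache_enter_locked() treats as
 * "-ve" caching.  Names are cached only when the caller set MAKEENTRY.
 */
static void
sketch_enter_lookup_result(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int lookup_error)
{
        if ((cnp->cn_flags & MAKEENTRY) == 0) {
                return;                         /* caller asked us not to cache this name */
        }
        if (lookup_error == 0) {
                cache_enter(dvp, vp, cnp);      /* positive entry */
        } else if (lookup_error == ENOENT) {
                cache_enter(dvp, NULLVP, cnp);  /* negative entry */
        }
}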
2178
2179
2180static void
b0d623f7 2181cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, const char *strname)
2d21ac55 2182{
0a7de745 2183 struct namecache *ncp, *negp;
2d21ac55
A
2184 struct nchashhead *ncpp;
2185
0a7de745 2186 if (nc_disabled) {
6d2010ae 2187 return;
0a7de745 2188 }
6d2010ae 2189
2d21ac55
A
2190 /*
2191 * if the entry is for -ve caching, vp is null
2192 */
91447636 2193 if ((vp != NULLVP) && (LIST_FIRST(&vp->v_nclinks))) {
0a7de745 2194 /*
91447636
A
2195 * someone beat us to the punch..
2196 * this vnode is already in the cache
2197 */
0a7de745 2198 if (strname != NULL) {
b0d623f7 2199 vfs_removename(strname);
0a7de745 2200 }
0c530ab8 2201 return;
91447636 2202 }
1c79356b
A
2203 /*
2204 * We allocate a new entry if we are less than the maximum
91447636
A
2205 * allowed and the one at the front of the list is in use.
2206 * Otherwise we use the one at the front of the list.
1c79356b 2207 */
91447636
A
2208 if (numcache < desiredNodes &&
2209 ((ncp = nchead.tqh_first) == NULL ||
0a7de745 2210 ncp->nc_hash.le_prev != 0)) {
91447636
A
2211 /*
2212 * Allocate one more entry
2213 */
b0d623f7 2214 ncp = (struct namecache *)_MALLOC_ZONE(sizeof(*ncp), M_CACHE, M_WAITOK);
1c79356b 2215 numcache++;
91447636
A
2216 } else {
2217 /*
2218 * reuse an old entry
2219 */
0a7de745 2220 ncp = TAILQ_FIRST(&nchead);
91447636
A
2221 TAILQ_REMOVE(&nchead, ncp, nc_entry);
2222
1c79356b 2223 if (ncp->nc_hash.le_prev != 0) {
0a7de745
A
2224 /*
2225 * still in use... we need to
2226 * delete it before re-using it
2227 */
0c530ab8 2228 NCHSTAT(ncs_stolen);
91447636 2229 cache_delete(ncp, 0);
1c79356b 2230 }
1c79356b 2231 }
0c530ab8 2232 NCHSTAT(ncs_enters);
1c79356b
A
2233
2234 /*
2235 * Fill in cache info, if vp is NULL this is a "negative" cache entry.
1c79356b
A
2236 */
2237 ncp->nc_vp = vp;
1c79356b 2238 ncp->nc_dvp = dvp;
91447636 2239 ncp->nc_hashval = cnp->cn_hash;
91447636 2240
0a7de745 2241 if (strname == NULL) {
b0d623f7 2242 ncp->nc_name = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0);
0a7de745 2243 } else {
b0d623f7 2244 ncp->nc_name = strname;
0a7de745 2245 }
3e170ce0
A
2246
2247 //
2248 // If the bytes of the name associated with the vnode differ,
2249 // use the name associated with the vnode since the file system
2250 // may have set that explicitly in the case of a lookup on a
2251 // case-insensitive file system where the case of the looked up
2252 // name differs from what is on disk. For more details, see:
2253 // <rdar://problem/8044697> FSEvents doesn't always decompose diacritical unicode chars in the paths of the changed directories
0a7de745 2254 //
3e170ce0
A
2255 const char *vn_name = vp ? vp->v_name : NULL;
2256 unsigned int len = vn_name ? strlen(vn_name) : 0;
2257 if (vn_name && ncp && ncp->nc_name && strncmp(ncp->nc_name, vn_name, len) != 0) {
2258 unsigned int hash = hash_string(vn_name, len);
0a7de745 2259
3e170ce0
A
2260 vfs_removename(ncp->nc_name);
2261 ncp->nc_name = add_name_internal(vn_name, len, hash, FALSE, 0);
2262 ncp->nc_hashval = hash;
2263 }
2264
91447636
A
2265 /*
2266 * make us the newest entry in the cache
2267 * i.e. we'll be the last to be stolen
2268 */
2269 TAILQ_INSERT_TAIL(&nchead, ncp, nc_entry);
2270
55e303ae 2271 ncpp = NCHHASH(dvp, cnp->cn_hash);
1c79356b
A
2272#if DIAGNOSTIC
2273 {
2d21ac55 2274 struct namecache *p;
1c79356b 2275
0a7de745
A
2276 for (p = ncpp->lh_first; p != 0; p = p->nc_hash.le_next) {
2277 if (p == ncp) {
1c79356b 2278 panic("cache_enter: duplicate");
0a7de745
A
2279 }
2280 }
1c79356b
A
2281 }
2282#endif
91447636
A
2283 /*
2284 * make us available to be found via lookup
2285 */
1c79356b 2286 LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
91447636
A
2287
2288 if (vp) {
0a7de745
A
2289 /*
2290 * add to the list of name cache entries
2291 * that point at vp
2292 */
91447636
A
2293 LIST_INSERT_HEAD(&vp->v_nclinks, ncp, nc_un.nc_link);
2294 } else {
0a7de745 2295 /*
91447636 2296 * this is a negative cache entry (vp == NULL)
fe8ab488 2297 * stick it on the negative cache list.
91447636 2298 */
0a7de745
A
2299 TAILQ_INSERT_TAIL(&neghead, ncp, nc_un.nc_negentry);
2300
0c530ab8 2301 ncs_negtotal++;
91447636 2302
0c530ab8 2303 if (ncs_negtotal > desiredNegNodes) {
0a7de745
A
2304 /*
2305 * if we've reached our desired limit
2306 * of negative cache entries, delete
2307 * the oldest
2308 */
2309 negp = TAILQ_FIRST(&neghead);
91447636
A
2310 cache_delete(negp, 1);
2311 }
2312 }
2313 /*
2314 * add us to the list of name cache entries that
2315 * are children of dvp
2316 */
0a7de745 2317 if (vp) {
39037602 2318 TAILQ_INSERT_TAIL(&dvp->v_ncchildren, ncp, nc_child);
0a7de745 2319 } else {
39037602 2320 TAILQ_INSERT_HEAD(&dvp->v_ncchildren, ncp, nc_child);
0a7de745 2321 }
1c79356b
A
2322}
2323
91447636
A
2324
2325/*
2326 * Initialize CRC-32 remainder table.
2327 */
0a7de745
A
2328static void
2329init_crc32(void)
91447636 2330{
0a7de745 2331 /*
91447636
A
2332 * the CRC-32 generator polynomial is:
2333 * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^10
2334 * + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
2335 */
0a7de745
A
2336 unsigned int crc32_polynomial = 0x04c11db7;
2337 unsigned int i, j;
91447636
A
2338
2339 /*
2340 * pre-calculate the CRC-32 remainder for each possible octet encoding
2341 */
0a7de745
A
2342 for (i = 0; i < 256; i++) {
2343 unsigned int crc_rem = i << 24;
91447636 2344
0a7de745
A
2345 for (j = 0; j < 8; j++) {
2346 if (crc_rem & 0x80000000) {
2347 crc_rem = (crc_rem << 1) ^ crc32_polynomial;
2348 } else {
2349 crc_rem = (crc_rem << 1);
2350 }
91447636
A
2351 }
2352 crc32tab[i] = crc_rem;
2353 }
2354}
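
/*
 * Illustrative sketch (hypothetical helper, not part of the original code):
 * the table lookup in hash_string() is the standard MSB-first table-driven
 * form of the shift/xor loop in init_crc32().  Folding one octet the slow
 * way produces the same value as one crc32tab[] lookup.
 */
static unsigned int
sketch_crc32_update_slow(unsigned int hash, unsigned char octet)
{
        unsigned int crc_rem = ((hash >> 24) ^ octet) << 24;
        int j;

        for (j = 0; j < 8; j++) {
                if (crc_rem & 0x80000000) {
                        crc_rem = (crc_rem << 1) ^ 0x04c11db7;
                } else {
                        crc_rem = (crc_rem << 1);
                }
        }
        /* equals crc32tab[(hash >> 24) ^ octet] ^ (hash << 8), as used in hash_string() */
        return crc_rem ^ (hash << 8);
}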
2355
2356
1c79356b
A
2357/*
2358 * Name cache initialization, from vfs_init() when we are booting
2359 */
2360void
91447636
A
2361nchinit(void)
2362{
0a7de745 2363 int i;
b0d623f7 2364
91447636
A
2365 desiredNegNodes = (desiredvnodes / 10);
2366 desiredNodes = desiredvnodes + desiredNegNodes;
2367
2368 TAILQ_INIT(&nchead);
2369 TAILQ_INIT(&neghead);
2370
2371 init_crc32();
2372
0a7de745 2373 nchashtbl = hashinit(MAX(CONFIG_NC_HASH, (2 * desiredNodes)), M_CACHE, &nchash);
91447636
A
2374 nchashmask = nchash;
2375 nchash++;
2376
2377 init_string_table();
0a7de745 2378
b0d623f7 2379 /* Allocate name cache lock group attribute and group */
0a7de745
A
2380 namecache_lck_grp_attr = lck_grp_attr_alloc_init();
2381
2382 namecache_lck_grp = lck_grp_alloc_init("Name Cache", namecache_lck_grp_attr);
91447636 2383
b0d623f7 2384 /* Allocate name cache lock attribute */
91447636 2385 namecache_lck_attr = lck_attr_alloc_init();
91447636 2386
b0d623f7 2387 /* Allocate name cache lock */
0c530ab8 2388 namecache_rw_lock = lck_rw_alloc_init(namecache_lck_grp, namecache_lck_attr);
91447636
A
2389
2390
b0d623f7 2391 /* Allocate string cache lock group attribute and group */
0a7de745
A
2392 strcache_lck_grp_attr = lck_grp_attr_alloc_init();
2393
2394 strcache_lck_grp = lck_grp_alloc_init("String Cache", strcache_lck_grp_attr);
b0d623f7 2395
b0d623f7
A
2396 /* Allocate string cache lock attribute */
2397 strcache_lck_attr = lck_attr_alloc_init();
2398
2399 /* Allocate string cache lock */
2400 strtable_rw_lock = lck_rw_alloc_init(strcache_lck_grp, strcache_lck_attr);
2401
0a7de745 2402 for (i = 0; i < NUM_STRCACHE_LOCKS; i++) {
b0d623f7 2403 lck_mtx_init(&strcache_mtx_locks[i], strcache_lck_grp, strcache_lck_attr);
0a7de745 2404 }
91447636
A
2405}
2406
0c530ab8
A
2407void
2408name_cache_lock_shared(void)
2409{
2410 lck_rw_lock_shared(namecache_rw_lock);
2411}
2412
91447636
A
2413void
2414name_cache_lock(void)
1c79356b 2415{
0c530ab8 2416 lck_rw_lock_exclusive(namecache_rw_lock);
91447636 2417}
55e303ae 2418
91447636
A
2419void
2420name_cache_unlock(void)
2421{
0c530ab8 2422 lck_rw_done(namecache_rw_lock);
1c79356b
A
2423}
2424
55e303ae
A
2425
2426int
d9a64523 2427resize_namecache(int newsize)
55e303ae 2428{
0a7de745
A
2429 struct nchashhead *new_table;
2430 struct nchashhead *old_table;
2431 struct nchashhead *old_head, *head;
2432 struct namecache *entry, *next;
2433 uint32_t i, hashval;
2434 int dNodes, dNegNodes, nelements;
2435 u_long new_size, old_size;
2436
2437 if (newsize < 0) {
2438 return EINVAL;
2439 }
2440
2441 dNegNodes = (newsize / 10);
2442 dNodes = newsize + dNegNodes;
2443 // we don't support shrinking yet
2444 if (dNodes <= desiredNodes) {
2445 return 0;
2446 }
2447
2448 if (os_mul_overflow(dNodes, 2, &nelements)) {
2449 return EINVAL;
2450 }
2451
2452 new_table = hashinit(nelements, M_CACHE, &nchashmask);
2453 new_size = nchashmask + 1;
2454
2455 if (new_table == NULL) {
2456 return ENOMEM;
2457 }
2458
2459 NAME_CACHE_LOCK();
2460 // do the switch!
2461 old_table = nchashtbl;
2462 nchashtbl = new_table;
2463 old_size = nchash;
2464 nchash = new_size;
2465
2466 // walk the old table and insert all the entries into
2467 // the new table
2468 //
2469 for (i = 0; i < old_size; i++) {
2470 old_head = &old_table[i];
2471 for (entry = old_head->lh_first; entry != NULL; entry = next) {
2472 //
2473 // XXXdbg - Beware: this assumes that hash_string() does
2474 // the same thing as what happens in
2475 // lookup() over in vfs_lookup.c
2476 hashval = hash_string(entry->nc_name, 0);
2477 entry->nc_hashval = hashval;
2478 head = NCHHASH(entry->nc_dvp, hashval);
2479
2480 next = entry->nc_hash.le_next;
2481 LIST_INSERT_HEAD(head, entry, nc_hash);
2482 }
2483 }
2484 desiredNodes = dNodes;
2485 desiredNegNodes = dNegNodes;
2486
2487 NAME_CACHE_UNLOCK();
2488 FREE(old_table, M_CACHE);
2489
2490 return 0;
55e303ae
A
2491}
2492
91447636 2493static void
5ba3f43e 2494cache_delete(struct namecache *ncp, int free_entry)
91447636 2495{
0a7de745 2496 NCHSTAT(ncs_deletes);
91447636 2497
0a7de745
A
2498 if (ncp->nc_vp) {
2499 LIST_REMOVE(ncp, nc_un.nc_link);
91447636 2500 } else {
0a7de745
A
2501 TAILQ_REMOVE(&neghead, ncp, nc_un.nc_negentry);
2502 ncs_negtotal--;
91447636 2503 }
0a7de745 2504 TAILQ_REMOVE(&(ncp->nc_dvp->v_ncchildren), ncp, nc_child);
91447636
A
2505
2506 LIST_REMOVE(ncp, nc_hash);
2507 /*
2508 * this field is used to indicate
2509 * that the entry is in use and
0a7de745 2510 * must be deleted before it can
91447636
A
2511 * be reused...
2512 */
2513 ncp->nc_hash.le_prev = NULL;
2514
b0d623f7 2515 vfs_removename(ncp->nc_name);
91447636 2516 ncp->nc_name = NULL;
5ba3f43e 2517 if (free_entry) {
0a7de745 2518 TAILQ_REMOVE(&nchead, ncp, nc_entry);
5ba3f43e
A
2519 FREE_ZONE(ncp, sizeof(*ncp), M_CACHE);
2520 numcache--;
2521 }
91447636
A
2522}
2523
2524
2525/*
0a7de745 2526 * purge the entry associated with the
91447636
A
2527 * specified vnode from the name cache
2528 */
cb323159
A
2529static void
2530cache_purge_locked(vnode_t vp, kauth_cred_t *credp)
91447636 2531{
0a7de745 2532 struct namecache *ncp;
91447636 2533
cb323159 2534 *credp = NULL;
0a7de745
A
2535 if ((LIST_FIRST(&vp->v_nclinks) == NULL) &&
2536 (TAILQ_FIRST(&vp->v_ncchildren) == NULL) &&
2537 (vp->v_cred == NOCRED) &&
2538 (vp->v_parent == NULLVP)) {
2539 return;
2540 }
91447636 2541
0a7de745
A
2542 if (vp->v_parent) {
2543 vp->v_parent->v_nc_generation++;
2544 }
2d21ac55 2545
0a7de745
A
2546 while ((ncp = LIST_FIRST(&vp->v_nclinks))) {
2547 cache_delete(ncp, 1);
2548 }
55e303ae 2549
0a7de745
A
2550 while ((ncp = TAILQ_FIRST(&vp->v_ncchildren))) {
2551 cache_delete(ncp, 1);
2552 }
91447636 2553
2d21ac55
A
2554 /*
2555 * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
2556 */
cb323159 2557 *credp = vp->v_cred;
2d21ac55
A
2558 vp->v_cred = NOCRED;
2559 vp->v_authorized_actions = 0;
cb323159
A
2560}
2561
2562void
2563cache_purge(vnode_t vp)
2564{
2565 kauth_cred_t tcred = NULL;
2566
2567 if ((LIST_FIRST(&vp->v_nclinks) == NULL) &&
2568 (TAILQ_FIRST(&vp->v_ncchildren) == NULL) &&
2569 (vp->v_cred == NOCRED) &&
2570 (vp->v_parent == NULLVP)) {
2571 return;
2572 }
2573
2574 NAME_CACHE_LOCK();
2575
2576 cache_purge_locked(vp, &tcred);
2d21ac55 2577
0c530ab8 2578 NAME_CACHE_UNLOCK();
2d21ac55 2579
cb323159 2580 if (tcred && IS_VALID_CRED(tcred)) {
0a7de745
A
2581 kauth_cred_unref(&tcred);
2582 }
91447636 2583}
55e303ae 2584
1c79356b 2585/*
91447636
A
2586 * Purge all negative cache entries that are children of the
2587 * given vnode. A case-insensitive file system (or any file
2588 * system that has multiple equivalent names for the same
2589 * directory entry) can use this when creating or renaming
2590 * to remove negative entries that may no longer apply.
1c79356b
A
2591 */
2592void
91447636 2593cache_purge_negatives(vnode_t vp)
1c79356b 2594{
b0d623f7 2595 struct namecache *ncp, *next_ncp;
1c79356b 2596
0c530ab8 2597 NAME_CACHE_LOCK();
91447636 2598
39037602 2599 TAILQ_FOREACH_SAFE(ncp, &vp->v_ncchildren, nc_child, next_ncp) {
0a7de745 2600 if (ncp->nc_vp) {
39037602 2601 break;
0a7de745 2602 }
39037602
A
2603
2604 cache_delete(ncp, 1);
2605 }
91447636 2606
0c530ab8 2607 NAME_CACHE_UNLOCK();
1c79356b
A
2608}
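
/*
 * Illustrative sketch (hypothetical call sites, not part of the original
 * code): how file systems typically drive the two purge routines above.
 * Removing or renaming an object purges the entries pointing at its vnode;
 * creating a name on a case-insensitive volume purges negative entries
 * under the parent, since equivalent spellings may now resolve.
 */
static void
sketch_purge_after_namespace_change(vnode_t dvp, vnode_t vp, int created)
{
        if (created) {
                /* stale negative entries for equivalent names no longer apply */
                cache_purge_negatives(dvp);
        } else {
                /* remove/rename: drop name cache entries referencing 'vp' */
                cache_purge(vp);
        }
}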
2609
2610/*
2611 * Flush all entries referencing a particular filesystem.
2612 *
2613 * Since we need to check it anyway, we will flush all the invalid
91447636 2614 * entries at the same time.
1c79356b
A
2615 */
2616void
2d21ac55 2617cache_purgevfs(struct mount *mp)
1c79356b
A
2618{
2619 struct nchashhead *ncpp;
91447636 2620 struct namecache *ncp;
1c79356b 2621
0c530ab8 2622 NAME_CACHE_LOCK();
1c79356b 2623 /* Scan hash tables for applicable entries */
91447636 2624 for (ncpp = &nchashtbl[nchash - 1]; ncpp >= nchashtbl; ncpp--) {
0a7de745 2625restart:
91447636
A
2626 for (ncp = ncpp->lh_first; ncp != 0; ncp = ncp->nc_hash.le_next) {
2627 if (ncp->nc_dvp->v_mount == mp) {
2628 cache_delete(ncp, 0);
2629 goto restart;
1c79356b
A
2630 }
2631 }
2632 }
0c530ab8 2633 NAME_CACHE_UNLOCK();
1c79356b 2634}
55e303ae
A
2635
2636
2637
2638//
2639// String ref routines
2640//
0a7de745 2641static LIST_HEAD(stringhead, string_t) * string_ref_table;
55e303ae 2642static u_long string_table_mask;
0a7de745 2643static uint32_t filled_buckets = 0;
b0d623f7 2644
55e303ae
A
2645
2646typedef struct string_t {
0a7de745
A
2647 LIST_ENTRY(string_t) hash_chain;
2648 const char *str;
2649 uint32_t refcount;
55e303ae
A
2650} string_t;
2651
2652
b0d623f7 2653static void
91447636 2654resize_string_ref_table(void)
55e303ae 2655{
b0d623f7
A
2656 struct stringhead *new_table;
2657 struct stringhead *old_table;
2658 struct stringhead *old_head, *head;
2659 string_t *entry, *next;
2660 uint32_t i, hashval;
2661 u_long new_mask, old_mask;
55e303ae 2662
b0d623f7
A
2663 /*
2664 * need to hold the table lock exclusively
2665 * in order to grow the table... need to recheck
2666 * the need to resize again after we've taken
2667 * the lock exclusively in case some other thread
2668 * beat us to the punch
2669 */
2670 lck_rw_lock_exclusive(strtable_rw_lock);
55e303ae 2671
b0d623f7
A
2672 if (4 * filled_buckets < ((string_table_mask + 1) * 3)) {
2673 lck_rw_done(strtable_rw_lock);
2674 return;
2675 }
2676 new_table = hashinit((string_table_mask + 1) * 2, M_CACHE, &new_mask);
55e303ae 2677
b0d623f7
A
2678 if (new_table == NULL) {
2679 printf("failed to resize the hash table.\n");
2680 lck_rw_done(strtable_rw_lock);
2681 return;
2682 }
2683
2684 // do the switch!
2685 old_table = string_ref_table;
2686 string_ref_table = new_table;
2687 old_mask = string_table_mask;
2688 string_table_mask = new_mask;
0a7de745 2689 filled_buckets = 0;
55e303ae 2690
b0d623f7
A
2691 // walk the old table and insert all the entries into
2692 // the new table
2693 //
2694 for (i = 0; i <= old_mask; i++) {
2695 old_head = &old_table[i];
2696 for (entry = old_head->lh_first; entry != NULL; entry = next) {
2697 hashval = hash_string((const char *)entry->str, 0);
2698 head = &string_ref_table[hashval & string_table_mask];
2699 if (head->lh_first == NULL) {
2700 filled_buckets++;
2701 }
2702 next = entry->hash_chain.le_next;
2703 LIST_INSERT_HEAD(head, entry, hash_chain);
2704 }
55e303ae 2705 }
b0d623f7 2706 lck_rw_done(strtable_rw_lock);
55e303ae 2707
b0d623f7 2708 FREE(old_table, M_CACHE);
55e303ae
A
2709}
2710
2711
2712static void
2713init_string_table(void)
2714{
2d21ac55 2715 string_ref_table = hashinit(CONFIG_VFS_NAMES, M_CACHE, &string_table_mask);
55e303ae
A
2716}
2717
2718
2d21ac55 2719const char *
b0d623f7 2720vfs_addname(const char *name, uint32_t len, u_int hashval, u_int flags)
91447636 2721{
0a7de745 2722 return add_name_internal(name, len, hashval, FALSE, flags);
91447636
A
2723}
2724
b0d623f7 2725
2d21ac55 2726static const char *
b0d623f7 2727add_name_internal(const char *name, uint32_t len, u_int hashval, boolean_t need_extra_ref, __unused u_int flags)
55e303ae 2728{
b0d623f7
A
2729 struct stringhead *head;
2730 string_t *entry;
2731 uint32_t chain_len = 0;
0a7de745
A
2732 uint32_t hash_index;
2733 uint32_t lock_index;
b0d623f7 2734 char *ptr;
0a7de745
A
2735
2736 if (len > MAXPATHLEN) {
39037602 2737 len = MAXPATHLEN;
0a7de745 2738 }
39037602 2739
b0d623f7
A
2740 /*
2741 * if the length already accounts for the null-byte, then
2742 * subtract one so later on we don't index past the end
2743 * of the string.
2744 */
0a7de745 2745 if (len > 0 && name[len - 1] == '\0') {
b0d623f7
A
2746 len--;
2747 }
6d2010ae
A
2748 if (hashval == 0) {
2749 hashval = hash_string(name, len);
2750 }
2751
b0d623f7
A
2752 /*
2753 * take this lock 'shared' to keep the hash stable
2754 * if someone else decides to grow the pool they
2755 * will take this lock exclusively
2756 */
2757 lck_rw_lock_shared(strtable_rw_lock);
55e303ae 2758
b0d623f7
A
2759 /*
2760 * If the table gets more than 3/4 full, resize it
2761 */
2762 if (4 * filled_buckets >= ((string_table_mask + 1) * 3)) {
2763 lck_rw_done(strtable_rw_lock);
55e303ae 2764
b0d623f7
A
2765 resize_string_ref_table();
2766
2767 lck_rw_lock_shared(strtable_rw_lock);
55e303ae 2768 }
b0d623f7
A
2769 hash_index = hashval & string_table_mask;
2770 lock_index = hash_index % NUM_STRCACHE_LOCKS;
2771
2772 head = &string_ref_table[hash_index];
55e303ae 2773
b0d623f7 2774 lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]);
55e303ae 2775
b0d623f7 2776 for (entry = head->lh_first; entry != NULL; chain_len++, entry = entry->hash_chain.le_next) {
5ba3f43e 2777 if (strncmp(entry->str, name, len) == 0 && entry->str[len] == 0) {
b0d623f7
A
2778 entry->refcount++;
2779 break;
2780 }
55e303ae 2781 }
b0d623f7
A
2782 if (entry == NULL) {
2783 lck_mtx_convert_spin(&strcache_mtx_locks[lock_index]);
2784 /*
2785 * it wasn't already there so add it.
2786 */
2787 MALLOC(entry, string_t *, sizeof(string_t) + len + 1, M_TEMP, M_WAITOK);
55e303ae 2788
b0d623f7
A
2789 if (head->lh_first == NULL) {
2790 OSAddAtomic(1, &filled_buckets);
2791 }
2792 ptr = (char *)((char *)entry + sizeof(string_t));
2793 strncpy(ptr, name, len);
2794 ptr[len] = '\0';
2795 entry->str = ptr;
2796 entry->refcount = 1;
2797 LIST_INSERT_HEAD(head, entry, hash_chain);
2798 }
0a7de745 2799 if (need_extra_ref == TRUE) {
b0d623f7 2800 entry->refcount++;
0a7de745
A
2801 }
2802
b0d623f7
A
2803 lck_mtx_unlock(&strcache_mtx_locks[lock_index]);
2804 lck_rw_done(strtable_rw_lock);
2805
2806 return (const char *)entry->str;
55e303ae
A
2807}
2808
b0d623f7 2809
55e303ae 2810int
91447636
A
2811vfs_removename(const char *nameref)
2812{
b0d623f7
A
2813 struct stringhead *head;
2814 string_t *entry;
2815 uint32_t hashval;
0a7de745
A
2816 uint32_t hash_index;
2817 uint32_t lock_index;
2818 int retval = ENOENT;
91447636 2819
b0d623f7 2820 hashval = hash_string(nameref, 0);
91447636 2821
b0d623f7
A
2822 /*
2823 * take this lock 'shared' to keep the hash stable
2824 * if someone else decides to grow the pool they
2825 * will take this lock exclusively
2826 */
2827 lck_rw_lock_shared(strtable_rw_lock);
2828 /*
2829 * must compute the head behind the table lock
2830 * since the size and location of the table
2831 * can change on the fly
2832 */
2833 hash_index = hashval & string_table_mask;
2834 lock_index = hash_index % NUM_STRCACHE_LOCKS;
91447636 2835
b0d623f7 2836 head = &string_ref_table[hash_index];
91447636 2837
b0d623f7 2838 lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]);
55e303ae 2839
b0d623f7
A
2840 for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) {
2841 if (entry->str == nameref) {
2842 entry->refcount--;
55e303ae 2843
b0d623f7
A
2844 if (entry->refcount == 0) {
2845 LIST_REMOVE(entry, hash_chain);
2846
2847 if (head->lh_first == NULL) {
2848 OSAddAtomic(-1, &filled_buckets);
2849 }
2850 } else {
2851 entry = NULL;
2852 }
2853 retval = 0;
2854 break;
2855 }
55e303ae 2856 }
b0d623f7
A
2857 lck_mtx_unlock(&strcache_mtx_locks[lock_index]);
2858 lck_rw_done(strtable_rw_lock);
55e303ae 2859
0a7de745 2860 if (entry != NULL) {
b0d623f7 2861 FREE(entry, M_TEMP);
0a7de745 2862 }
b0d623f7
A
2863
2864 return retval;
55e303ae
A
2865}
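
/*
 * Illustrative sketch (hypothetical helper, not part of the original code):
 * the reference discipline for the shared string table.  vfs_addname()
 * returns a refcounted, interned copy of the name (passing 0 for the hash
 * lets add_name_internal() compute it); every reference obtained this way
 * is eventually dropped with vfs_removename(), which frees the string once
 * its count reaches zero.
 */
static void
sketch_string_ref_roundtrip(const char *name, uint32_t len)
{
        const char *interned = vfs_addname(name, len, 0, 0);

        /* ... keep 'interned' (e.g. as a vnode's v_name) for as long as it is needed ... */

        vfs_removename(interned);
}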
2866
2867
2d21ac55 2868#ifdef DUMP_STRING_TABLE
55e303ae
A
2869void
2870dump_string_table(void)
2871{
0a7de745
A
2872 struct stringhead *head;
2873 string_t *entry;
2874 u_long i;
2875
2876 lck_rw_lock_shared(strtable_rw_lock);
2877
2878 for (i = 0; i <= string_table_mask; i++) {
2879 head = &string_ref_table[i];
2880 for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) {
2881 printf("%6d - %s\n", entry->refcount, entry->str);
2882 }
2883 }
2884 lck_rw_done(strtable_rw_lock);
55e303ae 2885}
0a7de745 2886#endif /* DUMP_STRING_TABLE */