1 /*
2 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Poul-Henning Kamp of the FreeBSD Project.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 *
65 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
66 */
67 /*
68 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69 * support for mandatory and extensible security protections. This notice
70 * is included in support of clause 2.2 (b) of the Apple Public License,
71 * Version 2.0.
72 */
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/time.h>
76 #include <sys/mount_internal.h>
77 #include <sys/vnode_internal.h>
78 #include <miscfs/specfs/specdev.h>
79 #include <sys/namei.h>
80 #include <sys/errno.h>
81 #include <sys/malloc.h>
82 #include <sys/kauth.h>
83 #include <sys/user.h>
84 #include <sys/paths.h>
85 #include <os/overflow.h>
86
87 #if CONFIG_MACF
88 #include <security/mac_framework.h>
89 #endif
90
91 /*
92 * Name caching works as follows:
93 *
94 * Names found by directory scans are retained in a cache
95 * for future reference. It is managed LRU, so frequently
96 * used names will hang around. The cache is indexed by a hash value
97 * obtained from (vp, name) where vp refers to the directory
98 * containing name.
99 *
100 * If it is a "negative" entry (i.e. for a name that is known NOT to
101 * exist) the vnode pointer will be NULL.
102 *
103 * Upon reaching the last segment of a path, if the reference
104 * is for DELETE, or NOCACHE is set (rewrite), and the
105 * name is located in the cache, it will be dropped.
106 */
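/*
 * Illustrative sketch (not from the original source): how a cache bucket
 * is selected from the (directory vnode, name) pair described above.
 * This mirrors the NCHHASH() macro defined later in this file; the
 * function name is hypothetical and exists only for exposition.
 */
#if 0
static struct nchashhead *
example_bucket_for(vnode_t dvp, unsigned int name_hash)
{
	/* mix the directory's identity into the name hash, then mask */
	return &nchashtbl[(dvp->v_id ^ name_hash) & nchashmask];
}
#endif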
107
108 /*
109 * Structures associated with name caching.
110 */
111
112 LIST_HEAD(nchashhead, namecache) * nchashtbl; /* Hash Table */
113 u_long nchashmask;
114 u_long nchash; /* size of hash table - 1 */
115 long numcache; /* number of cache entries allocated */
116 int desiredNodes;
117 int desiredNegNodes;
118 int ncs_negtotal;
119 int nc_disabled = 0;
120 TAILQ_HEAD(, namecache) nchead; /* chain of all name cache entries */
121 TAILQ_HEAD(, namecache) neghead; /* chain of only negative cache entries */
122
123
124 #if COLLECT_STATS
125
126 struct nchstats nchstats; /* cache effectiveness statistics */
127
128 #define NCHSTAT(v) { \
129 nchstats.v++; \
130 }
131 #define NAME_CACHE_LOCK() name_cache_lock()
132 #define NAME_CACHE_UNLOCK() name_cache_unlock()
133 #define NAME_CACHE_LOCK_SHARED() name_cache_lock()
134
135 #else
136
137 #define NCHSTAT(v)
138 #define NAME_CACHE_LOCK() name_cache_lock()
139 #define NAME_CACHE_UNLOCK() name_cache_unlock()
140 #define NAME_CACHE_LOCK_SHARED() name_cache_lock_shared()
141
142 #endif
143
144
145 /* vars for name cache list lock */
146 lck_grp_t * namecache_lck_grp;
147 lck_grp_attr_t * namecache_lck_grp_attr;
148 lck_attr_t * namecache_lck_attr;
149
150 lck_grp_t * strcache_lck_grp;
151 lck_grp_attr_t * strcache_lck_grp_attr;
152 lck_attr_t * strcache_lck_attr;
153
154 lck_rw_t * namecache_rw_lock;
155 lck_rw_t * strtable_rw_lock;
156
157 #define NUM_STRCACHE_LOCKS 1024
158
159 lck_mtx_t strcache_mtx_locks[NUM_STRCACHE_LOCKS];
160
161
162 static vnode_t cache_lookup_locked(vnode_t dvp, struct componentname *cnp);
163 static const char *add_name_internal(const char *, uint32_t, u_int, boolean_t, u_int);
164 static void init_string_table(void);
165 static void cache_delete(struct namecache *, int);
166 static void cache_enter_locked(vnode_t dvp, vnode_t vp, struct componentname *cnp, const char *strname);
167 static void cache_purge_locked(vnode_t vp, kauth_cred_t *credp);
168
169 #ifdef DUMP_STRING_TABLE
170 /*
171 * Internal dump function used for debugging
172 */
173 void dump_string_table(void);
174 #endif /* DUMP_STRING_TABLE */
175
176 static void init_crc32(void);
177 static unsigned int crc32tab[256];
178
179
180 #define NCHHASH(dvp, hash_val) \
181 (&nchashtbl[(dvp->v_id ^ (hash_val)) & nchashmask])
182
183 /*
184 * This function tries to check if a directory vp is a subdirectory of dvp
185 * only from valid v_parent pointers. It is called with the name cache lock
186 * held and does not drop the lock anytime inside the function.
187 *
188 * It returns a boolean that indicates whether or not it was able to
189 * successfully infer the parent/descendant relationship via the v_parent
190 * pointers, or whether it could not infer that relationship, in which case
191 * the decision must be delegated to the owning filesystem.
192 *
193 * If it does not defer the decision, i.e. it was successfully able to determine
194 * the parent/descendant relationship, *is_subdir tells the caller if vp is a
195 * subdirectory of dvp.
196 *
197 * If the decision is deferred, *next_vp is where it stopped i.e. *next_vp
198 * is the vnode whose parent is to be determined from the filesystem.
199 * *is_subdir, in this case, is not indicative of anything and should be
200 * ignored.
201 *
202 * The return value and output args should be used as follows :
203 *
204 * defer = cache_check_vnode_issubdir(vp, dvp, is_subdir, next_vp);
205 * if (!defer) {
206 * if (*is_subdir)
207 * vp is subdirectory;
208 * else
209 * vp is not a subdirectory;
210 * } else {
211 * if (*next_vp)
212 * check this vnode's parent from the filesystem
213 * else
214 * error (likely because of forced unmount).
215 * }
216 *
217 */
218 static boolean_t
219 cache_check_vnode_issubdir(vnode_t vp, vnode_t dvp, boolean_t *is_subdir,
220 vnode_t *next_vp)
221 {
222 vnode_t tvp = vp;
223 int defer = FALSE;
224
225 *is_subdir = FALSE;
226 *next_vp = NULLVP;
227 while (1) {
228 mount_t tmp;
229
230 if (tvp == dvp) {
231 *is_subdir = TRUE;
232 break;
233 } else if (tvp == rootvnode) {
234 /* *is_subdir = FALSE */
235 break;
236 }
237
238 tmp = tvp->v_mount;
239 while ((tvp->v_flag & VROOT) && tmp && tmp->mnt_vnodecovered &&
240 tvp != dvp && tvp != rootvnode) {
241 tvp = tmp->mnt_vnodecovered;
242 tmp = tvp->v_mount;
243 }
244
245 /*
246 * If dvp is not at the top of a mount "stack" then
247 * vp is not a subdirectory of dvp either.
248 */
249 if (tvp == dvp || tvp == rootvnode) {
250 /* *is_subdir = FALSE */
251 break;
252 }
253
254 if (!tmp) {
255 defer = TRUE;
256 *next_vp = NULLVP;
257 break;
258 }
259
260 if ((tvp->v_flag & VISHARDLINK) || !(tvp->v_parent)) {
261 defer = TRUE;
262 *next_vp = tvp;
263 break;
264 }
265
266 tvp = tvp->v_parent;
267 }
268
269 return defer;
270 }
271
272 /* maximum number of retries for potentially transient errors in vnode_issubdir */
273 #define MAX_ERROR_RETRY 3
274
275 /*
276 * This function checks if a given directory (vp) is a subdirectory of dvp.
277 * It walks backwards from vp and if it hits dvp in its parent chain,
278 * it is a subdirectory. If it encounters the root directory, it is not
279 * a subdirectory.
280 *
281 * This function returns an error if it is unsuccessful and 0 on success.
282 *
283 * On entry (and exit) vp has an iocount and if this function has to take
284 * any iocounts on other vnodes in the parent chain traversal, it releases them.
285 */
286 int
287 vnode_issubdir(vnode_t vp, vnode_t dvp, int *is_subdir, vfs_context_t ctx)
288 {
289 vnode_t start_vp, tvp;
290 vnode_t vp_with_iocount;
291 int error = 0;
292 char dotdotbuf[] = "..";
293 int error_retry_count = 0; /* retry count for potentially transient
294 * errors */
295
296 *is_subdir = FALSE;
297 tvp = start_vp = vp;
298 /*
299 * Anytime we acquire an iocount in this function, we save the vnode
300 * in this variable and release it before exiting.
301 */
302 vp_with_iocount = NULLVP;
303
304 while (1) {
305 boolean_t defer;
306 vnode_t pvp;
307 uint32_t vid;
308 struct componentname cn;
309 boolean_t is_subdir_locked = FALSE;
310
311 if (tvp == dvp) {
312 *is_subdir = TRUE;
313 break;
314 } else if (tvp == rootvnode) {
315 /* *is_subdir = FALSE */
316 break;
317 }
318
319 NAME_CACHE_LOCK_SHARED();
320
321 defer = cache_check_vnode_issubdir(tvp, dvp, &is_subdir_locked,
322 &tvp);
323
324 if (defer && tvp) {
325 vid = vnode_vid(tvp);
326 }
327
328 NAME_CACHE_UNLOCK();
329
330 if (!defer) {
331 *is_subdir = is_subdir_locked;
332 break;
333 }
334
335 if (!tvp) {
336 if (error_retry_count++ < MAX_ERROR_RETRY) {
337 tvp = vp;
338 continue;
339 }
340 error = ENOENT;
341 break;
342 }
343
344 if (tvp != start_vp) {
345 if (vp_with_iocount) {
346 vnode_put(vp_with_iocount);
347 vp_with_iocount = NULLVP;
348 }
349
350 error = vnode_getwithvid(tvp, vid);
351 if (error) {
352 if (error_retry_count++ < MAX_ERROR_RETRY) {
353 tvp = vp;
354 error = 0;
355 continue;
356 }
357 break;
358 }
359
360 vp_with_iocount = tvp;
361 }
362
363 bzero(&cn, sizeof(cn));
364 cn.cn_nameiop = LOOKUP;
365 cn.cn_flags = ISLASTCN | ISDOTDOT;
366 cn.cn_context = ctx;
367 cn.cn_pnbuf = &dotdotbuf[0];
368 cn.cn_pnlen = sizeof(dotdotbuf);
369 cn.cn_nameptr = cn.cn_pnbuf;
370 cn.cn_namelen = 2;
371
372 pvp = NULLVP;
373 if ((error = VNOP_LOOKUP(tvp, &pvp, &cn, ctx))) {
374 break;
375 }
376
377 if (!(tvp->v_flag & VISHARDLINK) && tvp->v_parent != pvp) {
378 (void)vnode_update_identity(tvp, pvp, NULL, 0, 0,
379 VNODE_UPDATE_PARENT);
380 }
381
382 if (vp_with_iocount) {
383 vnode_put(vp_with_iocount);
384 }
385
386 vp_with_iocount = tvp = pvp;
387 }
388
389 if (vp_with_iocount) {
390 vnode_put(vp_with_iocount);
391 }
392
393 return error;
394 }
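/*
 * Illustrative usage (not from the original source): a hypothetical
 * caller of vnode_issubdir().  Per the contract above, vp holds an
 * iocount on entry, and any extra iocounts taken during the parent
 * chain walk are released internally.
 */
#if 0
static int
example_is_inside(vnode_t vp, vnode_t dvp, vfs_context_t ctx)
{
	int is_subdir = 0;
	int error;

	error = vnode_issubdir(vp, dvp, &is_subdir, ctx);
	if (error) {
		return error;           /* e.g. ENOENT after retries */
	}
	return is_subdir ? 1 : 0;       /* 1 means vp lives under dvp */
}
#endif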
395
396 /*
397 * This function builds the path in "buff" from the supplied vnode.
398 * The length of the buffer *INCLUDING* the trailing zero byte is
399 * returned in outlen. NOTE: the length includes the trailing zero
400 * byte and thus the length is one greater than what strlen would
401 * return. This is important and lots of code elsewhere in the kernel
402 * assumes this behavior.
403 *
404 * This function may call a VNOP into the file system if the parent vnode
405 * does not exist, or when called for hardlinks via a volfs path.
406 * If BUILDPATH_NO_FS_ENTER is set in flags, it only uses values present
407 * in the name cache and does not enter the file system.
408 *
409 * If BUILDPATH_CHECK_MOVED is set in flags, we return EAGAIN when
410 * we encounter ENOENT during path reconstruction. ENOENT means that
411 * one of the parents moved while we were building the path. The
412 * caller can handle this case specially by calling build_path again.
413 *
414 * If BUILDPATH_VOLUME_RELATIVE is set in flags, we return a path
415 * that is relative to the nearest mount point, i.e. we do not
416 * cross mount points while building the path.
417 *
418 * The passed-in vp must have a valid io_count reference.
419 *
420 * If the parent vnode is non-NULL it must also have an io count. This
421 * allows build_path_with_parent to be safely called for operations like
422 * unlink, rmdir and rename that already have io counts on the target
423 * and the directory. In this way build_path_with_parent does not have
424 * to try to obtain an additional io count on the parent. Taking an
425 * io count on the parent can lead to deadlock if a forced unmount
426 * occurs at the right moment. For a fuller explanation of how this
427 * can occur, see the comment for vn_getpath_with_parent.
428 *
429 */
430 int
431 build_path_with_parent(vnode_t first_vp, vnode_t parent_vp, char *buff, int buflen, int *outlen, int flags, vfs_context_t ctx)
432 {
433 vnode_t vp, tvp;
434 vnode_t vp_with_iocount;
435 vnode_t proc_root_dir_vp;
436 char *end;
437 const char *str;
438 int len;
439 int ret = 0;
440 int fixhardlink;
441
442 if (first_vp == NULLVP) {
443 return EINVAL;
444 }
445
446 if (buflen <= 1) {
447 return ENOSPC;
448 }
449
450 /*
451 * Grab the process fd so we can evaluate fd_rdir.
452 */
453 if (vfs_context_proc(ctx)->p_fd) {
454 proc_root_dir_vp = vfs_context_proc(ctx)->p_fd->fd_rdir;
455 } else {
456 proc_root_dir_vp = NULL;
457 }
458
459 vp_with_iocount = NULLVP;
460 again:
461 vp = first_vp;
462
463 end = &buff[buflen - 1];
464 *end = '\0';
465
466 /*
467 * holding the NAME_CACHE_LOCK in shared mode is
468 * sufficient to stabilize both the vp->v_parent chain
469 * and the 'vp->v_mount->mnt_vnodecovered' chain
470 *
471 * if we need to drop this lock, we must first grab the v_id
472 * from the vnode we're currently working with... if that
473 * vnode doesn't already have an io_count reference (the vp
474 * passed in comes with one), we must grab a reference
475 * after we drop the NAME_CACHE_LOCK via vnode_getwithvid...
476 * deadlocks may result if you call vnode_get while holding
477 * the NAME_CACHE_LOCK... we lazily release the reference
478 * we pick up the next time we encounter a need to drop
479 * the NAME_CACHE_LOCK or before we return from this routine
480 */
481 NAME_CACHE_LOCK_SHARED();
482
483 #if CONFIG_FIRMLINKS
484 if (!(flags & BUILDPATH_NO_FIRMLINK) &&
485 (vp->v_flag & VFMLINKTARGET) && vp->v_fmlink) {
486 vp = vp->v_fmlink;
487 }
488 #endif
489
490 /*
491 * Check if this is the root of a file system.
492 */
493 while (vp && vp->v_flag & VROOT) {
494 if (vp->v_mount == NULL) {
495 ret = EINVAL;
496 goto out_unlock;
497 }
498 if ((vp->v_mount->mnt_flag & MNT_ROOTFS) || (vp == proc_root_dir_vp)) {
499 /*
500 * It's the root of the root file system, so it's
501 * just "/".
502 */
503 *--end = '/';
504
505 goto out_unlock;
506 } else {
507 /*
508 * This is the root of the volume and the caller does not
509 * want to cross mount points. Therefore just return
510 * '/' as the relative path.
511 */
512 #if CONFIG_FIRMLINKS
513 if (!(flags & BUILDPATH_NO_FIRMLINK) &&
514 (vp->v_flag & VFMLINKTARGET) && vp->v_fmlink) {
515 vp = vp->v_fmlink;
516 } else
517 #endif
518 if (flags & BUILDPATH_VOLUME_RELATIVE) {
519 *--end = '/';
520 goto out_unlock;
521 } else {
522 vp = vp->v_mount->mnt_vnodecovered;
523 }
524 }
525 }
526
527 while ((vp != NULLVP) && (vp->v_parent != vp)) {
528 int vid;
529
530 /*
531 * For hardlinks the v_name may be stale, so if it's OK
532 * to enter a file system, ask the file system for the
533 * name and parent (below).
534 */
535 fixhardlink = (vp->v_flag & VISHARDLINK) &&
536 (vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
537 !(flags & BUILDPATH_NO_FS_ENTER);
538
539 if (!fixhardlink) {
540 str = vp->v_name;
541
542 if (str == NULL || *str == '\0') {
543 if (vp->v_parent != NULL) {
544 ret = EINVAL;
545 } else {
546 ret = ENOENT;
547 }
548 goto out_unlock;
549 }
550 len = strlen(str);
551 /*
552 * Check that there's enough space (including space for the '/')
553 */
554 if ((end - buff) < (len + 1)) {
555 ret = ENOSPC;
556 goto out_unlock;
557 }
558 /*
559 * Copy the name backwards.
560 */
561 str += len;
562
563 for (; len > 0; len--) {
564 *--end = *--str;
565 }
566 /*
567 * Add a path separator.
568 */
569 *--end = '/';
570 }
571
572 /*
573 * Walk up the parent chain.
574 */
575 if (((vp->v_parent != NULLVP) && !fixhardlink) ||
576 (flags & BUILDPATH_NO_FS_ENTER)) {
577 /*
578 * In this if () block we are not allowed to enter the filesystem
579 * to conclusively get the most accurate parent identifier.
580 * As a result, if 'vp' does not identify '/' and it
581 * does not have a valid v_parent, then error out
582 * and disallow further path construction
583 */
584 if ((vp->v_parent == NULLVP) && (rootvnode != vp)) {
585 /*
586 * Only '/' is allowed to have a NULL parent
587 * pointer. Upper level callers should ideally
588 * re-drive name lookup on receiving a ENOENT.
589 */
590 ret = ENOENT;
591
592 /* The code below will exit early if 'tvp = vp' == NULL */
593 }
594 vp = vp->v_parent;
595
596 /*
597 * if the vnode we have in hand isn't a directory and it
598 * has a v_parent, then we started with the resource fork
599 * so skip up to avoid getting a duplicate copy of the
600 * file name in the path.
601 */
602 if (vp && !vnode_isdir(vp) && vp->v_parent) {
603 vp = vp->v_parent;
604 }
605 } else {
606 /*
607 * No parent, go get it if supported.
608 */
609 struct vnode_attr va;
610 vnode_t dvp;
611
612 /*
613 * Make sure file system supports obtaining a path from id.
614 */
615 if (!(vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID)) {
616 ret = ENOENT;
617 goto out_unlock;
618 }
619 vid = vp->v_id;
620
621 NAME_CACHE_UNLOCK();
622
623 if (vp != first_vp && vp != parent_vp && vp != vp_with_iocount) {
624 if (vp_with_iocount) {
625 vnode_put(vp_with_iocount);
626 vp_with_iocount = NULLVP;
627 }
628 if (vnode_getwithvid(vp, vid)) {
629 goto again;
630 }
631 vp_with_iocount = vp;
632 }
633 VATTR_INIT(&va);
634 VATTR_WANTED(&va, va_parentid);
635
636 if (fixhardlink) {
637 VATTR_WANTED(&va, va_name);
638 MALLOC_ZONE(va.va_name, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
639 } else {
640 va.va_name = NULL;
641 }
642 /*
643 * Ask the file system for its parent id and for its name (optional).
644 */
645 ret = vnode_getattr(vp, &va, ctx);
646
647 if (fixhardlink) {
648 if ((ret == 0) && (VATTR_IS_SUPPORTED(&va, va_name))) {
649 str = va.va_name;
650 vnode_update_identity(vp, NULL, str, strlen(str), 0, VNODE_UPDATE_NAME);
651 } else if (vp->v_name) {
652 str = vp->v_name;
653 ret = 0;
654 } else {
655 ret = ENOENT;
656 goto bad_news;
657 }
658 len = strlen(str);
659
660 /*
661 * Check that there's enough space.
662 */
663 if ((end - buff) < (len + 1)) {
664 ret = ENOSPC;
665 } else {
666 /* Copy the name backwards. */
667 str += len;
668
669 for (; len > 0; len--) {
670 *--end = *--str;
671 }
672 /*
673 * Add a path separator.
674 */
675 *--end = '/';
676 }
677 bad_news:
678 FREE_ZONE(va.va_name, MAXPATHLEN, M_NAMEI);
679 }
680 if (ret || !VATTR_IS_SUPPORTED(&va, va_parentid)) {
681 ret = ENOENT;
682 goto out;
683 }
684 /*
685 * Ask the file system for the parent vnode.
686 */
687 if ((ret = VFS_VGET(vp->v_mount, (ino64_t)va.va_parentid, &dvp, ctx))) {
688 goto out;
689 }
690
691 if (!fixhardlink && (vp->v_parent != dvp)) {
692 vnode_update_identity(vp, dvp, NULL, 0, 0, VNODE_UPDATE_PARENT);
693 }
694
695 if (vp_with_iocount) {
696 vnode_put(vp_with_iocount);
697 }
698 vp = dvp;
699 vp_with_iocount = vp;
700
701 NAME_CACHE_LOCK_SHARED();
702
703 /*
704 * if the vnode we have in hand isn't a directory and it
705 * has a v_parent, then we started with the resource fork
706 * so skip up to avoid getting a duplicate copy of the
707 * file name in the path.
708 */
709 if (vp && !vnode_isdir(vp) && vp->v_parent) {
710 vp = vp->v_parent;
711 }
712 }
713
714 if (vp && (flags & BUILDPATH_CHECKACCESS)) {
715 vid = vp->v_id;
716
717 NAME_CACHE_UNLOCK();
718
719 if (vp != first_vp && vp != parent_vp && vp != vp_with_iocount) {
720 if (vp_with_iocount) {
721 vnode_put(vp_with_iocount);
722 vp_with_iocount = NULLVP;
723 }
724 if (vnode_getwithvid(vp, vid)) {
725 goto again;
726 }
727 vp_with_iocount = vp;
728 }
729 if ((ret = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx))) {
730 goto out; /* no peeking */
731 }
732 NAME_CACHE_LOCK_SHARED();
733 }
734
735 /*
736 * When a mount point is crossed switch the vp.
737 * Continue until we find the root or we find
738 * a vnode that's not the root of a mounted
739 * file system.
740 */
741 tvp = vp;
742
743 while (tvp) {
744 if (tvp == proc_root_dir_vp) {
745 goto out_unlock; /* encountered the root */
746 }
747
748 #if CONFIG_FIRMLINKS
749 if (!(flags & BUILDPATH_NO_FIRMLINK) &&
750 (tvp->v_flag & VFMLINKTARGET) && tvp->v_fmlink) {
751 tvp = tvp->v_fmlink;
752 break;
753 }
754 #endif
755
756 if (!(tvp->v_flag & VROOT) || !tvp->v_mount) {
757 break; /* not the root of a mounted FS */
758 }
759 if (flags & BUILDPATH_VOLUME_RELATIVE) {
760 /* Do not cross over mount points */
761 tvp = NULL;
762 } else {
763 tvp = tvp->v_mount->mnt_vnodecovered;
764 }
765 }
766 if (tvp == NULLVP) {
767 goto out_unlock;
768 }
769 vp = tvp;
770 }
771 out_unlock:
772 NAME_CACHE_UNLOCK();
773 out:
774 if (vp_with_iocount) {
775 vnode_put(vp_with_iocount);
776 }
777 /*
778 * Slide the name down to the beginning of the buffer.
779 */
780 memmove(buff, end, &buff[buflen] - end);
781
782 /*
783 * length includes the trailing zero byte
784 */
785 *outlen = &buff[buflen] - end;
786
787 /* One of the parents was moved during path reconstruction.
788 * The caller is interested in knowing whether any of the
789 * parents moved via BUILDPATH_CHECK_MOVED, so return EAGAIN.
790 */
791 if ((ret == ENOENT) && (flags & BUILDPATH_CHECK_MOVED)) {
792 ret = EAGAIN;
793 }
794
795 return ret;
796 }
797
798 int
799 build_path(vnode_t first_vp, char *buff, int buflen, int *outlen, int flags, vfs_context_t ctx)
800 {
801 return build_path_with_parent(first_vp, NULL, buff, buflen, outlen, flags, ctx);
802 }
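/*
 * Illustrative usage (not from the original source): a hedged sketch of
 * calling build_path().  As documented above, *outlen includes the
 * trailing NUL, so outlen - 1 equals what strlen() would return.
 */
#if 0
static int
example_print_path(vnode_t vp, vfs_context_t ctx)
{
	char *path;
	int len = 0;
	int error;

	MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
	error = build_path(vp, path, MAXPATHLEN, &len, 0, ctx);
	if (error == 0) {
		printf("path %s (len %d, including the NUL)\n", path, len);
	}
	FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
	return error;
}
#endif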
803
804 /*
805 * return NULLVP if vp's parent doesn't
806 * exist, or we can't get a valid iocount
807 * else return the parent of vp
808 */
809 vnode_t
810 vnode_getparent(vnode_t vp)
811 {
812 vnode_t pvp = NULLVP;
813 int pvid;
814
815 NAME_CACHE_LOCK_SHARED();
816
817 pvp = vp->v_parent;
818
819 /*
820 * v_parent is stable behind the name_cache lock
821 * however, the only thing we can really guarantee
822 * is that we've grabbed a valid iocount on the
823 * parent of 'vp' at the time we took the name_cache lock...
824 * once we drop the lock, vp could get re-parented
825 */
826 if (pvp != NULLVP) {
827 pvid = pvp->v_id;
828
829 NAME_CACHE_UNLOCK();
830
831 if (vnode_getwithvid(pvp, pvid) != 0) {
832 pvp = NULL;
833 }
834 } else {
835 NAME_CACHE_UNLOCK();
836 }
837 return pvp;
838 }
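/*
 * Illustrative usage (not from the original source): the parent returned
 * by vnode_getparent() carries an iocount that the caller must drop with
 * vnode_put().
 */
#if 0
static void
example_with_parent(vnode_t vp)
{
	vnode_t pvp = vnode_getparent(vp);

	if (pvp != NULLVP) {
		/* ... pvp is pinned by the iocount here ... */
		vnode_put(pvp);
	}
}
#endif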
839
840 const char *
841 vnode_getname(vnode_t vp)
842 {
843 const char *name = NULL;
844
845 NAME_CACHE_LOCK_SHARED();
846
847 if (vp->v_name) {
848 name = vfs_addname(vp->v_name, strlen(vp->v_name), 0, 0);
849 }
850 NAME_CACHE_UNLOCK();
851
852 return name;
853 }
854
855 void
856 vnode_putname(const char *name)
857 {
858 vfs_removename(name);
859 }
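/*
 * Illustrative usage (not from the original source): vnode_getname()
 * returns a referenced name string that must be released with
 * vnode_putname(), which simply calls vfs_removename().
 */
#if 0
static void
example_log_name(vnode_t vp)
{
	const char *name = vnode_getname(vp);

	if (name != NULL) {
		printf("vnode name: %s\n", name);
		vnode_putname(name);
	}
}
#endif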
860
861 static const char unknown_vnodename[] = "(unknown vnode name)";
862
863 const char *
864 vnode_getname_printable(vnode_t vp)
865 {
866 const char *name = vnode_getname(vp);
867 if (name != NULL) {
868 return name;
869 }
870
871 switch (vp->v_type) {
872 case VCHR:
873 case VBLK:
874 {
875 /*
876 * Create an artificial dev name from
877 * major and minor device number
878 */
879 char dev_name[64];
880 (void) snprintf(dev_name, sizeof(dev_name),
881 "%c(%u, %u)", VCHR == vp->v_type ? 'c':'b',
882 major(vp->v_rdev), minor(vp->v_rdev));
883 /*
884 * Add the newly created dev name to the name
885 * cache to allow easier cleanup. Also,
886 * vfs_addname allocates memory for the new name
887 * and returns it.
888 */
889 NAME_CACHE_LOCK_SHARED();
890 name = vfs_addname(dev_name, strlen(dev_name), 0, 0);
891 NAME_CACHE_UNLOCK();
892 return name;
893 }
894 default:
895 return unknown_vnodename;
896 }
897 }
898
899 void
900 vnode_putname_printable(const char *name)
901 {
902 if (name == unknown_vnodename) {
903 return;
904 }
905 vnode_putname(name);
906 }
907
908
909 /*
910 * if VNODE_UPDATE_PARENT, and we can take
911 * a reference on dvp, then update vp with
912 * its new parent... if vp already has a parent,
913 * then drop the reference vp held on it
914 *
915 * if VNODE_UPDATE_NAME,
916 * then drop string ref on v_name if it exists, and if name is non-NULL
917 * then pick up a string reference on name and record it in v_name...
918 * optionally pass in the length and hashval of name if known
919 *
920 * if VNODE_UPDATE_CACHE, flush the name cache entries associated with vp
921 */
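/*
 * Illustrative usage (not from the original source): a hypothetical
 * rename-style caller updating both the parent and the name in one
 * call.  Passing the known component length and hash lets the routine
 * avoid recomputing them; 0 for either means "compute it for me".
 */
#if 0
static void
example_after_rename(vnode_t vp, vnode_t new_dvp, struct componentname *cnp)
{
	vnode_update_identity(vp, new_dvp, cnp->cn_nameptr,
	    (int)cnp->cn_namelen, cnp->cn_hash,
	    VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE);
}
#endif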
922 void
923 vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, uint32_t name_hashval, int flags)
924 {
925 struct namecache *ncp;
926 vnode_t old_parentvp = NULLVP;
927 int isstream = (vp->v_flag & VISNAMEDSTREAM);
928 int kusecountbumped = 0;
929 kauth_cred_t tcred = NULL;
930 const char *vname = NULL;
931 const char *tname = NULL;
932
933 if (flags & VNODE_UPDATE_PARENT) {
934 if (dvp && vnode_ref(dvp) != 0) {
935 dvp = NULLVP;
936 }
937 /* Don't count a stream's parent ref during unmounts */
938 if (isstream && dvp && (dvp != vp) && (dvp != vp->v_parent) && (dvp->v_type == VREG)) {
939 vnode_lock_spin(dvp);
940 ++dvp->v_kusecount;
941 kusecountbumped = 1;
942 vnode_unlock(dvp);
943 }
944 } else {
945 dvp = NULLVP;
946 }
947 if ((flags & VNODE_UPDATE_NAME)) {
948 if (name != vp->v_name) {
949 if (name && *name) {
950 if (name_len == 0) {
951 name_len = strlen(name);
952 }
953 tname = vfs_addname(name, name_len, name_hashval, 0);
954 }
955 } else {
956 flags &= ~VNODE_UPDATE_NAME;
957 }
958 }
959 if ((flags & (VNODE_UPDATE_PURGE | VNODE_UPDATE_PARENT | VNODE_UPDATE_CACHE | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGEFIRMLINK))) {
960 NAME_CACHE_LOCK();
961
962 #if CONFIG_FIRMLINKS
963 if (flags & VNODE_UPDATE_PURGEFIRMLINK) {
964 vnode_t old_fvp = vp->v_fmlink;
965 if (old_fvp) {
966 vnode_lock_spin(vp);
967 vp->v_flag &= ~VFMLINKTARGET;
968 vp->v_fmlink = NULLVP;
969 vnode_unlock(vp);
970 NAME_CACHE_UNLOCK();
971
972 /*
973 * vnode_rele can result in cascading series of
974 * usecount releases. The combination of calling
975 * vnode_recycle and dont_reenter (3rd arg to
976 * vnode_rele_internal) ensures we don't have
977 * that issue.
978 */
979 vnode_recycle(old_fvp);
980 vnode_rele_internal(old_fvp, O_EVTONLY, 1, 0);
981
982 NAME_CACHE_LOCK();
983 }
984 }
985 #endif
986
987 if ((flags & VNODE_UPDATE_PURGE)) {
988 if (vp->v_parent) {
989 vp->v_parent->v_nc_generation++;
990 }
991
992 while ((ncp = LIST_FIRST(&vp->v_nclinks))) {
993 cache_delete(ncp, 1);
994 }
995
996 while ((ncp = TAILQ_FIRST(&vp->v_ncchildren))) {
997 cache_delete(ncp, 1);
998 }
999
1000 /*
1001 * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
1002 */
1003 tcred = vp->v_cred;
1004 vp->v_cred = NOCRED;
1005 vp->v_authorized_actions = 0;
1006 vp->v_cred_timestamp = 0;
1007 }
1008 if ((flags & VNODE_UPDATE_NAME)) {
1009 vname = vp->v_name;
1010 vp->v_name = tname;
1011 }
1012 if (flags & VNODE_UPDATE_PARENT) {
1013 if (dvp != vp && dvp != vp->v_parent) {
1014 old_parentvp = vp->v_parent;
1015 vp->v_parent = dvp;
1016 dvp = NULLVP;
1017
1018 if (old_parentvp) {
1019 flags |= VNODE_UPDATE_CACHE;
1020 }
1021 }
1022 }
1023 if (flags & VNODE_UPDATE_CACHE) {
1024 while ((ncp = LIST_FIRST(&vp->v_nclinks))) {
1025 cache_delete(ncp, 1);
1026 }
1027 }
1028 NAME_CACHE_UNLOCK();
1029
1030 if (vname != NULL) {
1031 vfs_removename(vname);
1032 }
1033
1034 if (IS_VALID_CRED(tcred)) {
1035 kauth_cred_unref(&tcred);
1036 }
1037 }
1038 if (dvp != NULLVP) {
1039 /* Back-out the ref we took if we lost a race for vp->v_parent. */
1040 if (kusecountbumped) {
1041 vnode_lock_spin(dvp);
1042 if (dvp->v_kusecount > 0) {
1043 --dvp->v_kusecount;
1044 }
1045 vnode_unlock(dvp);
1046 }
1047 vnode_rele(dvp);
1048 }
1049 if (old_parentvp) {
1050 struct uthread *ut;
1051
1052 if (isstream) {
1053 vnode_lock_spin(old_parentvp);
1054 if ((old_parentvp->v_type != VDIR) && (old_parentvp->v_kusecount > 0)) {
1055 --old_parentvp->v_kusecount;
1056 }
1057 vnode_unlock(old_parentvp);
1058 }
1059 ut = get_bsdthread_info(current_thread());
1060
1061 /*
1062 * indicate to vnode_rele that it shouldn't do a
1063 * vnode_reclaim at this time... instead it will
1064 * chain the vnode to the uu_vreclaims list...
1065 * we'll be responsible for calling vnode_reclaim
1066 * on each of the vnodes in this list...
1067 */
1068 ut->uu_defer_reclaims = 1;
1069 ut->uu_vreclaims = NULLVP;
1070
1071 while ((vp = old_parentvp) != NULLVP) {
1072 vnode_lock_spin(vp);
1073 vnode_rele_internal(vp, 0, 0, 1);
1074
1075 /*
1076 * check to see if the vnode is now in the state
1077 * that would have triggered a vnode_reclaim in vnode_rele
1078 * if it is, we save its parent pointer and then NULL
1079 * out the v_parent field... we'll drop the reference
1080 * that was held on the next iteration of this loop...
1081 * this short circuits a potential deep recursion if we
1082 * have a long chain of parents in this state...
1083 * we'll sit in this loop until we run into
1084 * a parent in this chain that is not in this state
1085 *
1086 * make our check and the vnode_rele atomic
1087 * with respect to the current vnode we're working on
1088 * by holding the vnode lock
1089 * if vnode_rele deferred the vnode_reclaim and has put
1090 * this vnode on the list to be reaped by us, then
1091 * it has left this vnode with an iocount == 1
1092 */
1093 if ((vp->v_iocount == 1) && (vp->v_usecount == 0) &&
1094 ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
1095 /*
1096 * vnode_rele wanted to do a vnode_reclaim on this vnode
1097 * it should be sitting on the head of the uu_vreclaims chain
1098 * pull the parent pointer now so that when we do the
1099 * vnode_reclaim for each of the vnodes in the uu_vreclaims
1100 * list, we won't recurse back through here
1101 *
1102 * need to do a convert here in case vnode_rele_internal
1103 * returns with the lock held in the spin mode... it
1104 * can drop and retake the lock under certain circumstances
1105 */
1106 vnode_lock_convert(vp);
1107
1108 NAME_CACHE_LOCK();
1109 old_parentvp = vp->v_parent;
1110 vp->v_parent = NULLVP;
1111 NAME_CACHE_UNLOCK();
1112 } else {
1113 /*
1114 * we're done... we ran into a vnode that isn't
1115 * being terminated
1116 */
1117 old_parentvp = NULLVP;
1118 }
1119 vnode_unlock(vp);
1120 }
1121 ut->uu_defer_reclaims = 0;
1122
1123 while ((vp = ut->uu_vreclaims) != NULLVP) {
1124 ut->uu_vreclaims = vp->v_defer_reclaimlist;
1125
1126 /*
1127 * vnode_put will drive the vnode_reclaim if
1128 * we are still the only reference on this vnode
1129 */
1130 vnode_put(vp);
1131 }
1132 }
1133 }
1134
1135 #if CONFIG_FIRMLINKS
1136 errno_t
1137 vnode_setasfirmlink(vnode_t vp, vnode_t target_vp)
1138 {
1139 int error = 0;
1140 vnode_t old_target_vp = NULLVP;
1141 vnode_t old_target_vp_v_fmlink = NULLVP;
1142 kauth_cred_t target_vp_cred = NULL;
1143 kauth_cred_t old_target_vp_cred = NULL;
1144
1145 if (!vp) {
1146 return EINVAL;
1147 }
1148
1149 if (target_vp) {
1150 if (vp->v_fmlink == target_vp) { /* Will be checked again under the name cache lock */
1151 return 0;
1152 }
1153
1154 /*
1155 * Firmlink source and target will take both a usecount
1156 * and kusecount on each other.
1157 */
1158 if ((error = vnode_ref_ext(target_vp, O_EVTONLY, VNODE_REF_FORCE))) {
1159 return error;
1160 }
1161
1162 if ((error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE))) {
1163 vnode_rele_ext(target_vp, O_EVTONLY, 1);
1164 return error;
1165 }
1166 }
1167
1168 NAME_CACHE_LOCK();
1169
1170 old_target_vp = vp->v_fmlink;
1171 if (target_vp && (target_vp == old_target_vp)) {
1172 NAME_CACHE_UNLOCK();
1173 return 0;
1174 }
1175 vp->v_fmlink = target_vp;
1176
1177 vnode_lock_spin(vp);
1178 vp->v_flag &= ~VFMLINKTARGET;
1179 vnode_unlock(vp);
1180
1181 if (target_vp) {
1182 target_vp->v_fmlink = vp;
1183 vnode_lock_spin(target_vp);
1184 target_vp->v_flag |= VFMLINKTARGET;
1185 vnode_unlock(target_vp);
1186 cache_purge_locked(vp, &target_vp_cred);
1187 }
1188
1189 if (old_target_vp) {
1190 old_target_vp_v_fmlink = old_target_vp->v_fmlink;
1191 old_target_vp->v_fmlink = NULLVP;
1192 vnode_lock_spin(old_target_vp);
1193 old_target_vp->v_flag &= ~VFMLINKTARGET;
1194 vnode_unlock(old_target_vp);
1195 cache_purge_locked(vp, &old_target_vp_cred);
1196 }
1197
1198 NAME_CACHE_UNLOCK();
1199
1200 if (target_vp_cred && IS_VALID_CRED(target_vp_cred)) {
1201 kauth_cred_unref(&target_vp_cred);
1202 }
1203
1204 if (old_target_vp) {
1205 if (old_target_vp_cred && IS_VALID_CRED(old_target_vp_cred)) {
1206 kauth_cred_unref(&old_target_vp_cred);
1207 }
1208
1209 vnode_rele_ext(old_target_vp, O_EVTONLY, 1);
1210 if (old_target_vp_v_fmlink) {
1211 vnode_rele_ext(old_target_vp_v_fmlink, O_EVTONLY, 1);
1212 }
1213 }
1214
1215 return 0;
1216 }
1217
1218 errno_t
1219 vnode_getfirmlink(vnode_t vp, vnode_t *target_vp)
1220 {
1221 int error;
1222
1223 if (!vp->v_fmlink) {
1224 return ENODEV;
1225 }
1226
1227 NAME_CACHE_LOCK_SHARED();
1228 if (vp->v_fmlink && !(vp->v_flag & VFMLINKTARGET) &&
1229 (vnode_get(vp->v_fmlink) == 0)) {
1230 vnode_t tvp = vp->v_fmlink;
1231
1232 vnode_lock_spin(tvp);
1233 if (tvp->v_lflag & (VL_TERMINATE | VL_DEAD)) {
1234 vnode_unlock(tvp);
1235 NAME_CACHE_UNLOCK();
1236 vnode_put(tvp);
1237 return ENOENT;
1238 }
1239 if (!(tvp->v_flag & VFMLINKTARGET)) {
1240 panic("firmlink target for vnode %p does not have flag set", vp);
1241 }
1242 vnode_unlock(tvp);
1243 *target_vp = tvp;
1244 error = 0;
1245 } else {
1246 *target_vp = NULLVP;
1247 error = ENODEV;
1248 }
1249 NAME_CACHE_UNLOCK();
1250 return error;
1251 }
1252
1253 #else /* CONFIG_FIRMLINKS */
1254
1255 errno_t
1256 vnode_setasfirmlink(__unused vnode_t vp, __unused vnode_t src_vp)
1257 {
1258 return ENOTSUP;
1259 }
1260
1261 errno_t
1262 vnode_getfirmlink(__unused vnode_t vp, __unused vnode_t *target_vp)
1263 {
1264 return ENOTSUP;
1265 }
1266
1267 #endif
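/*
 * Illustrative usage (not from the original source): on success
 * vnode_getfirmlink() returns the firmlink target with an iocount held,
 * which the caller must drop with vnode_put().  With CONFIG_FIRMLINKS
 * disabled it simply fails with ENOTSUP.
 */
#if 0
static void
example_follow_firmlink(vnode_t vp)
{
	vnode_t target = NULLVP;

	if (vnode_getfirmlink(vp, &target) == 0) {
		/* ... use the firmlink target ... */
		vnode_put(target);
	}
}
#endif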
1268
1269 /*
1270 * Mark a vnode as having multiple hard links. HFS makes use of this
1271 * because it keeps track of each link separately, and wants to know
1272 * which link was actually used.
1273 *
1274 * This will cause the name cache to force a VNOP_LOOKUP on the vnode
1275 * so that HFS can post-process the lookup. Also, volfs will call
1276 * VNOP_GETATTR2 to determine the parent, instead of using v_parent.
1277 */
1278 void
1279 vnode_setmultipath(vnode_t vp)
1280 {
1281 vnode_lock_spin(vp);
1282
1283 /*
1284 * In theory, we're changing the vnode's identity as far as the
1285 * name cache is concerned, so we ought to grab the name cache lock
1286 * here. However, there is already a race, and grabbing the name
1287 * cache lock only makes the race window slightly smaller.
1288 *
1289 * The race happens because the vnode already exists in the name
1290 * cache, and could be found by one thread before another thread
1291 * can set the hard link flag.
1292 */
1293
1294 vp->v_flag |= VISHARDLINK;
1295
1296 vnode_unlock(vp);
1297 }
1298
1299
1300
1301 /*
1302 * backwards compatibility
1303 */
1304 void
1305 vnode_uncache_credentials(vnode_t vp)
1306 {
1307 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
1308 }
1309
1310
1311 /*
1312 * use the exclusive form of NAME_CACHE_LOCK to protect the update of the
1313 * following fields in the vnode: v_cred_timestamp, v_cred, v_authorized_actions
1314 * we use this lock so that we can look at the v_cred and v_authorized_actions
1315 * atomically while behind the NAME_CACHE_LOCK in shared mode in 'cache_lookup_path',
1316 * which is the super-hot path... if we are updating the authorized actions for this
1317 * vnode, we are already in the super-slow and far less frequented path so it's not
1318 * that bad that we take the lock exclusive for this case... of course we strive
1319 * to hold it for the minimum amount of time possible
1320 */
1321
1322 void
1323 vnode_uncache_authorized_action(vnode_t vp, kauth_action_t action)
1324 {
1325 kauth_cred_t tcred = NOCRED;
1326
1327 NAME_CACHE_LOCK();
1328
1329 vp->v_authorized_actions &= ~action;
1330
1331 if (action == KAUTH_INVALIDATE_CACHED_RIGHTS &&
1332 IS_VALID_CRED(vp->v_cred)) {
1333 /*
1334 * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
1335 */
1336 tcred = vp->v_cred;
1337 vp->v_cred = NOCRED;
1338 }
1339 NAME_CACHE_UNLOCK();
1340
1341 if (tcred != NOCRED) {
1342 kauth_cred_unref(&tcred);
1343 }
1344 }
1345
1346
1347 extern int bootarg_vnode_cache_defeat; /* default = 0, from bsd_init.c */
1348
1349 boolean_t
1350 vnode_cache_is_authorized(vnode_t vp, vfs_context_t ctx, kauth_action_t action)
1351 {
1352 kauth_cred_t ucred;
1353 boolean_t retval = FALSE;
1354
1355 /* Boot argument to defeat rights caching */
1356 if (bootarg_vnode_cache_defeat) {
1357 return FALSE;
1358 }
1359
1360 if ((vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
1361 /*
1362 * a TTL is enabled on the rights cache... handle it here
1363 * a TTL of 0 indicates that no rights should be cached
1364 */
1365 if (vp->v_mount->mnt_authcache_ttl) {
1366 if (!(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL)) {
1367 /*
1368 * For filesystems marked only MNTK_AUTH_OPAQUE (generally network ones),
1369 * we will only allow a SEARCH right on a directory to be cached...
1370 * that cached right always has a default TTL associated with it
1371 */
1372 if (action != KAUTH_VNODE_SEARCH || vp->v_type != VDIR) {
1373 vp = NULLVP;
1374 }
1375 }
1376 if (vp != NULLVP && vnode_cache_is_stale(vp) == TRUE) {
1377 vnode_uncache_authorized_action(vp, vp->v_authorized_actions);
1378 vp = NULLVP;
1379 }
1380 } else {
1381 vp = NULLVP;
1382 }
1383 }
1384 if (vp != NULLVP) {
1385 ucred = vfs_context_ucred(ctx);
1386
1387 NAME_CACHE_LOCK_SHARED();
1388
1389 if (vp->v_cred == ucred && (vp->v_authorized_actions & action) == action) {
1390 retval = TRUE;
1391 }
1392
1393 NAME_CACHE_UNLOCK();
1394 }
1395 return retval;
1396 }
1397
1398
1399 void
1400 vnode_cache_authorized_action(vnode_t vp, vfs_context_t ctx, kauth_action_t action)
1401 {
1402 kauth_cred_t tcred = NOCRED;
1403 kauth_cred_t ucred;
1404 struct timeval tv;
1405 boolean_t ttl_active = FALSE;
1406
1407 ucred = vfs_context_ucred(ctx);
1408
1409 if (!IS_VALID_CRED(ucred) || action == 0) {
1410 return;
1411 }
1412
1413 if ((vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
1414 /*
1415 * a TTL is enabled on the rights cache... handle it here
1416 * a TTL of 0 indicates that no rights should be cached
1417 */
1418 if (vp->v_mount->mnt_authcache_ttl == 0) {
1419 return;
1420 }
1421
1422 if (!(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL)) {
1423 /*
1424 * only cache SEARCH action for filesystems marked
1425 * MNTK_AUTH_OPAQUE on VDIRs...
1426 * the lookup_path code will time these out
1427 */
1428 if ((action & ~KAUTH_VNODE_SEARCH) || vp->v_type != VDIR) {
1429 return;
1430 }
1431 }
1432 ttl_active = TRUE;
1433
1434 microuptime(&tv);
1435 }
1436 NAME_CACHE_LOCK();
1437
1438 if (vp->v_cred != ucred) {
1439 kauth_cred_ref(ucred);
1440 /*
1441 * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
1442 */
1443 tcred = vp->v_cred;
1444 vp->v_cred = ucred;
1445 vp->v_authorized_actions = 0;
1446 }
1447 if (ttl_active == TRUE && vp->v_authorized_actions == 0) {
1448 /*
1449 * only reset the timestamp on the
1450 * first authorization cached after the previous
1451 * timer has expired or we're switching creds...
1452 * 'vnode_cache_is_authorized' will clear the
1453 * authorized actions if the TTL is active and
1454 * it has expired
1455 */
1456 vp->v_cred_timestamp = tv.tv_sec;
1457 }
1458 vp->v_authorized_actions |= action;
1459
1460 NAME_CACHE_UNLOCK();
1461
1462 if (IS_VALID_CRED(tcred)) {
1463 kauth_cred_unref(&tcred);
1464 }
1465 }
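/*
 * Illustrative sketch (not from the original source) of how the two
 * routines above are meant to pair up: consult the cache first, and on
 * a miss do the full authorization and then cache the granted action.
 * In the real kernel this wiring lives inside the authorization path
 * itself; the helper below is hypothetical.
 */
#if 0
static int
example_check_search(vnode_t vp, vfs_context_t ctx)
{
	int error;

	if (vnode_cache_is_authorized(vp, ctx, KAUTH_VNODE_SEARCH) == TRUE) {
		return 0;       /* cached grant, skip the expensive check */
	}
	error = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx);
	if (error == 0) {
		vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCH);
	}
	return error;
}
#endif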
1466
1467
1468 boolean_t
1469 vnode_cache_is_stale(vnode_t vp)
1470 {
1471 struct timeval tv;
1472 boolean_t retval;
1473
1474 microuptime(&tv);
1475
1476 if ((tv.tv_sec - vp->v_cred_timestamp) > vp->v_mount->mnt_authcache_ttl) {
1477 retval = TRUE;
1478 } else {
1479 retval = FALSE;
1480 }
1481
1482 return retval;
1483 }
1484
1485
1486
1487 /*
1488 * Returns: 0 Success
1489 * ERECYCLE vnode was recycled from underneath us. Force lookup to be re-driven from namei.
1490 * This errno value should not be seen by anyone outside of the kernel.
1491 */
1492 int
1493 cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp,
1494 vfs_context_t ctx, int *dp_authorized, vnode_t last_dp)
1495 {
1496 char *cp; /* pointer into pathname argument */
1497 int vid;
1498 int vvid = 0; /* protected by vp != NULLVP */
1499 vnode_t vp = NULLVP;
1500 vnode_t tdp = NULLVP;
1501 kauth_cred_t ucred;
1502 boolean_t ttl_enabled = FALSE;
1503 struct timeval tv;
1504 mount_t mp;
1505 unsigned int hash;
1506 int error = 0;
1507 boolean_t dotdotchecked = FALSE;
1508
1509 #if CONFIG_TRIGGERS
1510 vnode_t trigger_vp;
1511 #endif /* CONFIG_TRIGGERS */
1512
1513 ucred = vfs_context_ucred(ctx);
1514 ndp->ni_flag &= ~(NAMEI_TRAILINGSLASH);
1515
1516 NAME_CACHE_LOCK_SHARED();
1517
1518 if (dp->v_mount && (dp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
1519 ttl_enabled = TRUE;
1520 microuptime(&tv);
1521 }
1522 for (;;) {
1523 /*
1524 * Search a directory.
1525 *
1526 * The cn_hash value is for use by cache_lookup
1527 * The last component of the filename is left accessible via
1528 * cnp->cn_nameptr for callers that need the name.
1529 */
1530 hash = 0;
1531 cp = cnp->cn_nameptr;
1532
1533 while (*cp && (*cp != '/')) {
1534 hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
1535 }
1536 /*
1537 * the crc generator can legitimately generate
1538 * a 0... however, 0 for us means that we
1539 * haven't computed a hash, so use 1 instead
1540 */
1541 if (hash == 0) {
1542 hash = 1;
1543 }
1544 cnp->cn_hash = hash;
1545 cnp->cn_namelen = cp - cnp->cn_nameptr;
1546
1547 ndp->ni_pathlen -= cnp->cn_namelen;
1548 ndp->ni_next = cp;
1549
1550 /*
1551 * Replace multiple slashes by a single slash and trailing slashes
1552 * by a null. This must be done before VNOP_LOOKUP() because some
1553 * fs's don't know about trailing slashes. Remember if there were
1554 * trailing slashes to handle symlinks, existing non-directories
1555 * and non-existing files that won't be directories specially later.
1556 */
1557 while (*cp == '/' && (cp[1] == '/' || cp[1] == '\0')) {
1558 cp++;
1559 ndp->ni_pathlen--;
1560
1561 if (*cp == '\0') {
1562 ndp->ni_flag |= NAMEI_TRAILINGSLASH;
1563 *ndp->ni_next = '\0';
1564 }
1565 }
1566 ndp->ni_next = cp;
1567
1568 cnp->cn_flags &= ~(MAKEENTRY | ISLASTCN | ISDOTDOT);
1569
1570 if (*cp == '\0') {
1571 cnp->cn_flags |= ISLASTCN;
1572 }
1573
1574 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.') {
1575 cnp->cn_flags |= ISDOTDOT;
1576 }
1577
1578 *dp_authorized = 0;
1579 #if NAMEDRSRCFORK
1580 /*
1581 * Process a request for a file's resource fork.
1582 *
1583 * Consume the _PATH_RSRCFORKSPEC suffix and tag the path.
1584 */
1585 if ((ndp->ni_pathlen == sizeof(_PATH_RSRCFORKSPEC)) &&
1586 (cp[1] == '.' && cp[2] == '.') &&
1587 bcmp(cp, _PATH_RSRCFORKSPEC, sizeof(_PATH_RSRCFORKSPEC)) == 0) {
1588 /* Skip volfs file systems that don't support native streams. */
1589 if ((dp->v_mount != NULL) &&
1590 (dp->v_mount->mnt_flag & MNT_DOVOLFS) &&
1591 (dp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) {
1592 goto skiprsrcfork;
1593 }
1594 cnp->cn_flags |= CN_WANTSRSRCFORK;
1595 cnp->cn_flags |= ISLASTCN;
1596 ndp->ni_next[0] = '\0';
1597 ndp->ni_pathlen = 1;
1598 }
1599 skiprsrcfork:
1600 #endif
1601
1602 #if CONFIG_MACF
1603
1604 /*
1605 * Name cache provides authorization caching (see below)
1606 * that will short circuit MAC checks in lookup().
1607 * We must perform the MAC check here. On denial
1608 * dp_authorized will remain 0 and a second check will
1609 * be performed in lookup().
1610 */
1611 if (!(cnp->cn_flags & DONOTAUTH)) {
1612 error = mac_vnode_check_lookup(ctx, dp, cnp);
1613 if (error) {
1614 NAME_CACHE_UNLOCK();
1615 goto errorout;
1616 }
1617 }
1618 #endif /* MAC */
1619 if (ttl_enabled &&
1620 (dp->v_mount->mnt_authcache_ttl == 0 ||
1621 ((tv.tv_sec - dp->v_cred_timestamp) > dp->v_mount->mnt_authcache_ttl))) {
1622 break;
1623 }
1624
1625 /*
1626 * NAME_CACHE_LOCK holds these fields stable
1627 *
1628 * We can't cache KAUTH_VNODE_SEARCHBYANYONE for root correctly
1629 * so we make an ugly check for root here. root is always
1630 * allowed and breaking out of here only to find out that it is
1631 * authorized by virtue of being root is very very expensive.
1632 * However, the check for not root is valid only for filesystems
1633 * which use local authorization.
1634 *
1635 * XXX: Remove the check for root when we can reliably set
1636 * KAUTH_VNODE_SEARCHBYANYONE as root.
1637 */
1638 if ((dp->v_cred != ucred || !(dp->v_authorized_actions & KAUTH_VNODE_SEARCH)) &&
1639 !(dp->v_authorized_actions & KAUTH_VNODE_SEARCHBYANYONE) &&
1640 (ttl_enabled || !vfs_context_issuser(ctx))) {
1641 break;
1642 }
1643
1644 /*
1645 * indicate that we're allowed to traverse this directory...
1646 * even if we fail the cache lookup or decide to bail for
1647 * some other reason, this information is valid and is used
1648 * to avoid doing a vnode_authorize before the call to VNOP_LOOKUP
1649 */
1650 *dp_authorized = 1;
1651
1652 if ((cnp->cn_flags & (ISLASTCN | ISDOTDOT))) {
1653 if (cnp->cn_nameiop != LOOKUP) {
1654 break;
1655 }
1656 if (cnp->cn_flags & LOCKPARENT) {
1657 break;
1658 }
1659 if (cnp->cn_flags & NOCACHE) {
1660 break;
1661 }
1662 if (cnp->cn_flags & ISDOTDOT) {
1663 #if CONFIG_FIRMLINKS
1664 if (dp->v_fmlink && (dp->v_flag & VFMLINKTARGET)) {
1665 dp = dp->v_fmlink;
1666 }
1667 #endif
1668
1669 /*
1670 * Force directory hardlinks to go to
1671 * file system for ".." requests.
1672 */
1673 if ((dp->v_flag & VISHARDLINK)) {
1674 break;
1675 }
1676 /*
1677 * Quit here only if we can't use
1678 * the parent directory pointer or
1679 * don't have one. Otherwise, we'll
1680 * use it below.
1681 */
1682 if ((dp->v_flag & VROOT) ||
1683 dp == ndp->ni_rootdir ||
1684 dp->v_parent == NULLVP) {
1685 break;
1686 }
1687 }
1688 }
1689
1690 if ((cnp->cn_flags & CN_SKIPNAMECACHE)) {
1691 /*
1692 * Force lookup to go to the filesystem with
1693 * all cnp fields set up.
1694 */
1695 break;
1696 }
1697
1698 /*
1699 * "." and ".." aren't supposed to be cached, so check
1700 * for them before checking the cache.
1701 */
1702 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
1703 vp = dp;
1704 } else if ((cnp->cn_flags & ISDOTDOT)) {
1705 /*
1706 * If this is a chrooted process, we need to check if
1707 * the process is trying to break out of its chrooted
1708 * jail. We do that by trying to determine if dp is
1709 * a subdirectory of ndp->ni_rootdir. If we aren't
1710 * able to determine that by the v_parent pointers, we
1711 * will leave the fast path.
1712 *
1713 * Since this function may see dotdot components
1714 * many times and it has the name cache lock held for
1715 * the entire duration, we optimise this by doing this
1716 * check only once per cache_lookup_path call.
1717 * If dotdotchecked is set, it means we've done this
1718 * check once already and don't need to do it again.
1719 */
1720 if (!dotdotchecked && (ndp->ni_rootdir != rootvnode)) {
1721 vnode_t tvp = dp;
1722 boolean_t defer = FALSE;
1723 boolean_t is_subdir = FALSE;
1724
1725 defer = cache_check_vnode_issubdir(tvp,
1726 ndp->ni_rootdir, &is_subdir, &tvp);
1727
1728 if (defer) {
1729 /* defer to Filesystem */
1730 break;
1731 } else if (!is_subdir) {
1732 /*
1733 * This process is trying to break out
1734 * of its chrooted jail, so all its
1735 * dotdot accesses will be translated to
1736 * its root directory.
1737 */
1738 vp = ndp->ni_rootdir;
1739 } else {
1740 /*
1741 * All good, let this dotdot access
1742 * proceed normally
1743 */
1744 vp = dp->v_parent;
1745 }
1746 dotdotchecked = TRUE;
1747 } else {
1748 vp = dp->v_parent;
1749 }
1750 } else {
1751 if ((vp = cache_lookup_locked(dp, cnp)) == NULLVP) {
1752 break;
1753 }
1754
1755 if ((vp->v_flag & VISHARDLINK)) {
1756 /*
1757 * The file system wants a VNOP_LOOKUP on this vnode
1758 */
1759 vp = NULL;
1760 break;
1761 }
1762 }
1763 if ((cnp->cn_flags & ISLASTCN)) {
1764 break;
1765 }
1766
1767 if (vp->v_type != VDIR) {
1768 if (vp->v_type != VLNK) {
1769 vp = NULL;
1770 }
1771 break;
1772 }
1773
1774 if ((mp = vp->v_mountedhere) && ((cnp->cn_flags & NOCROSSMOUNT) == 0)) {
1775 vnode_t tmp_vp = mp->mnt_realrootvp;
1776 if (tmp_vp == NULLVP || mp->mnt_generation != mount_generation ||
1777 mp->mnt_realrootvp_vid != tmp_vp->v_id) {
1778 break;
1779 }
1780 vp = tmp_vp;
1781 }
1782
1783 #if CONFIG_TRIGGERS
1784 /*
1785 * After traversing all mountpoints stacked here, if we have a
1786 * trigger in hand, resolve it. Note that we don't need to
1787 * leave the fast path if the mount has already happened.
1788 */
1789 if (vp->v_resolve) {
1790 break;
1791 }
1792 #endif /* CONFIG_TRIGGERS */
1793
1794
1795 dp = vp;
1796 vp = NULLVP;
1797
1798 cnp->cn_nameptr = ndp->ni_next + 1;
1799 ndp->ni_pathlen--;
1800 while (*cnp->cn_nameptr == '/') {
1801 cnp->cn_nameptr++;
1802 ndp->ni_pathlen--;
1803 }
1804 }
1805 if (vp != NULLVP) {
1806 vvid = vp->v_id;
1807 }
1808 vid = dp->v_id;
1809
1810 NAME_CACHE_UNLOCK();
1811
1812 if ((vp != NULLVP) && (vp->v_type != VLNK) &&
1813 ((cnp->cn_flags & (ISLASTCN | LOCKPARENT | WANTPARENT | SAVESTART)) == ISLASTCN)) {
1814 /*
1815 * if we've got a child and it's the last component, and
1816 * the lookup doesn't need to return the parent then we
1817 * can skip grabbing an iocount on the parent, since all
1818 * we're going to do with it is a vnode_put just before
1819 * we return from 'lookup'. If it's a symbolic link,
1820 * we need the parent in case the link happens to be
1821 * a relative pathname.
1822 */
1823 tdp = dp;
1824 dp = NULLVP;
1825 } else {
1826 need_dp:
1827 /*
1828 * return the last directory we looked at
1829 * with an io reference held. If it was the one passed
1830 * in as a result of the last iteration of VNOP_LOOKUP,
1831 * it should already hold an io ref. No need to increase ref.
1832 */
1833 if (last_dp != dp) {
1834 if (dp == ndp->ni_usedvp) {
1835 /*
1836 * if this vnode matches the one passed in via USEDVP
1837 * then this context already holds an io_count... just
1838 * use vnode_get to get an extra ref for lookup to play
1839 * with... can't use the getwithvid variant here because
1840 * it will block behind a vnode_drain which would result
1841 * in a deadlock (since we already own an io_count that the
1842 * vnode_drain is waiting on)... vnode_get grabs the io_count
1843 * immediately w/o waiting... it always succeeds
1844 */
1845 vnode_get(dp);
1846 } else if ((error = vnode_getwithvid_drainok(dp, vid))) {
1847 /*
1848 * failure indicates the vnode
1849 * changed identity or is being
1850 * TERMINATED... in either case
1851 * punt this lookup.
1852 *
1853 * don't necessarily return ENOENT, though, because
1854 * we really want to go back to disk and make sure it's
1855 * there or not if someone else is changing this
1856 * vnode. That being said, the one case where we do want
1857 * to return ENOENT is when the vnode's mount point is
1858 * in the process of unmounting and we might cause a deadlock
1859 * in our attempt to take an iocount. An ENODEV error return
1860 * from vnode_get* is an indication of this, but we change it to
1861 * ENOENT for the upper layers.
1862 */
1863 if (error == ENODEV) {
1864 error = ENOENT;
1865 } else {
1866 error = ERECYCLE;
1867 }
1868 goto errorout;
1869 }
1870 }
1871 }
1872 if (vp != NULLVP) {
1873 if ((vnode_getwithvid_drainok(vp, vvid))) {
1874 vp = NULLVP;
1875
1876 /*
1877 * can't get reference on the vp we'd like
1878 * to return... if we didn't grab a reference
1879 * on the directory (due to fast path bypass),
1880 * then we need to do it now... we can't return
1881 * with both ni_dvp and ni_vp NULL, and no
1882 * error condition
1883 */
1884 if (dp == NULLVP) {
1885 dp = tdp;
1886 goto need_dp;
1887 }
1888 }
1889 }
1890
1891 ndp->ni_dvp = dp;
1892 ndp->ni_vp = vp;
1893
1894 #if CONFIG_TRIGGERS
1895 trigger_vp = vp ? vp : dp;
1896 if ((error == 0) && (trigger_vp != NULLVP) && vnode_isdir(trigger_vp)) {
1897 error = vnode_trigger_resolve(trigger_vp, ndp, ctx);
1898 if (error) {
1899 if (vp) {
1900 vnode_put(vp);
1901 }
1902 if (dp) {
1903 vnode_put(dp);
1904 }
1905 goto errorout;
1906 }
1907 }
1908 #endif /* CONFIG_TRIGGERS */
1909
1910 errorout:
1911 /*
1912 * If we came into cache_lookup_path after an iteration of the lookup loop that
1913 * resulted in a call to VNOP_LOOKUP, then VNOP_LOOKUP returned a vnode with an io ref
1914 * on it. It is now the job of cache_lookup_path to drop the ref on this vnode
1915 * when it is no longer needed. If we get to this point, and last_dp is not NULL
1916 * and it is ALSO not the dvp we want to return to the caller of this function, it MUST be
1917 * the case that we got to a subsequent path component and this previous vnode is
1918 * no longer needed. We can then drop the io ref on it.
1919 */
1920 if ((last_dp != NULLVP) && (last_dp != ndp->ni_dvp)) {
1921 vnode_put(last_dp);
1922 }
1923
1924 // 'error' was initialized to 0 and is unchanged if no error case occurred.
1925 return error;
1926 }
1927
1928
1929 static vnode_t
1930 cache_lookup_locked(vnode_t dvp, struct componentname *cnp)
1931 {
1932 struct namecache *ncp;
1933 struct nchashhead *ncpp;
1934 long namelen = cnp->cn_namelen;
1935 unsigned int hashval = cnp->cn_hash;
1936
1937 if (nc_disabled) {
1938 return NULL;
1939 }
1940
1941 ncpp = NCHHASH(dvp, cnp->cn_hash);
1942 LIST_FOREACH(ncp, ncpp, nc_hash) {
1943 if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
1944 if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) {
1945 break;
1946 }
1947 }
1948 }
1949 if (ncp == 0) {
1950 /*
1951 * We failed to find an entry
1952 */
1953 NCHSTAT(ncs_miss);
1954 return NULL;
1955 }
1956 NCHSTAT(ncs_goodhits);
1957
1958 return ncp->nc_vp;
1959 }
1960
1961
1962 unsigned int hash_string(const char *cp, int len);
1963 //
1964 // Have to take a len argument because we may only need to
1965 // hash part of a componentname.
1966 //
1967 unsigned int
1968 hash_string(const char *cp, int len)
1969 {
1970 unsigned hash = 0;
1971
1972 if (len) {
1973 while (len--) {
1974 hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
1975 }
1976 } else {
1977 while (*cp != '\0') {
1978 hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
1979 }
1980 }
1981 /*
1982 * the crc generator can legitimately generate
1983 * a 0... however, 0 for us means that we
1984 * haven't computed a hash, so use 1 instead
1985 */
1986 if (hash == 0) {
1987 hash = 1;
1988 }
1989 return hash;
1990 }
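/*
 * Illustrative usage (not from the original source): the len argument
 * allows hashing a single component of a larger path, e.g. "foo" out of
 * "foo/bar", which is how the per-component loop above produces hashes.
 */
#if 0
static unsigned int
example_component_hash(const char *path)
{
	const char *cp = path;

	while (*cp && *cp != '/') {
		cp++;
	}
	return hash_string(path, (int)(cp - path));
}
#endif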
1991
1992
1993 /*
1994 * Lookup an entry in the cache
1995 *
1996 * We don't do this if the segment name is long, simply so the cache
1997 * can avoid holding long names (which would either waste space, or
1998 * add greatly to the complexity).
1999 *
2000 * Lookup is called with dvp pointing to the directory to search,
2001 * cnp pointing to the name of the entry being sought. If the lookup
2002 * succeeds, the vnode is returned in *vpp, and a status of -1 is
2003 * returned. If the lookup determines that the name does not exist
2004 * (negative caching), a status of ENOENT is returned. If the lookup
2005 * fails, a status of zero is returned.
2006 */
2007
2008 int
2009 cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
2010 {
2011 struct namecache *ncp;
2012 struct nchashhead *ncpp;
2013 long namelen = cnp->cn_namelen;
2014 unsigned int hashval;
2015 boolean_t have_exclusive = FALSE;
2016 uint32_t vid;
2017 vnode_t vp;
2018
2019 if (cnp->cn_hash == 0) {
2020 cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2021 }
2022 hashval = cnp->cn_hash;
2023
2024 if (nc_disabled) {
2025 return 0;
2026 }
2027
2028 NAME_CACHE_LOCK_SHARED();
2029
2030 relook:
2031 ncpp = NCHHASH(dvp, cnp->cn_hash);
2032 LIST_FOREACH(ncp, ncpp, nc_hash) {
2033 if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
2034 if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) {
2035 break;
2036 }
2037 }
2038 }
2039 /* We failed to find an entry */
2040 if (ncp == 0) {
2041 NCHSTAT(ncs_miss);
2042 NAME_CACHE_UNLOCK();
2043 return 0;
2044 }
2045
2046 /* the caller doesn't want this entry cached (no MAKEENTRY), so dump it */
2047 if ((cnp->cn_flags & MAKEENTRY) == 0) {
2048 if (have_exclusive == TRUE) {
2049 NCHSTAT(ncs_badhits);
2050 cache_delete(ncp, 1);
2051 NAME_CACHE_UNLOCK();
2052 return 0;
2053 }
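	/*
	 * we only hold the shared lock... drop it and take the
	 * exclusive lock so we can delete the entry; the entry
	 * may have changed while the lock was dropped, so relook
	 */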
2054 NAME_CACHE_UNLOCK();
2055 NAME_CACHE_LOCK();
2056 have_exclusive = TRUE;
2057 goto relook;
2058 }
2059 vp = ncp->nc_vp;
2060
2061 /* We found a "positive" match, return the vnode */
2062 if (vp) {
2063 NCHSTAT(ncs_goodhits);
2064
2065 vid = vp->v_id;
2066 NAME_CACHE_UNLOCK();
2067
2068 if (vnode_getwithvid(vp, vid)) {
2069 #if COLLECT_STATS
2070 NAME_CACHE_LOCK();
2071 NCHSTAT(ncs_badvid);
2072 NAME_CACHE_UNLOCK();
2073 #endif
2074 return 0;
2075 }
2076 *vpp = vp;
2077 return -1;
2078 }
2079
2080 /* We found a negative match and the caller wants to create or rename, so purge it */
2081 if (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) {
2082 if (have_exclusive == TRUE) {
2083 NCHSTAT(ncs_badhits);
2084 cache_delete(ncp, 1);
2085 NAME_CACHE_UNLOCK();
2086 return 0;
2087 }
2088 NAME_CACHE_UNLOCK();
2089 NAME_CACHE_LOCK();
2090 have_exclusive = TRUE;
2091 goto relook;
2092 }
2093
2094 /*
2095 * We found a "negative" match; returning ENOENT notifies the client of it.
2096 */
2097 NCHSTAT(ncs_neghits);
2098
2099 NAME_CACHE_UNLOCK();
2100 return ENOENT;
2101 }
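
/*
 * Illustrative sketch (added; not part of the original source): one way a
 * caller might consume the three-way return convention documented above.
 * "do_slow_lookup" and the surrounding state are hypothetical.
 */
#if 0
static int
example_lookup(vnode_t dvp, vnode_t *vpp, struct componentname *cnp)
{
	vnode_t vp = NULLVP;

	switch (cache_lookup(dvp, &vp, cnp)) {
	case -1:
		/* positive hit: vp is returned with an iocount held */
		*vpp = vp;
		return 0;
	case ENOENT:
		/* negative hit: the name is cached as non-existent */
		return ENOENT;
	default:
		/* 0 == cache miss: fall back to the file system */
		return do_slow_lookup(dvp, vpp, cnp);
	}
}
#endif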
2102
2103 const char *
2104 cache_enter_create(vnode_t dvp, vnode_t vp, struct componentname *cnp)
2105 {
2106 const char *strname;
2107
2108 if (cnp->cn_hash == 0) {
2109 cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2110 }
2111
2112 /*
2113 * grab 2 references on the string entered:
2114 * one for cache_enter_locked to consume
2115 * and the second to be consumed by v_name (vnode_create call point)
2116 */
2117 strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, TRUE, 0);
2118
2119 NAME_CACHE_LOCK();
2120
2121 cache_enter_locked(dvp, vp, cnp, strname);
2122
2123 NAME_CACHE_UNLOCK();
2124
2125 return strname;
2126 }
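
/*
 * Illustrative sketch (added; not part of the original source): the
 * second string reference taken above is intended for the new vnode's
 * v_name at the vnode_create() call point; the plumbing below is
 * simplified and only illustrates who consumes each reference.
 */
#if 0
	const char *strname = cache_enter_create(dvp, vp, cnp);

	vp->v_name = strname;	/* second reference consumed by v_name */
#endif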
2127
2128
2129 /*
2130 * Add an entry to the cache...
2131 * but first check to see if the directory
2132 * that this entry is to be associated with has
2133 * had any cache_purges applied since we took
2134 * our identity snapshot... this check needs to
2135 * be done behind the name cache lock
2136 */
2137 void
2138 cache_enter_with_gen(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int gen)
2139 {
2140 if (cnp->cn_hash == 0) {
2141 cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2142 }
2143
2144 NAME_CACHE_LOCK();
2145
2146 if (dvp->v_nc_generation == gen) {
2147 (void)cache_enter_locked(dvp, vp, cnp, NULL);
2148 }
2149
2150 NAME_CACHE_UNLOCK();
2151 }
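
/*
 * Illustrative sketch (added; not part of the original source): a caller
 * can snapshot v_nc_generation before a blocking VNOP_LOOKUP and let
 * cache_enter_with_gen() silently drop the entry if a purge intervened;
 * locking around the snapshot is elided here.
 */
#if 0
	int gen = dvp->v_nc_generation;			/* identity snapshot */

	error = VNOP_LOOKUP(dvp, &vp, cnp, ctx);	/* may block */
	if (error == 0) {
		cache_enter_with_gen(dvp, vp, cnp, gen);
	}
#endif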
2152
2153
2154 /*
2155 * Add an entry to the cache.
2156 */
2157 void
2158 cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2159 {
2160 const char *strname;
2161
2162 if (cnp->cn_hash == 0) {
2163 cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2164 }
2165
2166 /*
2167 * grab 1 reference on the string entered
2168 * for cache_enter_locked to consume
2169 */
2170 strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0);
2171
2172 NAME_CACHE_LOCK();
2173
2174 cache_enter_locked(dvp, vp, cnp, strname);
2175
2176 NAME_CACHE_UNLOCK();
2177 }
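
/*
 * Illustrative sketch (added; not part of the original source): a file
 * system's lookup slow path might populate the cache on both outcomes,
 * honoring the caller's MAKEENTRY hint; "do_disk_lookup" is a
 * hypothetical fs helper.
 */
#if 0
	if (cnp->cn_flags & MAKEENTRY) {
		if ((error = do_disk_lookup(dvp, cnp, &vp)) == 0) {
			cache_enter(dvp, vp, cnp);	/* positive entry */
		} else if (error == ENOENT) {
			cache_enter(dvp, NULLVP, cnp);	/* negative entry */
		}
	}
#endif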
2178
2179
2180 static void
2181 cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, const char *strname)
2182 {
2183 struct namecache *ncp, *negp;
2184 struct nchashhead *ncpp;
2185
2186 if (nc_disabled) {
2187 return;
2188 }
2189
2190 /*
2191 * if the entry is for negative caching, vp is NULL
2192 */
2193 if ((vp != NULLVP) && (LIST_FIRST(&vp->v_nclinks))) {
2194 /*
2195 * someone beat us to the punch..
2196 * this vnode is already in the cache
2197 */
2198 if (strname != NULL) {
2199 vfs_removename(strname);
2200 }
2201 return;
2202 }
2203 /*
2204 * We allocate a new entry if we are less than the maximum
2205 * allowed and the one at the front of the list is in use.
2206 * Otherwise we use the one at the front of the list.
2207 */
2208 if (numcache < desiredNodes &&
2209 ((ncp = nchead.tqh_first) == NULL ||
2210 ncp->nc_hash.le_prev != 0)) {
2211 /*
2212 * Allocate one more entry
2213 */
2214 ncp = (struct namecache *)_MALLOC_ZONE(sizeof(*ncp), M_CACHE, M_WAITOK);
2215 numcache++;
2216 } else {
2217 /*
2218 * reuse an old entry
2219 */
2220 ncp = TAILQ_FIRST(&nchead);
2221 TAILQ_REMOVE(&nchead, ncp, nc_entry);
2222
2223 if (ncp->nc_hash.le_prev != 0) {
2224 /*
2225 * still in use... we need to
2226 * delete it before re-using it
2227 */
2228 NCHSTAT(ncs_stolen);
2229 cache_delete(ncp, 0);
2230 }
2231 }
2232 NCHSTAT(ncs_enters);
2233
2234 /*
2235 * Fill in cache info, if vp is NULL this is a "negative" cache entry.
2236 */
2237 ncp->nc_vp = vp;
2238 ncp->nc_dvp = dvp;
2239 ncp->nc_hashval = cnp->cn_hash;
2240
2241 if (strname == NULL) {
2242 ncp->nc_name = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0);
2243 } else {
2244 ncp->nc_name = strname;
2245 }
2246
2247 //
2248 // If the bytes of the name being entered differ from the name
2249 // already associated with the vnode, use the vnode's name, since
2250 // the file system may have set that explicitly, e.g. for a lookup
2251 // on a case-insensitive file system where the case of the looked-up
2252 // name differs from what is on disk. For more details, see:
2253 // <rdar://problem/8044697> FSEvents doesn't always decompose diacritical unicode chars in the paths of the changed directories
2254 //
2255 const char *vn_name = vp ? vp->v_name : NULL;
2256 unsigned int len = vn_name ? strlen(vn_name) : 0;
2257 if (vn_name && ncp && ncp->nc_name && strncmp(ncp->nc_name, vn_name, len) != 0) {
2258 unsigned int hash = hash_string(vn_name, len);
2259
2260 vfs_removename(ncp->nc_name);
2261 ncp->nc_name = add_name_internal(vn_name, len, hash, FALSE, 0);
2262 ncp->nc_hashval = hash;
2263 }
2264
2265 /*
2266 * make us the newest entry in the cache
2267 * i.e. we'll be the last to be stolen
2268 */
2269 TAILQ_INSERT_TAIL(&nchead, ncp, nc_entry);
2270
2271 ncpp = NCHHASH(dvp, cnp->cn_hash);
2272 #if DIAGNOSTIC
2273 {
2274 struct namecache *p;
2275
2276 for (p = ncpp->lh_first; p != 0; p = p->nc_hash.le_next) {
2277 if (p == ncp) {
2278 panic("cache_enter: duplicate");
2279 }
2280 }
2281 }
2282 #endif
2283 /*
2284 * make us available to be found via lookup
2285 */
2286 LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
2287
2288 if (vp) {
2289 /*
2290 * add to the list of name cache entries
2291 * that point at vp
2292 */
2293 LIST_INSERT_HEAD(&vp->v_nclinks, ncp, nc_un.nc_link);
2294 } else {
2295 /*
2296 * this is a negative cache entry (vp == NULL)
2297 * stick it on the negative cache list.
2298 */
2299 TAILQ_INSERT_TAIL(&neghead, ncp, nc_un.nc_negentry);
2300
2301 ncs_negtotal++;
2302
2303 if (ncs_negtotal > desiredNegNodes) {
2304 /*
2305 * if we've reached our desired limit
2306 * of negative cache entries, delete
2307 * the oldest
2308 */
2309 negp = TAILQ_FIRST(&neghead);
2310 cache_delete(negp, 1);
2311 }
2312 }
2313 /*
2314 * add us to the list of name cache entries that
2315 * are children of dvp
2316 */
2317 if (vp) {
2318 TAILQ_INSERT_TAIL(&dvp->v_ncchildren, ncp, nc_child);
2319 } else {
2320 TAILQ_INSERT_HEAD(&dvp->v_ncchildren, ncp, nc_child);
2321 }
2322 }
2323
2324
2325 /*
2326 * Initialize CRC-32 remainder table.
2327 */
2328 static void
2329 init_crc32(void)
2330 {
2331 /*
2332 * the CRC-32 generator polynomial is:
2333 * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^10
2334 * + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
2335 */
2336 unsigned int crc32_polynomial = 0x04c11db7;
2337 unsigned int i, j;
2338
2339 /*
2340 * pre-calculate the CRC-32 remainder for each possible octet encoding
2341 */
2342 for (i = 0; i < 256; i++) {
2343 unsigned int crc_rem = i << 24;
2344
2345 for (j = 0; j < 8; j++) {
2346 if (crc_rem & 0x80000000) {
2347 crc_rem = (crc_rem << 1) ^ crc32_polynomial;
2348 } else {
2349 crc_rem = (crc_rem << 1);
2350 }
2351 }
2352 crc32tab[i] = crc_rem;
2353 }
2354 }
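
/*
 * Worked example (added annotation): for i == 1, crc_rem starts at
 * 0x01000000; the first seven shifts just move the bit up to 0x80000000
 * and the eighth shifts it out, XORing in the polynomial, so
 * crc32tab[1] == 0x04c11db7. A handy sanity check that the table was
 * built from the standard CRC-32 polynomial.
 */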
2355
2356
2357 /*
2358 * Name cache initialization, from vfs_init() when we are booting
2359 */
2360 void
2361 nchinit(void)
2362 {
2363 int i;
2364
2365 desiredNegNodes = (desiredvnodes / 10);
2366 desiredNodes = desiredvnodes + desiredNegNodes;
2367
2368 TAILQ_INIT(&nchead);
2369 TAILQ_INIT(&neghead);
2370
2371 init_crc32();
2372
2373 nchashtbl = hashinit(MAX(CONFIG_NC_HASH, (2 * desiredNodes)), M_CACHE, &nchash);
2374 nchashmask = nchash;
2375 nchash++;
2376
2377 init_string_table();
2378
2379 /* Allocate name cache lock group attribute and group */
2380 namecache_lck_grp_attr = lck_grp_attr_alloc_init();
2381
2382 namecache_lck_grp = lck_grp_alloc_init("Name Cache", namecache_lck_grp_attr);
2383
2384 /* Allocate name cache lock attribute */
2385 namecache_lck_attr = lck_attr_alloc_init();
2386
2387 /* Allocate name cache lock */
2388 namecache_rw_lock = lck_rw_alloc_init(namecache_lck_grp, namecache_lck_attr);
2389
2390
2391 /* Allocate string cache lock group attribute and group */
2392 strcache_lck_grp_attr = lck_grp_attr_alloc_init();
2393
2394 strcache_lck_grp = lck_grp_alloc_init("String Cache", strcache_lck_grp_attr);
2395
2396 /* Allocate string cache lock attribute */
2397 strcache_lck_attr = lck_attr_alloc_init();
2398
2399 /* Allocate string cache lock */
2400 strtable_rw_lock = lck_rw_alloc_init(strcache_lck_grp, strcache_lck_attr);
2401
2402 for (i = 0; i < NUM_STRCACHE_LOCKS; i++) {
2403 lck_mtx_init(&strcache_mtx_locks[i], strcache_lck_grp, strcache_lck_attr);
2404 }
2405 }
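
/*
 * Worked example (added annotation, hypothetical numbers): if
 * desiredvnodes were 10000, then desiredNegNodes = 1000 and
 * desiredNodes = 11000, so hashinit() above is asked for
 * MAX(CONFIG_NC_HASH, 22000) buckets. hashinit() returns the bucket
 * mask through its last argument; the code keeps that in nchashmask,
 * then bumps nchash so it holds the bucket count (mask + 1).
 */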
2406
2407 void
2408 name_cache_lock_shared(void)
2409 {
2410 lck_rw_lock_shared(namecache_rw_lock);
2411 }
2412
2413 void
2414 name_cache_lock(void)
2415 {
2416 lck_rw_lock_exclusive(namecache_rw_lock);
2417 }
2418
2419 void
2420 name_cache_unlock(void)
2421 {
2422 lck_rw_done(namecache_rw_lock);
2423 }
2424
2425
2426 int
2427 resize_namecache(int newsize)
2428 {
2429 struct nchashhead *new_table;
2430 struct nchashhead *old_table;
2431 struct nchashhead *old_head, *head;
2432 struct namecache *entry, *next;
2433 uint32_t i, hashval;
2434 int dNodes, dNegNodes, nelements;
2435 u_long new_size, old_size;
2436
2437 if (newsize < 0) {
2438 return EINVAL;
2439 }
2440
2441 dNegNodes = (newsize / 10);
2442 dNodes = newsize + dNegNodes;
2443 // we don't support shrinking yet
2444 if (dNodes <= desiredNodes) {
2445 return 0;
2446 }
2447
2448 if (os_mul_overflow(dNodes, 2, &nelements)) {
2449 return EINVAL;
2450 }
2451
2452 new_table = hashinit(nelements, M_CACHE, &nchashmask);
2453 new_size = nchashmask + 1;
2454
2455 if (new_table == NULL) {
2456 return ENOMEM;
2457 }
2458
2459 NAME_CACHE_LOCK();
2460 // do the switch!
2461 old_table = nchashtbl;
2462 nchashtbl = new_table;
2463 old_size = nchash;
2464 nchash = new_size;
2465
2466 // walk the old table and insert all the entries into
2467 // the new table
2468 //
2469 for (i = 0; i < old_size; i++) {
2470 old_head = &old_table[i];
2471 for (entry = old_head->lh_first; entry != NULL; entry = next) {
2472 //
2473 // XXXdbg - Beware: this assumes that hash_string() does
2474 // the same thing as what happens in
2475 // lookup() over in vfs_lookup.c
2476 hashval = hash_string(entry->nc_name, 0);
2477 entry->nc_hashval = hashval;
2478 head = NCHHASH(entry->nc_dvp, hashval);
2479
2480 next = entry->nc_hash.le_next;
2481 LIST_INSERT_HEAD(head, entry, nc_hash);
2482 }
2483 }
2484 desiredNodes = dNodes;
2485 desiredNegNodes = dNegNodes;
2486
2487 NAME_CACHE_UNLOCK();
2488 FREE(old_table, M_CACHE);
2489
2490 return 0;
2491 }
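
/*
 * Illustrative sketch (added; not part of the original source): how a
 * tunable-handling caller might grow the cache when the vnode limit is
 * raised. That the kern.maxvnodes sysctl path is the usual driver is an
 * assumption here.
 */
#if 0
	desiredvnodes = newlimit;			/* hypothetical new limit */
	(void)resize_namecache(desiredvnodes);		/* returns 0 when not growing */
#endif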
2492
2493 static void
2494 cache_delete(struct namecache *ncp, int free_entry)
2495 {
2496 NCHSTAT(ncs_deletes);
2497
2498 if (ncp->nc_vp) {
2499 LIST_REMOVE(ncp, nc_un.nc_link);
2500 } else {
2501 TAILQ_REMOVE(&neghead, ncp, nc_un.nc_negentry);
2502 ncs_negtotal--;
2503 }
2504 TAILQ_REMOVE(&(ncp->nc_dvp->v_ncchildren), ncp, nc_child);
2505
2506 LIST_REMOVE(ncp, nc_hash);
2507 /*
2508 * this field is used to indicate
2509 * that the entry is in use and
2510 * must be deleted before it can
2511 * be reused...
2512 */
2513 ncp->nc_hash.le_prev = NULL;
2514
2515 vfs_removename(ncp->nc_name);
2516 ncp->nc_name = NULL;
2517 if (free_entry) {
2518 TAILQ_REMOVE(&nchead, ncp, nc_entry);
2519 FREE_ZONE(ncp, sizeof(*ncp), M_CACHE);
2520 numcache--;
2521 }
2522 }
2523
2524
2525 /*
2526 * purge the entry associated with the
2527 * specified vnode from the name cache
2528 */
2529 static void
2530 cache_purge_locked(vnode_t vp, kauth_cred_t *credp)
2531 {
2532 struct namecache *ncp;
2533
2534 *credp = NULL;
2535 if ((LIST_FIRST(&vp->v_nclinks) == NULL) &&
2536 (TAILQ_FIRST(&vp->v_ncchildren) == NULL) &&
2537 (vp->v_cred == NOCRED) &&
2538 (vp->v_parent == NULLVP)) {
2539 return;
2540 }
2541
2542 if (vp->v_parent) {
2543 vp->v_parent->v_nc_generation++;
2544 }
2545
2546 while ((ncp = LIST_FIRST(&vp->v_nclinks))) {
2547 cache_delete(ncp, 1);
2548 }
2549
2550 while ((ncp = TAILQ_FIRST(&vp->v_ncchildren))) {
2551 cache_delete(ncp, 1);
2552 }
2553
2554 /*
2555 * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
2556 */
2557 *credp = vp->v_cred;
2558 vp->v_cred = NOCRED;
2559 vp->v_authorized_actions = 0;
2560 }
2561
2562 void
2563 cache_purge(vnode_t vp)
2564 {
2565 kauth_cred_t tcred = NULL;
2566
2567 if ((LIST_FIRST(&vp->v_nclinks) == NULL) &&
2568 (TAILQ_FIRST(&vp->v_ncchildren) == NULL) &&
2569 (vp->v_cred == NOCRED) &&
2570 (vp->v_parent == NULLVP)) {
2571 return;
2572 }
2573
2574 NAME_CACHE_LOCK();
2575
2576 cache_purge_locked(vp, &tcred);
2577
2578 NAME_CACHE_UNLOCK();
2579
2580 if (tcred && IS_VALID_CRED(tcred)) {
2581 kauth_cred_unref(&tcred);
2582 }
2583 }
2584
2585 /*
2586 * Purge all negative cache entries that are children of the
2587 * given vnode. A case-insensitive file system (or any file
2588 * system that has multiple equivalent names for the same
2589 * directory entry) can use this when creating or renaming
2590 * to remove negative entries that may no longer apply.
2591 */
2592 void
2593 cache_purge_negatives(vnode_t vp)
2594 {
2595 struct namecache *ncp, *next_ncp;
2596
2597 NAME_CACHE_LOCK();
2598
2599 TAILQ_FOREACH_SAFE(ncp, &vp->v_ncchildren, nc_child, next_ncp) {
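		/*
		 * negative entries are inserted at the head of
		 * v_ncchildren (see cache_enter_locked), so the first
		 * positive entry marks the end of the negatives
		 */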
2600 if (ncp->nc_vp) {
2601 break;
2602 }
2603
2604 cache_delete(ncp, 1);
2605 }
2606
2607 NAME_CACHE_UNLOCK();
2608 }
2609
2610 /*
2611 * Flush all entries referencing a particular filesystem.
2612 *
2613 * Since we need to check it anyway, we will flush all the invalid
2614 * entries at the same time.
2615 */
2616 void
2617 cache_purgevfs(struct mount *mp)
2618 {
2619 struct nchashhead *ncpp;
2620 struct namecache *ncp;
2621
2622 NAME_CACHE_LOCK();
2623 /* Scan hash tables for applicable entries */
2624 for (ncpp = &nchashtbl[nchash - 1]; ncpp >= nchashtbl; ncpp--) {
2625 restart:
2626 for (ncp = ncpp->lh_first; ncp != 0; ncp = ncp->nc_hash.le_next) {
2627 if (ncp->nc_dvp->v_mount == mp) {
2628 cache_delete(ncp, 0);
2629 goto restart;
2630 }
2631 }
2632 }
2633 NAME_CACHE_UNLOCK();
2634 }
2635
2636
2637
2638 //
2639 // String ref routines
2640 //
2641 static LIST_HEAD(stringhead, string_t) * string_ref_table;
2642 static u_long string_table_mask;
2643 static uint32_t filled_buckets = 0;
2644
2645
2646 typedef struct string_t {
2647 LIST_ENTRY(string_t) hash_chain;
2648 const char *str;
2649 uint32_t refcount;
2650 } string_t;
2651
2652
2653 static void
2654 resize_string_ref_table(void)
2655 {
2656 struct stringhead *new_table;
2657 struct stringhead *old_table;
2658 struct stringhead *old_head, *head;
2659 string_t *entry, *next;
2660 uint32_t i, hashval;
2661 u_long new_mask, old_mask;
2662
2663 /*
2664 * need to hold the table lock exclusively
2665 * in order to grow the table... need to recheck
2666 * the need to resize again after we've taken
2667 * the lock exclusively in case some other thread
2668 * beat us to the punch
2669 */
2670 lck_rw_lock_exclusive(strtable_rw_lock);
2671
2672 if (4 * filled_buckets < ((string_table_mask + 1) * 3)) {
2673 lck_rw_done(strtable_rw_lock);
2674 return;
2675 }
2676 new_table = hashinit((string_table_mask + 1) * 2, M_CACHE, &new_mask);
2677
2678 if (new_table == NULL) {
2679 printf("failed to resize the hash table.\n");
2680 lck_rw_done(strtable_rw_lock);
2681 return;
2682 }
2683
2684 // do the switch!
2685 old_table = string_ref_table;
2686 string_ref_table = new_table;
2687 old_mask = string_table_mask;
2688 string_table_mask = new_mask;
2689 filled_buckets = 0;
2690
2691 // walk the old table and insert all the entries into
2692 // the new table
2693 //
2694 for (i = 0; i <= old_mask; i++) {
2695 old_head = &old_table[i];
2696 for (entry = old_head->lh_first; entry != NULL; entry = next) {
2697 hashval = hash_string((const char *)entry->str, 0);
2698 head = &string_ref_table[hashval & string_table_mask];
2699 if (head->lh_first == NULL) {
2700 filled_buckets++;
2701 }
2702 next = entry->hash_chain.le_next;
2703 LIST_INSERT_HEAD(head, entry, hash_chain);
2704 }
2705 }
2706 lck_rw_done(strtable_rw_lock);
2707
2708 FREE(old_table, M_CACHE);
2709 }
2710
2711
2712 static void
2713 init_string_table(void)
2714 {
2715 string_ref_table = hashinit(CONFIG_VFS_NAMES, M_CACHE, &string_table_mask);
2716 }
2717
2718
2719 const char *
2720 vfs_addname(const char *name, uint32_t len, u_int hashval, u_int flags)
2721 {
2722 return add_name_internal(name, len, hashval, FALSE, flags);
2723 }
2724
2725
2726 static const char *
2727 add_name_internal(const char *name, uint32_t len, u_int hashval, boolean_t need_extra_ref, __unused u_int flags)
2728 {
2729 struct stringhead *head;
2730 string_t *entry;
2731 uint32_t chain_len = 0;
2732 uint32_t hash_index;
2733 uint32_t lock_index;
2734 char *ptr;
2735
2736 if (len > MAXPATHLEN) {
2737 len = MAXPATHLEN;
2738 }
2739
2740 /*
2741 * if the length already accounts for the null-byte, then
2742 * subtract one so later on we don't index past the end
2743 * of the string.
2744 */
2745 if (len > 0 && name[len - 1] == '\0') {
2746 len--;
2747 }
2748 if (hashval == 0) {
2749 hashval = hash_string(name, len);
2750 }
2751
2752 /*
2753 * take this lock 'shared' to keep the hash stable
2754 * if someone else decides to grow the pool they
2755 * will take this lock exclusively
2756 */
2757 lck_rw_lock_shared(strtable_rw_lock);
2758
2759 /*
2760 * If the table gets more than 3/4 full, resize it
2761 */
2762 if (4 * filled_buckets >= ((string_table_mask + 1) * 3)) {
2763 lck_rw_done(strtable_rw_lock);
2764
2765 resize_string_ref_table();
2766
2767 lck_rw_lock_shared(strtable_rw_lock);
2768 }
2769 hash_index = hashval & string_table_mask;
2770 lock_index = hash_index % NUM_STRCACHE_LOCKS;
2771
2772 head = &string_ref_table[hash_index];
2773
2774 lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]);
2775
2776 for (entry = head->lh_first; entry != NULL; chain_len++, entry = entry->hash_chain.le_next) {
2777 if (strncmp(entry->str, name, len) == 0 && entry->str[len] == 0) {
2778 entry->refcount++;
2779 break;
2780 }
2781 }
2782 if (entry == NULL) {
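		/*
		 * convert the spin-mode mutex to a full mutex first:
		 * MALLOC with M_WAITOK may block, which is not allowed
		 * while holding a lock in spin mode
		 */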
2783 lck_mtx_convert_spin(&strcache_mtx_locks[lock_index]);
2784 /*
2785 * it wasn't already there so add it.
2786 */
2787 MALLOC(entry, string_t *, sizeof(string_t) + len + 1, M_TEMP, M_WAITOK);
2788
2789 if (head->lh_first == NULL) {
2790 OSAddAtomic(1, &filled_buckets);
2791 }
2792 ptr = (char *)((char *)entry + sizeof(string_t));
2793 strncpy(ptr, name, len);
2794 ptr[len] = '\0';
2795 entry->str = ptr;
2796 entry->refcount = 1;
2797 LIST_INSERT_HEAD(head, entry, hash_chain);
2798 }
2799 if (need_extra_ref == TRUE) {
2800 entry->refcount++;
2801 }
2802
2803 lck_mtx_unlock(&strcache_mtx_locks[lock_index]);
2804 lck_rw_done(strtable_rw_lock);
2805
2806 return (const char *)entry->str;
2807 }
2808
2809
2810 int
2811 vfs_removename(const char *nameref)
2812 {
2813 struct stringhead *head;
2814 string_t *entry;
2815 uint32_t hashval;
2816 uint32_t hash_index;
2817 uint32_t lock_index;
2818 int retval = ENOENT;
2819
2820 hashval = hash_string(nameref, 0);
2821
2822 /*
2823 * take this lock 'shared' to keep the hash stable
2824 * if someone else decides to grow the pool they
2825 * will take this lock exclusively
2826 */
2827 lck_rw_lock_shared(strtable_rw_lock);
2828 /*
2829 * must compute the head behind the table lock
2830 * since the size and location of the table
2831 * can change on the fly
2832 */
2833 hash_index = hashval & string_table_mask;
2834 lock_index = hash_index % NUM_STRCACHE_LOCKS;
2835
2836 head = &string_ref_table[hash_index];
2837
2838 lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]);
2839
2840 for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) {
2841 if (entry->str == nameref) {
2842 entry->refcount--;
2843
2844 if (entry->refcount == 0) {
2845 LIST_REMOVE(entry, hash_chain);
2846
2847 if (head->lh_first == NULL) {
2848 OSAddAtomic(-1, &filled_buckets);
2849 }
2850 } else {
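			/*
			 * still referenced... clear entry so the
			 * FREE below is skipped
			 */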
2851 entry = NULL;
2852 }
2853 retval = 0;
2854 break;
2855 }
2856 }
2857 lck_mtx_unlock(&strcache_mtx_locks[lock_index]);
2858 lck_rw_done(strtable_rw_lock);
2859
2860 if (entry != NULL) {
2861 FREE(entry, M_TEMP);
2862 }
2863
2864 return retval;
2865 }
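
/*
 * Illustrative sketch (added; not part of the original source): the table
 * interns names by reference count, so equal strings share one buffer;
 * vfs_addname() returns the interned pointer and vfs_removename() drops
 * one reference, freeing the buffer when the count reaches zero.
 */
#if 0
	const char *n1 = vfs_addname("example", 7, 0, 0);
	const char *n2 = vfs_addname("example", 7, 0, 0);

	/* n1 == n2: pointer equality identifies the same interned name */
	(void)vfs_removename(n1);
	(void)vfs_removename(n2);	/* second removal frees the entry */
#endif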
2866
2867
2868 #ifdef DUMP_STRING_TABLE
2869 void
2870 dump_string_table(void)
2871 {
2872 struct stringhead *head;
2873 string_t *entry;
2874 u_long i;
2875
2876 lck_rw_lock_shared(strtable_rw_lock);
2877
2878 for (i = 0; i <= string_table_mask; i++) {
2879 head = &string_ref_table[i];
2880 for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) {
2881 printf("%6d - %s\n", entry->refcount, entry->str);
2882 }
2883 }
2884 lck_rw_done(strtable_rw_lock);
2885 }
2886 #endif /* DUMP_STRING_TABLE */