/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kauth.h>
#include <sys/user.h>

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (vp, name) where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */

/*
 * Structures associated with name cacheing.
 */

LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
u_long	nchashmask;
u_long	nchash;				/* size of hash table - 1 */
long	numcache;			/* number of cache entries allocated */
int 	desiredNodes;
int 	desiredNegNodes;
TAILQ_HEAD(, namecache) nchead;		/* chain of all name cache entries */
TAILQ_HEAD(, namecache) neghead;	/* chain of only negative cache entries */
struct	nchstats nchstats;		/* cache effectiveness statistics */

/* vars for name cache list lock */
lck_grp_t * namecache_lck_grp;
lck_grp_attr_t * namecache_lck_grp_attr;
lck_attr_t * namecache_lck_attr;
lck_mtx_t * namecache_mtx_lock;

static vnode_t cache_lookup_locked(vnode_t dvp, struct componentname *cnp);
static int  remove_name_locked(const char *);
static char *add_name_locked(const char *, size_t, u_int, u_int);
static void init_string_table(void);
static void cache_delete(struct namecache *, int);
static void dump_string_table(void);

static void init_crc32(void);
static unsigned int crc32tab[256];


#define NCHHASH(dvp, hash_val) \
	(&nchashtbl[(dvp->v_id ^ (hash_val)) & nchashmask])
//
// This function builds the path to a filename in "buff".  The
// length of the buffer *INCLUDING* the trailing zero byte is
// returned in outlen.  NOTE: the length includes the trailing
// zero byte and thus the length is one greater than what strlen
// would return.  This is important and lots of code elsewhere
// in the kernel assumes this behavior.
//
// The path is assembled back-to-front in 'buff' by walking the
// v_parent chain under the name cache lock, then slid down to the
// start of the buffer before returning.  Returns 0 on success,
// EINVAL if a vnode in the chain has no cached name, or ENOSPC if
// 'buff' (of size 'buflen') is too small.
//
int
build_path(vnode_t first_vp, char *buff, int buflen, int *outlen)
{
	vnode_t vp = first_vp;
	char *end, *str;
	int   len, ret = 0, counter = 0;

	/* start at the far end of the buffer and build backwards */
	end = &buff[buflen-1];
	*end = '\0';

	/*
	 * if this is the root dir of a file system...
	 */
	if (vp && (vp->v_flag & VROOT) && vp->v_mount) {
		/*
		 * then if it's the root fs, just put in a '/' and get out of here
		 */
		if (vp->v_mount->mnt_flag & MNT_ROOTFS) {
			*--end = '/';
			goto out;
		} else {
			/*
			 * else just use the covered vnode to get the mount path
			 */
			vp = vp->v_mount->mnt_vnodecovered;
		}
	}
	/* v_parent and v_name are stable while the name cache lock is held */
	name_cache_lock();

	while (vp && vp->v_parent != vp) {
		/*
		 * the maximum depth of a file system hierarchy is MAXPATHLEN/2
		 * (with single-char names separated by slashes).  we panic if
		 * we've ever looped more than that.
		 */
		if (counter++ > MAXPATHLEN/2) {
			/* NOTE(review): "%x" with a pointer argument is only correct on
			 * 32-bit; "%p" would be the portable spec — confirm target ABI. */
			panic("build_path: vnode parent chain is too long! vp 0x%x\n", vp);
		}
		str = vp->v_name;

		if (str == NULL) {
			if (vp->v_parent != NULL) {
				/* a nameless vnode mid-chain means the full path is unknowable */
				ret = EINVAL;
			}
			break;
		}
		len = strlen(str);

		/*
		 * check that there's enough space (make sure to include space for the '/')
		 */
		if ((end - buff) < (len + 1)) {
			ret = ENOSPC;
			break;
		}
		/*
		 * copy it backwards
		 */
		str += len;

		for (; len > 0; len--) {
			*--end = *--str;
		}
		/*
		 * put in the path separator
		 */
		*--end = '/';

		/*
		 * walk up the chain (as long as we're not the root)
		 */
		if (vp == first_vp && (vp->v_flag & VROOT)) {
			if (vp->v_mount && vp->v_mount->mnt_vnodecovered) {
				vp = vp->v_mount->mnt_vnodecovered->v_parent;
			} else {
				vp = NULLVP;
			}
		} else {
			vp = vp->v_parent;
		}
		/*
		 * check if we're crossing a mount point and
		 * switch the vp if we are.
		 */
		if (vp && (vp->v_flag & VROOT) && vp->v_mount) {
			vp = vp->v_mount->mnt_vnodecovered;
		}
	}
	name_cache_unlock();
out:
	/*
	 * slide it down to the beginning of the buffer
	 */
	memmove(buff, end, &buff[buflen] - end);

	*outlen = &buff[buflen] - end;	// length includes the trailing zero byte

	return ret;
}
238 | ||
239 | ||
/*
 * return NULLVP if vp's parent doesn't
 * exist, or we can't get a valid iocount
 * else return the parent of vp
 */
vnode_t
vnode_getparent(vnode_t vp)
{
	vnode_t pvp = NULLVP;
	int	pvid;		/* parent's v_id snapshot, taken under the lock */

	name_cache_lock();
	/*
	 * v_parent is stable behind the name_cache lock
	 * however, the only thing we can really guarantee
	 * is that we've grabbed a valid iocount on the
	 * parent of 'vp' at the time we took the name_cache lock...
	 * once we drop the lock, vp could get re-parented
	 */
	if ( (pvp = vp->v_parent) != NULLVP ) {
		pvid = pvp->v_id;

		name_cache_unlock();

		/* re-validate by vid: fails if the parent was recycled meanwhile */
		if (vnode_getwithvid(pvp, pvid) != 0)
			pvp = NULL;
	} else
		name_cache_unlock();

	return (pvp);
}
271 | ||
272 | char * | |
273 | vnode_getname(vnode_t vp) | |
274 | { | |
275 | char *name = NULL; | |
276 | ||
277 | name_cache_lock(); | |
278 | ||
279 | if (vp->v_name) | |
280 | name = add_name_locked(vp->v_name, strlen(vp->v_name), 0, 0); | |
281 | name_cache_unlock(); | |
282 | ||
283 | return (name); | |
284 | } | |
285 | ||
/*
 * Release a string reference previously obtained via vnode_getname().
 */
void
vnode_putname(char *name)
{
	name_cache_lock();

	/* drops one reference; the string is freed when the count hits zero */
	remove_name_locked(name);

	name_cache_unlock();
}
295 | ||
296 | ||
/*
 * if VNODE_UPDATE_PARENT, and we can take
 * a reference on dvp, then update vp with
 * it's new parent... if vp already has a parent,
 * then drop the reference vp held on it
 *
 * if VNODE_UPDATE_NAME,
 * then drop string ref on v_name if it exists, and if name is non-NULL
 * then pick up a string reference on name and record it in v_name...
 * optionally pass in the length and hashval of name if known
 *
 * if VNODE_UPDATE_CACHE, flush the name cache entries associated with vp
 */
void
vnode_update_identity(vnode_t vp, vnode_t dvp, char *name, int name_len, int name_hashval, int flags)
{
	struct	namecache *ncp;
	vnode_t	old_parentvp = NULLVP;


	if (flags & VNODE_UPDATE_PARENT) {
		/* take the new parent ref up front; on failure fall back to no parent */
		if (dvp && vnode_ref(dvp) != 0)
			dvp = NULLVP;
	} else
		dvp = NULLVP;
	name_cache_lock();

	if ( (flags & VNODE_UPDATE_NAME) && (name != vp->v_name) ) {
		if (vp->v_name != NULL) {
			remove_name_locked(vp->v_name);
			vp->v_name = NULL;
		}
		if (name && *name) {
			if (name_len == 0)
				name_len = strlen(name);
			vp->v_name = add_name_locked(name, name_len, name_hashval, 0);
		}
	}
	if (flags & VNODE_UPDATE_PARENT) {
		if (dvp != vp && dvp != vp->v_parent) {
			old_parentvp = vp->v_parent;
			vp->v_parent = dvp;
			/* ownership of dvp's ref transferred to vp; don't rele it below */
			dvp = NULLVP;

			if (old_parentvp)
				flags |= VNODE_UPDATE_CACHE;
		}
	}
	if (flags & VNODE_UPDATE_CACHE) {
		while ( (ncp = LIST_FIRST(&vp->v_nclinks)) )
			cache_delete(ncp, 1);
	}
	name_cache_unlock();

	/* non-NULL here only if the parent ref was taken but never consumed */
	if (dvp != NULLVP)
		vnode_rele(dvp);

	if (old_parentvp) {
		struct	uthread *ut;

		ut = get_bsdthread_info(current_thread());

		/*
		 * indicated to vnode_rele that it shouldn't do a
		 * vnode_reclaim at this time... instead it will
		 * chain the vnode to the uu_vreclaims list...
		 * we'll be responsible for calling vnode_reclaim
		 * on each of the vnodes in this list...
		 */
		ut->uu_defer_reclaims = 1;
		ut->uu_vreclaims = NULLVP;

		while ( (vp = old_parentvp) != NULLVP ) {

			vnode_lock(vp);

			vnode_rele_internal(vp, 0, 0, 1);

			/*
			 * check to see if the vnode is now in the state
			 * that would have triggered a vnode_reclaim in vnode_rele
			 * if it is, we save it's parent pointer and then NULL
			 * out the v_parent field... we'll drop the reference
			 * that was held on the next iteration of this loop...
			 * this short circuits a potential deep recursion if we
			 * have a long chain of parents in this state...
			 * we'll sit in this loop until we run into
			 * a parent in this chain that is not in this state
			 *
			 * make our check and the node_rele atomic
			 * with respect to the current vnode we're working on
			 * by holding the vnode lock
			 * if vnode_rele deferred the vnode_reclaim and has put
			 * this vnode on the list to be reaped by us, than
			 * it has left this vnode with an iocount == 1
			 */
			if ( (vp->v_iocount == 1) && (vp->v_usecount == 0) &&
			     ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
				/*
				 * vnode_rele wanted to do a vnode_reclaim on this vnode
				 * it should be sitting on the head of the uu_vreclaims chain
				 * pull the parent pointer now so that when we do the
				 * vnode_reclaim for each of the vnodes in the uu_vreclaims
				 * list, we won't recurse back through here
				 */
				name_cache_lock();
				old_parentvp = vp->v_parent;
				vp->v_parent = NULLVP;
				name_cache_unlock();
			} else {
				/*
				 * we're done... we ran into a vnode that isn't
				 * being terminated
				 */
				old_parentvp = NULLVP;
			}
			vnode_unlock(vp);
		}
		ut->uu_defer_reclaims = 0;

		while ( (vp = ut->uu_vreclaims) != NULLVP) {
			ut->uu_vreclaims = vp->v_defer_reclaimlist;

			/*
			 * vnode_put will drive the vnode_reclaim if
			 * we are still the only reference on this vnode
			 */
			vnode_put(vp);
		}
	}
}
428 | ||
429 | ||
/*
 * Mark a vnode as having multiple hard links.  HFS makes use of this
 * because it keeps track of each link separately, and wants to know
 * which link was actually used.
 *
 * This will cause the name cache to force a VNOP_LOOKUP on the vnode
 * so that HFS can post-process the lookup.  Also, volfs will call
 * VNOP_GETATTR2 to determine the parent, instead of using v_parent.
 */
void vnode_set_hard_link(vnode_t vp)
{
	vnode_lock(vp);

	/*
	 * In theory, we're changing the vnode's identity as far as the
	 * name cache is concerned, so we ought to grab the name cache lock
	 * here.  However, there is already a race, and grabbing the name
	 * cache lock only makes the race window slightly smaller.
	 *
	 * The race happens because the vnode already exists in the name
	 * cache, and could be found by one thread before another thread
	 * can set the hard link flag.
	 */

	/* VISHARDLINK is checked by cache_lookup_locked to force a real lookup */
	vp->v_flag |= VISHARDLINK;

	vnode_unlock(vp);
}
458 | ||
459 | ||
/*
 * Drop the cached credential (used for the authorization fast path)
 * from vp, releasing the reference it held.
 */
void vnode_uncache_credentials(vnode_t vp)
{
	kauth_cred_t ucred = NULL;

	/* unlocked pre-check; re-read v_cred under the vnode lock below */
	if (vp->v_cred) {
		vnode_lock(vp);

		ucred = vp->v_cred;
		vp->v_cred = NULL;

		vnode_unlock(vp);

		/* drop the ref outside the lock; may be NULL if we raced */
		if (ucred)
			kauth_cred_rele(ucred);
	}
}
476 | ||
477 | ||
/*
 * Cache the context's credential on vp and refresh the timestamp used
 * to expire the cached authorization (see VCRED_EXPIRED checks in the
 * lookup paths).  Any previously cached credential is released.
 */
void vnode_cache_credentials(vnode_t vp, vfs_context_t context)
{
	kauth_cred_t ucred;
	kauth_cred_t tcred = NOCRED;
	struct timeval tv;

	ucred = vfs_context_ucred(context);

	/* refresh when the cred changed, or always on auth-opaque mounts
	 * (e.g. network filesystems) so the timestamp keeps advancing */
	if (vp->v_cred != ucred || (vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE)) {
		vnode_lock(vp);

		microuptime(&tv);
		vp->v_cred_timestamp = tv.tv_sec;

		if (vp->v_cred != ucred) {
			kauth_cred_ref(ucred);

			/* swap under the lock; release the old cred after unlocking */
			tcred = vp->v_cred;
			vp->v_cred = ucred;
		}
		vnode_unlock(vp);

		if (tcred)
			kauth_cred_rele(tcred);
	}
}
504 | ||
/* reverse_lookup - lookup by walking back up the parent chain while leveraging
 * use of the name cache lock in order to protect our starting vnode.
 * NOTE - assumes you already have search access to starting point.
 * returns 0 when we have reached the root, current working dir, or chroot root
 *
 * On success (return 0) *lookup_vpp is the terminating vnode; on failure
 * (return -1) it is the last authorized ancestor reached (with an iocount
 * taken via vnode_getwithvid when it differs from start_vp).  *dp_authorized
 * reports whether the final directory's cached credential still authorizes
 * traversal, letting the caller skip vnode_authorize.
 */
int
reverse_lookup(vnode_t start_vp, vnode_t *lookup_vpp, struct filedesc *fdp, vfs_context_t context, int *dp_authorized)
{
	int vid, done = 0;
	int auth_opaque = 0;
	vnode_t dp = start_vp;
	vnode_t vp = NULLVP;
	kauth_cred_t ucred;
	struct timeval tv;

	ucred = vfs_context_ucred(context);
	*lookup_vpp = start_vp;

	name_cache_lock();

	/* auth-opaque mounts require the cached cred to be fresh, not just equal */
	if ( dp->v_mount && (dp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) ) {
		auth_opaque = 1;
		microuptime(&tv);
	}
	for (;;) {
		*dp_authorized = 0;

		/* bail if the cached authorization is stale or for a different cred */
		if (auth_opaque && ((tv.tv_sec - dp->v_cred_timestamp) > VCRED_EXPIRED))
			break;
		if (dp->v_cred != ucred)
			break;
		/*
		 * indicate that we're allowed to traverse this directory...
		 * even if we bail for some reason, this information is valid and is used
		 * to avoid doing a vnode_authorize
		 */
		*dp_authorized = 1;

		if ((dp->v_flag & VROOT) != 0 ||	/* Hit "/" */
		    (dp == fdp->fd_cdir) ||		/* Hit process's working directory */
		    (dp == fdp->fd_rdir)) {		/* Hit process chroot()-ed root */
			done = 1;
			break;
		}

		if ( (vp = dp->v_parent) == NULLVP)
			break;

		dp = vp;
		*lookup_vpp = dp;
	} /* for (;;) */

	/* snapshot the vid under the lock for post-unlock validation */
	vid = dp->v_id;

	name_cache_unlock();

	if (done == 0 && dp != start_vp) {
		if (vnode_getwithvid(dp, vid) != 0) {
			/* dp was recycled; fall back to the caller's starting point */
			*lookup_vpp = start_vp;
		}
	}

	return((done == 1) ? 0 : -1);
}
570 | ||
/*
 * Fast-path component-by-component lookup through the name cache.
 *
 * Consumes as many leading components of cnp/ndp as can be resolved
 * from the cache while the cached credential on each directory still
 * authorizes traversal.  On return, ndp->ni_dvp is the last directory
 * reached (with an iocount, unless the fast-path bypass applies) and
 * ndp->ni_vp is the resolved child, if any.  *dp_authorized tells the
 * caller whether a vnode_authorize on ni_dvp can be skipped.  Also
 * computes cn_hash/cn_namelen, collapses redundant slashes, and sets
 * ISLASTCN/ISDOTDOT/*trailing_slash as a side effect.
 * Returns 0 on success, ENOENT if the directory vnode changed identity.
 */
int
cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, vfs_context_t context, int *trailing_slash, int *dp_authorized)
{
	char	*cp;		/* pointer into pathname argument */
	int	vid, vvid;
	int	auth_opaque = 0;
	vnode_t	vp = NULLVP;
	vnode_t	tdp = NULLVP;
	kauth_cred_t ucred;
	struct timeval tv;
	unsigned int hash;

	ucred = vfs_context_ucred(context);
	*trailing_slash = 0;

	name_cache_lock();


	if ( dp->v_mount && (dp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) ) {
		auth_opaque = 1;
		microuptime(&tv);
	}
	for (;;) {
		/*
		 * Search a directory.
		 *
		 * The cn_hash value is for use by cache_lookup
		 * The last component of the filename is left accessible via
		 * cnp->cn_nameptr for callers that need the name.
		 */
		hash = 0;
		cp = cnp->cn_nameptr;

		while (*cp && (*cp != '/')) {
			hash ^= crc32tab[((hash >> 24) ^ (unsigned char)*cp++)];
		}
		/*
		 * the crc generator can legitimately generate
		 * a 0... however, 0 for us means that we
		 * haven't computed a hash, so use 1 instead
		 */
		if (hash == 0)
			hash = 1;
		cnp->cn_hash = hash;
		cnp->cn_namelen = cp - cnp->cn_nameptr;

		ndp->ni_pathlen -= cnp->cn_namelen;
		ndp->ni_next = cp;

		/*
		 * Replace multiple slashes by a single slash and trailing slashes
		 * by a null.  This must be done before VNOP_LOOKUP() because some
		 * fs's don't know about trailing slashes.  Remember if there were
		 * trailing slashes to handle symlinks, existing non-directories
		 * and non-existing files that won't be directories specially later.
		 */
		while (*cp == '/' && (cp[1] == '/' || cp[1] == '\0')) {
			cp++;
			ndp->ni_pathlen--;

			if (*cp == '\0') {
				*trailing_slash = 1;
				*ndp->ni_next = '\0';
			}
		}
		ndp->ni_next = cp;

		cnp->cn_flags &= ~(MAKEENTRY | ISLASTCN | ISDOTDOT);

		if (*cp == '\0')
			cnp->cn_flags |= ISLASTCN;

		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.')
			cnp->cn_flags |= ISDOTDOT;

		*dp_authorized = 0;

		/* cached credential must be fresh on auth-opaque mounts */
		if (auth_opaque && ((tv.tv_sec - dp->v_cred_timestamp) > VCRED_EXPIRED))
			break;

		if (dp->v_cred != ucred)
			break;
		/*
		 * indicate that we're allowed to traverse this directory...
		 * even if we fail the cache lookup or decide to bail for
		 * some other reason, this information is valid and is used
		 * to avoid doing a vnode_authorize before the call to VNOP_LOOKUP
		 */
		*dp_authorized = 1;

		if ( (cnp->cn_flags & (ISLASTCN | ISDOTDOT)) ) {
			if (cnp->cn_nameiop != LOOKUP)
				break;
			if (cnp->cn_flags & (LOCKPARENT | NOCACHE))
				break;
			if (cnp->cn_flags & ISDOTDOT) {
				/*
				 * Quit here only if we can't use
				 * the parent directory pointer or
				 * don't have one.  Otherwise, we'll
				 * use it below.
				 */
				if ((dp->v_flag & VROOT)  ||
				    dp->v_parent == NULLVP)
					break;
			}
		}

		/*
		 * "." and ".." aren't supposed to be cached, so check
		 * for them before checking the cache.
		 */
		if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.')
			vp = dp;
		else if (cnp->cn_flags & ISDOTDOT)
			vp = dp->v_parent;
		else {
			if ( (vp = cache_lookup_locked(dp, cnp)) == NULLVP)
				break;
		}

		if ( (cnp->cn_flags & ISLASTCN) )
			break;

		if (vp->v_type != VDIR) {
			if (vp->v_type != VLNK)
				vp = NULL;
			break;
		}
		/* stop at mount points unless we're allowed to cross them */
		if (vp->v_mountedhere && ((cnp->cn_flags & NOCROSSMOUNT) == 0))
			break;

		dp = vp;
		vp = NULLVP;

		/* advance to the next component, skipping separator slashes */
		cnp->cn_nameptr = ndp->ni_next + 1;
		ndp->ni_pathlen--;
		while (*cnp->cn_nameptr == '/') {
			cnp->cn_nameptr++;
			ndp->ni_pathlen--;
		}
	}
	/* snapshot vids under the lock so the vnodes can be validated after unlock */
	if (vp != NULLVP)
		vvid = vp->v_id;
	vid = dp->v_id;

	name_cache_unlock();


	if ((vp != NULLVP) && (vp->v_type != VLNK) &&
	    ((cnp->cn_flags & (ISLASTCN | LOCKPARENT | WANTPARENT | SAVESTART)) == ISLASTCN)) {
		/*
		 * if we've got a child and it's the last component, and
		 * the lookup doesn't need to return the parent then we
		 * can skip grabbing an iocount on the parent, since all
		 * we're going to do with it is a vnode_put just before
		 * we return from 'lookup'.  If it's a symbolic link,
		 * we need the parent in case the link happens to be
		 * a relative pathname.
		 */
		tdp = dp;
		dp = NULLVP;
	} else {
need_dp:
		/*
		 * return the last directory we looked at
		 * with an io reference held
		 */
		if (dp == ndp->ni_usedvp) {
			/*
			 * if this vnode matches the one passed in via USEDVP
			 * than this context already holds an io_count... just
			 * use vnode_get to get an extra ref for lookup to play
			 * with... can't use the getwithvid variant here because
			 * it will block behind a vnode_drain which would result
			 * in a deadlock (since we already own an io_count that the
			 * vnode_drain is waiting on)... vnode_get grabs the io_count
			 * immediately w/o waiting... it always succeeds
			 */
			vnode_get(dp);
		} else if ( (vnode_getwithvid(dp, vid)) ) {
			/*
			 * failure indicates the vnode
			 * changed identity or is being
			 * TERMINATED... in either case
			 * punt this lookup
			 */
			return (ENOENT);
		}
	}
	if (vp != NULLVP) {
		if ( (vnode_getwithvid(vp, vvid)) ) {
			vp = NULLVP;

			/*
			 * can't get reference on the vp we'd like
			 * to return... if we didn't grab a reference
			 * on the directory (due to fast path bypass),
			 * then we need to do it now... we can't return
			 * with both ni_dvp and ni_vp NULL, and no
			 * error condition
			 */
			if (dp == NULLVP) {
				dp = tdp;
				goto need_dp;
			}
		}
	}
	ndp->ni_dvp = dp;
	ndp->ni_vp  = vp;

	return (0);
}
784 | ||
785 | ||
786 | static vnode_t | |
787 | cache_lookup_locked(vnode_t dvp, struct componentname *cnp) | |
788 | { | |
789 | register struct namecache *ncp; | |
790 | register struct nchashhead *ncpp; | |
791 | register long namelen = cnp->cn_namelen; | |
792 | char *nameptr = cnp->cn_nameptr; | |
793 | unsigned int hashval = (cnp->cn_hash & NCHASHMASK); | |
794 | vnode_t vp; | |
795 | ||
796 | ncpp = NCHHASH(dvp, cnp->cn_hash); | |
797 | LIST_FOREACH(ncp, ncpp, nc_hash) { | |
798 | if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) { | |
799 | if (memcmp(ncp->nc_name, nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) | |
800 | break; | |
801 | } | |
802 | } | |
803 | if (ncp == 0) | |
804 | /* | |
805 | * We failed to find an entry | |
806 | */ | |
807 | return (NULL); | |
808 | ||
809 | vp = ncp->nc_vp; | |
810 | if (vp && (vp->v_flag & VISHARDLINK)) { | |
811 | /* | |
812 | * The file system wants a VNOP_LOOKUP on this vnode | |
813 | */ | |
814 | vp = NULL; | |
815 | } | |
816 | ||
817 | return (vp); | |
818 | } | |
819 | ||
820 | ||
821 | // | |
822 | // Have to take a len argument because we may only need to | |
823 | // hash part of a componentname. | |
824 | // | |
825 | static unsigned int | |
826 | hash_string(const char *cp, int len) | |
827 | { | |
828 | unsigned hash = 0; | |
829 | ||
830 | if (len) { | |
831 | while (len--) { | |
832 | hash ^= crc32tab[((hash >> 24) ^ (unsigned char)*cp++)]; | |
833 | } | |
834 | } else { | |
835 | while (*cp != '\0') { | |
836 | hash ^= crc32tab[((hash >> 24) ^ (unsigned char)*cp++)]; | |
837 | } | |
838 | } | |
839 | /* | |
840 | * the crc generator can legitimately generate | |
841 | * a 0... however, 0 for us means that we | |
842 | * haven't computed a hash, so use 1 instead | |
843 | */ | |
844 | if (hash == 0) | |
845 | hash = 1; | |
846 | return hash; | |
847 | } | |
848 | ||
849 | ||
850 | /* | |
851 | * Lookup an entry in the cache | |
852 | * | |
853 | * We don't do this if the segment name is long, simply so the cache | |
854 | * can avoid holding long names (which would either waste space, or | |
855 | * add greatly to the complexity). | |
856 | * | |
857 | * Lookup is called with dvp pointing to the directory to search, | |
858 | * cnp pointing to the name of the entry being sought. If the lookup | |
859 | * succeeds, the vnode is returned in *vpp, and a status of -1 is | |
860 | * returned. If the lookup determines that the name does not exist | |
861 | * (negative cacheing), a status of ENOENT is returned. If the lookup | |
862 | * fails, a status of zero is returned. | |
863 | */ | |
864 | ||
/*
 * cache_lookup: look up a name in directory dvp's name cache.
 * Return values (see the comment block above this function):
 *   -1     positive hit; *vpp holds the vnode, with an iocount
 *          taken via vnode_getwithvid()
 *   ENOENT negative hit (name known not to exist)
 *   0      miss, stale vnode id, or entry intentionally discarded
 */
int
cache_lookup(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	register struct namecache *ncp;
	register struct nchashhead *ncpp;
	register long namelen = cnp->cn_namelen;
	char *nameptr = cnp->cn_nameptr;
	unsigned int hashval = (cnp->cn_hash & NCHASHMASK);
	uint32_t vid;
	vnode_t vp;

	name_cache_lock();

	/* search the hash chain for an entry matching (dvp, name) */
	ncpp = NCHHASH(dvp, cnp->cn_hash);
	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
			/* nc_name is NUL-terminated, so this also checks equal length */
			if (memcmp(ncp->nc_name, nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0)
				break;
		}
	}
	/* We failed to find an entry */
	if (ncp == 0) {
		nchstats.ncs_miss++;
		name_cache_unlock();
		return (0);
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		nchstats.ncs_badhits++;
		cache_delete(ncp, 1);
		name_cache_unlock();
		return (0);
	}
	vp = ncp->nc_vp;

	/* We found a "positive" match, return the vnode */
	if (vp) {
		nchstats.ncs_goodhits++;

		/*
		 * capture the vnode id before dropping the lock, so
		 * vnode_getwithvid() can detect the vnode being recycled
		 * after the lock is released
		 */
		vid = vp->v_id;
		name_cache_unlock();

		if (vnode_getwithvid(vp, vid)) {
			/* vnode was recycled out from under us; treat as a miss */
			name_cache_lock();
			nchstats.ncs_badvid++;
			name_cache_unlock();
			return (0);
		}
		*vpp = vp;
		return (-1);
	}

	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) {
		nchstats.ncs_badhits++;
		cache_delete(ncp, 1);
		name_cache_unlock();
		return (0);
	}

	/*
	 * We found a "negative" match, ENOENT notifies client of this match.
	 * The nc_whiteout field records whether this is a whiteout.
	 */
	nchstats.ncs_neghits++;

	if (ncp->nc_whiteout)
		cnp->cn_flags |= ISWHITEOUT;
	name_cache_unlock();
	return (ENOENT);
}
940 | ||
/*
 * Add an entry to the cache.  A NULL vp creates a "negative" entry
 * (name known not to exist); for negative entries the ISWHITEOUT
 * flag from cnp is recorded.  If vp is already in the cache the
 * call silently returns.
 */
void
cache_enter(dvp, vp, cnp)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
{
	register struct namecache *ncp, *negp;
	register struct nchashhead *ncpp;

	/* compute the name hash if the caller didn't supply one */
	if (cnp->cn_hash == 0)
		cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);

	name_cache_lock();

	/* if the entry is for -ve caching vp is null */
	if ((vp != NULLVP) && (LIST_FIRST(&vp->v_nclinks))) {
		/*
		 * someone beat us to the punch..
		 * this vnode is already in the cache
		 */
		name_cache_unlock();
		return;
	}
	/*
	 * We allocate a new entry if we are less than the maximum
	 * allowed and the one at the front of the list is in use.
	 * Otherwise we use the one at the front of the list.
	 */
	if (numcache < desiredNodes &&
	    ((ncp = nchead.tqh_first) == NULL ||
	      ncp->nc_hash.le_prev != 0)) {
		/*
		 * Allocate one more entry
		 */
		ncp = (struct namecache *)_MALLOC_ZONE((u_long)sizeof *ncp, M_CACHE, M_WAITOK);
		numcache++;
	} else {
		/*
		 * reuse an old entry
		 */
		ncp = TAILQ_FIRST(&nchead);
		TAILQ_REMOVE(&nchead, ncp, nc_entry);

		if (ncp->nc_hash.le_prev != 0) {
			/*
			 * still in use... we need to
			 * delete it before re-using it
			 * (le_prev != NULL is the "in use" sentinel,
			 * see cache_delete())
			 */
			nchstats.ncs_stolen++;
			cache_delete(ncp, 0);
		}
	}
	nchstats.ncs_enters++;

	/*
	 * Fill in cache info, if vp is NULL this is a "negative" cache entry.
	 */
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	ncp->nc_hashval = cnp->cn_hash;
	ncp->nc_whiteout = FALSE;
	/* take a shared, refcounted copy of the name string */
	ncp->nc_name = add_name_locked(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);

	/*
	 * make us the newest entry in the cache
	 * i.e. we'll be the last to be stolen
	 */
	TAILQ_INSERT_TAIL(&nchead, ncp, nc_entry);

	ncpp = NCHHASH(dvp, cnp->cn_hash);
#if DIAGNOSTIC
	{
		register struct namecache *p;

		for (p = ncpp->lh_first; p != 0; p = p->nc_hash.le_next)
			if (p == ncp)
				panic("cache_enter: duplicate");
	}
#endif
	/*
	 * make us available to be found via lookup
	 */
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);

	if (vp) {
		/*
		 * add to the list of name cache entries
		 * that point at vp
		 */
		LIST_INSERT_HEAD(&vp->v_nclinks, ncp, nc_un.nc_link);
	} else {
		/*
		 * this is a negative cache entry (vp == NULL)
		 * stick it on the negative cache list
		 * and record the whiteout state
		 */
		TAILQ_INSERT_TAIL(&neghead, ncp, nc_un.nc_negentry);

		if (cnp->cn_flags & ISWHITEOUT)
			ncp->nc_whiteout = TRUE;
		nchstats.ncs_negtotal++;

		if (nchstats.ncs_negtotal > desiredNegNodes) {
			/*
			 * if we've reached our desired limit
			 * of negative cache entries, delete
			 * the oldest
			 */
			negp = TAILQ_FIRST(&neghead);
			TAILQ_REMOVE(&neghead, negp, nc_un.nc_negentry);

			cache_delete(negp, 1);
		}
	}
	/*
	 * add us to the list of name cache entries that
	 * are children of dvp
	 */
	LIST_INSERT_HEAD(&dvp->v_ncchildren, ncp, nc_child);

	name_cache_unlock();
}
1066 | ||
1067 | ||
1068 | /* | |
1069 | * Initialize CRC-32 remainder table. | |
1070 | */ | |
1071 | static void init_crc32(void) | |
1072 | { | |
1073 | /* | |
1074 | * the CRC-32 generator polynomial is: | |
1075 | * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^10 | |
1076 | * + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 | |
1077 | */ | |
1078 | unsigned int crc32_polynomial = 0x04c11db7; | |
1079 | unsigned int i,j; | |
1080 | ||
1081 | /* | |
1082 | * pre-calculate the CRC-32 remainder for each possible octet encoding | |
1083 | */ | |
1084 | for (i = 0; i < 256; i++) { | |
1085 | unsigned int crc_rem = i << 24; | |
1086 | ||
1087 | for (j = 0; j < 8; j++) { | |
1088 | if (crc_rem & 0x80000000) | |
1089 | crc_rem = (crc_rem << 1) ^ crc32_polynomial; | |
1090 | else | |
1091 | crc_rem = (crc_rem << 1); | |
1092 | } | |
1093 | crc32tab[i] = crc_rem; | |
1094 | } | |
1095 | } | |
1096 | ||
1097 | ||
/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
nchinit(void)
{
	/* allow roughly 10% extra entries for negative caching */
	desiredNegNodes = (desiredvnodes / 10);
	desiredNodes = desiredvnodes + desiredNegNodes;

	TAILQ_INIT(&nchead);
	TAILQ_INIT(&neghead);

	/* build the CRC-32 table used by hash_string() */
	init_crc32();

	/*
	 * hashinit() returns the mask; keep the mask in nchashmask
	 * and the table size (mask + 1) in nchash
	 */
	nchashtbl = hashinit(MAX(4096, (2 *desiredNodes)), M_CACHE, &nchash);
	nchashmask = nchash;
	nchash++;

	init_string_table();

	/* Allocate name cache lock group attribute and group */
	namecache_lck_grp_attr= lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(namecache_lck_grp_attr);

	namecache_lck_grp = lck_grp_alloc_init("Name Cache", namecache_lck_grp_attr);

	/* Allocate name cache lock attribute */
	namecache_lck_attr = lck_attr_alloc_init();
	//lck_attr_setdebug(namecache_lck_attr);

	/* Allocate name cache lock */
	namecache_mtx_lock = lck_mtx_alloc_init(namecache_lck_grp, namecache_lck_attr);


}
1133 | ||
/* Acquire the global name cache mutex. */
void
name_cache_lock(void)
{
	lck_mtx_lock(namecache_mtx_lock);
}
1139 | ||
/* Release the global name cache mutex. */
void
name_cache_unlock(void)
{
	lck_mtx_unlock(namecache_mtx_lock);

}
1146 | ||
1147 | ||
1148 | int | |
1149 | resize_namecache(u_int newsize) | |
1150 | { | |
1151 | struct nchashhead *new_table; | |
1152 | struct nchashhead *old_table; | |
1153 | struct nchashhead *old_head, *head; | |
1154 | struct namecache *entry, *next; | |
1155 | uint32_t i, hashval; | |
1156 | int dNodes, dNegNodes; | |
1157 | u_long new_size, old_size; | |
1158 | ||
1159 | dNegNodes = (newsize / 10); | |
1160 | dNodes = newsize + dNegNodes; | |
1161 | ||
1162 | // we don't support shrinking yet | |
1163 | if (dNodes < desiredNodes) { | |
1164 | return 0; | |
1165 | } | |
1166 | new_table = hashinit(2 * dNodes, M_CACHE, &nchashmask); | |
1167 | new_size = nchashmask + 1; | |
1168 | ||
1169 | if (new_table == NULL) { | |
1170 | return ENOMEM; | |
1171 | } | |
1172 | ||
1173 | name_cache_lock(); | |
1174 | // do the switch! | |
1175 | old_table = nchashtbl; | |
1176 | nchashtbl = new_table; | |
1177 | old_size = nchash; | |
1178 | nchash = new_size; | |
1179 | ||
1180 | // walk the old table and insert all the entries into | |
1181 | // the new table | |
1182 | // | |
1183 | for(i=0; i < old_size; i++) { | |
1184 | old_head = &old_table[i]; | |
1185 | for (entry=old_head->lh_first; entry != NULL; entry=next) { | |
1186 | // | |
1187 | // XXXdbg - Beware: this assumes that hash_string() does | |
1188 | // the same thing as what happens in | |
1189 | // lookup() over in vfs_lookup.c | |
1190 | hashval = hash_string(entry->nc_name, 0); | |
1191 | entry->nc_hashval = hashval; | |
1192 | head = NCHHASH(entry->nc_dvp, hashval); | |
1193 | ||
1194 | next = entry->nc_hash.le_next; | |
1195 | LIST_INSERT_HEAD(head, entry, nc_hash); | |
1196 | } | |
1197 | } | |
1198 | desiredNodes = dNodes; | |
1199 | desiredNegNodes = dNegNodes; | |
1200 | ||
1201 | name_cache_unlock(); | |
1202 | FREE(old_table, M_CACHE); | |
1203 | ||
1204 | return 0; | |
1205 | } | |
1206 | ||
/*
 * cache_delete: unhook ncp from every list it is on and drop its
 * reference on the shared name string.  Caller must hold the name
 * cache lock.  If age_entry is set, the entry is moved to the head
 * of the LRU so it becomes the next one reused by cache_enter().
 */
static void
cache_delete(struct namecache *ncp, int age_entry)
{
	nchstats.ncs_deletes++;

	if (ncp->nc_vp) {
		/* positive entry: unlink from the vnode's v_nclinks list */
		LIST_REMOVE(ncp, nc_un.nc_link);
	} else {
		/* negative entry: unlink from the global negative list */
		TAILQ_REMOVE(&neghead, ncp, nc_un.nc_negentry);
		nchstats.ncs_negtotal--;
	}
	/* unlink from the parent directory's child list */
	LIST_REMOVE(ncp, nc_child);

	LIST_REMOVE(ncp, nc_hash);
	/*
	 * this field is used to indicate
	 * that the entry is in use and
	 * must be deleted before it can
	 * be reused...
	 */
	ncp->nc_hash.le_prev = NULL;

	if (age_entry) {
		/*
		 * make it the next one available
		 * for cache_enter's use
		 */
		TAILQ_REMOVE(&nchead, ncp, nc_entry);
		TAILQ_INSERT_HEAD(&nchead, ncp, nc_entry);
	}
	/* release our reference on the shared name string */
	remove_name_locked(ncp->nc_name);
	ncp->nc_name = NULL;
}
1240 | ||
1241 | ||
1242 | /* | |
1243 | * purge the entry associated with the | |
1244 | * specified vnode from the name cache | |
1245 | */ | |
1246 | void | |
1247 | cache_purge(vnode_t vp) | |
1248 | { | |
1249 | struct namecache *ncp; | |
1250 | ||
1251 | if ((LIST_FIRST(&vp->v_nclinks) == NULL) && (LIST_FIRST(&vp->v_ncchildren) == NULL)) | |
1252 | return; | |
1253 | ||
1254 | name_cache_lock(); | |
1255 | ||
1256 | while ( (ncp = LIST_FIRST(&vp->v_nclinks)) ) | |
1257 | cache_delete(ncp, 1); | |
1258 | ||
1259 | while ( (ncp = LIST_FIRST(&vp->v_ncchildren)) ) | |
1260 | cache_delete(ncp, 1); | |
1261 | ||
1262 | name_cache_unlock(); | |
1263 | } | |
1264 | ||
1265 | /* | |
1266 | * Purge all negative cache entries that are children of the | |
1267 | * given vnode. A case-insensitive file system (or any file | |
1268 | * system that has multiple equivalent names for the same | |
1269 | * directory entry) can use this when creating or renaming | |
1270 | * to remove negative entries that may no longer apply. | |
1271 | */ | |
1272 | void | |
1273 | cache_purge_negatives(vnode_t vp) | |
1274 | { | |
1275 | struct namecache *ncp; | |
1276 | ||
1277 | name_cache_lock(); | |
1278 | ||
1279 | LIST_FOREACH(ncp, &vp->v_ncchildren, nc_child) | |
1280 | if (ncp->nc_vp == NULL) | |
1281 | cache_delete(ncp , 1); | |
1282 | ||
1283 | name_cache_unlock(); | |
1284 | } | |
1285 | ||
1286 | /* | |
1287 | * Flush all entries referencing a particular filesystem. | |
1288 | * | |
1289 | * Since we need to check it anyway, we will flush all the invalid | |
1290 | * entries at the same time. | |
1291 | */ | |
1292 | void | |
1293 | cache_purgevfs(mp) | |
1294 | struct mount *mp; | |
1295 | { | |
1296 | struct nchashhead *ncpp; | |
1297 | struct namecache *ncp; | |
1298 | ||
1299 | name_cache_lock(); | |
1300 | /* Scan hash tables for applicable entries */ | |
1301 | for (ncpp = &nchashtbl[nchash - 1]; ncpp >= nchashtbl; ncpp--) { | |
1302 | restart: | |
1303 | for (ncp = ncpp->lh_first; ncp != 0; ncp = ncp->nc_hash.le_next) { | |
1304 | if (ncp->nc_dvp->v_mount == mp) { | |
1305 | cache_delete(ncp, 0); | |
1306 | goto restart; | |
1307 | } | |
1308 | } | |
1309 | } | |
1310 | name_cache_unlock(); | |
1311 | } | |
1312 | ||
1313 | ||
1314 | ||
//
// String ref routines
//
// A reference-counted shared-string table: identical name strings are
// stored once and shared by all name cache entries that use them (see
// add_name_locked / remove_name_locked).  All of this state is
// protected by the name cache lock.
//
static LIST_HEAD(stringhead, string_t) *string_ref_table;	/* hash table of string_t chains */
static u_long   string_table_mask;	/* bucket index mask (table size - 1) */
static uint32_t max_chain_len=0;	/* longest chain observed (stats) */
static struct stringhead *long_chain_head=NULL;	/* bucket owning that chain (stats) */
static uint32_t filled_buckets=0;	/* buckets with at least one entry; drives resize */
static uint32_t num_dups=0;		/* references beyond the first, per table (stats) */
static uint32_t nstrings=0;		/* unique strings currently in the table */

/*
 * One shared string.  The characters live immediately after this
 * header (see add_name_locked) and are NUL-terminated.
 */
typedef struct string_t {
    LIST_ENTRY(string_t)  hash_chain;	/* linkage within a hash bucket */
    unsigned char        *str;		/* points just past this header */
    uint32_t              refcount;	/* number of holders of this string */
} string_t;
1331 | ||
1332 | ||
1333 | ||
1334 | static int | |
1335 | resize_string_ref_table(void) | |
1336 | { | |
1337 | struct stringhead *new_table; | |
1338 | struct stringhead *old_table; | |
1339 | struct stringhead *old_head, *head; | |
1340 | string_t *entry, *next; | |
1341 | uint32_t i, hashval; | |
1342 | u_long new_mask, old_mask; | |
1343 | ||
1344 | new_table = hashinit((string_table_mask + 1) * 2, M_CACHE, &new_mask); | |
1345 | if (new_table == NULL) { | |
1346 | return ENOMEM; | |
1347 | } | |
1348 | ||
1349 | // do the switch! | |
1350 | old_table = string_ref_table; | |
1351 | string_ref_table = new_table; | |
1352 | old_mask = string_table_mask; | |
1353 | string_table_mask = new_mask; | |
1354 | ||
1355 | printf("resize: max chain len %d, new table size %d\n", | |
1356 | max_chain_len, new_mask + 1); | |
1357 | max_chain_len = 0; | |
1358 | long_chain_head = NULL; | |
1359 | filled_buckets = 0; | |
1360 | ||
1361 | // walk the old table and insert all the entries into | |
1362 | // the new table | |
1363 | // | |
1364 | for(i=0; i <= old_mask; i++) { | |
1365 | old_head = &old_table[i]; | |
1366 | for (entry=old_head->lh_first; entry != NULL; entry=next) { | |
1367 | hashval = hash_string(entry->str, 0); | |
1368 | head = &string_ref_table[hashval & string_table_mask]; | |
1369 | if (head->lh_first == NULL) { | |
1370 | filled_buckets++; | |
1371 | } | |
1372 | ||
1373 | next = entry->hash_chain.le_next; | |
1374 | LIST_INSERT_HEAD(head, entry, hash_chain); | |
1375 | } | |
1376 | } | |
1377 | ||
1378 | FREE(old_table, M_CACHE); | |
1379 | ||
1380 | return 0; | |
1381 | } | |
1382 | ||
1383 | ||
/*
 * init_string_table: allocate the initial shared-string hash table
 * (4096 buckets); called once from nchinit() during boot.
 */
static void
init_string_table(void)
{
	string_ref_table = hashinit(4096, M_CACHE, &string_table_mask);
}
1389 | ||
1390 | ||
1391 | char * | |
1392 | vfs_addname(const char *name, size_t len, u_int hashval, u_int flags) | |
1393 | { | |
1394 | char * ptr; | |
1395 | ||
1396 | name_cache_lock(); | |
1397 | ptr = add_name_locked(name, len, hashval, flags); | |
1398 | name_cache_unlock(); | |
1399 | ||
1400 | return(ptr); | |
1401 | } | |
1402 | ||
/*
 * add_name_locked: return a shared, refcounted copy of 'name' (len
 * bytes, not necessarily NUL-terminated).  If the string is already
 * in the table its refcount is bumped; otherwise a new entry (header
 * plus characters) is allocated.  Caller must hold the name cache
 * lock; may block in MALLOC.  'hashval' of 0 means "compute it here".
 */
static char *
add_name_locked(const char *name, size_t len, u_int hashval, __unused u_int flags)
{
	struct stringhead *head;
	string_t *entry;
	uint32_t chain_len = 0;

	//
	// If the table gets more than 3/4 full, resize it
	//
	if (4*filled_buckets >= ((string_table_mask + 1) * 3)) {
		if (resize_string_ref_table() != 0) {
			printf("failed to resize the hash table.\n");
		}
	}
	if (hashval == 0) {
		hashval = hash_string(name, 0);
	}

	head = &string_ref_table[hashval & string_table_mask];
	for (entry=head->lh_first; entry != NULL; chain_len++, entry=entry->hash_chain.le_next) {
		if (memcmp(entry->str, name, len) == 0 && entry->str[len] == '\0') {
			/* already present; just take another reference */
			entry->refcount++;
			num_dups++;
			break;
		}
	}

	if (entry == NULL) {
		// it wasn't already there so add it.
		MALLOC(entry, string_t *, sizeof(string_t) + len + 1, M_TEMP, M_WAITOK);

		// have to get "head" again because we could have blocked
		// in malloc and thus head could have changed.
		//
		head = &string_ref_table[hashval & string_table_mask];
		if (head->lh_first == NULL) {
			filled_buckets++;
		}

		/* characters are stored immediately after the header */
		entry->str = (char *)((char *)entry + sizeof(string_t));
		strncpy(entry->str, name, len);
		entry->str[len] = '\0';
		entry->refcount = 1;
		LIST_INSERT_HEAD(head, entry, hash_chain);

		if (chain_len > max_chain_len) {
			max_chain_len = chain_len;
			long_chain_head = head;
		}

		nstrings++;
	}

	return entry->str;
}
1459 | ||
/*
 * vfs_removename: exported wrapper around remove_name_locked() that
 * takes the name cache lock on the caller's behalf.  Returns 0 on
 * success, ENOENT if nameref is not in the table.
 */
int
vfs_removename(const char *nameref)
{
	int result;

	name_cache_lock();
	result = remove_name_locked(nameref);
	name_cache_unlock();

	return (result);

}
1472 | ||
1473 | ||
1474 | static int | |
1475 | remove_name_locked(const char *nameref) | |
1476 | { | |
1477 | struct stringhead *head; | |
1478 | string_t *entry; | |
1479 | uint32_t hashval; | |
1480 | char * ptr; | |
1481 | ||
1482 | hashval = hash_string(nameref, 0); | |
1483 | head = &string_ref_table[hashval & string_table_mask]; | |
1484 | for (entry=head->lh_first; entry != NULL; entry=entry->hash_chain.le_next) { | |
1485 | if (entry->str == (unsigned char *)nameref) { | |
1486 | entry->refcount--; | |
1487 | if (entry->refcount == 0) { | |
1488 | LIST_REMOVE(entry, hash_chain); | |
1489 | if (head->lh_first == NULL) { | |
1490 | filled_buckets--; | |
1491 | } | |
1492 | ptr = entry->str; | |
1493 | entry->str = NULL; | |
1494 | nstrings--; | |
1495 | ||
1496 | FREE(entry, M_TEMP); | |
1497 | } else { | |
1498 | num_dups--; | |
1499 | } | |
1500 | ||
1501 | return 0; | |
1502 | } | |
1503 | } | |
1504 | ||
1505 | return ENOENT; | |
1506 | } | |
1507 | ||
1508 | ||
1509 | void | |
1510 | dump_string_table(void) | |
1511 | { | |
1512 | struct stringhead *head; | |
1513 | string_t *entry; | |
1514 | u_long i; | |
1515 | ||
1516 | name_cache_lock(); | |
1517 | for (i = 0; i <= string_table_mask; i++) { | |
1518 | head = &string_ref_table[i]; | |
1519 | for (entry=head->lh_first; entry != NULL; entry=entry->hash_chain.le_next) { | |
1520 | printf("%6d - %s\n", entry->refcount, entry->str); | |
1521 | } | |
1522 | } | |
1523 | name_cache_unlock(); | |
1524 | } |