/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/* Copyright (c) 1998 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995
 *    The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)hfs_vhash.c
 *    derived from @(#)ufs_ihash.c 8.7 (Berkeley) 5/17/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include "hfs.h"
#include "hfs_dbg.h"

/*
 * Structures associated with hfsnode caching.
 */
LIST_HEAD(vhashhead, hfsnode) *vhashtbl;
u_long vhash;    /* size of hash table - 1 */
#define HFSNODEHASH(device, nodeID)    (&vhashtbl[((device) + (nodeID)) & vhash])
struct slock hfs_vhash_slock;
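
/*
 * Illustrative sketch (not part of the original source): the HFSNODEHASH()
 * macro above is equivalent to the hypothetical helper below.  hashinit()
 * allocates a power-of-two number of buckets and stores that size minus one
 * in vhash, so masking (device + nodeID) with vhash selects a bucket.
 */
#if 0
static struct vhashhead *
hfs_vhash_bucket_example(dev_t device, UInt32 nodeID)
{
    /* Same arithmetic as HFSNODEHASH(device, nodeID) */
    return (&vhashtbl[((device) + (nodeID)) & vhash]);
}
#endif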

/*
 * Initialize hfsnode hash table.
 */
void
hfs_vhashinit()
{
    vhashtbl = hashinit(desiredvnodes, M_HFSMNT, &vhash);
    simple_lock_init(&hfs_vhash_slock);
}

/*
 * Use the device/nodeID/forkType tuple to find the in-core hfsnode and return
 * a pointer to it.  If it is in core but locked, wait for it.
 *
 * Acceptable forkTypes are kDataFork, kRsrcFork, kDirectory, kAnyFork, or
 * kDefault, which matches either kDataFork or kDirectory.
 *
 * While traversing the hash, an hfsnode may be found in the midst of being
 * allocated; if so, sleep and try again.
 */
struct vnode *
hfs_vhashget(dev, nodeID, forkType)
    dev_t dev;
    UInt32 nodeID;
    UInt8 forkType;
{
    struct proc *p = current_proc();
    struct hfsnode *hp;
    struct vnode *vp;

    /*
     * Go through the hash list.
     * If a vnode is in the process of being cleaned out or being
     * allocated, wait for it to be finished and then try again.
     */
loop:
    simple_lock(&hfs_vhash_slock);
    for (hp = HFSNODEHASH(dev, nodeID)->lh_first; hp; hp = hp->h_hash.le_next) {
        if (hp->h_nodeflags & IN_ALLOCATING) {
            /*
             * vnode is being created. Wait for it to finish...
             */
            hp->h_nodeflags |= IN_WANT;
            simple_unlock(&hfs_vhash_slock);
            tsleep((caddr_t)hp, PINOD, "hfs_vhashget", 0);
            goto loop;
        }
        if ((H_FILEID(hp) != nodeID) || (H_DEV(hp) != dev) ||
            (hp->h_meta->h_metaflags & IN_NOEXISTS))
            continue;

        /* SER XXX kDefault of meta data (ksysfile) is not assumed here */
        if ((forkType == kAnyFork) ||
            (H_FORKTYPE(hp) == forkType) ||
            ((forkType == kDefault) && ((H_FORKTYPE(hp) == kDirectory) ||
             (H_FORKTYPE(hp) == kDataFork)))) {
            vp = HTOV(hp);
            simple_lock(&vp->v_interlock);
            simple_unlock(&hfs_vhash_slock);
            if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
                goto loop;
            return (vp);
        }
    }
    simple_unlock(&hfs_vhash_slock);
    return (NULL);
}
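
/*
 * Illustrative sketch of the expected calling pattern (hypothetical caller,
 * not part of this file): look the fork up in the hash first, and fall back
 * to allocating a fresh hfsnode only on a miss.  hfs_vhashget() returns the
 * vnode referenced and exclusively locked, or NULL if no match is in core.
 */
#if 0
static int
hfs_lookup_or_create_example(dev_t dev, UInt32 nodeID, UInt8 forkType,
    struct vnode **vpp)
{
    struct vnode *vp;

    /* Fast path: the fork is already hashed and in core */
    vp = hfs_vhashget(dev, nodeID, forkType);
    if (vp != NULL) {
        *vpp = vp;
        return (0);
    }

    /*
     * Slow path: a real caller would allocate and initialize a new
     * hfsnode here with IN_ALLOCATING set, insert it with hfs_vhashins()
     * or hfs_vhashins_sibling(), and clear the flag when done (see the
     * sketch after hfs_vhashins() below).
     */
    *vpp = NULL;
    return (ENOENT);
}
#endif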


/*
 * Lock the hfsnode, insert it into the hash table, and return it locked.
 * Returns the sibling meta data through *fm if it exists; otherwise *fm is
 * set to NULL.
 */
void
hfs_vhashins_sibling(dev, nodeID, hp, fm)
    dev_t dev;
    UInt32 nodeID;
    struct hfsnode *hp;
    struct hfsfilemeta **fm;
{
    struct vhashhead *ipp;
    struct hfsnode *thp;
    struct hfsfilemeta *tfm;

    tfm = NULL;
    lockmgr(&hp->h_lock, LK_EXCLUSIVE, (struct slock *)0, current_proc());

    /*
     * Go through the hash list to see if a sibling exists.
     * If it does, store it to return.
     * If a vnode is in the process of being cleaned out or being
     * allocated, wait for it to be finished and then try again.
     */
    ipp = HFSNODEHASH(dev, nodeID);

loop:
    simple_lock(&hfs_vhash_slock);
    for (thp = ipp->lh_first; thp; thp = thp->h_hash.le_next) {
        if (thp->h_nodeflags & IN_ALLOCATING) {
            /*
             * vnode is being created. Wait for it to finish...
             */
            thp->h_nodeflags |= IN_WANT;
            simple_unlock(&hfs_vhash_slock);
            tsleep((caddr_t)thp, PINOD, "hfs_vhashins_sibling", 0);
            goto loop;
        }
        if ((H_FILEID(thp) == nodeID) && (H_DEV(thp) == dev)) {
            tfm = hp->h_meta = thp->h_meta;
            break;
        }
    }

    /* Add to the sibling list, if this fork type can have siblings */
    if (tfm && (H_FORKTYPE(hp) == kDataFork || H_FORKTYPE(hp) == kRsrcFork)) {
        simple_lock(&tfm->h_siblinglock);
        CIRCLEQ_INSERT_HEAD(&tfm->h_siblinghead, hp, h_sibling);
        simple_unlock(&tfm->h_siblinglock);
    }

    LIST_INSERT_HEAD(ipp, hp, h_hash);
    simple_unlock(&hfs_vhash_slock);
    *fm = tfm;
}


/*
 * Lock the hfsnode and insert the hfsnode into the hash table, and return it locked.
 */
void
hfs_vhashins(dev, nodeID, hp)
    dev_t dev;
    UInt32 nodeID;
    struct hfsnode *hp;
{
    struct vhashhead *ipp;

    DBG_ASSERT(hp != NULL);
    DBG_ASSERT(nodeID != 0);

    lockmgr(&hp->h_lock, LK_EXCLUSIVE, (struct slock *)0, current_proc());

    simple_lock(&hfs_vhash_slock);
    ipp = HFSNODEHASH(dev, nodeID);
    LIST_INSERT_HEAD(ipp, hp, h_hash);
    simple_unlock(&hfs_vhash_slock);
}
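
/*
 * Illustrative sketch (hypothetical allocation-side code, not part of this
 * file): a new hfsnode is inserted with IN_ALLOCATING set so lookups can find
 * it but will wait.  Once initialization finishes, the allocator clears the
 * flag and, if a waiter set IN_WANT, issues a wakeup() on the hfsnode address
 * that hfs_vhashget() and the other lookup loops tsleep() on.
 */
#if 0
static void
hfs_finish_allocation_example(struct hfsnode *hp)
{
    hp->h_nodeflags &= ~IN_ALLOCATING;      /* initialization complete */
    if (hp->h_nodeflags & IN_WANT) {
        hp->h_nodeflags &= ~IN_WANT;
        wakeup((caddr_t)hp);                /* wake threads sleeping in the loops above */
    }
}
#endif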


/*
 * Remove the hfsnode from the hash table and, if it is a fork with siblings,
 * from its sibling list.
 */
void
hfs_vhashrem(hp)
    struct hfsnode *hp;
{
    DBG_ASSERT(hp != NULL);
    DBG_ASSERT(hp->h_meta != NULL);

    simple_lock(&hfs_vhash_slock);

    /* Test to see if there are siblings; should only apply to forks */
    if (hp->h_meta != NULL && hp->h_meta->h_siblinghead.cqh_first != NULL) {
        simple_lock(&hp->h_meta->h_siblinglock);
        CIRCLEQ_REMOVE(&hp->h_meta->h_siblinghead, hp, h_sibling);
        simple_unlock(&hp->h_meta->h_siblinglock);
    }

    LIST_REMOVE(hp, h_hash);

#if HFS_DIAGNOSTIC
    hp->h_hash.le_next = NULL;
    hp->h_hash.le_prev = NULL;
#endif

    simple_unlock(&hfs_vhash_slock);
}


/*
 * Move entries from one hash bucket to another.
 * oldNodeID identifies the old bucket; the new bucket is derived from the
 * node's current file ID.
 */
void
hfs_vhashmove(hp, oldNodeID)
    struct hfsnode *hp;
    UInt32 oldNodeID;
{
    struct vhashhead *oldHeadIndex, *newHeadIndex;
    struct hfsnode *thp, *nextNode;
    UInt32 newNodeID;

    newNodeID = H_FILEID(hp);
    oldHeadIndex = HFSNODEHASH(H_DEV(hp), oldNodeID);
    newHeadIndex = HFSNODEHASH(H_DEV(hp), newNodeID);

    /* If it is moving to the same bucket, then we are done */
    if (oldHeadIndex == newHeadIndex)
        return;

loop:
    /*
     * Go through the old hash list.
     * If an entry's file ID matches the new node ID (so it no longer belongs
     * in the current bucket), remove it and add it to the right bucket.
     * If a vnode is in the process of being cleaned out or being
     * allocated, wait for it to be finished and then try again.
     */
    simple_lock(&hfs_vhash_slock);
    for (nextNode = oldHeadIndex->lh_first; nextNode; ) {
        if (nextNode->h_nodeflags & IN_ALLOCATING) {
            /*
             * vnode is being created. Wait for it to finish...
             */
            nextNode->h_nodeflags |= IN_WANT;
            simple_unlock(&hfs_vhash_slock);
            tsleep((caddr_t)nextNode, PINOD, "hfs_vhashmove", 0);
            goto loop;
        }

        thp = nextNode;
        nextNode = nextNode->h_hash.le_next;
        if (newNodeID == H_FILEID(thp)) {
            LIST_REMOVE(thp, h_hash);
            thp->h_hash.le_next = NULL;
            thp->h_hash.le_prev = NULL;
            LIST_INSERT_HEAD(newHeadIndex, thp, h_hash);
        }
    }

    simple_unlock(&hfs_vhash_slock);
}

#if HFS_DIAGNOSTIC
/*
 * This will test the hash entry for a given hfsnode.
 * It will check:
 *    1. The unique existence of the node.
 *    2. That all other nodes on the chain belong to this hash bucket.
 *    3. Proper termination of the hash chain.
 *    4. That all members have a non-NULL h_meta.
 */
void hfs_vhash_dbg(hp)
    struct hfsnode *hp;
{
    struct proc *p = current_proc();    /* XXX */
    struct vnode *vp;
    struct hfsnode *thp, *tthp;
    int maxsiblings = 1;
    int wasFound = false;
    struct vhashhead *ipp, *jpp;
    dev_t dev;
    UInt32 nodeID;
    UInt8 forkType;
    u_long forksfound = 0;

    /* Check for a NULL hfsnode before dereferencing it */
    if (hp == NULL)
        DEBUG_BREAK_MSG(("hash_dgh: Null hfsnode"));

    dev = H_DEV(hp);
    nodeID = H_FILEID(hp);
    forkType = H_FORKTYPE(hp);

    if (forkType == kDataFork || forkType == kRsrcFork)
        maxsiblings++;

    /*
     * Go through the hash list.
     * If a vnode is in the process of being cleaned out or being
     * allocated, wait for it to be finished and then try again.
     */
    ipp = HFSNODEHASH(dev, nodeID);

loop:
    simple_lock(&hfs_vhash_slock);
    for (thp = ipp->lh_first; thp; thp = thp->h_hash.le_next) {
        if (thp->h_nodeflags & IN_ALLOCATING) {    /* It's in the process of being allocated */
            simple_unlock(&hfs_vhash_slock);
            tsleep((caddr_t)thp, PINOD, "hfs_vhash_ins_meta", 0);
            goto loop;
        }

        if (thp->h_meta == NULL)
            DEBUG_BREAK_MSG(("hash_dgh: Null hfs_meta"));
        jpp = (HFSNODEHASH(H_DEV(thp), H_FILEID(thp)));
        if (ipp != jpp)
            DEBUG_BREAK_MSG(("hash_dgh: Member on wrong hash"));

        if ((H_FILEID(thp) == nodeID) && (H_DEV(thp) == dev)) {
            maxsiblings--;
            if (maxsiblings < 0)
                DEBUG_BREAK_MSG(("hash_dgh: Too many siblings"));
            if ((1 << H_FORKTYPE(thp)) & forksfound)
                DEBUG_BREAK_MSG(("hash_dgh: Fork already found"));
            forksfound |= (1 << H_FORKTYPE(thp));

            if (H_FORKTYPE(thp) == forkType) {
                if (wasFound == true)
                    DEBUG_BREAK_MSG(("hash_dgh: Already found"));
                wasFound = true;
            }
        }
    }
    simple_unlock(&hfs_vhash_slock);

    if (!wasFound)
        DEBUG_BREAK_MSG(("hash_dgh: Not found"));
}

#endif /* HFS_DIAGNOSTIC */