/*
 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)hfs_chash.c
 *	derived from @(#)ufs_ihash.c	8.7 (Berkeley) 5/17/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>


#include "hfs.h"	/* XXX bringup */
#include "hfs_cnode.h"

extern lck_attr_t * hfs_lock_attr;
extern lck_grp_t * hfs_mutex_group;
extern lck_grp_t * hfs_rwlock_group;

lck_grp_t * chash_lck_grp;
lck_grp_attr_t * chash_lck_grp_attr;
lck_attr_t * chash_lck_attr;

/*
 * Structures associated with cnode caching.
 */
LIST_HEAD(cnodehashhead, cnode) *cnodehashtbl;
u_long cnodehash;	/* size of hash table - 1 */
#define CNODEHASH(device, inum) (&cnodehashtbl[((device) + (inum)) & cnodehash])
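
/*
 * A cnode is hashed by the sum of its device number and file ID, masked
 * with cnodehash (the power-of-two table size minus one that hashinit()
 * returns).  All hash chain manipulation, and the c_hflag state bits used
 * below, are serialized by hfs_chash_mutex.
 */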

lck_mtx_t hfs_chash_mutex;


/*
 * Initialize cnode hash table.
 */
__private_extern__
void
hfs_chashinit()
{
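	/*
	 * hashinit() sizes the table to a power of two derived from
	 * desiredvnodes and returns (table size - 1) in cnodehash,
	 * which is exactly the mask CNODEHASH() applies.
	 */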
	cnodehashtbl = hashinit(desiredvnodes, M_HFSMNT, &cnodehash);

	chash_lck_grp_attr = lck_grp_attr_alloc_init();
	chash_lck_grp = lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr);
	chash_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&hfs_chash_mutex, chash_lck_grp, chash_lck_attr);
}


/*
 * Use the device, inum pair to find the incore cnode.
 *
 * If it is in core, but locked, wait for it.
 */
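/*
 * On success the vnode is returned with an iocount held and, unless
 * skiplock is set, with the cnode locked exclusive.  A hypothetical
 * caller sketch (illustrative only, not taken from this file):
 *
 *	struct vnode *vp;
 *
 *	if ((vp = hfs_chash_getvnode(dev, inum, 0, 0)) != NULL) {
 *		... operate on VTOC(vp) and vp ...
 *		hfs_unlock(VTOC(vp));
 *		vnode_put(vp);
 *	}
 */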
__private_extern__
struct vnode *
hfs_chash_getvnode(dev_t dev, ino_t inum, int wantrsrc, int skiplock)
{
	struct cnode	*cp;
	struct vnode	*vp;
	int error;
	uint32_t vid;

	/*
	 * Go through the hash list
	 * If a cnode is in the process of being cleaned out or being
	 * allocated, wait for it to be finished and then try again.
	 */
loop:
	lck_mtx_lock(&hfs_chash_mutex);
	for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
		if ((cp->c_fileid != inum) || (cp->c_dev != dev))
			continue;
		/* Wait if cnode is being created or reclaimed. */
		if (ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
			SET(cp->c_hflag, H_WAITING);

			(void) msleep(cp, &hfs_chash_mutex, PDROP | PINOD,
			              "hfs_chash_getvnode", 0);
			goto loop;
		}
		/*
		 * Skip cnodes that are no longer in the name space.
		 * Note that this check is done without holding the cnode
		 * lock, so it only catches nodes already in that state;
		 * it must be rechecked after we acquire the cnode lock.
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			continue;
		}
		/* Obtain the desired vnode. */
		vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
		if (vp == NULLVP)
			goto exit;

		vid = vnode_vid(vp);
		lck_mtx_unlock(&hfs_chash_mutex);

		if ((error = vnode_getwithvid(vp, vid))) {
			/*
			 * If vnode is being reclaimed, or has
			 * already changed identity, no need to wait
			 */
			return (NULL);
		}
		if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
			vnode_put(vp);
			return (NULL);
		}

		/*
		 * Skip cnodes that are no longer in the name space.
		 * We need to check again with the cnode lock held,
		 * because we may have blocked while acquiring the vnode
		 * reference or the cnode lock, either of which would have
		 * allowed the node to be unlinked in the meantime.
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			if (!skiplock)
				hfs_unlock(cp);
			vnode_put(vp);

			return (NULL);
		}
		return (vp);
	}
exit:
	lck_mtx_unlock(&hfs_chash_mutex);
	return (NULL);
}


/*
 * Use the device, fileid pair to find the incore cnode.
 * If one is found and it is not being created or reclaimed, invoke the
 * callout on its catalog descriptor and attributes.
 *
 * Returns the callout's result, or ENOENT if no usable cnode was found.
 * The callout runs with the cnode hash lock held, so it must not block.
 */
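/*
 * A hypothetical callout sketch (illustrative only, not from this file),
 * copying the parent directory ID out through the opaque argument:
 *
 *	static int
 *	my_snoop_callout(const struct cat_desc *descp,
 *	                 const struct cat_attr *attrp, void *arg)
 *	{
 *		*(cnid_t *)arg = descp->cd_parentcnid;
 *		return (0);
 *	}
 */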
__private_extern__
int
hfs_chash_snoop(dev_t dev, ino_t inum, int (*callout)(const struct cat_desc *,
                const struct cat_attr *, void *), void * arg)
{
	struct cnode *cp;
	int result = ENOENT;

	/*
	 * Go through the hash list.
	 * If the cnode is in the process of being cleaned out or being
	 * allocated, just skip it; there is nothing to report yet.
	 */
	lck_mtx_lock(&hfs_chash_mutex);
	for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
		if ((cp->c_fileid != inum) || (cp->c_dev != dev))
			continue;
		/* Skip cnodes being created or reclaimed. */
		if (!ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
			result = callout(&cp->c_desc, &cp->c_attr, arg);
		}
		break;
	}
	lck_mtx_unlock(&hfs_chash_mutex);
	return (result);
}


/*
 * Use the device, fileid pair to find the incore cnode.
 * If no cnode is found, one is created.
 *
 * If it is in core, but locked, wait for it.
 */
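/*
 * On return (as implemented below): the cnode is locked exclusive unless
 * skiplock is set.  A freshly allocated cnode is hashed with H_ALLOC still
 * set and *vpp is NULL.  For an existing cnode, *vpp is either its vnode
 * (with an iocount held) or NULL, in which case H_ATTACH is left set to
 * indicate that a vnode is about to be attached.
 */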
__private_extern__
struct cnode *
hfs_chash_getcnode(dev_t dev, ino_t inum, struct vnode **vpp, int wantrsrc, int skiplock)
{
	struct cnode	*cp;
	struct cnode	*ncp = NULL;
	vnode_t vp;
	uint32_t vid;

	/*
	 * Go through the hash list
	 * If a cnode is in the process of being cleaned out or being
	 * allocated, wait for it to be finished and then try again.
	 */
loop:
	lck_mtx_lock(&hfs_chash_mutex);

loop_with_lock:
	for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
		if ((cp->c_fileid != inum) || (cp->c_dev != dev))
			continue;
		/*
		 * Wait if cnode is being created, attached to or reclaimed.
		 */
		if (ISSET(cp->c_hflag, H_ALLOC | H_ATTACH | H_TRANSIT)) {
			SET(cp->c_hflag, H_WAITING);

			(void) msleep(cp, &hfs_chash_mutex, PINOD,
			              "hfs_chash_getcnode", 0);
			goto loop_with_lock;
		}
		/*
		 * Skip cnodes that are no longer in the name space.
		 * Note that this check is done without holding the cnode
		 * lock, so it only catches nodes already in that state;
		 * it must be rechecked after we acquire the cnode lock.
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			continue;
		}
		vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
		if (vp == NULL) {
			/*
			 * The desired vnode isn't there so tag the cnode.
			 */
			SET(cp->c_hflag, H_ATTACH);

			lck_mtx_unlock(&hfs_chash_mutex);
		} else {
			vid = vnode_vid(vp);

			lck_mtx_unlock(&hfs_chash_mutex);

			if (vnode_getwithvid(vp, vid))
				goto loop;
		}
		if (ncp) {
			/*
			 * someone else won the race to create
			 * this cnode and add it to the hash
			 * just dump our allocation
			 */
			FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
			ncp = NULL;
		}
		if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
			if (vp != NULLVP)
				vnode_put(vp);
			lck_mtx_lock(&hfs_chash_mutex);

			if (vp == NULLVP)
				CLR(cp->c_hflag, H_ATTACH);
			goto loop_with_lock;
		}
		/*
		 * Skip cnodes that are no longer in the name space.
		 * We need to check again with the cnode lock held,
		 * because we may have blocked while acquiring the vnode
		 * reference or the cnode lock, either of which would have
		 * allowed the node to be unlinked in the meantime.
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			if (!skiplock)
				hfs_unlock(cp);
			if (vp != NULLVP)
				vnode_put(vp);
			lck_mtx_lock(&hfs_chash_mutex);

			if (vp == NULLVP)
				CLR(cp->c_hflag, H_ATTACH);
			goto loop_with_lock;
		}
		*vpp = vp;
		return (cp);
	}

	/*
	 * Allocate a new cnode
	 */
	if (skiplock)
		panic("%s - should never get here when skiplock is set \n", __FUNCTION__);

	if (ncp == NULL) {
		lck_mtx_unlock(&hfs_chash_mutex);

		MALLOC_ZONE(ncp, struct cnode *, sizeof(struct cnode), M_HFSNODE, M_WAITOK);
		/*
		 * since we dropped the chash lock,
		 * we need to go back and re-verify
		 * that this node hasn't come into
		 * existence...
		 */
		goto loop;
	}
	bzero(ncp, sizeof(struct cnode));
	SET(ncp->c_hflag, H_ALLOC);
	ncp->c_fileid = inum;
	ncp->c_dev = dev;
	TAILQ_INIT(&ncp->c_hintlist); /* make the list empty */

	lck_rw_init(&ncp->c_rwlock, hfs_rwlock_group, hfs_lock_attr);
	if (!skiplock)
		(void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK);

	/* Insert the new cnode with its H_ALLOC flag set */
	LIST_INSERT_HEAD(CNODEHASH(dev, inum), ncp, c_hash);
	lck_mtx_unlock(&hfs_chash_mutex);

	*vpp = NULL;
	return (ncp);
}


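/*
 * Clear the given hash-state flags on a cnode and wake up anyone
 * waiting for them (e.g. a thread blocked in hfs_chash_getcnode()
 * or hfs_chash_getvnode() above).
 */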
__private_extern__
void
hfs_chashwakeup(struct cnode *cp, int hflags)
{
	lck_mtx_lock(&hfs_chash_mutex);

	CLR(cp->c_hflag, hflags);

	if (ISSET(cp->c_hflag, H_WAITING)) {
		CLR(cp->c_hflag, H_WAITING);
		wakeup((caddr_t)cp);
	}
	lck_mtx_unlock(&hfs_chash_mutex);
}


/*
 * Re-hash two cnodes in the hash table.
 */
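/*
 * Both cnodes are simply re-inserted on the chains selected by their
 * current c_dev/c_fileid values, so this is presumably called after the
 * caller has swapped or otherwise changed those identities.
 */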
__private_extern__
void
hfs_chash_rehash(struct cnode *cp1, struct cnode *cp2)
{
	lck_mtx_lock(&hfs_chash_mutex);

	LIST_REMOVE(cp1, c_hash);
	LIST_REMOVE(cp2, c_hash);
	LIST_INSERT_HEAD(CNODEHASH(cp1->c_dev, cp1->c_fileid), cp1, c_hash);
	LIST_INSERT_HEAD(CNODEHASH(cp2->c_dev, cp2->c_fileid), cp2, c_hash);

	lck_mtx_unlock(&hfs_chash_mutex);
}


/*
 * Remove a cnode from the hash table.
 */
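/*
 * Returns EBUSY, without removing the cnode, if a vnode is in the middle
 * of being attached to it (H_ATTACH is set); otherwise returns 0.
 */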
__private_extern__
int
hfs_chashremove(struct cnode *cp)
{
	lck_mtx_lock(&hfs_chash_mutex);

	/* Check if a vnode is getting attached */
	if (ISSET(cp->c_hflag, H_ATTACH)) {
		lck_mtx_unlock(&hfs_chash_mutex);
		return (EBUSY);
	}
	LIST_REMOVE(cp, c_hash);
	cp->c_hash.le_next = NULL;
	cp->c_hash.le_prev = NULL;

	lck_mtx_unlock(&hfs_chash_mutex);
	return (0);
}

/*
 * Remove a cnode from the hash table and wake up any waiters.
 */
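/*
 * Unlike hfs_chashremove(), this also clears the H_ATTACH and H_ALLOC
 * bits, so it can unwind a cnode whose creation or vnode attachment is
 * being abandoned.
 */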
__private_extern__
void
hfs_chash_abort(struct cnode *cp)
{
	lck_mtx_lock(&hfs_chash_mutex);

	LIST_REMOVE(cp, c_hash);
	cp->c_hash.le_next = NULL;
	cp->c_hash.le_prev = NULL;

	CLR(cp->c_hflag, H_ATTACH | H_ALLOC);
	if (ISSET(cp->c_hflag, H_WAITING)) {
		CLR(cp->c_hflag, H_WAITING);
		wakeup((caddr_t)cp);
	}
	lck_mtx_unlock(&hfs_chash_mutex);
}


/*
 * Mark a cnode as in transition (H_TRANSIT), which the lookup routines
 * above treat as "wait for this cnode to settle before using it".
 */
__private_extern__
void
hfs_chash_mark_in_transit(struct cnode *cp)
{
	lck_mtx_lock(&hfs_chash_mutex);

	SET(cp->c_hflag, H_TRANSIT);

	lck_mtx_unlock(&hfs_chash_mutex);
}