]> git.saurik.com Git - apple/xnu.git/blob - bsd/hfs/hfs_chash.c
1cbaf8186ffd66b4826c781cfc072f3fc5cc81e4
[apple/xnu.git] / bsd / hfs / hfs_chash.c
1 /*
2 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 /*
24 * Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995
25 * The Regents of the University of California. All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. All advertising materials mentioning features or use of this software
36 * must display the following acknowledgement:
37 * This product includes software developed by the University of
38 * California, Berkeley and its contributors.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * @(#)hfs_chash.c
56 * derived from @(#)ufs_ihash.c 8.7 (Berkeley) 5/17/95
57 */
58
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/vnode.h>
62 #include <sys/kernel.h>
63 #include <sys/malloc.h>
64 #include <sys/proc.h>
65 #include <sys/queue.h>
66
67
68 #include "hfs.h" /* XXX bringup */
69 #include "hfs_cnode.h"
70
/* Lock attributes and groups shared with the rest of HFS (defined elsewhere). */
extern lck_attr_t * hfs_lock_attr;
extern lck_grp_t * hfs_mutex_group;
extern lck_grp_t * hfs_rwlock_group;

/* Lock group/attributes used only for the cnode hash mutex below. */
lck_grp_t * chash_lck_grp;
lck_grp_attr_t * chash_lck_grp_attr;
lck_attr_t * chash_lck_attr;

/*
 * Structures associated with cnode caching.
 */
LIST_HEAD(cnodehashhead, cnode) *cnodehashtbl;
u_long cnodehash; /* size of hash table - 1 */
/* Bucket for a (device, file id) pair; cnodehash is a power-of-two mask set by hashinit(). */
#define CNODEHASH(device, inum) (&cnodehashtbl[((device) + (inum)) & cnodehash])

/* Single global mutex protecting cnodehashtbl and every cnode's c_hflag. */
lck_mtx_t hfs_chash_mutex;
87
88
89 /*
90 * Initialize cnode hash table.
91 */
92 __private_extern__
93 void
94 hfs_chashinit()
95 {
96 cnodehashtbl = hashinit(desiredvnodes, M_HFSMNT, &cnodehash);
97
98 chash_lck_grp_attr= lck_grp_attr_alloc_init();
99 lck_grp_attr_setstat(chash_lck_grp_attr);
100 chash_lck_grp = lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr);
101
102 chash_lck_attr = lck_attr_alloc_init();
103 //lck_attr_setdebug(chash_lck_attr);
104
105 lck_mtx_init(&hfs_chash_mutex, chash_lck_grp, chash_lck_attr);
106 }
107
108
109 /*
110 * Use the device, inum pair to find the incore cnode.
111 *
112 * If it is in core, but locked, wait for it.
113 */
114 __private_extern__
115 struct vnode *
116 hfs_chash_getvnode(dev_t dev, ino_t inum, int wantrsrc, int skiplock)
117 {
118 struct cnode *cp;
119 struct vnode *vp;
120 int error;
121 uint32_t vid;
122
123 /*
124 * Go through the hash list
125 * If a cnode is in the process of being cleaned out or being
126 * allocated, wait for it to be finished and then try again.
127 */
128 loop:
129 lck_mtx_lock(&hfs_chash_mutex);
130 for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
131 if ((cp->c_fileid != inum) || (cp->c_dev != dev))
132 continue;
133 /* Wait if cnode is being created or reclaimed. */
134 if (ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
135 SET(cp->c_hflag, H_WAITING);
136
137 (void) msleep(cp, &hfs_chash_mutex, PDROP | PINOD,
138 "hfs_chash_getvnode", 0);
139 goto loop;
140 }
141 /*
142 * Skip cnodes that are not in the name space anymore
143 * note that this check is done outside of the proper
144 * lock to catch nodes already in this state... this
145 * state must be rechecked after we acquire the cnode lock
146 */
147 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
148 continue;
149 }
150 /* Obtain the desired vnode. */
151 vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
152 if (vp == NULLVP)
153 goto exit;
154
155 vid = vnode_vid(vp);
156 lck_mtx_unlock(&hfs_chash_mutex);
157
158 if ((error = vnode_getwithvid(vp, vid))) {
159 /*
160 * If vnode is being reclaimed, or has
161 * already changed identity, no need to wait
162 */
163 return (NULL);
164 }
165 if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
166 vnode_put(vp);
167 return (NULL);
168 }
169
170 /*
171 * Skip cnodes that are not in the name space anymore
172 * we need to check again with the cnode lock held
173 * because we may have blocked acquiring the vnode ref
174 * or the lock on the cnode which would allow the node
175 * to be unlinked
176 */
177 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
178 if (!skiplock)
179 hfs_unlock(cp);
180 vnode_put(vp);
181
182 return (NULL);
183 }
184 return (vp);
185 }
186 exit:
187 lck_mtx_unlock(&hfs_chash_mutex);
188 return (NULL);
189 }
190
191
192 /*
193 * Use the device, fileid pair to find the incore cnode.
194 * If no cnode if found one is created
195 *
196 * If it is in core, but locked, wait for it.
197 */
198 __private_extern__
199 int
200 hfs_chash_snoop(dev_t dev, ino_t inum, int (*callout)(const struct cat_desc *,
201 const struct cat_attr *, void *), void * arg)
202 {
203 struct cnode *cp;
204 int result = ENOENT;
205
206 /*
207 * Go through the hash list
208 * If a cnode is in the process of being cleaned out or being
209 * allocated, wait for it to be finished and then try again.
210 */
211 lck_mtx_lock(&hfs_chash_mutex);
212 for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
213 if ((cp->c_fileid != inum) || (cp->c_dev != dev))
214 continue;
215 /* Skip cnodes being created or reclaimed. */
216 if (!ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
217 result = callout(&cp->c_desc, &cp->c_attr, arg);
218 }
219 break;
220 }
221 lck_mtx_unlock(&hfs_chash_mutex);
222 return (result);
223 }
224
225
226 /*
227 * Use the device, fileid pair to find the incore cnode.
228 * If no cnode if found one is created
229 *
230 * If it is in core, but locked, wait for it.
231 */
232 __private_extern__
233 struct cnode *
234 hfs_chash_getcnode(dev_t dev, ino_t inum, struct vnode **vpp, int wantrsrc, int skiplock)
235 {
236 struct cnode *cp;
237 struct cnode *ncp = NULL;
238 vnode_t vp;
239 uint32_t vid;
240
241 /*
242 * Go through the hash list
243 * If a cnode is in the process of being cleaned out or being
244 * allocated, wait for it to be finished and then try again.
245 */
246 loop:
247 lck_mtx_lock(&hfs_chash_mutex);
248
249 loop_with_lock:
250 for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
251 if ((cp->c_fileid != inum) || (cp->c_dev != dev))
252 continue;
253 /*
254 * Wait if cnode is being created, attached to or reclaimed.
255 */
256 if (ISSET(cp->c_hflag, H_ALLOC | H_ATTACH | H_TRANSIT)) {
257 SET(cp->c_hflag, H_WAITING);
258
259 (void) msleep(cp, &hfs_chash_mutex, PINOD,
260 "hfs_chash_getcnode", 0);
261 goto loop_with_lock;
262 }
263 /*
264 * Skip cnodes that are not in the name space anymore
265 * note that this check is done outside of the proper
266 * lock to catch nodes already in this state... this
267 * state must be rechecked after we acquire the cnode lock
268 */
269 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
270 continue;
271 }
272 vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
273 if (vp == NULL) {
274 /*
275 * The desired vnode isn't there so tag the cnode.
276 */
277 SET(cp->c_hflag, H_ATTACH);
278
279 lck_mtx_unlock(&hfs_chash_mutex);
280 } else {
281 vid = vnode_vid(vp);
282
283 lck_mtx_unlock(&hfs_chash_mutex);
284
285 if (vnode_getwithvid(vp, vid))
286 goto loop;
287 }
288 if (ncp) {
289 /*
290 * someone else won the race to create
291 * this cnode and add it to the hash
292 * just dump our allocation
293 */
294 FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
295 ncp = NULL;
296 }
297 if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
298 if (vp != NULLVP)
299 vnode_put(vp);
300 lck_mtx_lock(&hfs_chash_mutex);
301
302 if (vp == NULLVP)
303 CLR(cp->c_hflag, H_ATTACH);
304 goto loop_with_lock;
305 }
306 /*
307 * Skip cnodes that are not in the name space anymore
308 * we need to check again with the cnode lock held
309 * because we may have blocked acquiring the vnode ref
310 * or the lock on the cnode which would allow the node
311 * to be unlinked
312 */
313 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
314 if (!skiplock)
315 hfs_unlock(cp);
316 if (vp != NULLVP)
317 vnode_put(vp);
318 lck_mtx_lock(&hfs_chash_mutex);
319
320 if (vp == NULLVP)
321 CLR(cp->c_hflag, H_ATTACH);
322 goto loop_with_lock;
323 }
324 *vpp = vp;
325 return (cp);
326 }
327
328 /*
329 * Allocate a new cnode
330 */
331 if (skiplock)
332 panic("%s - should never get here when skiplock is set \n", __FUNCTION__);
333
334 if (ncp == NULL) {
335 lck_mtx_unlock(&hfs_chash_mutex);
336
337 MALLOC_ZONE(ncp, struct cnode *, sizeof(struct cnode), M_HFSNODE, M_WAITOK);
338 /*
339 * since we dropped the chash lock,
340 * we need to go back and re-verify
341 * that this node hasn't come into
342 * existence...
343 */
344 goto loop;
345 }
346 bzero(ncp, sizeof(struct cnode));
347 SET(ncp->c_hflag, H_ALLOC);
348 ncp->c_fileid = inum;
349 ncp->c_dev = dev;
350
351 lck_rw_init(&ncp->c_rwlock, hfs_rwlock_group, hfs_lock_attr);
352 if (!skiplock)
353 (void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK);
354
355 /* Insert the new cnode with it's H_ALLOC flag set */
356 LIST_INSERT_HEAD(CNODEHASH(dev, inum), ncp, c_hash);
357 lck_mtx_unlock(&hfs_chash_mutex);
358
359 *vpp = NULL;
360 return (ncp);
361 }
362
363
364 __private_extern__
365 void
366 hfs_chashwakeup(struct cnode *cp, int hflags)
367 {
368 lck_mtx_lock(&hfs_chash_mutex);
369
370 CLR(cp->c_hflag, hflags);
371
372 if (ISSET(cp->c_hflag, H_WAITING)) {
373 CLR(cp->c_hflag, H_WAITING);
374 wakeup((caddr_t)cp);
375 }
376 lck_mtx_unlock(&hfs_chash_mutex);
377 }
378
379
380 /*
381 * Re-hash two cnodes in the hash table.
382 */
383 __private_extern__
384 void
385 hfs_chash_rehash(struct cnode *cp1, struct cnode *cp2)
386 {
387 lck_mtx_lock(&hfs_chash_mutex);
388
389 LIST_REMOVE(cp1, c_hash);
390 LIST_REMOVE(cp2, c_hash);
391 LIST_INSERT_HEAD(CNODEHASH(cp1->c_dev, cp1->c_fileid), cp1, c_hash);
392 LIST_INSERT_HEAD(CNODEHASH(cp2->c_dev, cp2->c_fileid), cp2, c_hash);
393
394 lck_mtx_unlock(&hfs_chash_mutex);
395 }
396
397
398 /*
399 * Remove a cnode from the hash table.
400 */
401 __private_extern__
402 int
403 hfs_chashremove(struct cnode *cp)
404 {
405 lck_mtx_lock(&hfs_chash_mutex);
406
407 /* Check if a vnode is getting attached */
408 if (ISSET(cp->c_hflag, H_ATTACH)) {
409 lck_mtx_unlock(&hfs_chash_mutex);
410 return (EBUSY);
411 }
412 LIST_REMOVE(cp, c_hash);
413 cp->c_hash.le_next = NULL;
414 cp->c_hash.le_prev = NULL;
415
416 lck_mtx_unlock(&hfs_chash_mutex);
417 return (0);
418 }
419
420 /*
421 * Remove a cnode from the hash table and wakeup any waiters.
422 */
423 __private_extern__
424 void
425 hfs_chash_abort(struct cnode *cp)
426 {
427 lck_mtx_lock(&hfs_chash_mutex);
428
429 LIST_REMOVE(cp, c_hash);
430 cp->c_hash.le_next = NULL;
431 cp->c_hash.le_prev = NULL;
432
433 CLR(cp->c_hflag, H_ATTACH | H_ALLOC);
434 if (ISSET(cp->c_hflag, H_WAITING)) {
435 CLR(cp->c_hflag, H_WAITING);
436 wakeup((caddr_t)cp);
437 }
438 lck_mtx_unlock(&hfs_chash_mutex);
439 }
440
441
442 /*
443 * mark a cnode as in transistion
444 */
445 __private_extern__
446 void
447 hfs_chash_mark_in_transit(struct cnode *cp)
448 {
449 lck_mtx_lock(&hfs_chash_mutex);
450
451 SET(cp->c_hflag, H_TRANSIT);
452
453 lck_mtx_unlock(&hfs_chash_mutex);
454 }