/*
 * bsd/hfs/hfs_chash.c — from Apple xnu-792.6.61
 * (recovered from the git.saurik.com mirror of apple/xnu.git)
 */
/*
 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
22
23/*
24 * Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995
25 * The Regents of the University of California. All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. All advertising materials mentioning features or use of this software
36 * must display the following acknowledgement:
37 * This product includes software developed by the University of
38 * California, Berkeley and its contributors.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * @(#)hfs_chash.c
56 * derived from @(#)ufs_ihash.c 8.7 (Berkeley) 5/17/95
57 */
58
59#include <sys/param.h>
60#include <sys/systm.h>
61#include <sys/vnode.h>
91447636 62#include <sys/kernel.h>
9bccf70c
A
63#include <sys/malloc.h>
64#include <sys/proc.h>
65#include <sys/queue.h>
66
91447636
A
67
68#include "hfs.h" /* XXX bringup */
9bccf70c
A
69#include "hfs_cnode.h"
70
91447636
A
71extern lck_attr_t * hfs_lock_attr;
72extern lck_grp_t * hfs_mutex_group;
73extern lck_grp_t * hfs_rwlock_group;
74
75lck_grp_t * chash_lck_grp;
76lck_grp_attr_t * chash_lck_grp_attr;
77lck_attr_t * chash_lck_attr;
9bccf70c
A
78
79/*
80 * Structures associated with cnode caching.
81 */
82LIST_HEAD(cnodehashhead, cnode) *cnodehashtbl;
83u_long cnodehash; /* size of hash table - 1 */
84#define CNODEHASH(device, inum) (&cnodehashtbl[((device) + (inum)) & cnodehash])
91447636
A
85
86lck_mtx_t hfs_chash_mutex;
87
9bccf70c
A
88
89/*
90 * Initialize cnode hash table.
91 */
92__private_extern__
93void
94hfs_chashinit()
95{
96 cnodehashtbl = hashinit(desiredvnodes, M_HFSMNT, &cnodehash);
91447636
A
97
98 chash_lck_grp_attr= lck_grp_attr_alloc_init();
99 lck_grp_attr_setstat(chash_lck_grp_attr);
100 chash_lck_grp = lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr);
101
102 chash_lck_attr = lck_attr_alloc_init();
103 //lck_attr_setdebug(chash_lck_attr);
104
105 lck_mtx_init(&hfs_chash_mutex, chash_lck_grp, chash_lck_attr);
9bccf70c
A
106}
107
108
109/*
110 * Use the device, inum pair to find the incore cnode.
111 *
112 * If it is in core, but locked, wait for it.
9bccf70c
A
113 */
114__private_extern__
91447636
A
115struct vnode *
116hfs_chash_getvnode(dev_t dev, ino_t inum, int wantrsrc, int skiplock)
9bccf70c 117{
9bccf70c
A
118 struct cnode *cp;
119 struct vnode *vp;
120 int error;
91447636 121 uint32_t vid;
9bccf70c 122
9bccf70c
A
123 /*
124 * Go through the hash list
125 * If a cnode is in the process of being cleaned out or being
126 * allocated, wait for it to be finished and then try again.
127 */
128loop:
91447636 129 lck_mtx_lock(&hfs_chash_mutex);
9bccf70c
A
130 for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
131 if ((cp->c_fileid != inum) || (cp->c_dev != dev))
132 continue;
91447636
A
133 /* Wait if cnode is being created or reclaimed. */
134 if (ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
135 SET(cp->c_hflag, H_WAITING);
136
137 (void) msleep(cp, &hfs_chash_mutex, PDROP | PINOD,
138 "hfs_chash_getvnode", 0);
9bccf70c 139 goto loop;
91447636
A
140 }
141 /*
142 * Skip cnodes that are not in the name space anymore
143 * note that this check is done outside of the proper
144 * lock to catch nodes already in this state... this
145 * state must be rechecked after we acquire the cnode lock
146 */
147 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
148 continue;
149 }
150 /* Obtain the desired vnode. */
151 vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
152 if (vp == NULLVP)
153 goto exit;
154
155 vid = vnode_vid(vp);
156 lck_mtx_unlock(&hfs_chash_mutex);
157
158 if ((error = vnode_getwithvid(vp, vid))) {
159 /*
160 * If vnode is being reclaimed, or has
161 * already changed identity, no need to wait
9bccf70c 162 */
91447636 163 return (NULL);
9bccf70c 164 }
91447636
A
165 if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
166 vnode_put(vp);
167 return (NULL);
168 }
169
170 /*
171 * Skip cnodes that are not in the name space anymore
172 * we need to check again with the cnode lock held
173 * because we may have blocked acquiring the vnode ref
174 * or the lock on the cnode which would allow the node
175 * to be unlinked
176 */
177 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
178 if (!skiplock)
179 hfs_unlock(cp);
180 vnode_put(vp);
181
182 return (NULL);
183 }
184 return (vp);
185 }
186exit:
187 lck_mtx_unlock(&hfs_chash_mutex);
188 return (NULL);
189}
190
191
192/*
193 * Use the device, fileid pair to find the incore cnode.
194 * If no cnode if found one is created
195 *
196 * If it is in core, but locked, wait for it.
197 */
198__private_extern__
199int
200hfs_chash_snoop(dev_t dev, ino_t inum, int (*callout)(const struct cat_desc *,
201 const struct cat_attr *, void *), void * arg)
202{
203 struct cnode *cp;
204 int result = ENOENT;
205
206 /*
207 * Go through the hash list
208 * If a cnode is in the process of being cleaned out or being
209 * allocated, wait for it to be finished and then try again.
210 */
211 lck_mtx_lock(&hfs_chash_mutex);
212 for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
213 if ((cp->c_fileid != inum) || (cp->c_dev != dev))
9bccf70c 214 continue;
91447636
A
215 /* Skip cnodes being created or reclaimed. */
216 if (!ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
217 result = callout(&cp->c_desc, &cp->c_attr, arg);
218 }
219 break;
220 }
221 lck_mtx_unlock(&hfs_chash_mutex);
222 return (result);
223}
224
9bccf70c 225
91447636
A
226/*
227 * Use the device, fileid pair to find the incore cnode.
228 * If no cnode if found one is created
229 *
230 * If it is in core, but locked, wait for it.
231 */
232__private_extern__
233struct cnode *
234hfs_chash_getcnode(dev_t dev, ino_t inum, struct vnode **vpp, int wantrsrc, int skiplock)
235{
236 struct cnode *cp;
237 struct cnode *ncp = NULL;
238 vnode_t vp;
239 uint32_t vid;
240
241 /*
242 * Go through the hash list
243 * If a cnode is in the process of being cleaned out or being
244 * allocated, wait for it to be finished and then try again.
245 */
246loop:
247 lck_mtx_lock(&hfs_chash_mutex);
248
249loop_with_lock:
250 for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
251 if ((cp->c_fileid != inum) || (cp->c_dev != dev))
252 continue;
9bccf70c 253 /*
91447636 254 * Wait if cnode is being created, attached to or reclaimed.
9bccf70c 255 */
91447636
A
256 if (ISSET(cp->c_hflag, H_ALLOC | H_ATTACH | H_TRANSIT)) {
257 SET(cp->c_hflag, H_WAITING);
9bccf70c 258
91447636
A
259 (void) msleep(cp, &hfs_chash_mutex, PINOD,
260 "hfs_chash_getcnode", 0);
261 goto loop_with_lock;
262 }
263 /*
264 * Skip cnodes that are not in the name space anymore
265 * note that this check is done outside of the proper
266 * lock to catch nodes already in this state... this
267 * state must be rechecked after we acquire the cnode lock
268 */
269 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
270 continue;
271 }
272 vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
273 if (vp == NULL) {
9bccf70c 274 /*
91447636 275 * The desired vnode isn't there so tag the cnode.
9bccf70c 276 */
91447636
A
277 SET(cp->c_hflag, H_ATTACH);
278
279 lck_mtx_unlock(&hfs_chash_mutex);
280 } else {
281 vid = vnode_vid(vp);
282
283 lck_mtx_unlock(&hfs_chash_mutex);
284
285 if (vnode_getwithvid(vp, vid))
286 goto loop;
287 }
288 if (ncp) {
289 /*
290 * someone else won the race to create
291 * this cnode and add it to the hash
292 * just dump our allocation
293 */
294 FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
295 ncp = NULL;
9bccf70c 296 }
91447636
A
297 if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
298 if (vp != NULLVP)
299 vnode_put(vp);
300 lck_mtx_lock(&hfs_chash_mutex);
9bccf70c 301
91447636
A
302 if (vp == NULLVP)
303 CLR(cp->c_hflag, H_ATTACH);
304 goto loop_with_lock;
305 }
9bccf70c 306 /*
91447636
A
307 * Skip cnodes that are not in the name space anymore
308 * we need to check again with the cnode lock held
309 * because we may have blocked acquiring the vnode ref
310 * or the lock on the cnode which would allow the node
311 * to be unlinked
9bccf70c 312 */
91447636
A
313 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
314 if (!skiplock)
315 hfs_unlock(cp);
316 if (vp != NULLVP)
317 vnode_put(vp);
318 lck_mtx_lock(&hfs_chash_mutex);
319
320 if (vp == NULLVP)
321 CLR(cp->c_hflag, H_ATTACH);
322 goto loop_with_lock;
9bccf70c 323 }
91447636 324 *vpp = vp;
9bccf70c
A
325 return (cp);
326 }
91447636
A
327
328 /*
329 * Allocate a new cnode
330 */
331 if (skiplock)
332 panic("%s - should never get here when skiplock is set \n", __FUNCTION__);
333
334 if (ncp == NULL) {
335 lck_mtx_unlock(&hfs_chash_mutex);
336
337 MALLOC_ZONE(ncp, struct cnode *, sizeof(struct cnode), M_HFSNODE, M_WAITOK);
338 /*
339 * since we dropped the chash lock,
340 * we need to go back and re-verify
341 * that this node hasn't come into
342 * existence...
343 */
344 goto loop;
345 }
346 bzero(ncp, sizeof(struct cnode));
347 SET(ncp->c_hflag, H_ALLOC);
348 ncp->c_fileid = inum;
349 ncp->c_dev = dev;
b36670ce 350 TAILQ_INIT(&ncp->c_hintlist); /* make the list empty */
91447636
A
351
352 lck_rw_init(&ncp->c_rwlock, hfs_rwlock_group, hfs_lock_attr);
353 if (!skiplock)
354 (void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK);
355
356 /* Insert the new cnode with it's H_ALLOC flag set */
357 LIST_INSERT_HEAD(CNODEHASH(dev, inum), ncp, c_hash);
358 lck_mtx_unlock(&hfs_chash_mutex);
359
360 *vpp = NULL;
361 return (ncp);
362}
363
364
365__private_extern__
366void
367hfs_chashwakeup(struct cnode *cp, int hflags)
368{
369 lck_mtx_lock(&hfs_chash_mutex);
370
371 CLR(cp->c_hflag, hflags);
372
373 if (ISSET(cp->c_hflag, H_WAITING)) {
374 CLR(cp->c_hflag, H_WAITING);
375 wakeup((caddr_t)cp);
376 }
377 lck_mtx_unlock(&hfs_chash_mutex);
9bccf70c
A
378}
379
380
381/*
91447636 382 * Re-hash two cnodes in the hash table.
9bccf70c
A
383 */
384__private_extern__
385void
91447636 386hfs_chash_rehash(struct cnode *cp1, struct cnode *cp2)
9bccf70c 387{
91447636
A
388 lck_mtx_lock(&hfs_chash_mutex);
389
390 LIST_REMOVE(cp1, c_hash);
391 LIST_REMOVE(cp2, c_hash);
392 LIST_INSERT_HEAD(CNODEHASH(cp1->c_dev, cp1->c_fileid), cp1, c_hash);
393 LIST_INSERT_HEAD(CNODEHASH(cp2->c_dev, cp2->c_fileid), cp2, c_hash);
394
395 lck_mtx_unlock(&hfs_chash_mutex);
9bccf70c
A
396}
397
398
399/*
400 * Remove a cnode from the hash table.
401 */
402__private_extern__
91447636 403int
9bccf70c
A
404hfs_chashremove(struct cnode *cp)
405{
91447636
A
406 lck_mtx_lock(&hfs_chash_mutex);
407
408 /* Check if a vnode is getting attached */
409 if (ISSET(cp->c_hflag, H_ATTACH)) {
410 lck_mtx_unlock(&hfs_chash_mutex);
411 return (EBUSY);
412 }
413 LIST_REMOVE(cp, c_hash);
414 cp->c_hash.le_next = NULL;
415 cp->c_hash.le_prev = NULL;
416
417 lck_mtx_unlock(&hfs_chash_mutex);
418 return (0);
419}
420
421/*
422 * Remove a cnode from the hash table and wakeup any waiters.
423 */
424__private_extern__
425void
426hfs_chash_abort(struct cnode *cp)
427{
428 lck_mtx_lock(&hfs_chash_mutex);
429
9bccf70c
A
430 LIST_REMOVE(cp, c_hash);
431 cp->c_hash.le_next = NULL;
432 cp->c_hash.le_prev = NULL;
91447636
A
433
434 CLR(cp->c_hflag, H_ATTACH | H_ALLOC);
435 if (ISSET(cp->c_hflag, H_WAITING)) {
436 CLR(cp->c_hflag, H_WAITING);
437 wakeup((caddr_t)cp);
438 }
439 lck_mtx_unlock(&hfs_chash_mutex);
9bccf70c
A
440}
441
91447636
A
442
443/*
444 * mark a cnode as in transistion
445 */
446__private_extern__
447void
448hfs_chash_mark_in_transit(struct cnode *cp)
449{
450 lck_mtx_lock(&hfs_chash_mutex);
451
452 SET(cp->c_hflag, H_TRANSIT);
453
454 lck_mtx_unlock(&hfs_chash_mutex);
455}