/*
 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      derived from @(#)ufs_ihash.c    8.7 (Berkeley) 5/17/95
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include "hfs.h"        /* XXX bringup */
#include "hfs_cnode.h"
extern lck_attr_t *  hfs_lock_attr;
extern lck_grp_t *  hfs_mutex_group;
extern lck_grp_t *  hfs_rwlock_group;

lck_grp_t *  chash_lck_grp;
lck_grp_attr_t *  chash_lck_grp_attr;
lck_attr_t *  chash_lck_attr;
/*
 * Structures associated with cnode caching.
 */
LIST_HEAD(cnodehashhead, cnode) *cnodehashtbl;
u_long cnodehash;               /* size of hash table - 1 */
#define CNODEHASH(device, inum) (&cnodehashtbl[((device) + (inum)) & cnodehash])
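/*
 * CNODEHASH folds the device and file id into a bucket index by masking
 * with cnodehash (table size - 1; the table size must therefore be a
 * power of two).  Illustrative arithmetic, assuming a 256-bucket table
 * (cnodehash == 0xff):
 *
 *      dev = 5, inum = 0x1234  ==>  (5 + 0x1234) & 0xff == 0x39
 *
 * so that cnode hangs off cnodehashtbl[0x39].
 */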
lck_mtx_t  hfs_chash_mutex;
/*
 * Initialize cnode hash table.
 */
__private_extern__
void
hfs_chashinit()
{
        cnodehashtbl = hashinit(desiredvnodes, M_HFSMNT, &cnodehash);

        chash_lck_grp_attr = lck_grp_attr_alloc_init();
        lck_grp_attr_setstat(chash_lck_grp_attr);
        chash_lck_grp = lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr);

        chash_lck_attr = lck_attr_alloc_init();
        //lck_attr_setdebug(chash_lck_attr);

        lck_mtx_init(&hfs_chash_mutex, chash_lck_grp, chash_lck_attr);
}
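/*
 * Note (an assumption about the kernel's hashinit(), not spelled out
 * here): the table size it picks is a power of two and (size - 1) is
 * stored in cnodehash, which is what makes the mask in CNODEHASH()
 * valid.  hfs_chashinit() must run once, before any other routine in
 * this file touches the table or hfs_chash_mutex.
 */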
/*
 * Use the device, inum pair to find the incore cnode.
 *
 * If it is in core, but locked, wait for it.
 */
__private_extern__
struct vnode *
hfs_chash_getvnode(dev_t dev, ino_t inum, int wantrsrc, int skiplock)
{
        struct cnode *cp;
        struct vnode *vp;
        int error;
        uint32_t vid;

        /*
         * Go through the hash list.
         * If a cnode is in the process of being cleaned out or being
         * allocated, wait for it to be finished and then try again.
         */
loop:
        lck_mtx_lock(&hfs_chash_mutex);
        for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
                if ((cp->c_fileid != inum) || (cp->c_dev != dev))
                        continue;
                /* Wait if cnode is being created or reclaimed. */
                if (ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
                        SET(cp->c_hflag, H_WAITING);

                        (void) msleep(cp, &hfs_chash_mutex, PDROP | PINOD,
                                      "hfs_chash_getvnode", 0);
                        goto loop;
                }
                /*
                 * Skip cnodes that are not in the name space anymore;
                 * note that this check is done outside of the proper
                 * lock to catch nodes already in this state... this
                 * state must be rechecked after we acquire the cnode lock.
                 */
                if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
                        continue;
                }
                /* Obtain the desired vnode. */
                vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
                if (vp == NULLVP)
                        goto exit;

                vid = vnode_vid(vp);
                lck_mtx_unlock(&hfs_chash_mutex);

                if ((error = vnode_getwithvid(vp, vid))) {
                        /*
                         * If the vnode is being reclaimed, or has
                         * already changed identity, no need to wait.
                         */
                        return (NULL);
                }
                if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
                        vnode_put(vp);
                        return (NULL);
                }
                /*
                 * Skip cnodes that are not in the name space anymore;
                 * we need to check again with the cnode lock held
                 * because we may have blocked acquiring the vnode ref
                 * or the lock on the cnode, which would allow the node
                 * to be unlinked.
                 */
                if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
                        if (!skiplock)
                                hfs_unlock(cp);
                        vnode_put(vp);
                        return (NULL);
                }
                return (vp);
        }
exit:
        lck_mtx_unlock(&hfs_chash_mutex);
        return (NULL);
}
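/*
 * Illustrative caller pattern (a sketch, not part of this file; dev and
 * inum are assumed to be in scope):
 *
 *      struct vnode *vp;
 *
 *      vp = hfs_chash_getvnode(dev, inum, 0, 0);
 *      if (vp == NULL) {
 *              ... not in the hash (or reclaimed/deleted): the caller
 *              must consult the catalog and create the vnode ...
 *      } else {
 *              ... vp holds an iocount and, since skiplock was 0, its
 *              cnode is locked exclusive; use it, then hfs_unlock()
 *              and vnode_put() ...
 *      }
 *
 * Passing wantrsrc != 0 selects the resource fork vnode (c_rsrc_vp)
 * instead of the data fork vnode (c_vp).
 */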
/*
 * Use the device, fileid pair to find the incore cnode.
 * If one is found and it is not being created or reclaimed,
 * invoke the callout on its catalog descriptor and attributes.
 */
__private_extern__
int
hfs_chash_snoop(dev_t dev, ino_t inum, int (*callout)(const struct cat_desc *,
                const struct cat_attr *, void *), void * arg)
{
        struct cnode *cp;
        int result = ENOENT;

        /*
         * Go through the hash list.
         * If a cnode is in the process of being cleaned out or being
         * allocated, skip it; we only snoop on stable entries.
         */
        lck_mtx_lock(&hfs_chash_mutex);
        for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
                if ((cp->c_fileid != inum) || (cp->c_dev != dev))
                        continue;
                /* Skip cnodes being created or reclaimed. */
                if (!ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
                        result = callout(&cp->c_desc, &cp->c_attr, arg);
                }
                break;
        }
        lck_mtx_unlock(&hfs_chash_mutex);

        return (result);
}
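/*
 * Example callout (hypothetical, for illustration only): snapshot the
 * cached attributes without taking a vnode reference.
 *
 *      static int
 *      snoop_copy_attr(const struct cat_desc *descp,
 *                      const struct cat_attr *attrp, void *arg)
 *      {
 *              *(struct cat_attr *)arg = *attrp;
 *              return (0);
 *      }
 *
 *      struct cat_attr cattr;
 *
 *      if (hfs_chash_snoop(dev, inum, snoop_copy_attr, &cattr) == 0) {
 *              ... cattr now holds the in-core attributes ...
 *      }
 *
 * The callout runs with hfs_chash_mutex held, so it must not block.
 */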
/*
 * Use the device, fileid pair to find the incore cnode.
 * If no cnode is found, one is created.
 *
 * If it is in core, but locked, wait for it.
 */
__private_extern__
struct cnode *
hfs_chash_getcnode(dev_t dev, ino_t inum, struct vnode **vpp, int wantrsrc, int skiplock)
{
        struct cnode *cp;
        struct cnode *ncp = NULL;
        struct vnode *vp;
        uint32_t vid;

        /*
         * Go through the hash list.
         * If a cnode is in the process of being cleaned out or being
         * allocated, wait for it to be finished and then try again.
         */
loop:
        lck_mtx_lock(&hfs_chash_mutex);

loop_with_lock:
        for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
                if ((cp->c_fileid != inum) || (cp->c_dev != dev))
                        continue;
                /*
                 * Wait if cnode is being created, attached to or reclaimed.
                 */
                if (ISSET(cp->c_hflag, H_ALLOC | H_ATTACH | H_TRANSIT)) {
                        SET(cp->c_hflag, H_WAITING);

                        (void) msleep(cp, &hfs_chash_mutex, PINOD,
                                      "hfs_chash_getcnode", 0);
                        goto loop_with_lock;
                }
                /*
                 * Skip cnodes that are not in the name space anymore;
                 * note that this check is done outside of the proper
                 * lock to catch nodes already in this state... this
                 * state must be rechecked after we acquire the cnode lock.
                 */
                if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
                        continue;
                }
                vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
                if (vp == NULLVP) {
                        /*
                         * The desired vnode isn't there so tag the cnode.
                         */
                        SET(cp->c_hflag, H_ATTACH);

                        lck_mtx_unlock(&hfs_chash_mutex);
                } else {
                        vid = vnode_vid(vp);

                        lck_mtx_unlock(&hfs_chash_mutex);

                        if (vnode_getwithvid(vp, vid))
                                goto loop;
                }
                if (ncp) {
                        /*
                         * Someone else won the race to create
                         * this cnode and add it to the hash;
                         * just dump our allocation.
                         */
                        FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
                        ncp = NULL;
                }
                if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
                        if (vp != NULLVP)
                                vnode_put(vp);
                        lck_mtx_lock(&hfs_chash_mutex);

                        if (vp == NULLVP)
                                CLR(cp->c_hflag, H_ATTACH);
                        goto loop_with_lock;
                }
                /*
                 * Skip cnodes that are not in the name space anymore;
                 * we need to check again with the cnode lock held
                 * because we may have blocked acquiring the vnode ref
                 * or the lock on the cnode, which would allow the node
                 * to be unlinked.
                 */
                if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
                        if (!skiplock)
                                hfs_unlock(cp);
                        if (vp != NULLVP)
                                vnode_put(vp);
                        lck_mtx_lock(&hfs_chash_mutex);

                        if (vp == NULLVP)
                                CLR(cp->c_hflag, H_ATTACH);
                        goto loop_with_lock;
                }
                *vpp = vp;
                return (cp);
        }

        /*
         * Allocate a new cnode.
         */
        if (skiplock)
                panic("%s - should never get here when skiplock is set \n", __FUNCTION__);

        if (ncp == NULL) {
                lck_mtx_unlock(&hfs_chash_mutex);

                MALLOC_ZONE(ncp, struct cnode *, sizeof(struct cnode), M_HFSNODE, M_WAITOK);
                /*
                 * Since we dropped the chash lock,
                 * we need to go back and re-verify
                 * that this node hasn't come into
                 * existence in the meantime.
                 */
                goto loop;
        }
        bzero(ncp, sizeof(struct cnode));
        SET(ncp->c_hflag, H_ALLOC);
        ncp->c_fileid = inum;
        ncp->c_dev = dev;

        lck_rw_init(&ncp->c_rwlock, hfs_rwlock_group, hfs_lock_attr);
        (void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK);

        /* Insert the new cnode with its H_ALLOC flag set. */
        LIST_INSERT_HEAD(CNODEHASH(dev, inum), ncp, c_hash);
        lck_mtx_unlock(&hfs_chash_mutex);

        *vpp = NULL;
        return (ncp);
}
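/*
 * Typical creation protocol (a sketch; the catalog lookup and vnode
 * creation steps are assumed, they live outside this file):
 *
 *      struct vnode *vp;
 *      struct cnode *cp;
 *
 *      cp = hfs_chash_getcnode(dev, inum, &vp, 0, 0);
 *      if (vp == NULL) {
 *              ... cp is either freshly inserted (H_ALLOC set) or had
 *              no vnode of the desired type (H_ATTACH set); fill it in,
 *              create the vnode, then release waiters: ...
 *              hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
 *      }
 *
 * Every path that sets H_ALLOC, H_ATTACH or H_TRANSIT must eventually
 * clear it through hfs_chashwakeup() or hfs_chash_abort(); otherwise
 * sleepers in the lookup loops above never wake up.
 */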
__private_extern__
void
hfs_chashwakeup(struct cnode *cp, int hflags)
{
        lck_mtx_lock(&hfs_chash_mutex);

        CLR(cp->c_hflag, hflags);

        if (ISSET(cp->c_hflag, H_WAITING)) {
                CLR(cp->c_hflag, H_WAITING);
                wakeup((caddr_t)cp);
        }
        lck_mtx_unlock(&hfs_chash_mutex);
}
/*
 * Re-hash two cnodes in the hash table.
 */
__private_extern__
void
hfs_chash_rehash(struct cnode *cp1, struct cnode *cp2)
{
        lck_mtx_lock(&hfs_chash_mutex);

        LIST_REMOVE(cp1, c_hash);
        LIST_REMOVE(cp2, c_hash);
        LIST_INSERT_HEAD(CNODEHASH(cp1->c_dev, cp1->c_fileid), cp1, c_hash);
        LIST_INSERT_HEAD(CNODEHASH(cp2->c_dev, cp2->c_fileid), cp2, c_hash);

        lck_mtx_unlock(&hfs_chash_mutex);
}
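/*
 * Note: both removals happen before either insertion, all under
 * hfs_chash_mutex, so concurrent lookups never observe a half-swapped
 * state.  The expected caller (an assumption; it lives outside this
 * file) is the exchangedata path, after two cnodes have swapped
 * c_fileid values and each now belongs in the other's bucket.
 */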
/*
 * Remove a cnode from the hash table.
 */
__private_extern__
int
hfs_chashremove(struct cnode *cp)
{
        lck_mtx_lock(&hfs_chash_mutex);

        /* Check if a vnode is getting attached. */
        if (ISSET(cp->c_hflag, H_ATTACH)) {
                lck_mtx_unlock(&hfs_chash_mutex);
                return (EBUSY);
        }
        LIST_REMOVE(cp, c_hash);
        cp->c_hash.le_next = NULL;
        cp->c_hash.le_prev = NULL;

        lck_mtx_unlock(&hfs_chash_mutex);
        return (0);
}
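/*
 * Caller sketch (illustrative): a racing hfs_chash_getcnode() may have
 * tagged this cnode with H_ATTACH, so removal can fail and the caller
 * must be ready to keep the cnode alive:
 *
 *      if (hfs_chashremove(cp) == 0) {
 *              ... unhashed; safe to continue tearing the cnode down ...
 *      } else {
 *              ... EBUSY: a vnode is being attached; bail out ...
 *      }
 */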
/*
 * Remove a cnode from the hash table and wakeup any waiters.
 */
__private_extern__
void
hfs_chash_abort(struct cnode *cp)
{
        lck_mtx_lock(&hfs_chash_mutex);

        LIST_REMOVE(cp, c_hash);
        cp->c_hash.le_next = NULL;
        cp->c_hash.le_prev = NULL;

        CLR(cp->c_hflag, H_ATTACH | H_ALLOC);
        if (ISSET(cp->c_hflag, H_WAITING)) {
                CLR(cp->c_hflag, H_WAITING);
                wakeup((caddr_t)cp);
        }
        lck_mtx_unlock(&hfs_chash_mutex);
}
/*
 * Mark a cnode as in transition.
 */
__private_extern__
void
hfs_chash_mark_in_transit(struct cnode *cp)
{
        lck_mtx_lock(&hfs_chash_mutex);

        SET(cp->c_hflag, H_TRANSIT);

        lck_mtx_unlock(&hfs_chash_mutex);
}
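/*
 * Note: H_TRANSIT is the reclaim-side half of the busy protocol.
 * Setting it makes the lookup loops above treat the cnode as busy
 * (they sleep or skip it); the code that set it is expected to finish
 * the transition and then clear the flag, e.g. via
 * hfs_chashwakeup(cp, H_TRANSIT) or hfs_chash_abort(), so that any
 * waiters are released.
 */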