2 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
32 * Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995
33 * The Regents of the University of California. All rights reserved.
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. All advertising materials mentioning features or use of this software
44 * must display the following acknowledgement:
45 * This product includes software developed by the University of
46 * California, Berkeley and its contributors.
47 * 4. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * derived from @(#)ufs_ihash.c 8.7 (Berkeley) 5/17/95
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/vnode.h>
70 #include <sys/kernel.h>
71 #include <sys/malloc.h>
73 #include <sys/queue.h>
76 #include "hfs.h" /* XXX bringup */
77 #include "hfs_cnode.h"
/*
 * Lock attributes and groups defined elsewhere in the HFS code and
 * referenced by this file (used below when initializing per-cnode locks).
 */
79 extern lck_attr_t
* hfs_lock_attr
;
80 extern lck_grp_t
* hfs_mutex_group
;
81 extern lck_grp_t
* hfs_rwlock_group
;
/*
 * Lock group / attribute objects owned by this file; allocated in the
 * hash-init routine below.
 */
83 lck_grp_t
* chash_lck_grp
;
84 lck_grp_attr_t
* chash_lck_grp_attr
;
85 lck_attr_t
* chash_lck_attr
;
/*
 * Structures associated with cnode caching: a chained hash table of cnodes
 * (LIST_HEAD chains), keyed by (device, file id).
 */
88 * Structures associated with cnode caching.
90 LIST_HEAD(cnodehashhead
, cnode
) *cnodehashtbl
;
91 u_long cnodehash
; /* size of hash table - 1 */
/*
 * Map a (device, inode number) pair to its hash chain head.  cnodehash is a
 * power-of-two-minus-one mask, so the sum is masked into the table range.
 */
92 #define CNODEHASH(device, inum) (&cnodehashtbl[((device) + (inum)) & cnodehash])
/* Single mutex serializing all access to the cnode hash table. */
94 lck_mtx_t hfs_chash_mutex
;
/*
 * Initialize the cnode hash table and its locking machinery.
 * NOTE(review): the function signature line is missing from this (lossy)
 * extraction; only the body statements are visible.
 */
98 * Initialize cnode hash table.
/* Size the table from desiredvnodes; hashinit also sets the cnodehash mask. */
104 cnodehashtbl
= hashinit(desiredvnodes
, M_HFSMNT
, &cnodehash
);
/* Allocate the lock group/attr objects, then init the global hash mutex. */
106 chash_lck_grp_attr
= lck_grp_attr_alloc_init();
107 chash_lck_grp
= lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr
);
108 chash_lck_attr
= lck_attr_alloc_init();
110 lck_mtx_init(&hfs_chash_mutex
, chash_lck_grp
, chash_lck_attr
);
/*
 * hfs_chash_getvnode - look up the in-core cnode for (dev, inum) and return
 * its data or resource-fork vnode (selected by wantrsrc).
 * NOTE(review): this extraction is lossy — declarations of vp/vid/error,
 * braces, gotos and return statements are missing from this view (original
 * line numbers jump, e.g. 140->142, 163->165).  Treat the logic below as a
 * fragment of the full function.
 */
115 * Use the device, inum pair to find the incore cnode.
117 * If it is in core, but locked, wait for it.
121 hfs_chash_getvnode(dev_t dev
, ino_t inum
, int wantrsrc
, int skiplock
)
129 * Go through the hash list
130 * If a cnode is in the process of being cleaned out or being
131 * allocated, wait for it to be finished and then try again.
/* All hash-chain traversal happens under the global chash mutex. */
134 lck_mtx_lock(&hfs_chash_mutex
);
/* Walk the chain for this (dev, inum) bucket. */
135 for (cp
= CNODEHASH(dev
, inum
)->lh_first
; cp
; cp
= cp
->c_hash
.le_next
) {
/* Skip entries that do not match both the file id and the device. */
136 if ((cp
->c_fileid
!= inum
) || (cp
->c_dev
!= dev
))
138 /* Wait if cnode is being created or reclaimed. */
139 if (ISSET(cp
->c_hflag
, H_ALLOC
| H_TRANSIT
| H_ATTACH
)) {
140 SET(cp
->c_hflag
, H_WAITING
);
/* PDROP: msleep releases the chash mutex while we block. */
142 (void) msleep(cp
, &hfs_chash_mutex
, PDROP
| PINOD
,
143 "hfs_chash_getvnode", 0);
147 * Skip cnodes that are not in the name space anymore
148 * note that this check is done outside of the proper
149 * lock to catch nodes already in this state... this
150 * state must be rechecked after we acquire the cnode lock
152 if (cp
->c_flag
& (C_NOEXISTS
| C_DELETED
)) {
155 /* Obtain the desired vnode. */
/* Pick resource fork or data fork vnode per the wantrsrc flag. */
156 vp
= wantrsrc
? cp
->c_rsrc_vp
: cp
->c_vp
;
161 lck_mtx_unlock(&hfs_chash_mutex
);
/* Grab a vnode reference, validating against the saved vnode id. */
163 if ((error
= vnode_getwithvid(vp
, vid
))) {
165 * If vnode is being reclaimed, or has
166 * already changed identity, no need to wait
/* Unless the caller asked to skip it, take the cnode lock exclusively. */
170 if (!skiplock
&& hfs_lock(cp
, HFS_EXCLUSIVE_LOCK
) != 0) {
176 * Skip cnodes that are not in the name space anymore
177 * we need to check again with the cnode lock held
178 * because we may have blocked acquiring the vnode ref
179 * or the lock on the cnode which would allow the node
/* Re-check NOEXISTS/DELETED now that the cnode lock is held. */
182 if (cp
->c_flag
& (C_NOEXISTS
| C_DELETED
)) {
192 lck_mtx_unlock(&hfs_chash_mutex
);
/*
 * hfs_chash_snoop - look up the in-core cnode for (dev, inum) and, if it is
 * stable (not being created/attached/reclaimed), invoke the caller-supplied
 * callout on its catalog descriptor and attributes under the chash mutex.
 * NOTE(review): lossy extraction — the function's braces, 'continue'/'break'
 * statements and return path are missing from this view.
 */
198 * Use the device, fileid pair to find the incore cnode.
199 * If no cnode if found one is created
201 * If it is in core, but locked, wait for it.
205 hfs_chash_snoop(dev_t dev
, ino_t inum
, int (*callout
)(const struct cat_desc
*,
206 const struct cat_attr
*, void *), void * arg
)
212 * Go through the hash list
213 * If a cnode is in the process of being cleaned out or being
214 * allocated, wait for it to be finished and then try again.
/* Chain walk is done entirely under the global chash mutex. */
216 lck_mtx_lock(&hfs_chash_mutex
);
217 for (cp
= CNODEHASH(dev
, inum
)->lh_first
; cp
; cp
= cp
->c_hash
.le_next
) {
/* Skip non-matching (fileid, dev) entries. */
218 if ((cp
->c_fileid
!= inum
) || (cp
->c_dev
!= dev
))
220 /* Skip cnodes being created or reclaimed. */
/* Unlike getvnode, this path does NOT wait on transient cnodes. */
221 if (!ISSET(cp
->c_hflag
, H_ALLOC
| H_TRANSIT
| H_ATTACH
)) {
/* Run the callout with the mutex still held; keep callouts short. */
222 result
= callout(&cp
->c_desc
, &cp
->c_attr
, arg
);
226 lck_mtx_unlock(&hfs_chash_mutex
);
/*
 * hfs_chash_getcnode - find the in-core cnode for (dev, inum), or allocate
 * and insert a new one (tagged H_ALLOC) if none exists.  *vpp receives the
 * requested vnode when one is already attached.
 * NOTE(review): lossy extraction — loop labels, 'goto loop' retry paths,
 * declarations (cp/vp/vid), braces and return statements are missing from
 * this view (original line numbers jump, e.g. 284->288, 363->end).
 */
232 * Use the device, fileid pair to find the incore cnode.
233 * If no cnode if found one is created
235 * If it is in core, but locked, wait for it.
239 hfs_chash_getcnode(dev_t dev
, ino_t inum
, struct vnode
**vpp
, int wantrsrc
, int skiplock
)
/* ncp holds a speculative allocation made while the mutex was dropped. */
242 struct cnode
*ncp
= NULL
;
247 * Go through the hash list
248 * If a cnode is in the process of being cleaned out or being
249 * allocated, wait for it to be finished and then try again.
252 lck_mtx_lock(&hfs_chash_mutex
);
/* Search the bucket chain for an existing (dev, inum) match. */
255 for (cp
= CNODEHASH(dev
, inum
)->lh_first
; cp
; cp
= cp
->c_hash
.le_next
) {
256 if ((cp
->c_fileid
!= inum
) || (cp
->c_dev
!= dev
))
259 * Wait if cnode is being created, attached to or reclaimed.
/* No PDROP here (contrast getvnode): mutex is re-taken after msleep. */
261 if (ISSET(cp
->c_hflag
, H_ALLOC
| H_ATTACH
| H_TRANSIT
)) {
262 SET(cp
->c_hflag
, H_WAITING
);
264 (void) msleep(cp
, &hfs_chash_mutex
, PINOD
,
265 "hfs_chash_getcnode", 0);
269 * Skip cnodes that are not in the name space anymore
270 * note that this check is done outside of the proper
271 * lock to catch nodes already in this state... this
272 * state must be rechecked after we acquire the cnode lock
274 if (cp
->c_flag
& (C_NOEXISTS
| C_DELETED
)) {
/* Select data or resource fork vnode per wantrsrc. */
277 vp
= wantrsrc
? cp
->c_rsrc_vp
: cp
->c_vp
;
280 * The desired vnode isn't there so tag the cnode.
/* H_ATTACH marks that a vnode attach is in progress on this cnode. */
282 SET(cp
->c_hflag
, H_ATTACH
);
284 lck_mtx_unlock(&hfs_chash_mutex
);
288 lck_mtx_unlock(&hfs_chash_mutex
);
/* Validate the vnode identity; failure presumably retries — lines missing. */
290 if (vnode_getwithvid(vp
, vid
))
295 * someone else won the race to create
296 * this cnode and add it to the hash
297 * just dump our allocation
/* Free our speculative cnode allocation; the existing one wins. */
299 FREE_ZONE(ncp
, sizeof(struct cnode
), M_HFSNODE
);
302 if (!skiplock
&& hfs_lock(cp
, HFS_EXCLUSIVE_LOCK
) != 0) {
305 lck_mtx_lock(&hfs_chash_mutex
);
308 CLR(cp
->c_hflag
, H_ATTACH
);
312 * Skip cnodes that are not in the name space anymore
313 * we need to check again with the cnode lock held
314 * because we may have blocked acquiring the vnode ref
315 * or the lock on the cnode which would allow the node
/* Re-check NOEXISTS/DELETED under the cnode lock. */
318 if (cp
->c_flag
& (C_NOEXISTS
| C_DELETED
)) {
323 lck_mtx_lock(&hfs_chash_mutex
);
326 CLR(cp
->c_hflag
, H_ATTACH
);
334 * Allocate a new cnode
/* Creation requires taking the cnode lock, so skiplock is invalid here. */
337 panic("%s - should never get here when skiplock is set \n", __FUNCTION__
);
/* Drop the mutex across the blocking M_WAITOK allocation. */
340 lck_mtx_unlock(&hfs_chash_mutex
);
342 MALLOC_ZONE(ncp
, struct cnode
*, sizeof(struct cnode
), M_HFSNODE
, M_WAITOK
);
344 * since we dropped the chash lock,
345 * we need to go back and re-verify
346 * that this node hasn't come into
/* Initialize the fresh cnode: zero it, mark H_ALLOC, set its file id. */
351 bzero(ncp
, sizeof(struct cnode
));
352 SET(ncp
->c_hflag
, H_ALLOC
);
353 ncp
->c_fileid
= inum
;
355 TAILQ_INIT(&ncp
->c_hintlist
); /* make the list empty */
/* Per-cnode rwlock uses the shared HFS lock group/attr externs. */
357 lck_rw_init(&ncp
->c_rwlock
, hfs_rwlock_group
, hfs_lock_attr
);
359 (void) hfs_lock(ncp
, HFS_EXCLUSIVE_LOCK
);
361 /* Insert the new cnode with it's H_ALLOC flag set */
362 LIST_INSERT_HEAD(CNODEHASH(dev
, inum
), ncp
, c_hash
);
363 lck_mtx_unlock(&hfs_chash_mutex
);
/*
 * hfs_chashwakeup - clear the given hash-state flags (hflags) on a cnode
 * and, if anyone was sleeping on it (H_WAITING), clear that too.
 * NOTE(review): the actual wakeup() call (original line ~380) is missing
 * from this lossy extraction, along with braces and the return.
 */
372 hfs_chashwakeup(struct cnode
*cp
, int hflags
)
/* All c_hflag manipulation is serialized by the global chash mutex. */
374 lck_mtx_lock(&hfs_chash_mutex
);
376 CLR(cp
->c_hflag
, hflags
);
378 if (ISSET(cp
->c_hflag
, H_WAITING
)) {
379 CLR(cp
->c_hflag
, H_WAITING
);
382 lck_mtx_unlock(&hfs_chash_mutex
);
/*
 * hfs_chash_rehash - atomically move two cnodes to the buckets matching
 * their current (c_dev, c_fileid), after their identities have changed.
 * Both removals and reinsertions happen under one hold of the chash mutex
 * so no lookup can observe an intermediate state.
 */
387 * Re-hash two cnodes in the hash table.
391 hfs_chash_rehash(struct cnode
*cp1
, struct cnode
*cp2
)
393 lck_mtx_lock(&hfs_chash_mutex
);
/* Unlink both first, then insert both at their (possibly new) buckets. */
395 LIST_REMOVE(cp1
, c_hash
);
396 LIST_REMOVE(cp2
, c_hash
);
397 LIST_INSERT_HEAD(CNODEHASH(cp1
->c_dev
, cp1
->c_fileid
), cp1
, c_hash
);
398 LIST_INSERT_HEAD(CNODEHASH(cp2
->c_dev
, cp2
->c_fileid
), cp2
, c_hash
);
400 lck_mtx_unlock(&hfs_chash_mutex
);
/*
 * hfs_chashremove - unlink a cnode from the hash table, unless a vnode
 * attach is in progress (H_ATTACH), in which case the removal is refused.
 * NOTE(review): the return statements (presumably an error code on the
 * H_ATTACH path and success otherwise) are missing from this lossy view.
 */
405 * Remove a cnode from the hash table.
409 hfs_chashremove(struct cnode
*cp
)
411 lck_mtx_lock(&hfs_chash_mutex
);
413 /* Check if a vnode is getting attached */
414 if (ISSET(cp
->c_hflag
, H_ATTACH
)) {
/* Bail out without touching the chain while an attach is underway. */
415 lck_mtx_unlock(&hfs_chash_mutex
);
418 LIST_REMOVE(cp
, c_hash
);
/* Null the link fields so a stale entry is detectable after removal. */
419 cp
->c_hash
.le_next
= NULL
;
420 cp
->c_hash
.le_prev
= NULL
;
422 lck_mtx_unlock(&hfs_chash_mutex
);
/*
 * hfs_chash_abort - forcibly unlink a cnode from the hash table (used when
 * cnode setup is being abandoned), clear its H_ATTACH/H_ALLOC state, and
 * release any waiters.
 * NOTE(review): the wakeup() call inside the H_WAITING branch is missing
 * from this lossy extraction.
 */
427 * Remove a cnode from the hash table and wakeup any waiters.
431 hfs_chash_abort(struct cnode
*cp
)
433 lck_mtx_lock(&hfs_chash_mutex
);
/* Unconditional removal — contrast hfs_chashremove, which honors H_ATTACH. */
435 LIST_REMOVE(cp
, c_hash
);
436 cp
->c_hash
.le_next
= NULL
;
437 cp
->c_hash
.le_prev
= NULL
;
/* Drop the in-progress flags so the cnode no longer looks transient. */
439 CLR(cp
->c_hflag
, H_ATTACH
| H_ALLOC
);
440 if (ISSET(cp
->c_hflag
, H_WAITING
)) {
441 CLR(cp
->c_hflag
, H_WAITING
);
444 lck_mtx_unlock(&hfs_chash_mutex
);
/*
 * hfs_chash_mark_in_transit - set H_TRANSIT on a cnode under the chash
 * mutex, so concurrent lookups will wait (or skip) while the cnode is
 * being reclaimed / transitioned.  ("transistion" in the original comment
 * is a typo for "transition".)
 */
449 * mark a cnode as in transistion
453 hfs_chash_mark_in_transit(struct cnode
*cp
)
455 lck_mtx_lock(&hfs_chash_mutex
);
457 SET(cp
->c_hflag
, H_TRANSIT
);
459 lck_mtx_unlock(&hfs_chash_mutex
);