/*
 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	derived from @(#)ufs_ihash.c	8.7 (Berkeley) 5/17/95
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include "hfs.h"	/* XXX bringup */
#include "hfs_cnode.h"
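/*
 * "hfs_cnode.h" supplies the struct cnode definition used throughout this
 * file, including the c_hash list linkage and the H_ALLOC / H_ATTACH /
 * H_TRANSIT / H_WAITING bits of c_hflag that are manipulated below.
 */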
extern lck_attr_t *hfs_lock_attr;
extern lck_grp_t *hfs_mutex_group;
extern lck_grp_t *hfs_rwlock_group;

lck_grp_t *chash_lck_grp;
lck_grp_attr_t *chash_lck_grp_attr;
lck_attr_t *chash_lck_attr;
/*
 * Structures associated with cnode caching.
 */
LIST_HEAD(cnodehashhead, cnode) *cnodehashtbl;
u_long cnodehash;				/* size of hash table - 1 */
#define CNODEHASH(device, inum) (&cnodehashtbl[((device) + (inum)) & cnodehash])

lck_mtx_t hfs_chash_mutex;
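/*
 * hfs_chash_mutex serializes all access to the hash table: chain walks,
 * LIST_INSERT_HEAD/LIST_REMOVE, and changes to a cnode's c_hflag bits.
 * Every routine below takes it before touching cnodehashtbl.
 */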
/*
 * Initialize cnode hash table.
 */
__private_extern__
void
hfs_chashinit()
{
	cnodehashtbl = hashinit(desiredvnodes, M_HFSMNT, &cnodehash);

	chash_lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(chash_lck_grp_attr);
	chash_lck_grp = lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr);

	chash_lck_attr = lck_attr_alloc_init();
	//lck_attr_setdebug(chash_lck_attr);

	lck_mtx_init(&hfs_chash_mutex, chash_lck_grp, chash_lck_attr);
}
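/*
 * Note: hashinit() sizes the table to a power of two based on desiredvnodes
 * and returns the corresponding mask (table size - 1) through its third
 * argument, which is why CNODEHASH() can simply AND with cnodehash.
 */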
/*
 * Use the device, inum pair to find the incore cnode.
 *
 * If it is in core, but locked, wait for it.
 */
__private_extern__
struct vnode *
hfs_chash_getvnode(dev_t dev, ino_t inum, int wantrsrc, int skiplock)
{
	struct cnode *cp;
	struct vnode *vp;
	int error;
	uint32_t vid;

	/*
	 * Go through the hash list
	 * If a cnode is in the process of being cleaned out or being
	 * allocated, wait for it to be finished and then try again.
	 */
loop:
	lck_mtx_lock(&hfs_chash_mutex);
	for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
		if ((cp->c_fileid != inum) || (cp->c_dev != dev))
			continue;
		/* Wait if cnode is being created or reclaimed. */
		if (ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
			SET(cp->c_hflag, H_WAITING);

			(void) msleep(cp, &hfs_chash_mutex, PDROP | PINOD,
			              "hfs_chash_getvnode", 0);
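			/*
			 * PDROP means msleep() released hfs_chash_mutex and
			 * did not reacquire it, so the search restarts from
			 * the top where the mutex is taken again.
			 */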
			goto loop;
		}
		/*
		 * Skip cnodes that are not in the name space anymore
		 * note that this check is done outside of the proper
		 * lock to catch nodes already in this state... this
		 * state must be rechecked after we acquire the cnode lock
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			continue;
		}
		/* Obtain the desired vnode. */
		vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
		if (vp == NULLVP)
			goto exit;

		vid = vnode_vid(vp);
		lck_mtx_unlock(&hfs_chash_mutex);

		if ((error = vnode_getwithvid(vp, vid))) {
			/*
			 * If vnode is being reclaimed, or has
			 * already changed identity, no need to wait
			 */
			return (NULL);
		}
		if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
			vnode_put(vp);
			return (NULL);
		}

		/*
		 * Skip cnodes that are not in the name space anymore
		 * we need to check again with the cnode lock held
		 * because we may have blocked acquiring the vnode ref
		 * or the lock on the cnode which would allow the node
		 * to be unlinked
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			if (!skiplock)
				hfs_unlock(cp);
			vnode_put(vp);
			return (NULL);
		}
		return (vp);
	}
exit:
	lck_mtx_unlock(&hfs_chash_mutex);
	return (NULL);
}
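/*
 * Typical use (illustrative sketch; the hfs_vget-style caller below is an
 * assumption, not part of this file): probe the hash first and fall back
 * to a catalog lookup only on a miss, e.g.
 *
 *	vp = hfs_chash_getvnode(dev, inum, wantrsrc, skiplock);
 *	if (vp == NULL)
 *		... build the cnode/vnode from the catalog ...
 */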
/*
 * Use the device, fileid pair to find the incore cnode.
 * If one is found and it is not being created or reclaimed,
 * pass its catalog descriptor and attributes to the callout.
 */
__private_extern__
int
hfs_chash_snoop(dev_t dev, ino_t inum, int (*callout)(const struct cat_desc *,
                const struct cat_attr *, void *), void * arg)
{
	struct cnode *cp;
	int result = ENOENT;

	/*
	 * Go through the hash list
	 * If a cnode is in the process of being cleaned out or being
	 * allocated, skip it (the callout is not invoked).
	 */
	lck_mtx_lock(&hfs_chash_mutex);
	for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
		if ((cp->c_fileid != inum) || (cp->c_dev != dev))
			continue;
		/* Skip cnodes being created or reclaimed. */
		if (!ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
			result = callout(&cp->c_desc, &cp->c_attr, arg);
		}
		break;
	}
	lck_mtx_unlock(&hfs_chash_mutex);
	return (result);
}
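/*
 * A callout suitable for hfs_chash_snoop() only inspects the passed-in
 * catalog descriptor/attributes while the chash mutex is held.  Minimal
 * sketch (the helper name and the ca_mode test are assumptions, not part
 * of this file):
 */
#if 0
static int
snoop_callout_example(const struct cat_desc *descp, const struct cat_attr *attrp, void *arg)
{
	(void)descp;
	/* report back whether the in-core cnode is a directory */
	*(int *)arg = S_ISDIR(attrp->ca_mode);
	return (0);
}
#endif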
/*
 * Use the device, fileid pair to find the incore cnode.
 * If no cnode is found, one is created.
 *
 * If it is in core, but locked, wait for it.
 */
__private_extern__
struct cnode *
hfs_chash_getcnode(dev_t dev, ino_t inum, struct vnode **vpp, int wantrsrc, int skiplock)
{
	struct cnode	*cp;
	struct cnode	*ncp = NULL;
	vnode_t		vp;
	uint32_t	vid;

	/*
	 * Go through the hash list
	 * If a cnode is in the process of being cleaned out or being
	 * allocated, wait for it to be finished and then try again.
	 */
loop:
	lck_mtx_lock(&hfs_chash_mutex);

loop_with_lock:
	for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
		if ((cp->c_fileid != inum) || (cp->c_dev != dev))
			continue;
		/*
		 * Wait if cnode is being created, attached to or reclaimed.
		 */
		if (ISSET(cp->c_hflag, H_ALLOC | H_ATTACH | H_TRANSIT)) {
			SET(cp->c_hflag, H_WAITING);

			(void) msleep(cp, &hfs_chash_mutex, PINOD,
			              "hfs_chash_getcnode", 0);
			goto loop_with_lock;
		}
		/*
		 * Skip cnodes that are not in the name space anymore
		 * note that this check is done outside of the proper
		 * lock to catch nodes already in this state... this
		 * state must be rechecked after we acquire the cnode lock
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			continue;
		}
		vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
		if (vp == NULL) {
			/*
			 * The desired vnode isn't there so tag the cnode.
			 */
			SET(cp->c_hflag, H_ATTACH);

			lck_mtx_unlock(&hfs_chash_mutex);
		} else {
			vid = vnode_vid(vp);

			lck_mtx_unlock(&hfs_chash_mutex);

			if (vnode_getwithvid(vp, vid))
				goto loop;
		}
		if (ncp) {
			/*
			 * someone else won the race to create
			 * this cnode and add it to the hash
			 * just dump our allocation
			 */
			FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
			ncp = NULL;
		}
		if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
			if (vp != NULLVP)
				vnode_put(vp);
			lck_mtx_lock(&hfs_chash_mutex);

			if (vp == NULLVP)
				CLR(cp->c_hflag, H_ATTACH);
			goto loop_with_lock;
		}
		/*
		 * Skip cnodes that are not in the name space anymore
		 * we need to check again with the cnode lock held
		 * because we may have blocked acquiring the vnode ref
		 * or the lock on the cnode which would allow the node
		 * to be unlinked
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			if (!skiplock)
				hfs_unlock(cp);
			if (vp != NULLVP)
				vnode_put(vp);
			lck_mtx_lock(&hfs_chash_mutex);

			if (vp == NULLVP)
				CLR(cp->c_hflag, H_ATTACH);
			goto loop_with_lock;
		}
		*vpp = vp;
		return (cp);
	}

	/*
	 * Allocate a new cnode
	 */
	if (skiplock) {
		panic("%s - should never get here when skiplock is set \n", __FUNCTION__);
	}
	if (ncp == NULL) {
		lck_mtx_unlock(&hfs_chash_mutex);

		MALLOC_ZONE(ncp, struct cnode *, sizeof(struct cnode), M_HFSNODE, M_WAITOK);
		/*
		 * since we dropped the chash lock,
		 * we need to go back and re-verify
		 * that this node hasn't come into
		 * existence...
		 */
		goto loop;
	}
	bzero(ncp, sizeof(struct cnode));
	SET(ncp->c_hflag, H_ALLOC);
	ncp->c_fileid = inum;
	ncp->c_dev = dev;
	TAILQ_INIT(&ncp->c_hintlist);	/* make the list empty */

	lck_rw_init(&ncp->c_rwlock, hfs_rwlock_group, hfs_lock_attr);
	(void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK);

	/* Insert the new cnode with its H_ALLOC flag set */
	LIST_INSERT_HEAD(CNODEHASH(dev, inum), ncp, c_hash);
	lck_mtx_unlock(&hfs_chash_mutex);

	*vpp = NULL;
	return (ncp);
}
__private_extern__
void
hfs_chashwakeup(struct cnode *cp, int hflags)
{
	lck_mtx_lock(&hfs_chash_mutex);

	CLR(cp->c_hflag, hflags);

	if (ISSET(cp->c_hflag, H_WAITING)) {
		CLR(cp->c_hflag, H_WAITING);
		wakeup((caddr_t)cp);
	}
	lck_mtx_unlock(&hfs_chash_mutex);
}
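/*
 * Typical call once a cnode is fully set up (illustrative; the exact flag
 * combination depends on the caller's path):
 *
 *	hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
 */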
/*
 * Re-hash two cnodes in the hash table.
 */
__private_extern__
void
hfs_chash_rehash(struct cnode *cp1, struct cnode *cp2)
{
	lck_mtx_lock(&hfs_chash_mutex);

	LIST_REMOVE(cp1, c_hash);
	LIST_REMOVE(cp2, c_hash);
	LIST_INSERT_HEAD(CNODEHASH(cp1->c_dev, cp1->c_fileid), cp1, c_hash);
	LIST_INSERT_HEAD(CNODEHASH(cp2->c_dev, cp2->c_fileid), cp2, c_hash);

	lck_mtx_unlock(&hfs_chash_mutex);
}
/*
 * Remove a cnode from the hash table.
 */
__private_extern__
int
hfs_chashremove(struct cnode *cp)
{
	lck_mtx_lock(&hfs_chash_mutex);

	/* Check if a vnode is getting attached */
	if (ISSET(cp->c_hflag, H_ATTACH)) {
		lck_mtx_unlock(&hfs_chash_mutex);
		return (EBUSY);
	}
	LIST_REMOVE(cp, c_hash);
	cp->c_hash.le_next = NULL;
	cp->c_hash.le_prev = NULL;

	lck_mtx_unlock(&hfs_chash_mutex);
	return (0);
}
/*
 * Remove a cnode from the hash table and wakeup any waiters.
 */
__private_extern__
void
hfs_chash_abort(struct cnode *cp)
{
	lck_mtx_lock(&hfs_chash_mutex);

	LIST_REMOVE(cp, c_hash);
	cp->c_hash.le_next = NULL;
	cp->c_hash.le_prev = NULL;

	CLR(cp->c_hflag, H_ATTACH | H_ALLOC);
	if (ISSET(cp->c_hflag, H_WAITING)) {
		CLR(cp->c_hflag, H_WAITING);
		wakeup((caddr_t)cp);
	}
	lck_mtx_unlock(&hfs_chash_mutex);
}
/*
 * Mark a cnode as in transition.
 */
__private_extern__
void
hfs_chash_mark_in_transit(struct cnode *cp)
{
	lck_mtx_lock(&hfs_chash_mutex);

	SET(cp->c_hflag, H_TRANSIT);

	lck_mtx_unlock(&hfs_chash_mutex);
}