/*
 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	derived from @(#)ufs_ihash.c	8.7 (Berkeley) 5/17/95
 */
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/vnode.h>
68 #include <sys/kernel.h>
69 #include <sys/malloc.h>
71 #include <sys/queue.h>
74 #include "hfs.h" /* XXX bringup */
75 #include "hfs_cnode.h"
77 extern lck_attr_t
* hfs_lock_attr
;
78 extern lck_grp_t
* hfs_mutex_group
;
79 extern lck_grp_t
* hfs_rwlock_group
;
81 lck_grp_t
* chash_lck_grp
;
82 lck_grp_attr_t
* chash_lck_grp_attr
;
83 lck_attr_t
* chash_lck_attr
;
86 * Structures associated with cnode caching.
88 LIST_HEAD(cnodehashhead
, cnode
) *cnodehashtbl
;
89 u_long cnodehash
; /* size of hash table - 1 */
90 #define CNODEHASH(device, inum) (&cnodehashtbl[((device) + (inum)) & cnodehash])
92 lck_mtx_t hfs_chash_mutex
;
96 * Initialize cnode hash table.
102 cnodehashtbl
= hashinit(desiredvnodes
, M_HFSMNT
, &cnodehash
);
104 chash_lck_grp_attr
= lck_grp_attr_alloc_init();
105 lck_grp_attr_setstat(chash_lck_grp_attr
);
106 chash_lck_grp
= lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr
);
108 chash_lck_attr
= lck_attr_alloc_init();
109 //lck_attr_setdebug(chash_lck_attr);
111 lck_mtx_init(&hfs_chash_mutex
, chash_lck_grp
, chash_lck_attr
);
116 * Use the device, inum pair to find the incore cnode.
118 * If it is in core, but locked, wait for it.
122 hfs_chash_getvnode(dev_t dev
, ino_t inum
, int wantrsrc
, int skiplock
)
130 * Go through the hash list
131 * If a cnode is in the process of being cleaned out or being
132 * allocated, wait for it to be finished and then try again.
135 lck_mtx_lock(&hfs_chash_mutex
);
136 for (cp
= CNODEHASH(dev
, inum
)->lh_first
; cp
; cp
= cp
->c_hash
.le_next
) {
137 if ((cp
->c_fileid
!= inum
) || (cp
->c_dev
!= dev
))
139 /* Wait if cnode is being created or reclaimed. */
140 if (ISSET(cp
->c_hflag
, H_ALLOC
| H_TRANSIT
| H_ATTACH
)) {
141 SET(cp
->c_hflag
, H_WAITING
);
143 (void) msleep(cp
, &hfs_chash_mutex
, PDROP
| PINOD
,
144 "hfs_chash_getvnode", 0);
148 * Skip cnodes that are not in the name space anymore
149 * note that this check is done outside of the proper
150 * lock to catch nodes already in this state... this
151 * state must be rechecked after we acquire the cnode lock
153 if (cp
->c_flag
& (C_NOEXISTS
| C_DELETED
)) {
156 /* Obtain the desired vnode. */
157 vp
= wantrsrc
? cp
->c_rsrc_vp
: cp
->c_vp
;
162 lck_mtx_unlock(&hfs_chash_mutex
);
164 if ((error
= vnode_getwithvid(vp
, vid
))) {
166 * If vnode is being reclaimed, or has
167 * already changed identity, no need to wait
171 if (!skiplock
&& hfs_lock(cp
, HFS_EXCLUSIVE_LOCK
) != 0) {
177 * Skip cnodes that are not in the name space anymore
178 * we need to check again with the cnode lock held
179 * because we may have blocked acquiring the vnode ref
180 * or the lock on the cnode which would allow the node
183 if (cp
->c_flag
& (C_NOEXISTS
| C_DELETED
)) {
193 lck_mtx_unlock(&hfs_chash_mutex
);
199 * Use the device, fileid pair to find the incore cnode.
200 * If no cnode if found one is created
202 * If it is in core, but locked, wait for it.
206 hfs_chash_snoop(dev_t dev
, ino_t inum
, int (*callout
)(const struct cat_desc
*,
207 const struct cat_attr
*, void *), void * arg
)
213 * Go through the hash list
214 * If a cnode is in the process of being cleaned out or being
215 * allocated, wait for it to be finished and then try again.
217 lck_mtx_lock(&hfs_chash_mutex
);
218 for (cp
= CNODEHASH(dev
, inum
)->lh_first
; cp
; cp
= cp
->c_hash
.le_next
) {
219 if ((cp
->c_fileid
!= inum
) || (cp
->c_dev
!= dev
))
221 /* Skip cnodes being created or reclaimed. */
222 if (!ISSET(cp
->c_hflag
, H_ALLOC
| H_TRANSIT
| H_ATTACH
)) {
223 result
= callout(&cp
->c_desc
, &cp
->c_attr
, arg
);
227 lck_mtx_unlock(&hfs_chash_mutex
);
233 * Use the device, fileid pair to find the incore cnode.
234 * If no cnode if found one is created
236 * If it is in core, but locked, wait for it.
240 hfs_chash_getcnode(dev_t dev
, ino_t inum
, struct vnode
**vpp
, int wantrsrc
, int skiplock
)
243 struct cnode
*ncp
= NULL
;
248 * Go through the hash list
249 * If a cnode is in the process of being cleaned out or being
250 * allocated, wait for it to be finished and then try again.
253 lck_mtx_lock(&hfs_chash_mutex
);
256 for (cp
= CNODEHASH(dev
, inum
)->lh_first
; cp
; cp
= cp
->c_hash
.le_next
) {
257 if ((cp
->c_fileid
!= inum
) || (cp
->c_dev
!= dev
))
260 * Wait if cnode is being created, attached to or reclaimed.
262 if (ISSET(cp
->c_hflag
, H_ALLOC
| H_ATTACH
| H_TRANSIT
)) {
263 SET(cp
->c_hflag
, H_WAITING
);
265 (void) msleep(cp
, &hfs_chash_mutex
, PINOD
,
266 "hfs_chash_getcnode", 0);
270 * Skip cnodes that are not in the name space anymore
271 * note that this check is done outside of the proper
272 * lock to catch nodes already in this state... this
273 * state must be rechecked after we acquire the cnode lock
275 if (cp
->c_flag
& (C_NOEXISTS
| C_DELETED
)) {
278 vp
= wantrsrc
? cp
->c_rsrc_vp
: cp
->c_vp
;
281 * The desired vnode isn't there so tag the cnode.
283 SET(cp
->c_hflag
, H_ATTACH
);
285 lck_mtx_unlock(&hfs_chash_mutex
);
289 lck_mtx_unlock(&hfs_chash_mutex
);
291 if (vnode_getwithvid(vp
, vid
))
296 * someone else won the race to create
297 * this cnode and add it to the hash
298 * just dump our allocation
300 FREE_ZONE(ncp
, sizeof(struct cnode
), M_HFSNODE
);
303 if (!skiplock
&& hfs_lock(cp
, HFS_EXCLUSIVE_LOCK
) != 0) {
306 lck_mtx_lock(&hfs_chash_mutex
);
309 CLR(cp
->c_hflag
, H_ATTACH
);
313 * Skip cnodes that are not in the name space anymore
314 * we need to check again with the cnode lock held
315 * because we may have blocked acquiring the vnode ref
316 * or the lock on the cnode which would allow the node
319 if (cp
->c_flag
& (C_NOEXISTS
| C_DELETED
)) {
324 lck_mtx_lock(&hfs_chash_mutex
);
327 CLR(cp
->c_hflag
, H_ATTACH
);
335 * Allocate a new cnode
338 panic("%s - should never get here when skiplock is set \n", __FUNCTION__
);
341 lck_mtx_unlock(&hfs_chash_mutex
);
343 MALLOC_ZONE(ncp
, struct cnode
*, sizeof(struct cnode
), M_HFSNODE
, M_WAITOK
);
345 * since we dropped the chash lock,
346 * we need to go back and re-verify
347 * that this node hasn't come into
352 bzero(ncp
, sizeof(struct cnode
));
353 SET(ncp
->c_hflag
, H_ALLOC
);
354 ncp
->c_fileid
= inum
;
356 TAILQ_INIT(&ncp
->c_hintlist
); /* make the list empty */
358 lck_rw_init(&ncp
->c_rwlock
, hfs_rwlock_group
, hfs_lock_attr
);
360 (void) hfs_lock(ncp
, HFS_EXCLUSIVE_LOCK
);
362 /* Insert the new cnode with it's H_ALLOC flag set */
363 LIST_INSERT_HEAD(CNODEHASH(dev
, inum
), ncp
, c_hash
);
364 lck_mtx_unlock(&hfs_chash_mutex
);
373 hfs_chashwakeup(struct cnode
*cp
, int hflags
)
375 lck_mtx_lock(&hfs_chash_mutex
);
377 CLR(cp
->c_hflag
, hflags
);
379 if (ISSET(cp
->c_hflag
, H_WAITING
)) {
380 CLR(cp
->c_hflag
, H_WAITING
);
383 lck_mtx_unlock(&hfs_chash_mutex
);
388 * Re-hash two cnodes in the hash table.
392 hfs_chash_rehash(struct cnode
*cp1
, struct cnode
*cp2
)
394 lck_mtx_lock(&hfs_chash_mutex
);
396 LIST_REMOVE(cp1
, c_hash
);
397 LIST_REMOVE(cp2
, c_hash
);
398 LIST_INSERT_HEAD(CNODEHASH(cp1
->c_dev
, cp1
->c_fileid
), cp1
, c_hash
);
399 LIST_INSERT_HEAD(CNODEHASH(cp2
->c_dev
, cp2
->c_fileid
), cp2
, c_hash
);
401 lck_mtx_unlock(&hfs_chash_mutex
);
406 * Remove a cnode from the hash table.
410 hfs_chashremove(struct cnode
*cp
)
412 lck_mtx_lock(&hfs_chash_mutex
);
414 /* Check if a vnode is getting attached */
415 if (ISSET(cp
->c_hflag
, H_ATTACH
)) {
416 lck_mtx_unlock(&hfs_chash_mutex
);
419 LIST_REMOVE(cp
, c_hash
);
420 cp
->c_hash
.le_next
= NULL
;
421 cp
->c_hash
.le_prev
= NULL
;
423 lck_mtx_unlock(&hfs_chash_mutex
);
428 * Remove a cnode from the hash table and wakeup any waiters.
432 hfs_chash_abort(struct cnode
*cp
)
434 lck_mtx_lock(&hfs_chash_mutex
);
436 LIST_REMOVE(cp
, c_hash
);
437 cp
->c_hash
.le_next
= NULL
;
438 cp
->c_hash
.le_prev
= NULL
;
440 CLR(cp
->c_hflag
, H_ATTACH
| H_ALLOC
);
441 if (ISSET(cp
->c_hflag
, H_WAITING
)) {
442 CLR(cp
->c_hflag
, H_WAITING
);
445 lck_mtx_unlock(&hfs_chash_mutex
);
450 * mark a cnode as in transistion
454 hfs_chash_mark_in_transit(struct cnode
*cp
)
456 lck_mtx_lock(&hfs_chash_mutex
);
458 SET(cp
->c_hflag
, H_TRANSIT
);
460 lck_mtx_unlock(&hfs_chash_mutex
);