]> git.saurik.com Git - apple/xnu.git/blame_incremental - bsd/hfs/hfs_chash.c
xnu-792.10.96.tar.gz
[apple/xnu.git] / bsd / hfs / hfs_chash.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23/*
24 * Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995
25 * The Regents of the University of California. All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. All advertising materials mentioning features or use of this software
36 * must display the following acknowledgement:
37 * This product includes software developed by the University of
38 * California, Berkeley and its contributors.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * @(#)hfs_chash.c
56 * derived from @(#)ufs_ihash.c 8.7 (Berkeley) 5/17/95
57 */
58
59#include <sys/param.h>
60#include <sys/systm.h>
61#include <sys/vnode.h>
62#include <sys/kernel.h>
63#include <sys/malloc.h>
64#include <sys/proc.h>
65#include <sys/queue.h>
66
67
68#include "hfs.h" /* XXX bringup */
69#include "hfs_cnode.h"
70
71extern lck_attr_t * hfs_lock_attr;
72extern lck_grp_t * hfs_mutex_group;
73extern lck_grp_t * hfs_rwlock_group;
74
75lck_grp_t * chash_lck_grp;
76lck_grp_attr_t * chash_lck_grp_attr;
77lck_attr_t * chash_lck_attr;
78
79/*
80 * Structures associated with cnode caching.
81 */
82LIST_HEAD(cnodehashhead, cnode) *cnodehashtbl;
83u_long cnodehash; /* size of hash table - 1 */
84#define CNODEHASH(device, inum) (&cnodehashtbl[((device) + (inum)) & cnodehash])
85
86lck_mtx_t hfs_chash_mutex;
87
88
89/*
90 * Initialize cnode hash table.
91 */
92__private_extern__
93void
94hfs_chashinit()
95{
96 cnodehashtbl = hashinit(desiredvnodes, M_HFSMNT, &cnodehash);
97
98 chash_lck_grp_attr= lck_grp_attr_alloc_init();
99 chash_lck_grp = lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr);
100 chash_lck_attr = lck_attr_alloc_init();
101
102 lck_mtx_init(&hfs_chash_mutex, chash_lck_grp, chash_lck_attr);
103}
104
105
/*
 * Use the device, inum pair to find the incore cnode.
 *
 * If it is in core, but locked, wait for it.
 *
 * On success, returns the cnode's vnode (resource fork when wantrsrc
 * is set, otherwise the data fork) with an iocount held via
 * vnode_getwithvid(); unless skiplock is set, the cnode is also held
 * exclusive.  Returns NULL when no usable cnode/vnode pair exists,
 * or when the vnode changed identity / the cnode was deleted while
 * we blocked.
 */
__private_extern__
struct vnode *
hfs_chash_getvnode(dev_t dev, ino_t inum, int wantrsrc, int skiplock)
{
	struct cnode *cp;
	struct vnode *vp;
	int error;
	uint32_t vid;

	/*
	 * Go through the hash list
	 * If a cnode is in the process of being cleaned out or being
	 * allocated, wait for it to be finished and then try again.
	 */
loop:
	lck_mtx_lock(&hfs_chash_mutex);
	for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
		if ((cp->c_fileid != inum) || (cp->c_dev != dev))
			continue;
		/* Wait if cnode is being created or reclaimed. */
		if (ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
			SET(cp->c_hflag, H_WAITING);

			/*
			 * PDROP: msleep() releases hfs_chash_mutex on the
			 * way to sleep, so restart at "loop" (which
			 * re-acquires it) rather than rescanning directly.
			 */
			(void) msleep(cp, &hfs_chash_mutex, PDROP | PINOD,
			              "hfs_chash_getvnode", 0);
			goto loop;
		}
		/*
		 * Skip cnodes that are not in the name space anymore
		 * note that this check is done outside of the proper
		 * lock to catch nodes already in this state... this
		 * state must be rechecked after we acquire the cnode lock
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			continue;
		}
		/* Obtain the desired vnode. */
		vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
		if (vp == NULLVP)
			goto exit;

		/*
		 * Capture the vnode id before dropping the hash mutex;
		 * vnode_getwithvid() below fails if the vnode gets
		 * recycled in the unlocked window.
		 */
		vid = vnode_vid(vp);
		lck_mtx_unlock(&hfs_chash_mutex);

		if ((error = vnode_getwithvid(vp, vid))) {
		        /*
		         * If vnode is being reclaimed, or has
		         * already changed identity, no need to wait
		         */
		        return (NULL);
		}
		if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
			vnode_put(vp);
			return (NULL);
		}

		/*
		 * Skip cnodes that are not in the name space anymore
		 * we need to check again with the cnode lock held
		 * because we may have blocked acquiring the vnode ref
		 * or the lock on the cnode which would allow the node
		 * to be unlinked
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			if (!skiplock)
				hfs_unlock(cp);
			vnode_put(vp);

			return (NULL);
		}
		return (vp);
	}
exit:
	lck_mtx_unlock(&hfs_chash_mutex);
	return (NULL);
}
187
188
/*
 * Use the device, fileid pair to find the incore cnode and, if it is
 * present and not in a transient state, invoke the callout on its
 * catalog descriptor and attributes.  The callout runs with the
 * chash mutex held, so it must not block.
 *
 * Returns the callout's result, or ENOENT when the cnode is not
 * hashed or is currently being created, attached, or reclaimed.
 * Unlike the lookup routines, this never sleeps and never creates
 * a cnode.
 */
__private_extern__
int
hfs_chash_snoop(dev_t dev, ino_t inum, int (*callout)(const struct cat_desc *,
                const struct cat_attr *, void *), void * arg)
{
	struct cnode *cp;
	int result = ENOENT;

	/*
	 * Go through the hash list looking for our dev/fileid pair.
	 */
	lck_mtx_lock(&hfs_chash_mutex);
	for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
		if ((cp->c_fileid != inum) || (cp->c_dev != dev))
			continue;
		/* Skip cnodes being created or reclaimed. */
		if (!ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
			result = callout(&cp->c_desc, &cp->c_attr, arg);
		}
		break;
	}
	lck_mtx_unlock(&hfs_chash_mutex);
	return (result);
}
221
222
/*
 * Use the device, fileid pair to find the incore cnode.
 * If no cnode is found, one is allocated, inserted in the hash with
 * H_ALLOC set, and returned locked; the caller finishes initializing
 * it and later clears H_ALLOC (via hfs_chashwakeup()).
 *
 * If it is in core, but locked, wait for it.
 *
 * On return, *vpp is the existing vnode with an iocount held, or NULL
 * when the caller must create/attach one (the returned cnode then has
 * H_ATTACH or H_ALLOC set to hold off concurrent lookups).
 */
__private_extern__
struct cnode *
hfs_chash_getcnode(dev_t dev, ino_t inum, struct vnode **vpp, int wantrsrc, int skiplock)
{
	struct cnode *cp;
	struct cnode *ncp = NULL;	/* speculatively allocated cnode, see below */
	vnode_t	vp;
	uint32_t vid;

	/*
	 * Go through the hash list
	 * If a cnode is in the process of being cleaned out or being
	 * allocated, wait for it to be finished and then try again.
	 */
loop:
	lck_mtx_lock(&hfs_chash_mutex);

loop_with_lock:
	for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
		if ((cp->c_fileid != inum) || (cp->c_dev != dev))
			continue;
		/*
		 * Wait if cnode is being created, attached to or reclaimed.
		 * (No PDROP here: msleep() re-acquires the mutex, so we can
		 * rescan at loop_with_lock without relocking.)
		 */
		if (ISSET(cp->c_hflag, H_ALLOC | H_ATTACH | H_TRANSIT)) {
			SET(cp->c_hflag, H_WAITING);

			(void) msleep(cp, &hfs_chash_mutex, PINOD,
			              "hfs_chash_getcnode", 0);
			goto loop_with_lock;
		}
		/*
		 * Skip cnodes that are not in the name space anymore
		 * note that this check is done outside of the proper
		 * lock to catch nodes already in this state... this
		 * state must be rechecked after we acquire the cnode lock
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			continue;
		}
		vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
		if (vp == NULL) {
			/*
			 * The desired vnode isn't there so tag the cnode.
			 * H_ATTACH makes other lookups wait while the caller
			 * attaches a vnode to this cnode.
			 */
			SET(cp->c_hflag, H_ATTACH);

			lck_mtx_unlock(&hfs_chash_mutex);
		} else {
			/* Capture the vid before dropping the mutex so we can
			 * detect the vnode being recycled out from under us. */
			vid = vnode_vid(vp);

			lck_mtx_unlock(&hfs_chash_mutex);

			if (vnode_getwithvid(vp, vid))
				goto loop;
		}
		if (ncp) {
			/*
			 * someone else won the race to create
			 * this cnode and add it to the hash
			 * just dump our allocation
			 */
			FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
			ncp = NULL;
		}
		if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
			/* Couldn't lock the cnode: back out and rescan. */
			if (vp != NULLVP)
				vnode_put(vp);
			lck_mtx_lock(&hfs_chash_mutex);

			if (vp == NULLVP)
				CLR(cp->c_hflag, H_ATTACH);
			goto loop_with_lock;
		}
		/*
		 * Skip cnodes that are not in the name space anymore
		 * we need to check again with the cnode lock held
		 * because we may have blocked acquiring the vnode ref
		 * or the lock on the cnode which would allow the node
		 * to be unlinked
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			if (!skiplock)
				hfs_unlock(cp);
			if (vp != NULLVP)
				vnode_put(vp);
			lck_mtx_lock(&hfs_chash_mutex);

			if (vp == NULLVP)
				CLR(cp->c_hflag, H_ATTACH);
			goto loop_with_lock;
		}
		*vpp = vp;
		return (cp);
	}

	/*
	 * Allocate a new cnode
	 */
	if (skiplock)
		panic("%s - should never get here when skiplock is set \n", __FUNCTION__);

	if (ncp == NULL) {
		lck_mtx_unlock(&hfs_chash_mutex);

		MALLOC_ZONE(ncp, struct cnode *, sizeof(struct cnode), M_HFSNODE, M_WAITOK);
		/*
		 * since we dropped the chash lock,
		 * we need to go back and re-verify
		 * that this node hasn't come into
		 * existence...
		 */
		goto loop;
	}
	bzero(ncp, sizeof(struct cnode));
	SET(ncp->c_hflag, H_ALLOC);
	ncp->c_fileid = inum;
	ncp->c_dev = dev;
	TAILQ_INIT(&ncp->c_hintlist); /* make the list empty */

	lck_rw_init(&ncp->c_rwlock, hfs_rwlock_group, hfs_lock_attr);
	if (!skiplock)
		(void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK);

	/* Insert the new cnode with its H_ALLOC flag set */
	LIST_INSERT_HEAD(CNODEHASH(dev, inum), ncp, c_hash);
	lck_mtx_unlock(&hfs_chash_mutex);

	*vpp = NULL;
	return (ncp);
}
360
361
362__private_extern__
363void
364hfs_chashwakeup(struct cnode *cp, int hflags)
365{
366 lck_mtx_lock(&hfs_chash_mutex);
367
368 CLR(cp->c_hflag, hflags);
369
370 if (ISSET(cp->c_hflag, H_WAITING)) {
371 CLR(cp->c_hflag, H_WAITING);
372 wakeup((caddr_t)cp);
373 }
374 lck_mtx_unlock(&hfs_chash_mutex);
375}
376
377
378/*
379 * Re-hash two cnodes in the hash table.
380 */
381__private_extern__
382void
383hfs_chash_rehash(struct cnode *cp1, struct cnode *cp2)
384{
385 lck_mtx_lock(&hfs_chash_mutex);
386
387 LIST_REMOVE(cp1, c_hash);
388 LIST_REMOVE(cp2, c_hash);
389 LIST_INSERT_HEAD(CNODEHASH(cp1->c_dev, cp1->c_fileid), cp1, c_hash);
390 LIST_INSERT_HEAD(CNODEHASH(cp2->c_dev, cp2->c_fileid), cp2, c_hash);
391
392 lck_mtx_unlock(&hfs_chash_mutex);
393}
394
395
396/*
397 * Remove a cnode from the hash table.
398 */
399__private_extern__
400int
401hfs_chashremove(struct cnode *cp)
402{
403 lck_mtx_lock(&hfs_chash_mutex);
404
405 /* Check if a vnode is getting attached */
406 if (ISSET(cp->c_hflag, H_ATTACH)) {
407 lck_mtx_unlock(&hfs_chash_mutex);
408 return (EBUSY);
409 }
410 LIST_REMOVE(cp, c_hash);
411 cp->c_hash.le_next = NULL;
412 cp->c_hash.le_prev = NULL;
413
414 lck_mtx_unlock(&hfs_chash_mutex);
415 return (0);
416}
417
418/*
419 * Remove a cnode from the hash table and wakeup any waiters.
420 */
421__private_extern__
422void
423hfs_chash_abort(struct cnode *cp)
424{
425 lck_mtx_lock(&hfs_chash_mutex);
426
427 LIST_REMOVE(cp, c_hash);
428 cp->c_hash.le_next = NULL;
429 cp->c_hash.le_prev = NULL;
430
431 CLR(cp->c_hflag, H_ATTACH | H_ALLOC);
432 if (ISSET(cp->c_hflag, H_WAITING)) {
433 CLR(cp->c_hflag, H_WAITING);
434 wakeup((caddr_t)cp);
435 }
436 lck_mtx_unlock(&hfs_chash_mutex);
437}
438
439
440/*
441 * mark a cnode as in transistion
442 */
443__private_extern__
444void
445hfs_chash_mark_in_transit(struct cnode *cp)
446{
447 lck_mtx_lock(&hfs_chash_mutex);
448
449 SET(cp->c_hflag, H_TRANSIT);
450
451 lck_mtx_unlock(&hfs_chash_mutex);
452}