/*
 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995
 *    The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)hfs_chash.c
 *    derived from @(#)ufs_ihash.c  8.7 (Berkeley) 5/17/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>


#include "hfs.h"    /* XXX bringup */
#include "hfs_cnode.h"

extern lck_attr_t * hfs_lock_attr;
extern lck_grp_t * hfs_mutex_group;
extern lck_grp_t * hfs_rwlock_group;

lck_grp_t * chash_lck_grp;
lck_grp_attr_t * chash_lck_grp_attr;
lck_attr_t * chash_lck_attr;

/*
 * Structures associated with cnode caching.
 */
LIST_HEAD(cnodehashhead, cnode) *cnodehashtbl;
u_long cnodehash;    /* size of hash table - 1 */
#define CNODEHASH(device, inum) (&cnodehashtbl[((device) + (inum)) & cnodehash])

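/*
 * hfs_chash_mutex protects the hash chains and serializes changes to
 * the hash-state flags (c_hflag) manipulated in this file.
 */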
lck_mtx_t hfs_chash_mutex;


/*
 * Initialize cnode hash table.
 */
__private_extern__
void
hfs_chashinit()
{
    cnodehashtbl = hashinit(desiredvnodes, M_HFSMNT, &cnodehash);
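    /*
     * cnodehash now holds (table size - 1), which CNODEHASH() uses as
     * a mask; hashinit() allocates a power-of-two number of buckets
     * derived from desiredvnodes.
     */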

    chash_lck_grp_attr = lck_grp_attr_alloc_init();
    chash_lck_grp = lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr);
    chash_lck_attr = lck_attr_alloc_init();

    lck_mtx_init(&hfs_chash_mutex, chash_lck_grp, chash_lck_attr);
}


/*
 * Use the device, inum pair to find the incore cnode.
 *
 * If it is in core, but locked, wait for it.
 */
__private_extern__
struct vnode *
hfs_chash_getvnode(dev_t dev, ino_t inum, int wantrsrc, int skiplock)
{
    struct cnode *cp;
    struct vnode *vp;
    int error;
    uint32_t vid;

    /*
     * Go through the hash list
     * If a cnode is in the process of being cleaned out or being
     * allocated, wait for it to be finished and then try again.
     */
loop:
    lck_mtx_lock(&hfs_chash_mutex);
    for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
        if ((cp->c_fileid != inum) || (cp->c_dev != dev))
            continue;
        /* Wait if cnode is being created or reclaimed. */
        if (ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
            SET(cp->c_hflag, H_WAITING);

            (void) msleep(cp, &hfs_chash_mutex, PDROP | PINOD,
                          "hfs_chash_getvnode", 0);
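            /* PDROP: msleep() dropped hfs_chash_mutex, so retake it at loop. */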
            goto loop;
        }
        /*
         * Skip cnodes that are no longer in the name space.
         * Note that this check is made without holding the cnode
         * lock so that nodes already in this state are caught
         * early; the state must be rechecked after the cnode
         * lock is acquired.
         */
        if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
            continue;
        }
        /* Obtain the desired vnode. */
        vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
        if (vp == NULLVP)
            goto exit;

        vid = vnode_vid(vp);
        lck_mtx_unlock(&hfs_chash_mutex);

        if ((error = vnode_getwithvid(vp, vid))) {
            /*
             * If vnode is being reclaimed, or has
             * already changed identity, no need to wait
             */
            return (NULL);
        }
        if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
            vnode_put(vp);
            return (NULL);
        }

        /*
         * Skip cnodes that are no longer in the name space.
         * We need to check again with the cnode lock held
         * because we may have blocked acquiring the vnode ref
         * or the lock on the cnode, which would have allowed
         * the node to be unlinked.
         */
        if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
            if (!skiplock)
                hfs_unlock(cp);
            vnode_put(vp);

            return (NULL);
        }
        return (vp);
    }
exit:
    lck_mtx_unlock(&hfs_chash_mutex);
    return (NULL);
}


/*
 * Use the device, fileid pair to find the incore cnode.
 * If one is found and it is not being created or reclaimed,
 * invoke the callout with the cnode's catalog descriptor and
 * attributes.  Returns ENOENT if no usable cnode was found.
 */
__private_extern__
int
hfs_chash_snoop(dev_t dev, ino_t inum, int (*callout)(const struct cat_desc *,
                const struct cat_attr *, void *), void * arg)
{
    struct cnode *cp;
    int result = ENOENT;

    /*
     * Go through the hash list.  If the cnode is being cleaned out
     * or allocated, skip it rather than waiting.
     */
    lck_mtx_lock(&hfs_chash_mutex);
    for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
        if ((cp->c_fileid != inum) || (cp->c_dev != dev))
            continue;
        /* Skip cnodes being created or reclaimed. */
        if (!ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
            result = callout(&cp->c_desc, &cp->c_attr, arg);
        }
        break;
    }
    lck_mtx_unlock(&hfs_chash_mutex);
    return (result);
}


/*
 * Use the device, fileid pair to find the incore cnode.
 * If no cnode is found, one is created.
 *
 * If it is in core, but locked, wait for it.
 */
__private_extern__
struct cnode *
hfs_chash_getcnode(dev_t dev, ino_t inum, struct vnode **vpp, int wantrsrc, int skiplock)
{
    struct cnode *cp;
    struct cnode *ncp = NULL;
    vnode_t vp;
    uint32_t vid;

    /*
     * Go through the hash list
     * If a cnode is in the process of being cleaned out or being
     * allocated, wait for it to be finished and then try again.
     */
loop:
    lck_mtx_lock(&hfs_chash_mutex);

loop_with_lock:
    for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
        if ((cp->c_fileid != inum) || (cp->c_dev != dev))
            continue;
        /*
         * Wait if cnode is being created, attached to or reclaimed.
         */
        if (ISSET(cp->c_hflag, H_ALLOC | H_ATTACH | H_TRANSIT)) {
            SET(cp->c_hflag, H_WAITING);

            (void) msleep(cp, &hfs_chash_mutex, PINOD,
                          "hfs_chash_getcnode", 0);
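            /* No PDROP here: hfs_chash_mutex is held again on wakeup, so rescan with it held. */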
            goto loop_with_lock;
        }
        /*
         * Skip cnodes that are no longer in the name space.
         * Note that this check is made without holding the cnode
         * lock so that nodes already in this state are caught
         * early; the state must be rechecked after the cnode
         * lock is acquired.
         */
        if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
            continue;
        }
        vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
        if (vp == NULL) {
            /*
             * The desired vnode isn't there so tag the cnode.
             */
            SET(cp->c_hflag, H_ATTACH);
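            /*
             * While H_ATTACH is set, hfs_chashremove() refuses to remove
             * this cnode (it returns EBUSY), so the entry stays in the
             * hash even though the chash lock is dropped below.
             */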

            lck_mtx_unlock(&hfs_chash_mutex);
        } else {
            vid = vnode_vid(vp);

            lck_mtx_unlock(&hfs_chash_mutex);

            if (vnode_getwithvid(vp, vid))
                goto loop;
        }
        if (ncp) {
            /*
             * Someone else won the race to create this cnode
             * and add it to the hash; just dump our allocation.
             */
            FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
            ncp = NULL;
        }
        if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
            if (vp != NULLVP)
                vnode_put(vp);
            lck_mtx_lock(&hfs_chash_mutex);

            if (vp == NULLVP)
                CLR(cp->c_hflag, H_ATTACH);
            goto loop_with_lock;
        }
        /*
         * Skip cnodes that are no longer in the name space.
         * We need to check again with the cnode lock held
         * because we may have blocked acquiring the vnode ref
         * or the lock on the cnode, which would have allowed
         * the node to be unlinked.
         */
        if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
            if (!skiplock)
                hfs_unlock(cp);
            if (vp != NULLVP)
                vnode_put(vp);
            lck_mtx_lock(&hfs_chash_mutex);

            if (vp == NULLVP)
                CLR(cp->c_hflag, H_ATTACH);
            goto loop_with_lock;
        }
        *vpp = vp;
        return (cp);
    }

    /*
     * Allocate a new cnode
     */
    if (skiplock)
        panic("%s - should never get here when skiplock is set \n", __FUNCTION__);

    if (ncp == NULL) {
        lck_mtx_unlock(&hfs_chash_mutex);

        MALLOC_ZONE(ncp, struct cnode *, sizeof(struct cnode), M_HFSNODE, M_WAITOK);
        /*
         * since we dropped the chash lock,
         * we need to go back and re-verify
         * that this node hasn't come into
         * existence...
         */
        goto loop;
    }
    bzero(ncp, sizeof(struct cnode));
    SET(ncp->c_hflag, H_ALLOC);
    ncp->c_fileid = inum;
    ncp->c_dev = dev;
    TAILQ_INIT(&ncp->c_hintlist); /* make the list empty */

    lck_rw_init(&ncp->c_rwlock, hfs_rwlock_group, hfs_lock_attr);
    if (!skiplock)
        (void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK);

    /* Insert the new cnode with its H_ALLOC flag set */
    LIST_INSERT_HEAD(CNODEHASH(dev, inum), ncp, c_hash);
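    /*
     * Lookups that find this entry will sleep until H_ALLOC is
     * cleared (see hfs_chashwakeup()).
     */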
    lck_mtx_unlock(&hfs_chash_mutex);

    *vpp = NULL;
    return (ncp);
}


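/*
 * Clear the given hash-state flags on a cnode and wake up any
 * threads waiting on it.
 */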
__private_extern__
void
hfs_chashwakeup(struct cnode *cp, int hflags)
{
    lck_mtx_lock(&hfs_chash_mutex);

    CLR(cp->c_hflag, hflags);

    if (ISSET(cp->c_hflag, H_WAITING)) {
        CLR(cp->c_hflag, H_WAITING);
        wakeup((caddr_t)cp);
    }
    lck_mtx_unlock(&hfs_chash_mutex);
}


/*
 * Re-hash two cnodes in the hash table.
 */
__private_extern__
void
hfs_chash_rehash(struct cnode *cp1, struct cnode *cp2)
{
    lck_mtx_lock(&hfs_chash_mutex);

    LIST_REMOVE(cp1, c_hash);
    LIST_REMOVE(cp2, c_hash);
    LIST_INSERT_HEAD(CNODEHASH(cp1->c_dev, cp1->c_fileid), cp1, c_hash);
    LIST_INSERT_HEAD(CNODEHASH(cp2->c_dev, cp2->c_fileid), cp2, c_hash);

    lck_mtx_unlock(&hfs_chash_mutex);
}


/*
 * Remove a cnode from the hash table.
 */
__private_extern__
int
hfs_chashremove(struct cnode *cp)
{
    lck_mtx_lock(&hfs_chash_mutex);

    /* Check if a vnode is getting attached */
    if (ISSET(cp->c_hflag, H_ATTACH)) {
        lck_mtx_unlock(&hfs_chash_mutex);
        return (EBUSY);
    }
    LIST_REMOVE(cp, c_hash);
    cp->c_hash.le_next = NULL;
    cp->c_hash.le_prev = NULL;

    lck_mtx_unlock(&hfs_chash_mutex);
    return (0);
}

/*
 * Remove a cnode from the hash table and wakeup any waiters.
 */
__private_extern__
void
hfs_chash_abort(struct cnode *cp)
{
    lck_mtx_lock(&hfs_chash_mutex);

    LIST_REMOVE(cp, c_hash);
    cp->c_hash.le_next = NULL;
    cp->c_hash.le_prev = NULL;

    CLR(cp->c_hflag, H_ATTACH | H_ALLOC);
    if (ISSET(cp->c_hflag, H_WAITING)) {
        CLR(cp->c_hflag, H_WAITING);
        wakeup((caddr_t)cp);
    }
    lck_mtx_unlock(&hfs_chash_mutex);
}


/*
 * Mark a cnode as being in transition.
 */
__private_extern__
void
hfs_chash_mark_in_transit(struct cnode *cp)
{
    lck_mtx_lock(&hfs_chash_mutex);

    SET(cp->c_hflag, H_TRANSIT);
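    /*
     * Lookups treat H_TRANSIT like H_ALLOC and will wait for the
     * flag to clear before using this cnode.
     */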

    lck_mtx_unlock(&hfs_chash_mutex);
}