/*
 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)hfs_chash.c
 *	derived from @(#)ufs_ihash.c	8.7 (Berkeley)	5/17/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>


#include "hfs.h"	/* XXX bringup */
#include "hfs_cnode.h"

extern lck_attr_t * hfs_lock_attr;
extern lck_grp_t * hfs_mutex_group;
extern lck_grp_t * hfs_rwlock_group;

lck_grp_t * chash_lck_grp;
lck_grp_attr_t * chash_lck_grp_attr;
lck_attr_t * chash_lck_attr;

/*
 * Structures associated with cnode caching.
 */
LIST_HEAD(cnodehashhead, cnode) *cnodehashtbl;
u_long cnodehash;		/* size of hash table - 1 */
#define CNODEHASH(device, inum) (&cnodehashtbl[((device) + (inum)) & cnodehash])

lck_mtx_t hfs_chash_mutex;

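/*
 * Illustrative sketch (not compiled): how a (device, file id) pair maps to
 * a hash bucket.  hashinit() sizes the table from desiredvnodes, allocates
 * a power-of-two number of buckets, and returns (bucket count - 1) through
 * its third argument, so CNODEHASH() can pick a chain with a simple
 * add-and-mask.  The names and numbers below are hypothetical and only
 * show the arithmetic.
 */
#if 0
	/* Suppose hashinit() gave us 512 buckets, so cnodehash == 0x1ff. */
	dev_t	example_dev  = 0x0e000002;	/* hypothetical device number */
	ino_t	example_inum = 12345;		/* hypothetical file id */
	struct cnodehashhead *bucket;

	/* ((0x0e000002 + 12345) & 0x1ff) selects one of the 512 chains. */
	bucket = CNODEHASH(example_dev, example_inum);

	/* Lookups then walk bucket->lh_first comparing c_dev and c_fileid. */
#endif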

/*
 * Initialize cnode hash table.
 */
__private_extern__
void
hfs_chashinit()
{
	cnodehashtbl = hashinit(desiredvnodes, M_HFSMNT, &cnodehash);

	chash_lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(chash_lck_grp_attr);
	chash_lck_grp = lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr);

	chash_lck_attr = lck_attr_alloc_init();
	//lck_attr_setdebug(chash_lck_attr);

	lck_mtx_init(&hfs_chash_mutex, chash_lck_grp, chash_lck_attr);
}


/*
 * Use the device, inum pair to find the incore cnode.
 *
 * If it is in core, but locked, wait for it.
 */
__private_extern__
struct vnode *
hfs_chash_getvnode(dev_t dev, ino_t inum, int wantrsrc, int skiplock)
{
	struct cnode *cp;
	struct vnode *vp;
	int error;
	uint32_t vid;

	/*
	 * Go through the hash list.
	 * If a cnode is in the process of being cleaned out or being
	 * allocated, wait for it to be finished and then try again.
	 */
loop:
	lck_mtx_lock(&hfs_chash_mutex);
	for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
		if ((cp->c_fileid != inum) || (cp->c_dev != dev))
			continue;
		/* Wait if cnode is being created or reclaimed. */
		if (ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
			SET(cp->c_hflag, H_WAITING);

			(void) msleep(cp, &hfs_chash_mutex, PDROP | PINOD,
			              "hfs_chash_getvnode", 0);
			goto loop;
		}
		/*
		 * Skip cnodes that are not in the name space anymore.
		 * Note that this check is done outside of the proper
		 * lock to catch nodes already in this state... this
		 * state must be rechecked after we acquire the cnode lock.
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			continue;
		}
		/* Obtain the desired vnode. */
		vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
		if (vp == NULLVP)
			goto exit;

		vid = vnode_vid(vp);
		lck_mtx_unlock(&hfs_chash_mutex);

		if ((error = vnode_getwithvid(vp, vid))) {
			/*
			 * If the vnode is being reclaimed, or has already
			 * changed identity, there is no need to wait.
			 */
			return (NULL);
		}
		if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
			vnode_put(vp);
			return (NULL);
		}

		/*
		 * Skip cnodes that are not in the name space anymore.
		 * We need to check again with the cnode lock held
		 * because we may have blocked acquiring the vnode ref
		 * or the lock on the cnode, which would allow the node
		 * to be unlinked.
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			if (!skiplock)
				hfs_unlock(cp);
			vnode_put(vp);

			return (NULL);
		}
		return (vp);
	}
exit:
	lck_mtx_unlock(&hfs_chash_mutex);
	return (NULL);
}
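/*
 * Illustrative sketch (not compiled): a typical caller of
 * hfs_chash_getvnode().  On success the vnode is returned with an iocount
 * held by vnode_getwithvid() and, unless skiplock was set, the cnode is
 * locked exclusively, so the caller must balance with hfs_unlock() and
 * vnode_put().  dev and inum below are hypothetical.
 */
#if 0
	struct vnode *vp;

	vp = hfs_chash_getvnode(dev, inum, 0 /* data fork */, 0 /* take lock */);
	if (vp != NULL) {
		/* ... use vp / VTOC(vp) here ... */
		hfs_unlock(VTOC(vp));
		vnode_put(vp);
	} else {
		/*
		 * Not cached (or deleted/being reclaimed): the caller falls
		 * back to a catalog lookup and builds the cnode itself.
		 */
	}
#endif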


/*
 * Use the device, fileid pair to snoop an incore cnode.
 *
 * A cnode that is being created or reclaimed is skipped; otherwise the
 * callout is invoked with the cnode's catalog descriptor and attributes
 * while the chash lock is held.
 */
__private_extern__
int
hfs_chash_snoop(dev_t dev, ino_t inum, int (*callout)(const struct cat_desc *,
                const struct cat_attr *, void *), void * arg)
{
	struct cnode *cp;
	int result = ENOENT;

	/*
	 * Go through the hash list.
	 * A cnode that is being cleaned out or allocated is simply skipped.
	 */
	lck_mtx_lock(&hfs_chash_mutex);
	for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
		if ((cp->c_fileid != inum) || (cp->c_dev != dev))
			continue;
		/* Skip cnodes being created or reclaimed. */
		if (!ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
			result = callout(&cp->c_desc, &cp->c_attr, arg);
		}
		break;
	}
	lck_mtx_unlock(&hfs_chash_mutex);
	return (result);
}
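/*
 * Illustrative sketch (not compiled): hfs_chash_snoop() runs a caller
 * supplied callout against the in-core catalog descriptor and attributes
 * while the chash mutex is held, so the callout must be short and must not
 * block.  The callout, its argument, and the "return 0 on success"
 * convention below are hypothetical.
 */
#if 0
	static int
	example_snoop_callout(const struct cat_desc *descp,
	                      const struct cat_attr *attrp, void *arg)
	{
		/* Copy out the one field this hypothetical caller wants. */
		*(u_int32_t *)arg = attrp->ca_fileid;
		return (0);
	}

	u_int32_t fileid = 0;
	int error;

	error = hfs_chash_snoop(dev, inum, example_snoop_callout, &fileid);
	/* error == ENOENT when the cnode is not cached or is in transit. */
#endif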


/*
 * Use the device, fileid pair to find the incore cnode.
 * If no cnode is found, one is created.
 *
 * If it is in core, but locked, wait for it.
 */
__private_extern__
struct cnode *
hfs_chash_getcnode(dev_t dev, ino_t inum, struct vnode **vpp, int wantrsrc, int skiplock)
{
	struct cnode	*cp;
	struct cnode	*ncp = NULL;
	vnode_t		vp;
	uint32_t	vid;

	/*
	 * Go through the hash list.
	 * If a cnode is in the process of being cleaned out or being
	 * allocated, wait for it to be finished and then try again.
	 */
loop:
	lck_mtx_lock(&hfs_chash_mutex);

loop_with_lock:
	for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
		if ((cp->c_fileid != inum) || (cp->c_dev != dev))
			continue;
		/*
		 * Wait if cnode is being created, attached to or reclaimed.
		 */
		if (ISSET(cp->c_hflag, H_ALLOC | H_ATTACH | H_TRANSIT)) {
			SET(cp->c_hflag, H_WAITING);

			(void) msleep(cp, &hfs_chash_mutex, PINOD,
			              "hfs_chash_getcnode", 0);
			goto loop_with_lock;
		}
		/*
		 * Skip cnodes that are not in the name space anymore.
		 * Note that this check is done outside of the proper
		 * lock to catch nodes already in this state... this
		 * state must be rechecked after we acquire the cnode lock.
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			continue;
		}
		vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
		if (vp == NULL) {
			/*
			 * The desired vnode isn't there so tag the cnode.
			 */
			SET(cp->c_hflag, H_ATTACH);

			lck_mtx_unlock(&hfs_chash_mutex);
		} else {
			vid = vnode_vid(vp);

			lck_mtx_unlock(&hfs_chash_mutex);

			if (vnode_getwithvid(vp, vid))
				goto loop;
		}
		if (ncp) {
			/*
			 * Someone else won the race to create this cnode
			 * and add it to the hash; just dump our allocation.
			 */
			FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
			ncp = NULL;
		}
		if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
			if (vp != NULLVP)
				vnode_put(vp);
			lck_mtx_lock(&hfs_chash_mutex);

			if (vp == NULLVP)
				CLR(cp->c_hflag, H_ATTACH);
			goto loop_with_lock;
		}
		/*
		 * Skip cnodes that are not in the name space anymore.
		 * We need to check again with the cnode lock held
		 * because we may have blocked acquiring the vnode ref
		 * or the lock on the cnode, which would allow the node
		 * to be unlinked.
		 */
		if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
			if (!skiplock)
				hfs_unlock(cp);
			if (vp != NULLVP)
				vnode_put(vp);
			lck_mtx_lock(&hfs_chash_mutex);

			if (vp == NULLVP)
				CLR(cp->c_hflag, H_ATTACH);
			goto loop_with_lock;
		}
		*vpp = vp;
		return (cp);
	}

	/*
	 * Allocate a new cnode.
	 */
	if (skiplock)
		panic("%s - should never get here when skiplock is set \n", __FUNCTION__);

	if (ncp == NULL) {
		lck_mtx_unlock(&hfs_chash_mutex);

		MALLOC_ZONE(ncp, struct cnode *, sizeof(struct cnode), M_HFSNODE, M_WAITOK);
		/*
		 * Since we dropped the chash lock, we need to go back
		 * and re-verify that this node hasn't come into
		 * existence...
		 */
		goto loop;
	}
	bzero(ncp, sizeof(struct cnode));
	SET(ncp->c_hflag, H_ALLOC);
	ncp->c_fileid = inum;
	ncp->c_dev = dev;
	TAILQ_INIT(&ncp->c_hintlist); /* make the list empty */

	lck_rw_init(&ncp->c_rwlock, hfs_rwlock_group, hfs_lock_attr);
	if (!skiplock)
		(void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK);

	/* Insert the new cnode with its H_ALLOC flag set */
	LIST_INSERT_HEAD(CNODEHASH(dev, inum), ncp, c_hash);
	lck_mtx_unlock(&hfs_chash_mutex);

	*vpp = NULL;
	return (ncp);
}
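/*
 * Illustrative sketch (not compiled): the usual create-or-lookup flow built
 * on hfs_chash_getcnode().  When *vpp comes back NULL the cnode either was
 * just allocated (H_ALLOC) or was tagged H_ATTACH, and the caller is
 * expected to finish initializing it, attach a vnode, and then clear the
 * hash flags with hfs_chashwakeup() so that threads blocked in the msleep()
 * above can retry.  This is a simplified, hypothetical outline of what a
 * caller such as hfs_getnewvnode() does; error handling is elided.
 */
#if 0
	struct cnode *cp;
	struct vnode *vp = NULL;

	cp = hfs_chash_getcnode(dev, inum, &vp, 0 /* data fork */, 0);
	if (vp == NULL) {
		/*
		 * New or vnode-less cnode: fill in the catalog info, create
		 * and attach the vnode, then wake any waiters.
		 */
		/* ... vnode_create() and cp->c_vp = vp ... */
		hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
	}
	/* Either way the cnode is returned locked when skiplock == 0. */
#endif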


__private_extern__
void
hfs_chashwakeup(struct cnode *cp, int hflags)
{
	lck_mtx_lock(&hfs_chash_mutex);

	CLR(cp->c_hflag, hflags);

	if (ISSET(cp->c_hflag, H_WAITING)) {
		CLR(cp->c_hflag, H_WAITING);
		wakeup((caddr_t)cp);
	}
	lck_mtx_unlock(&hfs_chash_mutex);
}


/*
 * Re-hash two cnodes in the hash table.
 */
__private_extern__
void
hfs_chash_rehash(struct cnode *cp1, struct cnode *cp2)
{
	lck_mtx_lock(&hfs_chash_mutex);

	LIST_REMOVE(cp1, c_hash);
	LIST_REMOVE(cp2, c_hash);
	LIST_INSERT_HEAD(CNODEHASH(cp1->c_dev, cp1->c_fileid), cp1, c_hash);
	LIST_INSERT_HEAD(CNODEHASH(cp2->c_dev, cp2->c_fileid), cp2, c_hash);

	lck_mtx_unlock(&hfs_chash_mutex);
}


/*
 * Remove a cnode from the hash table.
 */
__private_extern__
int
hfs_chashremove(struct cnode *cp)
{
	lck_mtx_lock(&hfs_chash_mutex);

	/* Check if a vnode is getting attached */
	if (ISSET(cp->c_hflag, H_ATTACH)) {
		lck_mtx_unlock(&hfs_chash_mutex);
		return (EBUSY);
	}
	LIST_REMOVE(cp, c_hash);
	cp->c_hash.le_next = NULL;
	cp->c_hash.le_prev = NULL;

	lck_mtx_unlock(&hfs_chash_mutex);
	return (0);
}

/*
 * Remove a cnode from the hash table and wakeup any waiters.
 */
__private_extern__
void
hfs_chash_abort(struct cnode *cp)
{
	lck_mtx_lock(&hfs_chash_mutex);

	LIST_REMOVE(cp, c_hash);
	cp->c_hash.le_next = NULL;
	cp->c_hash.le_prev = NULL;

	CLR(cp->c_hflag, H_ATTACH | H_ALLOC);
	if (ISSET(cp->c_hflag, H_WAITING)) {
		CLR(cp->c_hflag, H_WAITING);
		wakeup((caddr_t)cp);
	}
	lck_mtx_unlock(&hfs_chash_mutex);
}
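/*
 * Illustrative sketch (not compiled): how a reclaim or error path might use
 * hfs_chashremove() and hfs_chash_abort().  hfs_chashremove() backs off
 * (EBUSY) while another thread has tagged the cnode H_ATTACH, whereas
 * hfs_chash_abort() unconditionally unhashes the cnode and wakes waiters,
 * which is what an aborted cnode creation needs.  Simplified and
 * hypothetical; see the callers in hfs_cnode.c for the real sequences.
 */
#if 0
	if (hfs_chashremove(cp) == EBUSY) {
		/*
		 * Someone is attaching a vnode; leave the cnode hashed and
		 * let that thread find it.
		 */
	}

	/* On an aborted creation, tear down unconditionally: */
	hfs_chash_abort(cp);
	FREE_ZONE(cp, sizeof(struct cnode), M_HFSNODE);
#endif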


/*
 * Mark a cnode as in transition.
 */
__private_extern__
void
hfs_chash_mark_in_transit(struct cnode *cp)
{
	lck_mtx_lock(&hfs_chash_mutex);

	SET(cp->c_hflag, H_TRANSIT);

	lck_mtx_unlock(&hfs_chash_mutex);
}