1 /*
2 * Copyright (c) 2002-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/proc.h>
31 #include <sys/vnode.h>
32 #include <sys/mount.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/time.h>
36 #include <sys/ubc.h>
37 #include <sys/quota.h>
38 #include <sys/kdebug.h>
39
40 #include <kern/locks.h>
41
42 #include <miscfs/specfs/specdev.h>
43 #include <miscfs/fifofs/fifo.h>
44
45 #include <hfs/hfs.h>
46 #include <hfs/hfs_catalog.h>
47 #include <hfs/hfs_cnode.h>
48 #include <hfs/hfs_quota.h>
49
50 extern int prtactive;
51
52 extern lck_attr_t * hfs_lock_attr;
53 extern lck_grp_t * hfs_mutex_group;
54 extern lck_grp_t * hfs_rwlock_group;
55
56 static int hfs_filedone(struct vnode *vp, vfs_context_t context);
57
58 static void hfs_reclaim_cnode(struct cnode *);
59
60 static int hfs_isordered(struct cnode *, struct cnode *);
61
62
63 /*
64  * Last reference to a cnode. If necessary, write or delete it.
65 */
66 __private_extern__
67 int
68 hfs_vnop_inactive(struct vnop_inactive_args *ap)
69 {
70 struct vnode *vp = ap->a_vp;
71 struct cnode *cp;
72 struct hfsmount *hfsmp = VTOHFS(vp);
73 struct proc *p = vfs_context_proc(ap->a_context);
74 int error = 0;
75 int recycle = 0;
76 int forkcount = 0;
77 int truncated = 0;
78 int started_tr = 0;
79 int took_trunc_lock = 0;
80 cat_cookie_t cookie;
81 int cat_reserve = 0;
82 int lockflags;
83 enum vtype v_type;
84
85 v_type = vnode_vtype(vp);
86 cp = VTOC(vp);
87
88 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || vnode_issystem(vp) ||
89 (hfsmp->hfs_freezing_proc == p)) {
90 return (0);
91 }
92
93 /*
94 * Ignore nodes related to stale file handles.
95 * We are peeking at the cnode flag without the lock, but if C_NOEXISTS
96 * is set, that means the cnode doesn't have any backing store in the
97  * is set, that means the cnode no longer has any backing store in the
98  * catalog, and it is otherwise safe to force a recycle.
98 */
99
100 if (cp->c_flag & C_NOEXISTS) {
101 vnode_recycle(vp);
102 return (0);
103 }
104
105 if ((v_type == VREG || v_type == VLNK)) {
106 hfs_lock_truncate(cp, TRUE);
107 took_trunc_lock = 1;
108 }
109
110 (void) hfs_lock(cp, HFS_FORCE_LOCK);
111
112 if (cp->c_datafork)
113 ++forkcount;
114 if (cp->c_rsrcfork)
115 ++forkcount;
116
117 /*
118 * We should lock cnode before checking the flags in the
119 * condition below and should unlock the cnode before calling
120 * ubc_setsize() as cluster code can call other HFS vnops which
121 * will try to acquire the same cnode lock and cause deadlock.
122 * Only call ubc_setsize to 0 if we are the last fork.
123 */
124 if ((v_type == VREG || v_type == VLNK) &&
125 (cp->c_flag & C_DELETED) &&
126 (VTOF(vp)->ff_blocks != 0) && (forkcount == 1)) {
127 hfs_unlock(cp);
128 ubc_setsize(vp, 0);
129 (void) hfs_lock(cp, HFS_FORCE_LOCK);
130 }
131
132 if (v_type == VREG && !ISSET(cp->c_flag, C_DELETED) && VTOF(vp)->ff_blocks) {
133 hfs_filedone(vp, ap->a_context);
134 }
135 /*
136 * Remove any directory hints or cached origins
137 */
138 if (v_type == VDIR) {
139 hfs_reldirhints(cp, 0);
140 }
141 if (cp->c_flag & C_HARDLINK) {
142 hfs_relorigins(cp);
143 }
144
145 /* Hurry the recycling process along if we're an open-unlinked file */
146 if((v_type == VREG || v_type == VLNK) && (cp->c_flag & C_DELETED)) {
147 recycle = 1;
148 }
149
150 /*
151 * This check is slightly complicated. We should only truncate data
152 * in very specific cases for open-unlinked files. This is because
153 * we want to ensure that the resource fork continues to be available
154 * if the caller has the data fork open. However, this is not symmetric;
155 * someone who has the resource fork open need not be able to access the data
156 * fork once the data fork has gone inactive.
157 *
158 * If we're the last fork, then we have cleaning up to do.
159 *
160 * A) last fork, and vp == c_vp
161  * Truncate away own fork data. If the rsrc fork is not in core, truncate it too.
162 *
163 * B) last fork, and vp == c_rsrc_vp
164  * Truncate ourselves, assuming the data fork has been cleaned up due to C).
165 *
166 * If we're not the last fork, then things are a little different:
167 *
168 * C) not the last fork, vp == c_vp
169 * Truncate ourselves. Once the file has gone out of the namespace,
170 * it cannot be further opened. Further access to the rsrc fork may
171 * continue, however.
172 *
173 * D) not the last fork, vp == c_rsrc_vp
174 * Don't enter the block below, just clean up vnode and push it out of core.
175 */
176
177 if ((v_type == VREG || v_type == VLNK) && (cp->c_flag & C_DELETED) &&
178 ((forkcount == 1) || (!VNODE_IS_RSRC(vp)))) {
179 if (VTOF(vp)->ff_blocks != 0) {
180 /*
181 * Since we're already inside a transaction,
182 * tell hfs_truncate to skip the ubc_setsize.
183 */
184 error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, 0, ap->a_context);
185 if (error)
186 goto out;
187 truncated = 1;
188 }
189
190 /*
191 * If c_blocks > 0 and we are the last fork (data fork), then
192  * we can go ahead and truncate away the rsrc fork blocks if
193 * they were not in core.
194 */
195 if ((cp->c_blocks > 0) && (forkcount == 1) && (vp != cp->c_rsrc_vp)) {
196 struct vnode *rvp = NULLVP;
197
198 error = hfs_vgetrsrc(hfsmp, vp, &rvp, FALSE);
199 if (error)
200 goto out;
201 /*
202 * Defer the vnode_put and ubc_setsize on rvp until hfs_unlock().
203 */
204 cp->c_flag |= C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE;
205 error = hfs_truncate(rvp, (off_t)0, IO_NDELAY, 1, 0, ap->a_context);
206 if (error)
207 goto out;
208 vnode_recycle(rvp); /* all done with this vnode */
209 }
210 }
211
212 // If needed, get rid of any xattrs that this file (or directory) may have.
213 // Note that this must happen outside of any other transactions
214 // because it starts/ends its own transactions and grabs its
215 // own locks. This is to prevent a file with a lot of attributes
216 // from creating a transaction that is too large (which panics).
217 //
218 if ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0 &&
219 (cp->c_flag & C_DELETED) && (forkcount <= 1)) {
220 hfs_removeallattr(hfsmp, cp->c_fileid);
221 }
222
223 /*
224 * Check for a postponed deletion.
225 * (only delete cnode when the last fork goes inactive)
226 */
227 if ((cp->c_flag & C_DELETED) && (forkcount <= 1)) {
228 /*
229 * Mark cnode in transit so that no one can get this
230 * cnode from cnode hash.
231 */
232 // hfs_chash_mark_in_transit(hfsmp, cp);
233 // XXXdbg - remove the cnode from the hash table since it's deleted
234 // otherwise someone could go to sleep on the cnode and not
235 // be woken up until this vnode gets recycled which could be
236 // a very long time...
237 hfs_chashremove(hfsmp, cp);
238
239 cp->c_flag |= C_NOEXISTS; // XXXdbg
240 cp->c_rdev = 0;
241
242 if (started_tr == 0) {
243 if (hfs_start_transaction(hfsmp) != 0) {
244 error = EINVAL;
245 goto out;
246 }
247 started_tr = 1;
248 }
249
250 /*
251 * Reserve some space in the Catalog file.
252 */
253 if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {
254 goto out;
255 }
256 cat_reserve = 1;
257
258 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
259
260 if (cp->c_blocks > 0) {
261 printf("hfs_inactive: deleting non-empty%sfile %d, "
262 "blks %d\n", VNODE_IS_RSRC(vp) ? " rsrc " : " ",
263 (int)cp->c_fileid, (int)cp->c_blocks);
264 }
265
266 //
267 // release the name pointer in the descriptor so that
268 // cat_delete() will use the file-id to do the deletion.
269 // in the case of hard links this is imperative (in the
270 // case of regular files the fileid and cnid are the
271 // same so it doesn't matter).
272 //
273 cat_releasedesc(&cp->c_desc);
274
275 /*
276 * The descriptor name may be zero,
277 * in which case the fileid is used.
278 */
279 error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);
280
281 if (error && truncated && (error != ENXIO))
282  printf("hfs_inactive: couldn't delete a truncated file!\n");
283
284 /* Update HFS Private Data dir */
285 if (error == 0) {
286 hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries--;
287 if (vnode_isdir(vp)) {
288 DEC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
289 }
290 (void)cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
291 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
292 }
293
294 hfs_systemfile_unlock(hfsmp, lockflags);
295
296 if (error)
297 goto out;
298
299 #if QUOTA
300 if (hfsmp->hfs_flags & HFS_QUOTAS)
301 (void)hfs_chkiq(cp, -1, NOCRED, 0);
302 #endif /* QUOTA */
303
304 /* Already set C_NOEXISTS at the beginning of this block */
305 cp->c_flag &= ~C_DELETED;
306 cp->c_touch_chgtime = TRUE;
307 cp->c_touch_modtime = TRUE;
308
309 if (error == 0)
310 hfs_volupdate(hfsmp, (v_type == VDIR) ? VOL_RMDIR : VOL_RMFILE, 0);
311 }
312
313 /*
314 * A file may have had delayed allocations, in which case hfs_update
315 * would not have updated the catalog record (cat_update). We need
316 * to do that now, before we lose our fork data. We also need to
317 * force the update, or hfs_update will again skip the cat_update.
318 */
319 if ((cp->c_flag & C_MODIFIED) ||
320 cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
321 if ((cp->c_flag & C_MODIFIED) || cp->c_touch_modtime){
322 cp->c_flag |= C_FORCEUPDATE;
323 }
324 hfs_update(vp, 0);
325 }
326 out:
327 if (cat_reserve)
328 cat_postflight(hfsmp, &cookie, p);
329
330 // XXXdbg - have to do this because a goto could have come here
331 if (started_tr) {
332 hfs_end_transaction(hfsmp);
333 started_tr = 0;
334 }
335 /*
336 * This has been removed from the namespace and has no backing store
337 * in the catalog, so we should force a reclaim as soon as possible.
338 * Also, we want to check the flag while we still have the cnode lock.
339 */
340 if (cp->c_flag & C_NOEXISTS)
341 recycle = 1;
342
343 hfs_unlock(cp);
344
345 if (took_trunc_lock)
346 hfs_unlock_truncate(cp, TRUE);
347
348 /*
349 * If we are done with the vnode, reclaim it
350 * so that it can be reused immediately.
351 */
352 if (recycle)
353 vnode_recycle(vp);
354
355 return (error);
356 }
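
/*
 * Illustrative sketch (not part of the build): how the open-unlinked
 * path above is typically driven from user space.  The file name and
 * buffer below are hypothetical; the comments trace the kernel side.
 *
 *	fd = open("/Volumes/HFS/scratch", O_CREAT | O_RDWR, 0600);
 *	unlink("/Volumes/HFS/scratch"); // remove path sets C_DELETED
 *	write(fd, buf, len);            // forks remain fully usable
 *	close(fd);                      // last iocount drops; VFS calls
 *	                                // hfs_vnop_inactive(), which
 *	                                // truncates the forks and
 *	                                // cat_delete()s the record
 */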
357
358 /*
359 * File clean-up (zero fill and shrink peof).
360 */
361 static int
362 hfs_filedone(struct vnode *vp, vfs_context_t context)
363 {
364 struct cnode *cp;
365 struct filefork *fp;
366 struct hfsmount *hfsmp;
367 struct rl_entry *invalid_range;
368 off_t leof;
369 u_int32_t blks, blocksize;
370
371 cp = VTOC(vp);
372 fp = VTOF(vp);
373 hfsmp = VTOHFS(vp);
374 leof = fp->ff_size;
375
376 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (fp->ff_blocks == 0))
377 return (0);
378
379 hfs_unlock(cp);
380 (void) cluster_push(vp, IO_CLOSE);
381 hfs_lock(cp, HFS_FORCE_LOCK);
382
383 /*
384 * Explicitly zero out the areas of file
385 * that are currently marked invalid.
386 */
387 while ((invalid_range = TAILQ_FIRST(&fp->ff_invalidranges))) {
388 off_t start = invalid_range->rl_start;
389 off_t end = invalid_range->rl_end;
390
391 /* The range about to be written must be validated
392 * first, so that VNOP_BLOCKMAP() will return the
393 * appropriate mapping for the cluster code:
394 */
395 rl_remove(start, end, &fp->ff_invalidranges);
396
397 hfs_unlock(cp);
398 (void) cluster_write(vp, (struct uio *) 0,
399 leof, end + 1, start, (off_t)0,
400 IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
401 hfs_lock(cp, HFS_FORCE_LOCK);
402 cp->c_flag |= C_MODIFIED;
403 }
404 cp->c_flag &= ~C_ZFWANTSYNC;
405 cp->c_zftimeout = 0;
406 blocksize = VTOVCB(vp)->blockSize;
407 blks = leof / blocksize;
408 if (((off_t)blks * (off_t)blocksize) != leof)
409 blks++;
410 /*
411  * Shrink the peof to the smallest size necessary to contain the leof.
412 */
413 if (blks < fp->ff_blocks)
414 (void) hfs_truncate(vp, leof, IO_NDELAY, 0, 0, context);
415 hfs_unlock(cp);
416 (void) cluster_push(vp, IO_CLOSE);
417 hfs_lock(cp, HFS_FORCE_LOCK);
418
419 /*
420 * If the hfs_truncate didn't happen to flush the vnode's
421 * information out to disk, force it to be updated now that
422 * all invalid ranges have been zero-filled and validated:
423 */
424 if (cp->c_flag & C_MODIFIED) {
425 hfs_update(vp, 0);
426 }
427 return (0);
428 }
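
/*
 * Worked example for the peof-shrink arithmetic above, assuming a
 * hypothetical 4096-byte allocation block: with leof == 9000,
 * blks = 9000 / 4096 = 2, and since 2 * 4096 != 9000 it is rounded
 * up to 3; a fork still holding, say, 8 preallocated blocks
 * (ff_blocks == 8) is therefore truncated back to 3 blocks.
 */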
429
430
431 /*
432 * Reclaim a cnode so that it can be used for other purposes.
433 */
434 __private_extern__
435 int
436 hfs_vnop_reclaim(struct vnop_reclaim_args *ap)
437 {
438 struct vnode *vp = ap->a_vp;
439 struct cnode *cp;
440 struct filefork *fp = NULL;
441 struct filefork *altfp = NULL;
442 struct hfsmount *hfsmp = VTOHFS(vp);
443 int reclaim_cnode = 0;
444
445 (void) hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
446 cp = VTOC(vp);
447
448 /*
449 * A file may have had delayed allocations, in which case hfs_update
450 * would not have updated the catalog record (cat_update). We need
451 * to do that now, before we lose our fork data. We also need to
452 * force the update, or hfs_update will again skip the cat_update.
453 */
454 if ((cp->c_flag & C_MODIFIED) ||
455 cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
456 if ((cp->c_flag & C_MODIFIED) || cp->c_touch_modtime){
457 cp->c_flag |= C_FORCEUPDATE;
458 }
459 hfs_update(vp, 0);
460 }
461
462 /*
463 * Keep track of an inactive hot file.
464 */
465 if (!vnode_isdir(vp) &&
466 !vnode_issystem(vp) &&
467 !(cp->c_flag & (C_DELETED | C_NOEXISTS)) ) {
468 (void) hfs_addhotfile(vp);
469 }
470 vnode_removefsref(vp);
471
472 /*
473 * Find file fork for this vnode (if any)
474 * Also check if another fork is active
475 */
476 if (cp->c_vp == vp) {
477 fp = cp->c_datafork;
478 altfp = cp->c_rsrcfork;
479
480 cp->c_datafork = NULL;
481 cp->c_vp = NULL;
482 } else if (cp->c_rsrc_vp == vp) {
483 fp = cp->c_rsrcfork;
484 altfp = cp->c_datafork;
485
486 cp->c_rsrcfork = NULL;
487 cp->c_rsrc_vp = NULL;
488 } else {
489 panic("hfs_vnop_reclaim: vp points to wrong cnode (vp=%p cp->c_vp=%p cp->c_rsrc_vp=%p)\n", vp, cp->c_vp, cp->c_rsrc_vp);
490 }
491 /*
492 * On the last fork, remove the cnode from its hash chain.
493 */
494 if (altfp == NULL) {
495 /* If we can't remove it then the cnode must persist! */
496 if (hfs_chashremove(hfsmp, cp) == 0)
497 reclaim_cnode = 1;
498 /*
499 * Remove any directory hints
500 */
501 if (vnode_isdir(vp)) {
502 hfs_reldirhints(cp, 0);
503 }
504
505 if(cp->c_flag & C_HARDLINK) {
506 hfs_relorigins(cp);
507 }
508 }
509 /* Release the file fork and related data */
510 if (fp) {
511 /* Dump cached symlink data */
512 if (vnode_islnk(vp) && (fp->ff_symlinkptr != NULL)) {
513 FREE(fp->ff_symlinkptr, M_TEMP);
514 }
515 FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
516 }
517
518 /*
519 * If there was only one active fork then we can release the cnode.
520 */
521 if (reclaim_cnode) {
522 hfs_chashwakeup(hfsmp, cp, H_ALLOC | H_TRANSIT);
523 hfs_reclaim_cnode(cp);
524 } else /* cnode in use */ {
525 hfs_unlock(cp);
526 }
527
528 vnode_clearfsnode(vp);
529 return (0);
530 }
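
/*
 * Sketch of where inactive/reclaim sit in the VFS-driven vnode life
 * cycle (simplified; the exact triggers are up to the VFS layer):
 *
 *	vnode_put(vp)      // last iocount released
 *	  -> VNOP_INACTIVE //   hfs_vnop_inactive: delete/truncate work
 *	vnode_recycle(vp)  // explicit, or via normal vnode reuse
 *	  -> VNOP_RECLAIM  //   hfs_vnop_reclaim: detach forks, free cnode
 */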
531
532
533 extern int (**hfs_vnodeop_p) (void *);
534 extern int (**hfs_std_vnodeop_p) (void *);
535 extern int (**hfs_specop_p) (void *);
536 #if FIFO
537 extern int (**hfs_fifoop_p) (void *);
538 #endif
539
540 /*
541 * hfs_getnewvnode - get new default vnode
542 *
543 * The vnode is returned with an iocount and the cnode locked
544 */
545 __private_extern__
546 int
547 hfs_getnewvnode(
548 struct hfsmount *hfsmp,
549 struct vnode *dvp,
550 struct componentname *cnp,
551 struct cat_desc *descp,
552 int flags,
553 struct cat_attr *attrp,
554 struct cat_fork *forkp,
555 struct vnode **vpp)
556 {
557 struct mount *mp = HFSTOVFS(hfsmp);
558 struct vnode *vp = NULL;
559 struct vnode **cvpp;
560 struct vnode *tvp = NULLVP;
561 struct cnode *cp = NULL;
562 struct filefork *fp = NULL;
563 int hfs_standard = 0;
564 int retval;
565 int issystemfile;
566 int wantrsrc;
567 struct vnode_fsparam vfsp;
568 enum vtype vtype;
569 #if QUOTA
570 int i;
571 #endif /* QUOTA */
572
573 hfs_standard = (hfsmp->hfs_flags & HFS_STANDARD);
574
575 if (attrp->ca_fileid == 0) {
576 *vpp = NULL;
577 return (ENOENT);
578 }
579
580 #if !FIFO
581 if (IFTOVT(attrp->ca_mode) == VFIFO) {
582 *vpp = NULL;
583 return (ENOTSUP);
584 }
585 #endif /* !FIFO */
586 vtype = IFTOVT(attrp->ca_mode);
587 issystemfile = (descp->cd_flags & CD_ISMETA) && (vtype == VREG);
588 wantrsrc = flags & GNV_WANTRSRC;
589
590 #ifdef HFS_CHECK_LOCK_ORDER
591 /*
592  * The only case where it's permissible to hold the parent cnode
593 * lock is during a create operation (hfs_makenode) or when
594 * we don't need the cnode lock (GNV_SKIPLOCK).
595 */
596 if ((dvp != NULL) &&
597 (flags & (GNV_CREATE | GNV_SKIPLOCK)) == 0 &&
598 VTOC(dvp)->c_lockowner == current_thread()) {
599 panic("hfs_getnewvnode: unexpected hold of parent cnode %p", VTOC(dvp));
600 }
601 #endif /* HFS_CHECK_LOCK_ORDER */
602
603 /*
604 * Get a cnode (new or existing)
605 */
606 cp = hfs_chash_getcnode(hfsmp, attrp->ca_fileid, vpp, wantrsrc, (flags & GNV_SKIPLOCK));
607
608 /*
609 * If the id is no longer valid for lookups we'll get back a NULL cp.
610 */
611 if (cp == NULL) {
612 return (ENOENT);
613 }
614
615 /* Hardlinks may need an updated catalog descriptor */
616 if ((cp->c_flag & C_HARDLINK) && descp->cd_nameptr && descp->cd_namelen > 0) {
617 replace_desc(cp, descp);
618 }
619 /* Check if we found a matching vnode */
620 if (*vpp != NULL)
621 return (0);
622
623 /*
624 * If this is a new cnode then initialize it.
625 */
626 if (ISSET(cp->c_hflag, H_ALLOC)) {
627 lck_rw_init(&cp->c_truncatelock, hfs_rwlock_group, hfs_lock_attr);
628 #if HFS_COMPRESSION
629 cp->c_decmp = NULL;
630 #endif
631
632  /* Make sure it's still valid (i.e., it exists on disk). */
633 if (!(flags & GNV_CREATE) &&
634 !hfs_valid_cnode(hfsmp, dvp, (wantrsrc ? NULL : cnp), cp->c_fileid)) {
635 hfs_chash_abort(hfsmp, cp);
636 hfs_reclaim_cnode(cp);
637 *vpp = NULL;
638 return (ENOENT);
639 }
640 bcopy(attrp, &cp->c_attr, sizeof(struct cat_attr));
641 bcopy(descp, &cp->c_desc, sizeof(struct cat_desc));
642
643 /* The name was inherited so clear descriptor state... */
644 descp->cd_namelen = 0;
645 descp->cd_nameptr = NULL;
646 descp->cd_flags &= ~CD_HASBUF;
647
648 /* Tag hardlinks */
649 if ((vtype == VREG || vtype == VDIR) &&
650 ((descp->cd_cnid != attrp->ca_fileid) ||
651 (attrp->ca_recflags & kHFSHasLinkChainMask))) {
652 cp->c_flag |= C_HARDLINK;
653 }
654 /*
655 * Fix-up dir link counts.
656 *
657 * Earlier versions of Leopard used ca_linkcount for posix
658 * nlink support (effectively the sub-directory count + 2).
659 * That is now accomplished using the ca_dircount field with
660 * the corresponding kHFSHasFolderCountMask flag.
661 *
662 * For directories the ca_linkcount is the true link count,
663 * tracking the number of actual hardlinks to a directory.
664 *
665 * We only do this if the mount has HFS_FOLDERCOUNT set;
666 * at the moment, we only set that for HFSX volumes.
667 */
668 if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
669 (vtype == VDIR) &&
670 !(attrp->ca_recflags & kHFSHasFolderCountMask) &&
671 (cp->c_attr.ca_linkcount > 1)) {
672 if (cp->c_attr.ca_entries == 0)
673 cp->c_attr.ca_dircount = 0;
674 else
675 cp->c_attr.ca_dircount = cp->c_attr.ca_linkcount - 2;
676
677 cp->c_attr.ca_linkcount = 1;
678 cp->c_attr.ca_recflags |= kHFSHasFolderCountMask;
679 if ( !(hfsmp->hfs_flags & HFS_READ_ONLY) )
680 cp->c_flag |= C_MODIFIED;
681 }
682 #if QUOTA
683 if (hfsmp->hfs_flags & HFS_QUOTAS) {
684 for (i = 0; i < MAXQUOTAS; i++)
685 cp->c_dquot[i] = NODQUOT;
686 }
687 #endif /* QUOTA */
688 }
689
690 if (vtype == VDIR) {
691 if (cp->c_vp != NULL)
692 panic("hfs_getnewvnode: orphaned vnode (data)");
693 cvpp = &cp->c_vp;
694 } else {
695 if (forkp && attrp->ca_blocks < forkp->cf_blocks)
696 panic("hfs_getnewvnode: bad ca_blocks (too small)");
697 /*
698 * Allocate and initialize a file fork...
699 */
700 MALLOC_ZONE(fp, struct filefork *, sizeof(struct filefork),
701 M_HFSFORK, M_WAITOK);
702 fp->ff_cp = cp;
703 if (forkp)
704 bcopy(forkp, &fp->ff_data, sizeof(struct cat_fork));
705 else
706 bzero(&fp->ff_data, sizeof(struct cat_fork));
707 rl_init(&fp->ff_invalidranges);
708 fp->ff_sysfileinfo = 0;
709
710 if (wantrsrc) {
711 if (cp->c_rsrcfork != NULL)
712 panic("hfs_getnewvnode: orphaned rsrc fork");
713 if (cp->c_rsrc_vp != NULL)
714 panic("hfs_getnewvnode: orphaned vnode (rsrc)");
715 cp->c_rsrcfork = fp;
716 cvpp = &cp->c_rsrc_vp;
717 if ( (tvp = cp->c_vp) != NULLVP )
718 cp->c_flag |= C_NEED_DVNODE_PUT;
719 } else {
720 if (cp->c_datafork != NULL)
721 panic("hfs_getnewvnode: orphaned data fork");
722 if (cp->c_vp != NULL)
723 panic("hfs_getnewvnode: orphaned vnode (data)");
724 cp->c_datafork = fp;
725 cvpp = &cp->c_vp;
726 if ( (tvp = cp->c_rsrc_vp) != NULLVP)
727 cp->c_flag |= C_NEED_RVNODE_PUT;
728 }
729 }
730 if (tvp != NULLVP) {
731 /*
732 * grab an iocount on the vnode we weren't
733 * interested in (i.e. we want the resource fork
734 * but the cnode already has the data fork)
735 * to prevent it from being
736 * recycled by us when we call vnode_create
737 * which will result in a deadlock when we
738 * try to take the cnode lock in hfs_vnop_fsync or
739 * hfs_vnop_reclaim... vnode_get can be called here
740 * because we already hold the cnode lock which will
741 * prevent the vnode from changing identity until
742 * we drop it.. vnode_get will not block waiting for
743 * a change of state... however, it will return an
744 * error if the current iocount == 0 and we've already
745 * started to terminate the vnode... we don't need/want to
746 * grab an iocount in the case since we can't cause
747 * the fileystem to be re-entered on this thread for this vp
748 *
749 * the matching vnode_put will happen in hfs_unlock
750 * after we've dropped the cnode lock
751 */
752 if ( vnode_get(tvp) != 0)
753 cp->c_flag &= ~(C_NEED_RVNODE_PUT | C_NEED_DVNODE_PUT);
754 }
755 vfsp.vnfs_mp = mp;
756 vfsp.vnfs_vtype = vtype;
757 vfsp.vnfs_str = "hfs";
758 if ((cp->c_flag & C_HARDLINK) && (vtype == VDIR)) {
759 vfsp.vnfs_dvp = NULL; /* no parent for me! */
760 vfsp.vnfs_cnp = NULL; /* no name for me! */
761 } else {
762 vfsp.vnfs_dvp = dvp;
763 vfsp.vnfs_cnp = cnp;
764 }
765 vfsp.vnfs_fsnode = cp;
766
767 /*
768  * Special-case HFS Standard VNOPs relative to HFS+, since
769  * HFS Standard is read-only/deprecated as of 10.6.
770 */
771
772 #if FIFO
773 if (vtype == VFIFO )
774 vfsp.vnfs_vops = hfs_fifoop_p;
775 else
776 #endif
777 if (vtype == VBLK || vtype == VCHR)
778 vfsp.vnfs_vops = hfs_specop_p;
779 else if (hfs_standard)
780 vfsp.vnfs_vops = hfs_std_vnodeop_p;
781 else
782 vfsp.vnfs_vops = hfs_vnodeop_p;
783
784 if (vtype == VBLK || vtype == VCHR)
785 vfsp.vnfs_rdev = attrp->ca_rdev;
786 else
787 vfsp.vnfs_rdev = 0;
788
789 if (forkp)
790 vfsp.vnfs_filesize = forkp->cf_size;
791 else
792 vfsp.vnfs_filesize = 0;
793
794 vfsp.vnfs_flags = VNFS_ADDFSREF;
795 if (dvp == NULLVP || cnp == NULL || !(cnp->cn_flags & MAKEENTRY))
796 vfsp.vnfs_flags |= VNFS_NOCACHE;
797
798 /* Tag system files */
799 vfsp.vnfs_marksystem = issystemfile;
800
801 /* Tag root directory */
802 if (descp->cd_cnid == kHFSRootFolderID)
803 vfsp.vnfs_markroot = 1;
804 else
805 vfsp.vnfs_markroot = 0;
806
807 if ((retval = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, cvpp))) {
808 if (fp) {
809 if (fp == cp->c_datafork)
810 cp->c_datafork = NULL;
811 else
812 cp->c_rsrcfork = NULL;
813
814 FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
815 }
816 /*
817 * If this is a newly created cnode or a vnode reclaim
818 * occurred during the attachment, then cleanup the cnode.
819 */
820 if ((cp->c_vp == NULL) && (cp->c_rsrc_vp == NULL)) {
821 hfs_chash_abort(hfsmp, cp);
822 hfs_reclaim_cnode(cp);
823 }
824 else {
825 hfs_chashwakeup(hfsmp, cp, H_ALLOC | H_ATTACH);
826 if ((flags & GNV_SKIPLOCK) == 0){
827 hfs_unlock(cp);
828 }
829 }
830 *vpp = NULL;
831 return (retval);
832 }
833 vp = *cvpp;
834 vnode_settag(vp, VT_HFS);
835 if (cp->c_flag & C_HARDLINK) {
836 vnode_setmultipath(vp);
837 }
838 /*
839  * Tag resource fork vnodes as needing a VNOP_INACTIVE
840 * so that any deferred removes (open unlinked files)
841 * have the chance to process the resource fork.
842 */
843 if (VNODE_IS_RSRC(vp)) {
844 int err;
845 KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 37)), cp->c_vp, cp->c_rsrc_vp, 0, 0, 0);
846
847 /* Force VL_NEEDINACTIVE on this vnode */
848 err = vnode_ref(vp);
849 if (err == 0) {
850 vnode_rele(vp);
851 }
852 }
853 hfs_chashwakeup(hfsmp, cp, H_ALLOC | H_ATTACH);
854
855 /*
856 * Stop tracking an active hot file.
857 */
858 if (!(flags & GNV_CREATE) && (vtype != VDIR) && !issystemfile) {
859 (void) hfs_removehotfile(vp);
860 }
861
862 *vpp = vp;
863 return (0);
864 }
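
/*
 * Minimal usage sketch (hypothetical caller, error handling elided):
 * a lookup-style path feeds catalog results straight into
 * hfs_getnewvnode.  On success *vpp holds an iocount and its cnode
 * is locked, so the caller eventually does hfs_unlock(VTOC(vp)) and
 * vnode_put(vp).
 *
 *	struct cat_desc out_desc;
 *	struct cat_attr attr;
 *	struct cat_fork fork;
 *	struct vnode *vp = NULLVP;
 *
 *	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
 *	error = cat_lookup(hfsmp, &in_desc, 0, &out_desc, &attr, &fork, NULL);
 *	hfs_systemfile_unlock(hfsmp, lockflags);
 *	if (error == 0)
 *		error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, 0,
 *		                        &attr, &fork, &vp);
 */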
865
866
867 static void
868 hfs_reclaim_cnode(struct cnode *cp)
869 {
870 #if QUOTA
871 int i;
872
873 for (i = 0; i < MAXQUOTAS; i++) {
874 if (cp->c_dquot[i] != NODQUOT) {
875 dqreclaim(cp->c_dquot[i]);
876 cp->c_dquot[i] = NODQUOT;
877 }
878 }
879 #endif /* QUOTA */
880
881 /*
882 * If the descriptor has a name then release it
883 */
884 if ((cp->c_desc.cd_flags & CD_HASBUF) && (cp->c_desc.cd_nameptr != 0)) {
885 const char *nameptr;
886
887 nameptr = (const char *) cp->c_desc.cd_nameptr;
888 cp->c_desc.cd_nameptr = 0;
889 cp->c_desc.cd_flags &= ~CD_HASBUF;
890 cp->c_desc.cd_namelen = 0;
891 vfs_removename(nameptr);
892 }
893
894 lck_rw_destroy(&cp->c_rwlock, hfs_rwlock_group);
895 lck_rw_destroy(&cp->c_truncatelock, hfs_rwlock_group);
896 #if HFS_COMPRESSION
897 if (cp->c_decmp) {
898 decmpfs_cnode_destroy(cp->c_decmp);
899 FREE_ZONE(cp->c_decmp, sizeof(*(cp->c_decmp)), M_DECMPFS_CNODE);
900 }
901 #endif
902 bzero(cp, sizeof(struct cnode));
903 FREE_ZONE(cp, sizeof(struct cnode), M_HFSNODE);
904 }
905
906
907 __private_extern__
908 int
909 hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, cnid_t cnid)
910 {
911 struct cat_attr attr;
912 struct cat_desc cndesc;
913 int stillvalid = 0;
914 int lockflags;
915
916 /* System files are always valid */
917 if (cnid < kHFSFirstUserCatalogNodeID)
918 return (1);
919
920 /* XXX optimization: check write count in dvp */
921
922 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
923
924 if (dvp && cnp) {
925 bzero(&cndesc, sizeof(cndesc));
926 cndesc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
927 cndesc.cd_namelen = cnp->cn_namelen;
928 cndesc.cd_parentcnid = VTOC(dvp)->c_fileid;
929 cndesc.cd_hint = VTOC(dvp)->c_childhint;
930
931 if ((cat_lookup(hfsmp, &cndesc, 0, NULL, &attr, NULL, NULL) == 0) &&
932 (cnid == attr.ca_fileid)) {
933 stillvalid = 1;
934 }
935 } else {
936 if (cat_idlookup(hfsmp, cnid, 0, NULL, NULL, NULL) == 0) {
937 stillvalid = 1;
938 }
939 }
940 hfs_systemfile_unlock(hfsmp, lockflags);
941
942 return (stillvalid);
943 }
944
945 /*
946 * Touch cnode times based on c_touch_xxx flags
947 *
948 * cnode must be locked exclusive
949 *
950 * This will also update the volume modify time
951 */
952 __private_extern__
953 void
954 hfs_touchtimes(struct hfsmount *hfsmp, struct cnode* cp)
955 {
956 /* don't modify times if volume is read-only */
957 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
958 cp->c_touch_acctime = FALSE;
959 cp->c_touch_chgtime = FALSE;
960 cp->c_touch_modtime = FALSE;
961 }
962 else if (hfsmp->hfs_flags & HFS_STANDARD) {
963 /* HFS Standard doesn't support access times */
964 cp->c_touch_acctime = FALSE;
965 }
966
967 /*
968 * Skip access time updates if:
969 * . MNT_NOATIME is set
970 * . a file system freeze is in progress
971 * . a file system resize is in progress
972 * . the vnode associated with this cnode is marked for rapid aging
973 */
974 if (cp->c_touch_acctime) {
975 if ((vfs_flags(hfsmp->hfs_mp) & MNT_NOATIME) ||
976 (hfsmp->hfs_freezing_proc != NULL) ||
977 (hfsmp->hfs_flags & HFS_RESIZE_IN_PROGRESS) ||
978 (cp->c_vp && vnode_israge(cp->c_vp)))
979 cp->c_touch_acctime = FALSE;
980 }
981 if (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
982 struct timeval tv;
983 int touchvol = 0;
984
985 microtime(&tv);
986
987 if (cp->c_touch_acctime) {
988 cp->c_atime = tv.tv_sec;
989 /*
990 * When the access time is the only thing changing
991  * then make sure it's sufficiently newer before
992 * committing it to disk.
993 */
994 if ((((u_int32_t)cp->c_atime - (u_int32_t)(cp)->c_attr.ca_atimeondisk) >
995 ATIME_ONDISK_ACCURACY)) {
996 cp->c_flag |= C_MODIFIED;
997 }
998 cp->c_touch_acctime = FALSE;
999 }
1000 if (cp->c_touch_modtime) {
1001 cp->c_mtime = tv.tv_sec;
1002 cp->c_touch_modtime = FALSE;
1003 cp->c_flag |= C_MODIFIED;
1004 touchvol = 1;
1005 #if 1
1006 /*
1007 * HFS dates that WE set must be adjusted for DST
1008 */
1009 if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
1010 cp->c_mtime += 3600;
1011 }
1012 #endif
1013 }
1014 if (cp->c_touch_chgtime) {
1015 cp->c_ctime = tv.tv_sec;
1016 cp->c_touch_chgtime = FALSE;
1017 cp->c_flag |= C_MODIFIED;
1018 touchvol = 1;
1019 }
1020
1021 /* Touch the volume modtime if needed */
1022 if (touchvol) {
1023 MarkVCBDirty(hfsmp);
1024 HFSTOVCB(hfsmp)->vcbLsMod = tv.tv_sec;
1025 }
1026 }
1027 }
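
/*
 * Example of the atime dampening above: if ATIME_ONDISK_ACCURACY
 * were 300 seconds, a read 10 seconds after the last on-disk atime
 * would update c_atime in memory but not set C_MODIFIED, so nothing
 * is written back; a read 10 minutes later would mark the cnode
 * C_MODIFIED and push the newer atime to disk on the next update.
 */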
1028
1029 /*
1030 * Lock a cnode.
1031 */
1032 __private_extern__
1033 int
1034 hfs_lock(struct cnode *cp, enum hfslocktype locktype)
1035 {
1036 void * thread = current_thread();
1037
1038 if (cp->c_lockowner == thread) {
1039 /*
1040  * Only the extents and bitmap files support lock recursion.
1041 */
1042 if ((cp->c_fileid == kHFSExtentsFileID) ||
1043 (cp->c_fileid == kHFSAllocationFileID)) {
1044 cp->c_syslockcount++;
1045 } else {
1046 panic("hfs_lock: locking against myself!");
1047 }
1048 } else if (locktype == HFS_SHARED_LOCK) {
1049 lck_rw_lock_shared(&cp->c_rwlock);
1050 cp->c_lockowner = HFS_SHARED_OWNER;
1051
1052 } else /* HFS_EXCLUSIVE_LOCK */ {
1053 lck_rw_lock_exclusive(&cp->c_rwlock);
1054 cp->c_lockowner = thread;
1055
1056 /*
1057  * Only the extents and bitmap files support lock recursion.
1058 */
1059 if ((cp->c_fileid == kHFSExtentsFileID) ||
1060 (cp->c_fileid == kHFSAllocationFileID)) {
1061 cp->c_syslockcount = 1;
1062 }
1063 }
1064
1065 #ifdef HFS_CHECK_LOCK_ORDER
1066 /*
1067 * Regular cnodes (non-system files) cannot be locked
1068 * while holding the journal lock or a system file lock.
1069 */
1070 if (!(cp->c_desc.cd_flags & CD_ISMETA) &&
1071 ((cp->c_fileid > kHFSFirstUserCatalogNodeID) || (cp->c_fileid == kHFSRootFolderID))) {
1072 vnode_t vp = NULLVP;
1073
1074 /* Find corresponding vnode. */
1075 if (cp->c_vp != NULLVP && VTOC(cp->c_vp) == cp) {
1076 vp = cp->c_vp;
1077 } else if (cp->c_rsrc_vp != NULLVP && VTOC(cp->c_rsrc_vp) == cp) {
1078 vp = cp->c_rsrc_vp;
1079 }
1080 if (vp != NULLVP) {
1081 struct hfsmount *hfsmp = VTOHFS(vp);
1082
1083 if (hfsmp->jnl && (journal_owner(hfsmp->jnl) == thread)) {
1084 /* This will eventually be a panic here. */
1085 printf("hfs_lock: bad lock order (cnode after journal)\n");
1086 }
1087 if (hfsmp->hfs_catalog_cp && hfsmp->hfs_catalog_cp->c_lockowner == thread) {
1088 panic("hfs_lock: bad lock order (cnode after catalog)");
1089 }
1090 if (hfsmp->hfs_attribute_cp && hfsmp->hfs_attribute_cp->c_lockowner == thread) {
1091 panic("hfs_lock: bad lock order (cnode after attribute)");
1092 }
1093 if (hfsmp->hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == thread) {
1094 panic("hfs_lock: bad lock order (cnode after extents)");
1095 }
1096 }
1097 }
1098 #endif /* HFS_CHECK_LOCK_ORDER */
1099
1100 /*
1101 * Skip cnodes that no longer exist (were deleted).
1102 */
1103 if ((locktype != HFS_FORCE_LOCK) &&
1104 ((cp->c_desc.cd_flags & CD_ISMETA) == 0) &&
1105 (cp->c_flag & C_NOEXISTS)) {
1106 hfs_unlock(cp);
1107 return (ENOENT);
1108 }
1109 return (0);
1110 }
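
/*
 * Typical caller pattern (sketch): guard any access to cnode fields
 * with the cnode lock, and be prepared for ENOENT since a non-forced
 * lock fails once the cnode is marked C_NOEXISTS:
 *
 *	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)) == 0) {
 *		// ...read or modify cp->c_... fields...
 *		hfs_unlock(cp);
 *	}
 */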
1111
1112 /*
1113 * Lock a pair of cnodes.
1114 */
1115 __private_extern__
1116 int
1117 hfs_lockpair(struct cnode *cp1, struct cnode *cp2, enum hfslocktype locktype)
1118 {
1119 struct cnode *first, *last;
1120 int error;
1121
1122 /*
1123 * If cnodes match then just lock one.
1124 */
1125 if (cp1 == cp2) {
1126 return hfs_lock(cp1, locktype);
1127 }
1128
1129 /*
1130 * Lock in cnode address order.
1131 */
1132 if (cp1 < cp2) {
1133 first = cp1;
1134 last = cp2;
1135 } else {
1136 first = cp2;
1137 last = cp1;
1138 }
1139
1140 if ( (error = hfs_lock(first, locktype))) {
1141 return (error);
1142 }
1143 if ( (error = hfs_lock(last, locktype))) {
1144 hfs_unlock(first);
1145 return (error);
1146 }
1147 return (0);
1148 }
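
/*
 * Illustrative sketch (hypothetical caller): two-cnode operations,
 * e.g. a parent directory plus a child, take both locks through
 * hfs_lockpair so the address-order discipline is centralized:
 *
 *	if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK)))
 *		return (error);
 *	// ...update both cnodes...
 *	hfs_unlockpair(dcp, cp);
 */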
1149
1150 /*
1151  * Check ordering of two cnodes. Return true if they are in order.
1152 */
1153 static int
1154 hfs_isordered(struct cnode *cp1, struct cnode *cp2)
1155 {
1156 if (cp1 == cp2)
1157 return (0);
1158 if (cp1 == NULL || cp2 == (struct cnode *)0xffffffff)
1159 return (1);
1160 if (cp2 == NULL || cp1 == (struct cnode *)0xffffffff)
1161 return (0);
1162 /*
1163 * Locking order is cnode address order.
1164 */
1165 return (cp1 < cp2);
1166 }
1167
1168 /*
1169 * Acquire 4 cnode locks.
1170 * - locked in cnode address order (lesser address first).
1171 * - all or none of the locks are taken
1172 * - only one lock taken per cnode (dup cnodes are skipped)
1173 * - some of the cnode pointers may be null
1174 */
1175 __private_extern__
1176 int
1177 hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3,
1178 struct cnode *cp4, enum hfslocktype locktype, struct cnode **error_cnode)
1179 {
1180 struct cnode * a[3];
1181 struct cnode * b[3];
1182 struct cnode * list[4];
1183 struct cnode * tmp;
1184 int i, j, k;
1185 int error;
1186 if (error_cnode) {
1187 *error_cnode = NULL;
1188 }
1189
1190 if (hfs_isordered(cp1, cp2)) {
1191 a[0] = cp1; a[1] = cp2;
1192 } else {
1193 a[0] = cp2; a[1] = cp1;
1194 }
1195 if (hfs_isordered(cp3, cp4)) {
1196 b[0] = cp3; b[1] = cp4;
1197 } else {
1198 b[0] = cp4; b[1] = cp3;
1199 }
1200 a[2] = (struct cnode *)0xffffffff; /* sentinel value */
1201 b[2] = (struct cnode *)0xffffffff; /* sentinel value */
1202
1203 /*
1204 * Build the lock list, skipping over duplicates
1205 */
1206 for (i = 0, j = 0, k = 0; (i < 2 || j < 2); ) {
1207 tmp = hfs_isordered(a[i], b[j]) ? a[i++] : b[j++];
1208 if (k == 0 || tmp != list[k-1])
1209 list[k++] = tmp;
1210 }
1211
1212 /*
1213  * Now we can lock using list[0] .. list[k-1].
1214 * Skip over NULL entries.
1215 */
1216 for (i = 0; i < k; ++i) {
1217 if (list[i])
1218 if ((error = hfs_lock(list[i], locktype))) {
1219 /* Only stuff error_cnode if requested */
1220 if (error_cnode) {
1221 *error_cnode = list[i];
1222 }
1223 /* Drop any locks we acquired. */
1224 while (--i >= 0) {
1225 if (list[i])
1226 hfs_unlock(list[i]);
1227 }
1228 return (error);
1229 }
1230 }
1231 return (0);
1232 }
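
/*
 * Illustrative sketch (rename-style caller): all four cnodes are
 * locked in one call; NULL (no existing target) and duplicates
 * (source and destination sharing a parent) are both tolerated.  On
 * failure, error_cnode reports which cnode could not be locked.
 *
 *	struct cnode *error_cnode;
 *
 *	error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp),
 *	                     tvp ? VTOC(tvp) : NULL,
 *	                     HFS_EXCLUSIVE_LOCK, &error_cnode);
 *	if (error)
 *		return (error);
 *	// ...perform the rename...
 *	hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp),
 *	               tvp ? VTOC(tvp) : NULL);
 */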
1233
1234
1235 /*
1236 * Unlock a cnode.
1237 */
1238 __private_extern__
1239 void
1240 hfs_unlock(struct cnode *cp)
1241 {
1242 vnode_t rvp = NULLVP;
1243 vnode_t vp = NULLVP;
1244 u_int32_t c_flag;
1245 void *lockowner;
1246
1247 /*
1248  * Only the extents and bitmap files support lock recursion.
1249 */
1250 if ((cp->c_fileid == kHFSExtentsFileID) ||
1251 (cp->c_fileid == kHFSAllocationFileID)) {
1252 if (--cp->c_syslockcount > 0) {
1253 return;
1254 }
1255 }
1256 c_flag = cp->c_flag;
1257 cp->c_flag &= ~(C_NEED_DVNODE_PUT | C_NEED_RVNODE_PUT | C_NEED_DATA_SETSIZE | C_NEED_RSRC_SETSIZE);
1258
1259 if (c_flag & (C_NEED_DVNODE_PUT | C_NEED_DATA_SETSIZE)) {
1260 vp = cp->c_vp;
1261 }
1262 if (c_flag & (C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE)) {
1263 rvp = cp->c_rsrc_vp;
1264 }
1265
1266 lockowner = cp->c_lockowner;
1267 if (lockowner == current_thread()) {
1268 cp->c_lockowner = NULL;
1269 lck_rw_unlock_exclusive(&cp->c_rwlock);
1270 } else {
1271 lck_rw_unlock_shared(&cp->c_rwlock);
1272 }
1273
1274 /* Perform any vnode post processing after cnode lock is dropped. */
1275 if (vp) {
1276 if (c_flag & C_NEED_DATA_SETSIZE)
1277 ubc_setsize(vp, 0);
1278 if (c_flag & C_NEED_DVNODE_PUT)
1279 vnode_put(vp);
1280 }
1281 if (rvp) {
1282 if (c_flag & C_NEED_RSRC_SETSIZE)
1283 ubc_setsize(rvp, 0);
1284 if (c_flag & C_NEED_RVNODE_PUT)
1285 vnode_put(rvp);
1286 }
1287 }
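
/*
 * Note on the ownership test above: c_lockowner is set to the
 * locking thread only for exclusive holds (shared holders store the
 * HFS_SHARED_OWNER sentinel in hfs_lock), so comparing it against
 * current_thread() is enough to pick the matching lck_rw unlock
 * flavor.
 */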
1288
1289 /*
1290 * Unlock a pair of cnodes.
1291 */
1292 __private_extern__
1293 void
1294 hfs_unlockpair(struct cnode *cp1, struct cnode *cp2)
1295 {
1296 hfs_unlock(cp1);
1297 if (cp2 != cp1)
1298 hfs_unlock(cp2);
1299 }
1300
1301 /*
1302 * Unlock a group of cnodes.
1303 */
1304 __private_extern__
1305 void
1306 hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4)
1307 {
1308 struct cnode * list[4];
1309 int i, k = 0;
1310
1311 if (cp1) {
1312 hfs_unlock(cp1);
1313 list[k++] = cp1;
1314 }
1315 if (cp2) {
1316 for (i = 0; i < k; ++i) {
1317 if (list[i] == cp2)
1318 goto skip1;
1319 }
1320 hfs_unlock(cp2);
1321 list[k++] = cp2;
1322 }
1323 skip1:
1324 if (cp3) {
1325 for (i = 0; i < k; ++i) {
1326 if (list[i] == cp3)
1327 goto skip2;
1328 }
1329 hfs_unlock(cp3);
1330 list[k++] = cp3;
1331 }
1332 skip2:
1333 if (cp4) {
1334 for (i = 0; i < k; ++i) {
1335 if (list[i] == cp4)
1336 return;
1337 }
1338 hfs_unlock(cp4);
1339 }
1340 }
1341
1342
1343 /*
1344 * Protect a cnode against a truncation.
1345 *
1346 * Used mainly by read/write since they don't hold the
1347 * cnode lock across calls to the cluster layer.
1348 *
1349 * The process doing a truncation must take the lock
1350 * exclusive. The read/write processes can take it
1351 * non-exclusive.
1352 */
1353 __private_extern__
1354 void
1355 hfs_lock_truncate(struct cnode *cp, int exclusive)
1356 {
1357 #ifdef HFS_CHECK_LOCK_ORDER
1358 if (cp->c_lockowner == current_thread())
1359 panic("hfs_lock_truncate: cnode %p locked!", cp);
1360 #endif /* HFS_CHECK_LOCK_ORDER */
1361
1362 if (exclusive)
1363 lck_rw_lock_exclusive(&cp->c_truncatelock);
1364 else
1365 lck_rw_lock_shared(&cp->c_truncatelock);
1366 }
1367
1368 __private_extern__
1369 void
1370 hfs_unlock_truncate(struct cnode *cp, int exclusive)
1371 {
1372 if (exclusive) {
1373 lck_rw_unlock_exclusive(&cp->c_truncatelock);
1374 } else {
1375 lck_rw_unlock_shared(&cp->c_truncatelock);
1376 }
1377 }
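
/*
 * Usage sketch, per the comment above hfs_lock_truncate: read/write
 * paths take the truncate lock shared around cluster I/O, while a
 * truncating path takes it exclusive:
 *
 *	hfs_lock_truncate(cp, FALSE);   // read/write path: shared
 *	// ...cluster I/O without holding the cnode lock...
 *	hfs_unlock_truncate(cp, FALSE);
 *
 *	hfs_lock_truncate(cp, TRUE);    // truncation path: exclusive
 *	// ...hfs_truncate()...
 *	hfs_unlock_truncate(cp, TRUE);
 */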
1378
1379
1380
1381