/*
 * Copyright (c) 2002-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/quota.h>
#include <sys/kdebug.h>

#include <kern/locks.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <hfs/hfs_catalog.h>
#include <hfs/hfs_cnode.h>
#include <hfs/hfs_quota.h>
extern lck_attr_t *hfs_lock_attr;
extern lck_grp_t *hfs_mutex_group;
extern lck_grp_t *hfs_rwlock_group;

static int hfs_filedone(struct vnode *vp, vfs_context_t context);

static void hfs_reclaim_cnode(struct cnode *);

static int hfs_isordered(struct cnode *, struct cnode *);
inline int hfs_checkdeleted(struct cnode *cp)
{
	return ((cp->c_flag & (C_DELETED | C_NOEXISTS)) ? ENOENT : 0);
}
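/*
 * Illustrative use (not from the original file): callers typically return
 * the result directly to reject operations on open-unlinked cnodes, e.g.
 *
 *	if ((error = hfs_checkdeleted(cp)))
 *		return (error);
 */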
/*
 * Last reference to a cnode. If necessary, write or delete it.
 */
int
hfs_vnop_inactive(struct vnop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct hfsmount *hfsmp = VTOHFS(vp);
	struct proc *p = vfs_context_proc(ap->a_context);
	int error = 0;
	int recycle = 0;
	int forkcount = 0;
	int truncated = 0;
	int started_tr = 0;
	int took_trunc_lock = 0;
	cat_cookie_t cookie;
	int cat_reserve = 0;
	int lockflags;
	enum vtype v_type;

	v_type = vnode_vtype(vp);
	cp = VTOC(vp);
	if ((hfsmp->hfs_flags & HFS_READ_ONLY) || vnode_issystem(vp) ||
	    (hfsmp->hfs_freezing_proc == p)) {
		error = 0;
		goto inactive_done;
	}
	/*
	 * Ignore nodes related to stale file handles.
	 * We are peeking at the cnode flag without the lock, but if C_NOEXISTS
	 * is set, that means the cnode doesn't have any backing store in the
	 * catalog anymore, and is otherwise safe to force a recycle.
	 */
	if (cp->c_flag & C_NOEXISTS) {
		error = 0;
		goto inactive_done;
	}
	if ((v_type == VREG || v_type == VLNK)) {
		hfs_lock_truncate(cp, TRUE);
		took_trunc_lock = 1;
	}

	(void) hfs_lock(cp, HFS_FORCE_LOCK);
	if (cp->c_datafork)
		++forkcount;
	if (cp->c_rsrcfork)
		++forkcount;

	/*
	 * We should lock cnode before checking the flags in the
	 * condition below and should unlock the cnode before calling
	 * ubc_setsize() as cluster code can call other HFS vnops which
	 * will try to acquire the same cnode lock and cause deadlock.
	 * Only call ubc_setsize to 0 if we are the last fork.
	 */
	if ((v_type == VREG || v_type == VLNK) &&
	    (cp->c_flag & C_DELETED) &&
	    (VTOF(vp)->ff_blocks != 0) && (forkcount == 1)) {
		hfs_unlock(cp);
		ubc_setsize(vp, 0);
		(void) hfs_lock(cp, HFS_FORCE_LOCK);
	}
	if (v_type == VREG && !ISSET(cp->c_flag, C_DELETED) && VTOF(vp)->ff_blocks) {
		hfs_filedone(vp, ap->a_context);
	}
	/*
	 * Remove any directory hints or cached origins
	 */
	if (v_type == VDIR) {
		hfs_reldirhints(cp, 0);
	}
	if (cp->c_flag & C_HARDLINK) {
		hfs_relorigins(cp);
	}
	/* Hurry the recycling process along if we're an open-unlinked file */
	if ((v_type == VREG || v_type == VLNK) && (cp->c_flag & C_DELETED)) {
		recycle = 1;
	}
	/*
	 * This check is slightly complicated. We should only truncate data
	 * in very specific cases for open-unlinked files. This is because
	 * we want to ensure that the resource fork continues to be available
	 * if the caller has the data fork open. However, this is not symmetric;
	 * someone who has the resource fork open need not be able to access the data
	 * fork once the data fork has gone inactive.
	 *
	 * If we're the last fork, then we have cleaning up to do.
	 *
	 * A) last fork, and vp == c_vp
	 *	Truncate away own fork data. If rsrc fork is not in core, truncate it too.
	 *
	 * B) last fork, and vp == c_rsrc_vp
	 *	Truncate ourselves, assume data fork has been cleaned due to C).
	 *
	 * If we're not the last fork, then things are a little different:
	 *
	 * C) not the last fork, vp == c_vp
	 *	Truncate ourselves. Once the file has gone out of the namespace,
	 *	it cannot be further opened. Further access to the rsrc fork may
	 *	continue, however.
	 *
	 * D) not the last fork, vp == c_rsrc_vp
	 *	Don't enter the block below, just clean up vnode and push it out of core.
	 */
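	/*
	 * Cases A, B and C above all enter the block below; case D (an
	 * inactive resource fork whose data fork is still open) fails the
	 * !VNODE_IS_RSRC(vp) test and skips it.
	 */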
	if ((v_type == VREG || v_type == VLNK) && (cp->c_flag & C_DELETED) &&
	    ((forkcount == 1) || (!VNODE_IS_RSRC(vp)))) {
		if (VTOF(vp)->ff_blocks != 0) {
			/*
			 * Since we're already inside a transaction,
			 * tell hfs_truncate to skip the ubc_setsize.
			 */
			error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, 0, ap->a_context);
		}
		/*
		 * If c_blocks > 0 and we are the last fork (data fork), then
		 * we can go and truncate away the rsrc fork blocks if
		 * they were not in core.
		 */
		if ((cp->c_blocks > 0) && (forkcount == 1) && (vp != cp->c_rsrc_vp)) {
			struct vnode *rvp = NULLVP;

			error = hfs_vgetrsrc(hfsmp, vp, &rvp, FALSE, FALSE);
			/*
			 * Defer the vnode_put and ubc_setsize on rvp until hfs_unlock().
			 */
			cp->c_flag |= C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE;
			error = hfs_truncate(rvp, (off_t)0, IO_NDELAY, 1, 0, ap->a_context);

			vnode_recycle(rvp);  /* all done with this vnode */
		}
	}
	//
	// If needed, get rid of any xattrs that this file (or directory) may have.
	// Note that this must happen outside of any other transactions
	// because it starts/ends its own transactions and grabs its
	// own locks.  This is to prevent a file with a lot of attributes
	// from creating a transaction that is too large (which panics).
	//
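	// (The kHFSHasAttributesMask test below also means files that never
	// had any extended attributes skip the attribute B-tree walk
	// entirely.)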
	if ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0 &&
	    (cp->c_flag & C_DELETED) && (forkcount <= 1)) {
		hfs_removeallattr(hfsmp, cp->c_fileid);
	}
	/*
	 * Check for a postponed deletion.
	 * (only delete a cnode when the last fork goes inactive)
	 */
	if ((cp->c_flag & C_DELETED) && (forkcount <= 1)) {
		/*
		 * Mark cnode in transit so that no one can get this
		 * cnode from cnode hash.
		 */
		// hfs_chash_mark_in_transit(hfsmp, cp);
		// XXXdbg - remove the cnode from the hash table since it's deleted
		//          otherwise someone could go to sleep on the cnode and not
		//          be woken up until this vnode gets recycled which could be
		//          a very long time...
		hfs_chashremove(hfsmp, cp);

		cp->c_flag |= C_NOEXISTS;   // XXXdbg
		if (started_tr == 0) {
			if (hfs_start_transaction(hfsmp) != 0) {
				error = EINVAL;
				goto out;
			}
			started_tr = 1;
		}

		/*
		 * Reserve some space in the Catalog file.
		 */
		if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {
			goto out;
		}
		cat_reserve = 1;
		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);

		if (cp->c_blocks > 0) {
			printf("hfs_inactive: deleting non-empty%sfile %d, "
			       "blks %d\n", VNODE_IS_RSRC(vp) ? " rsrc " : " ",
			       (int)cp->c_fileid, (int)cp->c_blocks);
		}
		//
		// release the name pointer in the descriptor so that
		// cat_delete() will use the file-id to do the deletion.
		// in the case of hard links this is imperative (in the
		// case of regular files the fileid and cnid are the
		// same so it doesn't matter).
		//
		cat_releasedesc(&cp->c_desc);

		/*
		 * The descriptor name may be zero,
		 * in which case the fileid is used.
		 */
		error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

		if (error && truncated && (error != ENXIO))
			printf("hfs_inactive: couldn't delete a truncated file!");
		/* Update HFS Private Data dir */
		if (error == 0) {
			hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries--;
			if (vnode_isdir(vp)) {
				DEC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
			}
			(void)cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
					 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
		}

		hfs_systemfile_unlock(hfsmp, lockflags);
		if (hfsmp->hfs_flags & HFS_QUOTAS)
			(void)hfs_chkiq(cp, -1, NOCRED, 0);

		/* Already set C_NOEXISTS at the beginning of this block */
		cp->c_flag &= ~C_DELETED;
		cp->c_touch_chgtime = TRUE;
		cp->c_touch_modtime = TRUE;

		hfs_volupdate(hfsmp, (v_type == VDIR) ? VOL_RMDIR : VOL_RMFILE, 0);
	}
	/*
	 * A file may have had delayed allocations, in which case hfs_update
	 * would not have updated the catalog record (cat_update). We need
	 * to do that now, before we lose our fork data. We also need to
	 * force the update, or hfs_update will again skip the cat_update.
	 */
	if ((cp->c_flag & C_MODIFIED) ||
	    cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
		if ((cp->c_flag & C_MODIFIED) || cp->c_touch_modtime) {
			cp->c_flag |= C_FORCEUPDATE;
		}
		hfs_update(vp, 0);
	}
out:
	if (cat_reserve)
		cat_postflight(hfsmp, &cookie, p);

	// XXXdbg - have to do this because a goto could have come here
	if (started_tr) {
		hfs_end_transaction(hfsmp);
		started_tr = 0;
	}
	/*
	 * This has been removed from the namespace and has no backing store
	 * in the catalog, so we should force a reclaim as soon as possible.
	 * Also, we want to check the flag while we still have the cnode lock.
	 */
	if (cp->c_flag & C_NOEXISTS)
		recycle = 1;

	hfs_unlock(cp);

	if (took_trunc_lock)
		hfs_unlock_truncate(cp, TRUE);

	/*
	 * If we are done with the vnode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (recycle)
		vnode_recycle(vp);

inactive_done:
	return (error);
}
/*
 * File clean-up (zero fill and shrink peof).
 */
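/*
 * ("leof" below is the file's logical end-of-file, its ff_size; "peof" is
 * the physical end-of-file, i.e. the size actually allocated in ff_blocks.)
 */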
static int
hfs_filedone(struct vnode *vp, vfs_context_t context)
{
	struct cnode *cp;
	struct filefork *fp;
	struct hfsmount *hfsmp;
	struct rl_entry *invalid_range;
	off_t leof;
	u_int32_t blks, blocksize;

	cp = VTOC(vp);
	fp = VTOF(vp);
	hfsmp = VTOHFS(vp);
	leof = fp->ff_size;

	if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (fp->ff_blocks == 0))
		return (0);

	hfs_unlock(cp);
	(void) cluster_push(vp, IO_CLOSE);
	hfs_lock(cp, HFS_FORCE_LOCK);
	/*
	 * Explicitly zero out the areas of file
	 * that are currently marked invalid.
	 */
	while ((invalid_range = TAILQ_FIRST(&fp->ff_invalidranges))) {
		off_t start = invalid_range->rl_start;
		off_t end = invalid_range->rl_end;

		/* The range about to be written must be validated
		 * first, so that VNOP_BLOCKMAP() will return the
		 * appropriate mapping for the cluster code:
		 */
		rl_remove(start, end, &fp->ff_invalidranges);

		hfs_unlock(cp);
		(void) cluster_write(vp, (struct uio *) 0,
				leof, end + 1, start, (off_t)0,
				IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
		hfs_lock(cp, HFS_FORCE_LOCK);
		cp->c_flag |= C_MODIFIED;
	}
	cp->c_flag &= ~C_ZFWANTSYNC;

	blocksize = VTOVCB(vp)->blockSize;
	blks = leof / blocksize;
	if (((off_t)blks * (off_t)blocksize) != leof)
		blks++;
	/*
	 * Shrink the peof to the smallest size necessary to contain the leof.
	 */
	if (blks < fp->ff_blocks)
		(void) hfs_truncate(vp, leof, IO_NDELAY, 0, 0, context);

	hfs_unlock(cp);
	(void) cluster_push(vp, IO_CLOSE);
	hfs_lock(cp, HFS_FORCE_LOCK);
	/*
	 * If the hfs_truncate didn't happen to flush the vnode's
	 * information out to disk, force it to be updated now that
	 * all invalid ranges have been zero-filled and validated:
	 */
	if (cp->c_flag & C_MODIFIED) {
		hfs_update(vp, 0);
	}
	return (0);
}
/*
 * Reclaim a cnode so that it can be used for other purposes.
 */
int
hfs_vnop_reclaim(struct vnop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp = NULL;
	struct filefork *altfp = NULL;
	struct hfsmount *hfsmp = VTOHFS(vp);
	int reclaim_cnode = 0;

	(void) hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
	cp = VTOC(vp);
	/*
	 * A file may have had delayed allocations, in which case hfs_update
	 * would not have updated the catalog record (cat_update). We need
	 * to do that now, before we lose our fork data. We also need to
	 * force the update, or hfs_update will again skip the cat_update.
	 */
	if ((cp->c_flag & C_MODIFIED) ||
	    cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
		if ((cp->c_flag & C_MODIFIED) || cp->c_touch_modtime) {
			cp->c_flag |= C_FORCEUPDATE;
		}
		hfs_update(vp, 0);
	}
	/*
	 * Keep track of an inactive hot file.
	 */
	if (!vnode_isdir(vp) &&
	    !vnode_issystem(vp) &&
	    !(cp->c_flag & (C_DELETED | C_NOEXISTS))) {
		(void) hfs_addhotfile(vp);
	}
	vnode_removefsref(vp);
	/*
	 * Find file fork for this vnode (if any)
	 * Also check if another fork is active
	 */
	if (cp->c_vp == vp) {
		fp = cp->c_datafork;
		altfp = cp->c_rsrcfork;

		cp->c_datafork = NULL;
		cp->c_vp = NULL;
	} else if (cp->c_rsrc_vp == vp) {
		fp = cp->c_rsrcfork;
		altfp = cp->c_datafork;

		cp->c_rsrcfork = NULL;
		cp->c_rsrc_vp = NULL;
	} else {
		panic("hfs_vnop_reclaim: vp points to wrong cnode (vp=%p cp->c_vp=%p cp->c_rsrc_vp=%p)\n", vp, cp->c_vp, cp->c_rsrc_vp);
	}
	/*
	 * On the last fork, remove the cnode from its hash chain.
	 */
	if (altfp == NULL) {
		/* If we can't remove it then the cnode must persist! */
		if (hfs_chashremove(hfsmp, cp) == 0)
			reclaim_cnode = 1;
	}
	/*
	 * Remove any directory hints
	 */
	if (vnode_isdir(vp)) {
		hfs_reldirhints(cp, 0);
	}

	if (cp->c_flag & C_HARDLINK) {
		hfs_relorigins(cp);
	}
	/* Release the file fork and related data */
	if (fp) {
		/* Dump cached symlink data */
		if (vnode_islnk(vp) && (fp->ff_symlinkptr != NULL)) {
			FREE(fp->ff_symlinkptr, M_TEMP);
		}
		FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
	}

	/*
	 * If there was only one active fork then we can release the cnode.
	 */
	if (reclaim_cnode) {
		hfs_chashwakeup(hfsmp, cp, H_ALLOC | H_TRANSIT);
		hfs_reclaim_cnode(cp);
	} else /* cnode in use */ {
		hfs_unlock(cp);
	}

	vnode_clearfsnode(vp);
	return (0);
}
extern int (**hfs_vnodeop_p) (void *);
extern int (**hfs_std_vnodeop_p) (void *);
extern int (**hfs_specop_p) (void *);
extern int (**hfs_fifoop_p) (void *);

/*
 * hfs_getnewvnode - get new default vnode
 *
 * The vnode is returned with an iocount and the cnode locked.
 */
int
hfs_getnewvnode(
	struct hfsmount *hfsmp,
	struct vnode *dvp,
	struct componentname *cnp,
	struct cat_desc *descp,
	int flags,
	struct cat_attr *attrp,
	struct cat_fork *forkp,
	struct vnode **vpp)
{
	struct mount *mp = HFSTOVFS(hfsmp);
	struct vnode *vp = NULL;
	struct vnode **cvpp;
	struct vnode *tvp = NULLVP;
	struct cnode *cp = NULL;
	struct filefork *fp = NULL;
	int hfs_standard = 0;
	int retval;
	struct vnode_fsparam vfsp;
	enum vtype vtype;
	int issystemfile;
	int wantrsrc;
	int i;
	hfs_standard = (hfsmp->hfs_flags & HFS_STANDARD);

	if (attrp->ca_fileid == 0) {
		*vpp = NULL;
		return (ENOENT);
	}

#if !FIFO
	if (IFTOVT(attrp->ca_mode) == VFIFO) {
		*vpp = NULL;
		return (ENOTSUP);
	}
#endif /* !FIFO */
	vtype = IFTOVT(attrp->ca_mode);
	issystemfile = (descp->cd_flags & CD_ISMETA) && (vtype == VREG);
	wantrsrc = flags & GNV_WANTRSRC;
#ifdef HFS_CHECK_LOCK_ORDER
	/*
	 * The only case where it's permissible to hold the parent cnode
	 * lock is during a create operation (hfs_makenode) or when
	 * we don't need the cnode lock (GNV_SKIPLOCK).
	 */
	if ((dvp != NULL) &&
	    (flags & (GNV_CREATE | GNV_SKIPLOCK)) == 0 &&
	    VTOC(dvp)->c_lockowner == current_thread()) {
		panic("hfs_getnewvnode: unexpected hold of parent cnode %p", VTOC(dvp));
	}
#endif /* HFS_CHECK_LOCK_ORDER */
	/*
	 * Get a cnode (new or existing)
	 */
	cp = hfs_chash_getcnode(hfsmp, attrp->ca_fileid, vpp, wantrsrc, (flags & GNV_SKIPLOCK));

	/*
	 * If the id is no longer valid for lookups we'll get back a NULL cp.
	 */
	if (cp == NULL) {
		return (ENOENT);
	}

	/*
	 * Hardlinks may need an updated catalog descriptor. However, if
	 * the cnode has already been marked as open-unlinked (C_DELETED), then don't
	 * replace its descriptor.
	 */
	if (!(hfs_checkdeleted(cp))) {
		if ((cp->c_flag & C_HARDLINK) && descp->cd_nameptr && descp->cd_namelen > 0) {
			replace_desc(cp, descp);
		}
	}

	/* Check if we found a matching vnode */
	if (*vpp != NULL)
		return (0);
	/*
	 * If this is a new cnode then initialize it.
	 */
	if (ISSET(cp->c_hflag, H_ALLOC)) {
		lck_rw_init(&cp->c_truncatelock, hfs_rwlock_group, hfs_lock_attr);

		/* Make sure it's still valid (i.e. exists on disk). */
		if (!(flags & GNV_CREATE) &&
		    !hfs_valid_cnode(hfsmp, dvp, (wantrsrc ? NULL : cnp), cp->c_fileid)) {
			hfs_chash_abort(hfsmp, cp);
			hfs_reclaim_cnode(cp);
			*vpp = NULL;
			return (ENOENT);
		}
		bcopy(attrp, &cp->c_attr, sizeof(struct cat_attr));
		bcopy(descp, &cp->c_desc, sizeof(struct cat_desc));

		/* The name was inherited so clear descriptor state... */
		descp->cd_namelen = 0;
		descp->cd_nameptr = NULL;
		descp->cd_flags &= ~CD_HASBUF;
		if ((vtype == VREG || vtype == VDIR) &&
		    ((descp->cd_cnid != attrp->ca_fileid) ||
		     (attrp->ca_recflags & kHFSHasLinkChainMask))) {
			cp->c_flag |= C_HARDLINK;
		}
		/*
		 * Fix-up dir link counts.
		 *
		 * Earlier versions of Leopard used ca_linkcount for posix
		 * nlink support (effectively the sub-directory count + 2).
		 * That is now accomplished using the ca_dircount field with
		 * the corresponding kHFSHasFolderCountMask flag.
		 *
		 * For directories the ca_linkcount is the true link count,
		 * tracking the number of actual hardlinks to a directory.
		 *
		 * We only do this if the mount has HFS_FOLDERCOUNT set;
		 * at the moment, we only set that for HFSX volumes.
		 */
		if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
		    (vtype == VDIR) &&
		    !(attrp->ca_recflags & kHFSHasFolderCountMask) &&
		    (cp->c_attr.ca_linkcount > 1)) {
			if (cp->c_attr.ca_entries == 0)
				cp->c_attr.ca_dircount = 0;
			else
				cp->c_attr.ca_dircount = cp->c_attr.ca_linkcount - 2;

			cp->c_attr.ca_linkcount = 1;
			cp->c_attr.ca_recflags |= kHFSHasFolderCountMask;
			if (!(hfsmp->hfs_flags & HFS_READ_ONLY))
				cp->c_flag |= C_MODIFIED;
		}
		if (hfsmp->hfs_flags & HFS_QUOTAS) {
			for (i = 0; i < MAXQUOTAS; i++)
				cp->c_dquot[i] = NODQUOT;
		}
	}

	if (vtype == VDIR) {
		if (cp->c_vp != NULL)
			panic("hfs_getnewvnode: orphaned vnode (data)");
		cvpp = &cp->c_vp;
	} else {
		if (forkp && attrp->ca_blocks < forkp->cf_blocks)
			panic("hfs_getnewvnode: bad ca_blocks (too small)");
		/*
		 * Allocate and initialize a file fork...
		 */
		MALLOC_ZONE(fp, struct filefork *, sizeof(struct filefork),
			M_HFSFORK, M_WAITOK);
		fp->ff_cp = cp;
		if (forkp)
			bcopy(forkp, &fp->ff_data, sizeof(struct cat_fork));
		else
			bzero(&fp->ff_data, sizeof(struct cat_fork));
		rl_init(&fp->ff_invalidranges);
		fp->ff_sysfileinfo = 0;
		if (wantrsrc) {
			if (cp->c_rsrcfork != NULL)
				panic("hfs_getnewvnode: orphaned rsrc fork");
			if (cp->c_rsrc_vp != NULL)
				panic("hfs_getnewvnode: orphaned vnode (rsrc)");
			cp->c_rsrcfork = fp;
			cvpp = &cp->c_rsrc_vp;
			if ( (tvp = cp->c_vp) != NULLVP )
				cp->c_flag |= C_NEED_DVNODE_PUT;
		} else {
			if (cp->c_datafork != NULL)
				panic("hfs_getnewvnode: orphaned data fork");
			if (cp->c_vp != NULL)
				panic("hfs_getnewvnode: orphaned vnode (data)");
			cp->c_datafork = fp;
			cvpp = &cp->c_vp;
			if ( (tvp = cp->c_rsrc_vp) != NULLVP )
				cp->c_flag |= C_NEED_RVNODE_PUT;
		}
	}
	if (tvp != NULLVP) {
		/*
		 * grab an iocount on the vnode we weren't
		 * interested in (i.e. we want the resource fork
		 * but the cnode already has the data fork)
		 * to prevent it from being
		 * recycled by us when we call vnode_create
		 * which will result in a deadlock when we
		 * try to take the cnode lock in hfs_vnop_fsync or
		 * hfs_vnop_reclaim... vnode_get can be called here
		 * because we already hold the cnode lock which will
		 * prevent the vnode from changing identity until
		 * we drop it... vnode_get will not block waiting for
		 * a change of state... however, it will return an
		 * error if the current iocount == 0 and we've already
		 * started to terminate the vnode... we don't need/want to
		 * grab an iocount in that case since we can't cause
		 * the filesystem to be re-entered on this thread for this vp
		 *
		 * the matching vnode_put will happen in hfs_unlock
		 * after we've dropped the cnode lock
		 */
		if ( vnode_get(tvp) != 0)
			cp->c_flag &= ~(C_NEED_RVNODE_PUT | C_NEED_DVNODE_PUT);
	}
	vfsp.vnfs_vtype = vtype;
	vfsp.vnfs_str = "hfs";
	if ((cp->c_flag & C_HARDLINK) && (vtype == VDIR)) {
		vfsp.vnfs_dvp = NULL;  /* no parent for me! */
		vfsp.vnfs_cnp = NULL;  /* no name for me! */
	} else {
		vfsp.vnfs_dvp = dvp;
		vfsp.vnfs_cnp = cnp;
	}
	vfsp.vnfs_fsnode = cp;
	/*
	 * Special Case HFS Standard VNOPs from HFS+, since
	 * HFS standard is readonly/deprecated as of 10.6
	 */
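	/*
	 * Selection order below: FIFOs get hfs_fifoop_p, block/character
	 * specials get hfs_specop_p, HFS Standard volumes get the limited
	 * hfs_std_vnodeop_p table, and everything else gets the default
	 * hfs_vnodeop_p.
	 */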
#if FIFO
	if (vtype == VFIFO)
		vfsp.vnfs_vops = hfs_fifoop_p;
	else
#endif
	if (vtype == VBLK || vtype == VCHR)
		vfsp.vnfs_vops = hfs_specop_p;
	else if (hfs_standard)
		vfsp.vnfs_vops = hfs_std_vnodeop_p;
	else
		vfsp.vnfs_vops = hfs_vnodeop_p;

	if (vtype == VBLK || vtype == VCHR)
		vfsp.vnfs_rdev = attrp->ca_rdev;
	else
		vfsp.vnfs_rdev = 0;
	if (forkp)
		vfsp.vnfs_filesize = forkp->cf_size;
	else
		vfsp.vnfs_filesize = 0;

	vfsp.vnfs_flags = VNFS_ADDFSREF;
	if (dvp == NULLVP || cnp == NULL || !(cnp->cn_flags & MAKEENTRY))
		vfsp.vnfs_flags |= VNFS_NOCACHE;

	/* Tag system files */
	vfsp.vnfs_marksystem = issystemfile;

	/* Tag root directory */
	if (descp->cd_cnid == kHFSRootFolderID)
		vfsp.vnfs_markroot = 1;
	else
		vfsp.vnfs_markroot = 0;
	if ((retval = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, cvpp))) {
		if (fp) {
			if (fp == cp->c_datafork)
				cp->c_datafork = NULL;
			else
				cp->c_rsrcfork = NULL;

			FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
		}
		/*
		 * If this is a newly created cnode or a vnode reclaim
		 * occurred during the attachment, then cleanup the cnode.
		 */
		if ((cp->c_vp == NULL) && (cp->c_rsrc_vp == NULL)) {
			hfs_chash_abort(hfsmp, cp);
			hfs_reclaim_cnode(cp);
		} else {
			hfs_chashwakeup(hfsmp, cp, H_ALLOC | H_ATTACH);
			if ((flags & GNV_SKIPLOCK) == 0) {
				hfs_unlock(cp);
			}
		}
		*vpp = NULL;
		return (retval);
	}
	vp = *cvpp;
	vnode_settag(vp, VT_HFS);
	if (cp->c_flag & C_HARDLINK) {
		vnode_setmultipath(vp);
	}
	/*
	 * Tag resource fork vnodes as needing a VNOP_INACTIVE
	 * so that any deferred removes (open unlinked files)
	 * have the chance to process the resource fork.
	 */
	if (VNODE_IS_RSRC(vp)) {
		KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 37)), cp->c_vp, cp->c_rsrc_vp, 0, 0, 0);

		/* Force VL_NEEDINACTIVE on this vnode */
		vnode_ref(vp);
		vnode_rele(vp);
	}
	hfs_chashwakeup(hfsmp, cp, H_ALLOC | H_ATTACH);

	/*
	 * Stop tracking an active hot file.
	 */
	if (!(flags & GNV_CREATE) && (vtype != VDIR) && !issystemfile) {
		(void) hfs_removehotfile(vp);
	}

	*vpp = vp;
	return (0);
}
static void
hfs_reclaim_cnode(struct cnode *cp)
{
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (cp->c_dquot[i] != NODQUOT) {
			dqreclaim(cp->c_dquot[i]);
			cp->c_dquot[i] = NODQUOT;
		}
	}
	/*
	 * If the descriptor has a name then release it
	 */
	if ((cp->c_desc.cd_flags & CD_HASBUF) && (cp->c_desc.cd_nameptr != 0)) {
		const char *nameptr;

		nameptr = (const char *) cp->c_desc.cd_nameptr;
		cp->c_desc.cd_nameptr = 0;
		cp->c_desc.cd_flags &= ~CD_HASBUF;
		cp->c_desc.cd_namelen = 0;
		vfs_removename(nameptr);
	}
	lck_rw_destroy(&cp->c_rwlock, hfs_rwlock_group);
	lck_rw_destroy(&cp->c_truncatelock, hfs_rwlock_group);

	if (cp->c_decmp) {
		decmpfs_cnode_destroy(cp->c_decmp);
		FREE_ZONE(cp->c_decmp, sizeof(*(cp->c_decmp)), M_DECMPFS_CNODE);
	}

	bzero(cp, sizeof(struct cnode));
	FREE_ZONE(cp, sizeof(struct cnode), M_HFSNODE);
}
int
hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, cnid_t cnid)
{
	struct cat_attr attr;
	struct cat_desc cndesc;
	int stillvalid = 0;
	int lockflags;
	/* System files are always valid */
	if (cnid < kHFSFirstUserCatalogNodeID)
		return (1);

	/* XXX optimization: check write count in dvp */

	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
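	/*
	 * Two validation paths: given a parent vnode and a name, redo the
	 * by-name catalog lookup and require that it yield the same fileid;
	 * otherwise fall back to a lookup by fileid alone.
	 */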
	if (dvp && cnp) {
		bzero(&cndesc, sizeof(cndesc));
		cndesc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
		cndesc.cd_namelen = cnp->cn_namelen;
		cndesc.cd_parentcnid = VTOC(dvp)->c_fileid;
		cndesc.cd_hint = VTOC(dvp)->c_childhint;

		if ((cat_lookup(hfsmp, &cndesc, 0, NULL, &attr, NULL, NULL) == 0) &&
		    (cnid == attr.ca_fileid)) {
			stillvalid = 1;
		}
	} else {
		if (cat_idlookup(hfsmp, cnid, 0, NULL, NULL, NULL) == 0) {
			stillvalid = 1;
		}
	}
	hfs_systemfile_unlock(hfsmp, lockflags);

	return (stillvalid);
}

/*
 * Touch cnode times based on c_touch_xxx flags
 *
 * cnode must be locked exclusive
 *
 * This will also update the volume modify time
 */
void
hfs_touchtimes(struct hfsmount *hfsmp, struct cnode *cp)
{
	/* don't modify times if volume is read-only */
	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		cp->c_touch_acctime = FALSE;
		cp->c_touch_chgtime = FALSE;
		cp->c_touch_modtime = FALSE;
	}
	else if (hfsmp->hfs_flags & HFS_STANDARD) {
		/* HFS Standard doesn't support access times */
		cp->c_touch_acctime = FALSE;
	}
	/*
	 * Skip access time updates if:
	 *	. MNT_NOATIME is set
	 *	. a file system freeze is in progress
	 *	. a file system resize is in progress
	 *	. the vnode associated with this cnode is marked for rapid aging
	 */
	if (cp->c_touch_acctime) {
		if ((vfs_flags(hfsmp->hfs_mp) & MNT_NOATIME) ||
		    (hfsmp->hfs_freezing_proc != NULL) ||
		    (hfsmp->hfs_flags & HFS_RESIZE_IN_PROGRESS) ||
		    (cp->c_vp && vnode_israge(cp->c_vp)))
			cp->c_touch_acctime = FALSE;
	}
	if (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
		struct timeval tv;
		int touchvol = 0;

		microtime(&tv);

		if (cp->c_touch_acctime) {
			cp->c_atime = tv.tv_sec;
			/*
			 * When the access time is the only thing changing
			 * then make sure it's sufficiently newer before
			 * committing it to disk.
			 */
			if ((((u_int32_t)cp->c_atime - (u_int32_t)(cp)->c_attr.ca_atimeondisk) >
			      ATIME_ONDISK_ACCURACY)) {
				cp->c_flag |= C_MODIFIED;
			}
			cp->c_touch_acctime = FALSE;
		}
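		/*
		 * Net effect: the in-core atime always advances, but the
		 * catalog record is only marked C_MODIFIED once the on-disk
		 * copy is more than ATIME_ONDISK_ACCURACY seconds stale, so
		 * read-heavy workloads avoid a metadata write per access.
		 */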
		if (cp->c_touch_modtime) {
			cp->c_mtime = tv.tv_sec;
			cp->c_touch_modtime = FALSE;
			cp->c_flag |= C_MODIFIED;
			touchvol = 1;
			/*
			 * HFS dates that WE set must be adjusted for DST
			 */
			if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
				cp->c_mtime += 3600;
			}
		}
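		/*
		 * (HFS Standard stores catalog dates in local time rather
		 * than UTC, which is presumably why an hour is added above
		 * while DST is in effect.)
		 */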
		if (cp->c_touch_chgtime) {
			cp->c_ctime = tv.tv_sec;
			cp->c_touch_chgtime = FALSE;
			cp->c_flag |= C_MODIFIED;
			touchvol = 1;
		}

		/* Touch the volume modtime if needed */
		if (touchvol) {
			MarkVCBDirty(hfsmp);
			HFSTOVCB(hfsmp)->vcbLsMod = tv.tv_sec;
		}
	}
}
/*
 * Lock a cnode.
 */
int
hfs_lock(struct cnode *cp, enum hfslocktype locktype)
{
	void *thread = current_thread();

	if (cp->c_lockowner == thread) {
		/*
		 * Only the extents and bitmap files support lock recursion.
		 */
		if ((cp->c_fileid == kHFSExtentsFileID) ||
		    (cp->c_fileid == kHFSAllocationFileID)) {
			cp->c_syslockcount++;
		} else {
			panic("hfs_lock: locking against myself!");
		}
	} else if (locktype == HFS_SHARED_LOCK) {
		lck_rw_lock_shared(&cp->c_rwlock);
		cp->c_lockowner = HFS_SHARED_OWNER;
	} else /* HFS_EXCLUSIVE_LOCK */ {
		lck_rw_lock_exclusive(&cp->c_rwlock);
		cp->c_lockowner = thread;

		/*
		 * Only the extents and bitmap files support lock recursion.
		 */
		if ((cp->c_fileid == kHFSExtentsFileID) ||
		    (cp->c_fileid == kHFSAllocationFileID)) {
			cp->c_syslockcount = 1;
		}
	}
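	/*
	 * (c_syslockcount tracks recursive acquisitions for those two
	 * system files only; hfs_unlock below decrements it and returns
	 * early while it remains positive, so just the outermost unlock
	 * releases c_rwlock.)
	 */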
#ifdef HFS_CHECK_LOCK_ORDER
	/*
	 * Regular cnodes (non-system files) cannot be locked
	 * while holding the journal lock or a system file lock.
	 */
	if (!(cp->c_desc.cd_flags & CD_ISMETA) &&
	    ((cp->c_fileid > kHFSFirstUserCatalogNodeID) || (cp->c_fileid == kHFSRootFolderID))) {
		vnode_t vp = NULLVP;

		/* Find corresponding vnode. */
		if (cp->c_vp != NULLVP && VTOC(cp->c_vp) == cp) {
			vp = cp->c_vp;
		} else if (cp->c_rsrc_vp != NULLVP && VTOC(cp->c_rsrc_vp) == cp) {
			vp = cp->c_rsrc_vp;
		}
		if (vp != NULLVP) {
			struct hfsmount *hfsmp = VTOHFS(vp);

			if (hfsmp->jnl && (journal_owner(hfsmp->jnl) == thread)) {
				/* This will eventually be a panic here. */
				printf("hfs_lock: bad lock order (cnode after journal)\n");
			}
			if (hfsmp->hfs_catalog_cp && hfsmp->hfs_catalog_cp->c_lockowner == thread) {
				panic("hfs_lock: bad lock order (cnode after catalog)");
			}
			if (hfsmp->hfs_attribute_cp && hfsmp->hfs_attribute_cp->c_lockowner == thread) {
				panic("hfs_lock: bad lock order (cnode after attribute)");
			}
			if (hfsmp->hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == thread) {
				panic("hfs_lock: bad lock order (cnode after extents)");
			}
		}
	}
#endif /* HFS_CHECK_LOCK_ORDER */
	/*
	 * Skip cnodes that no longer exist (were deleted).
	 */
	if ((locktype != HFS_FORCE_LOCK) &&
	    ((cp->c_desc.cd_flags & CD_ISMETA) == 0) &&
	    (cp->c_flag & C_NOEXISTS)) {
		hfs_unlock(cp);
		return (ENOENT);
	}
	return (0);
}
/*
 * Lock a pair of cnodes.
 */
int
hfs_lockpair(struct cnode *cp1, struct cnode *cp2, enum hfslocktype locktype)
{
	struct cnode *first, *last;
	int error;

	/*
	 * If cnodes match then just lock one.
	 */
	if (cp1 == cp2) {
		return hfs_lock(cp1, locktype);
	}

	/*
	 * Lock in cnode address order.
	 */
	if (cp1 < cp2) {
		first = cp1;
		last = cp2;
	} else {
		first = cp2;
		last = cp1;
	}

	if ( (error = hfs_lock(first, locktype))) {
		return (error);
	}
	if ( (error = hfs_lock(last, locktype))) {
		hfs_unlock(first);
		return (error);
	}
	return (0);
}
/*
 * Check ordering of two cnodes. Return true if they are in order.
 */
static int
hfs_isordered(struct cnode *cp1, struct cnode *cp2)
{
	if (cp1 == NULL || cp2 == (struct cnode *)0xffffffff)
		return (1);
	if (cp2 == NULL || cp1 == (struct cnode *)0xffffffff)
		return (0);
	/*
	 * Locking order is cnode address order.
	 */
	return (cp1 < cp2);
}
/*
 * Acquire 4 cnode locks.
 *   - locked in cnode address order (lesser address first).
 *   - all or none of the locks are taken
 *   - only one lock taken per cnode (dup cnodes are skipped)
 *   - some of the cnode pointers may be null
 */
int
hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3,
             struct cnode *cp4, enum hfslocktype locktype, struct cnode **error_cnode)
{
	struct cnode *a[3];
	struct cnode *b[3];
	struct cnode *list[4];
	struct cnode *tmp;
	int i, j, k;
	int error;

	if (error_cnode) {
		*error_cnode = NULL;
	}
	if (hfs_isordered(cp1, cp2)) {
		a[0] = cp1; a[1] = cp2;
	} else {
		a[0] = cp2; a[1] = cp1;
	}
	if (hfs_isordered(cp3, cp4)) {
		b[0] = cp3; b[1] = cp4;
	} else {
		b[0] = cp4; b[1] = cp3;
	}
	a[2] = (struct cnode *)0xffffffff;  /* sentinel value */
	b[2] = (struct cnode *)0xffffffff;  /* sentinel value */
	/*
	 * Build the lock list, skipping over duplicates
	 */
	for (i = 0, j = 0, k = 0; (i < 2 || j < 2); ) {
		tmp = hfs_isordered(a[i], b[j]) ? a[i++] : b[j++];
		if (k == 0 || tmp != list[k-1])
			list[k++] = tmp;
	}
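	/*
	 * Illustrative example (addresses made up): with cp1 = 0x300,
	 * cp2 = 0x100, cp3 = 0x300 and cp4 = NULL, the pairs sort to
	 * a = {0x100, 0x300} and b = {NULL, 0x300}; the merge above then
	 * yields list = {NULL, 0x100, 0x300} with the duplicate 0x300
	 * dropped, and the NULL entry is skipped by the loop below.
	 */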
	/*
	 * Now we can lock using list[0 - k].
	 * Skip over NULL entries.
	 */
	for (i = 0; i < k; ++i) {
		if (list[i])
			if ((error = hfs_lock(list[i], locktype))) {
				/* Only stuff error_cnode if requested */
				if (error_cnode) {
					*error_cnode = list[i];
				}
				/* Drop any locks we acquired. */
				while (--i >= 0) {
					if (list[i])
						hfs_unlock(list[i]);
				}
				return (error);
			}
	}
	return (0);
}
/*
 * Unlock a cnode.
 */
void
hfs_unlock(struct cnode *cp)
{
	vnode_t rvp = NULLVP;
	vnode_t vp = NULLVP;
	u_int32_t c_flag;
	void *lockowner;

	/*
	 * Only the extents and bitmap files support lock recursion.
	 */
	if ((cp->c_fileid == kHFSExtentsFileID) ||
	    (cp->c_fileid == kHFSAllocationFileID)) {
		if (--cp->c_syslockcount > 0) {
			return;
		}
	}
	c_flag = cp->c_flag;
	cp->c_flag &= ~(C_NEED_DVNODE_PUT | C_NEED_RVNODE_PUT | C_NEED_DATA_SETSIZE | C_NEED_RSRC_SETSIZE);
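	/*
	 * The C_NEED_* bits are latched into the local c_flag and cleared
	 * while c_rwlock is still held; the matching ubc_setsize/vnode_put
	 * calls are made only after the lock is dropped below, so HFS is
	 * not re-entered with this cnode lock held (see the deferrals in
	 * hfs_vnop_inactive and hfs_getnewvnode).
	 */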
	if (c_flag & (C_NEED_DVNODE_PUT | C_NEED_DATA_SETSIZE)) {
		vp = cp->c_vp;
	}
	if (c_flag & (C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE)) {
		rvp = cp->c_rsrc_vp;
	}

	lockowner = cp->c_lockowner;
	if (lockowner == current_thread()) {
		cp->c_lockowner = NULL;
		lck_rw_unlock_exclusive(&cp->c_rwlock);
	} else {
		lck_rw_unlock_shared(&cp->c_rwlock);
	}

	/* Perform any vnode post processing after cnode lock is dropped. */
	if (vp) {
		if (c_flag & C_NEED_DATA_SETSIZE)
			ubc_setsize(vp, 0);
		if (c_flag & C_NEED_DVNODE_PUT)
			vnode_put(vp);
	}
	if (rvp) {
		if (c_flag & C_NEED_RSRC_SETSIZE)
			ubc_setsize(rvp, 0);
		if (c_flag & C_NEED_RVNODE_PUT)
			vnode_put(rvp);
	}
}
/*
 * Unlock a pair of cnodes.
 */
void
hfs_unlockpair(struct cnode *cp1, struct cnode *cp2)
{
	hfs_unlock(cp1);
	if (cp2 != cp1)
		hfs_unlock(cp2);
}
/*
 * Unlock a group of cnodes.
 */
void
hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4)
{
	struct cnode *list[4];
	int i, k = 0;

	if (cp1) {
		hfs_unlock(cp1);
		list[k++] = cp1;
	}
	if (cp2) {
		for (i = 0; i < k; ++i) {
			if (list[i] == cp2)
				goto skip2;
		}
		hfs_unlock(cp2);
		list[k++] = cp2;
	}
skip2:
	if (cp3) {
		for (i = 0; i < k; ++i) {
			if (list[i] == cp3)
				goto skip3;
		}
		hfs_unlock(cp3);
		list[k++] = cp3;
	}
skip3:
	if (cp4) {
		for (i = 0; i < k; ++i) {
			if (list[i] == cp4)
				return;
		}
		hfs_unlock(cp4);
	}
}
/*
 * Protect a cnode against a truncation.
 *
 * Used mainly by read/write since they don't hold the
 * cnode lock across calls to the cluster layer.
 *
 * The process doing a truncation must take the lock
 * exclusive. The read/write processes can take it
 * non-exclusive.
 */
void
hfs_lock_truncate(struct cnode *cp, int exclusive)
{
#ifdef HFS_CHECK_LOCK_ORDER
	if (cp->c_lockowner == current_thread())
		panic("hfs_lock_truncate: cnode %p locked!", cp);
#endif /* HFS_CHECK_LOCK_ORDER */

	if (exclusive)
		lck_rw_lock_exclusive(&cp->c_truncatelock);
	else
		lck_rw_lock_shared(&cp->c_truncatelock);
}
void
hfs_unlock_truncate(struct cnode *cp, int exclusive)
{
	if (exclusive) {
		lck_rw_unlock_exclusive(&cp->c_truncatelock);
	} else {
		lck_rw_unlock_shared(&cp->c_truncatelock);
	}
}
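/*
 * Illustrative pairing (mirrors hfs_vnop_inactive above): a truncating
 * path brackets its work with
 *
 *	hfs_lock_truncate(cp, TRUE);
 *	...
 *	hfs_unlock_truncate(cp, TRUE);
 *
 * while readers and writers pass FALSE to take c_truncatelock shared.
 */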