/*
 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/quota.h>
#include <sys/kdebug.h>

#include <kern/locks.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <hfs/hfs.h>
#include <hfs/hfs_catalog.h>
#include <hfs/hfs_cnode.h>
#include <hfs/hfs_quota.h>
extern lck_attr_t *  hfs_lock_attr;
extern lck_grp_t *  hfs_mutex_group;
extern lck_grp_t *  hfs_rwlock_group;
static int  hfs_filedone(struct vnode *vp, vfs_context_t context);

static void hfs_reclaim_cnode(struct cnode *);

static int  hfs_valid_cnode(struct hfsmount *, struct vnode *, struct componentname *, cnid_t);

static int  hfs_isordered(struct cnode *, struct cnode *);

int hfs_vnop_inactive(struct vnop_inactive_args *);

int hfs_vnop_reclaim(struct vnop_reclaim_args *);
/*
 * Last reference to a cnode.  If necessary, write or delete it.
 */
__private_extern__
int
hfs_vnop_inactive(struct vnop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct hfsmount *hfsmp = VTOHFS(vp);
	struct proc *p = vfs_context_proc(ap->a_context);
	int error = 0;
	int recycle = 0;
	int forkcount = 0;
	int truncated = 0;
	int started_tr = 0;
	int took_trunc_lock = 0;
	cat_cookie_t cookie;
	int cat_reserve = 0;
	int lockflags;
	enum vtype v_type;

	v_type = vnode_vtype(vp);
	cp = VTOC(vp);
	if ((hfsmp->hfs_flags & HFS_READ_ONLY) || vnode_issystem(vp) ||
	    (hfsmp->hfs_freezing_proc == p)) {
		return (0);
	}

	/*
	 * Ignore nodes related to stale file handles.
	 */
	if (cp->c_mode == 0) {
		vnode_recycle(vp);
		return (0);
	}
	if ((v_type == VREG) &&
	    (ISSET(cp->c_flag, C_DELETED) || VTOF(vp)->ff_blocks)) {
		hfs_lock_truncate(cp, TRUE);
		took_trunc_lock = 1;
	}
	/*
	 * We do the ubc_setsize before we take the cnode
	 * lock and before the hfs_truncate (since we'll
	 * be inside a transaction).
	 */
	if ((v_type == VREG || v_type == VLNK) &&
	    (cp->c_flag & C_DELETED) &&
	    (VTOF(vp)->ff_blocks != 0)) {
		ubc_setsize(vp, 0);
	}

	(void) hfs_lock(cp, HFS_FORCE_LOCK);
	if (v_type == VREG && !ISSET(cp->c_flag, C_DELETED) && VTOF(vp)->ff_blocks) {
		hfs_filedone(vp, ap->a_context);
	}
	/*
	 * Remove any directory hints
	 */
	if (v_type == VDIR)
		hfs_reldirhints(cp, 0);

	if (cp->c_datafork)
		++forkcount;
	if (cp->c_rsrcfork)
		++forkcount;
	/* If needed, get rid of any fork's data for a deleted file */
	if ((v_type == VREG || v_type == VLNK) && (cp->c_flag & C_DELETED)) {
		if (VTOF(vp)->ff_blocks != 0) {
			// start the transaction out here so that
			// the truncate and the removal of the file
			// are all in one transaction.  otherwise
			// because this cnode is marked for deletion
			// the truncate won't cause the catalog entry
			// to get updated which means that we could
			// free blocks but still keep a reference to
			// them in the catalog entry and then double
			// free them later.
			//
			// if (hfs_start_transaction(hfsmp) != 0) {
			//	error = EINVAL;
			//	goto out;
			// }

			/*
			 * Since we're already inside a transaction,
			 * tell hfs_truncate to skip the ubc_setsize.
			 */
			error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ap->a_context);
			if (error)
				goto out;
			truncated = 1;
		}
	}
	/*
	 * Check for a postponed deletion.
	 * (only delete cnode when the last fork goes inactive)
	 */
	if ((cp->c_flag & C_DELETED) && (forkcount <= 1)) {
		/*
		 * Mark cnode in transit so that no one can get this
		 * cnode from cnode hash.
		 */
		hfs_chash_mark_in_transit(cp);

		cp->c_flag &= ~C_DELETED;
		cp->c_flag |= C_NOEXISTS;	// XXXdbg
		recycle = 1;
		if (started_tr == 0) {
			if (hfs_start_transaction(hfsmp) != 0) {
				error = EINVAL;
				goto out;
			}
			started_tr = 1;
		}

		/*
		 * Reserve some space in the Catalog file.
		 */
		if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {
			goto out;
		}
		cat_reserve = 1;
		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);

		if (cp->c_blocks > 0)
			printf("hfs_inactive: attempting to delete a non-empty file!");
		//
		// release the name pointer in the descriptor so that
		// cat_delete() will use the file-id to do the deletion.
		// in the case of hard links this is imperative (in the
		// case of regular files the fileid and cnid are the
		// same so it doesn't matter).
		//
		cat_releasedesc(&cp->c_desc);

		/*
		 * The descriptor name may be zero,
		 * in which case the fileid is used.
		 */
		error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);
		if (error && truncated && (error != ENXIO))
			printf("hfs_inactive: couldn't delete a truncated file!");
		/* Update HFS Private Data dir */
		if (error == 0) {
			hfsmp->hfs_privdir_attr.ca_entries--;
			(void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc,
				&hfsmp->hfs_privdir_attr, NULL, NULL);
		}
		/* Delete any attributes, ignore errors */
		(void) hfs_removeallattr(hfsmp, cp->c_fileid);

		hfs_systemfile_unlock(hfsmp, lockflags);
		if (error)
			goto out;

#if QUOTA
		(void)hfs_chkiq(cp, -1, NOCRED, 0);
#endif /* QUOTA */
		cp->c_mode = 0;
		cp->c_flag |= C_NOEXISTS;
		cp->c_touch_chgtime = TRUE;
		cp->c_touch_modtime = TRUE;

		if (error == 0)
			hfs_volupdate(hfsmp, VOL_RMFILE, 0);
	}
	if ((cp->c_flag & C_MODIFIED) ||
	    cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
		hfs_update(vp, 0);
	}
out:
	if (cat_reserve)
		cat_postflight(hfsmp, &cookie, p);
	// XXXdbg - have to do this because a goto could have come here
	if (started_tr) {
		hfs_end_transaction(hfsmp);
		started_tr = 0;
	}

	hfs_unlock(cp);

	if (took_trunc_lock)
		hfs_unlock_truncate(cp);
	/*
	 * If we are done with the vnode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (cp->c_mode == 0 || recycle)
		vnode_recycle(vp);

	return (error);
}
/*
 * File clean-up (zero fill and shrink peof).
 */
static int
hfs_filedone(struct vnode *vp, vfs_context_t context)
{
	struct cnode *cp;
	struct filefork *fp;
	struct hfsmount *hfsmp;
	off_t leof;
	u_long blks, blocksize;

	cp = VTOC(vp);
	fp = VTOF(vp);
	hfsmp = VTOHFS(vp);
	leof = fp->ff_size;
	if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (fp->ff_blocks == 0))
		return (0);

	hfs_unlock(cp);
	(void) cluster_push(vp, IO_CLOSE);
	hfs_lock(cp, HFS_FORCE_LOCK);
	/*
	 * Explicitly zero out the areas of file
	 * that are currently marked invalid.
	 */
	while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
		struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
		off_t start = invalid_range->rl_start;
		off_t end = invalid_range->rl_end;

		/* The range about to be written must be validated
		 * first, so that VNOP_BLOCKMAP() will return the
		 * appropriate mapping for the cluster code:
		 */
		rl_remove(start, end, &fp->ff_invalidranges);

		hfs_unlock(cp);
		(void) cluster_write(vp, (struct uio *) 0,
				leof, end + 1, start, (off_t)0,
				IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
		hfs_lock(cp, HFS_FORCE_LOCK);
		cp->c_flag |= C_MODIFIED;
	}
	cp->c_flag &= ~C_ZFWANTSYNC;
	cp->c_zftimeout = 0;
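
	/*
	 * Note: an invalid range covers bytes that were logically added to
	 * the file (e.g. by extending it) but never given zeroed backing
	 * store.  Removing the range before the cluster_write() above is
	 * what lets VNOP_BLOCKMAP() hand the cluster layer a real mapping
	 * for the zero-fill I/O.
	 */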
	blocksize = VTOVCB(vp)->blockSize;
	blks = leof / blocksize;
	if (((off_t)blks * (off_t)blocksize) != leof)
		blks++;
	/*
	 * Shrink the peof to the smallest size necessary to contain the leof.
	 */
	if (blks < fp->ff_blocks)
		(void) hfs_truncate(vp, leof, IO_NDELAY, 0, context);
	hfs_unlock(cp);
	(void) cluster_push(vp, IO_CLOSE);
	hfs_lock(cp, HFS_FORCE_LOCK);
	/*
	 * If the hfs_truncate didn't happen to flush the vnode's
	 * information out to disk, force it to be updated now that
	 * all invalid ranges have been zero-filled and validated:
	 */
	if (cp->c_flag & C_MODIFIED) {
		hfs_update(vp, 0);
	}
	return (0);
}
/*
 * Reclaim a cnode so that it can be used for other purposes.
 */
__private_extern__
int
hfs_vnop_reclaim(struct vnop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp = NULL;
	struct filefork *altfp = NULL;
	int reclaim_cnode = 0;
	(void) hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
	cp = VTOC(vp);

	/*
	 * Keep track of an inactive hot file.
	 */
	if (!vnode_isdir(vp) && !vnode_issystem(vp))
		(void) hfs_addhotfile(vp);

	vnode_removefsref(vp);
	/*
	 * Find file fork for this vnode (if any)
	 * Also check if another fork is active
	 */
	if (cp->c_vp == vp) {
		fp = cp->c_datafork;
		altfp = cp->c_rsrcfork;

		cp->c_datafork = NULL;
		cp->c_vp = NULL;
	} else if (cp->c_rsrc_vp == vp) {
		fp = cp->c_rsrcfork;
		altfp = cp->c_datafork;

		cp->c_rsrcfork = NULL;
		cp->c_rsrc_vp = NULL;
	} else {
		panic("hfs_vnop_reclaim: vp points to wrong cnode\n");
	}
	/*
	 * On the last fork, remove the cnode from its hash chain.
	 */
	if (altfp == NULL) {
		/* If we can't remove it then the cnode must persist! */
		if (hfs_chashremove(cp) == 0)
			reclaim_cnode = 1;
		/*
		 * Remove any directory hints
		 */
		if (vnode_isdir(vp)) {
			hfs_reldirhints(cp, 0);
		}
	}
	/* Release the file fork and related data */
	if (fp) {
		/* Dump cached symlink data */
		if (vnode_islnk(vp) && (fp->ff_symlinkptr != NULL)) {
			FREE(fp->ff_symlinkptr, M_TEMP);
		}
		FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
	}
	/*
	 * If there was only one active fork then we can release the cnode.
	 */
	if (reclaim_cnode) {
		hfs_chashwakeup(cp, H_ALLOC | H_TRANSIT);
		hfs_reclaim_cnode(cp);
	} else /* cnode in use */ {
		hfs_unlock(cp);
	}

	vnode_clearfsnode(vp);
	return (0);
}
extern int (**hfs_vnodeop_p) (void *);
extern int (**hfs_specop_p)  (void *);
extern int (**hfs_fifoop_p)  (void *);
/*
 * hfs_getnewvnode - get new default vnode
 *
 * The vnode is returned with an iocount and the cnode locked
 */
__private_extern__
int
hfs_getnewvnode(
	struct hfsmount *hfsmp,
	struct vnode *dvp,
	struct componentname *cnp,
	struct cat_desc *descp,
	int wantrsrc,
	struct cat_attr *attrp,
	struct cat_fork *forkp,
	struct vnode **vpp)
{
	struct mount *mp = HFSTOVFS(hfsmp);
	struct vnode *vp = NULL;
	struct vnode **cvpp;
	struct vnode *tvp = NULLVP;
	struct cnode *cp = NULL;
	struct filefork *fp = NULL;
	int retval;
	int issystemfile;
	struct vnode_fsparam vfsp;
	enum vtype vtype;
#if QUOTA
	int i;
#endif /* QUOTA */
	if (attrp->ca_fileid == 0) {
		*vpp = NULL;
		return (ENOENT);
	}

#if !FIFO
	if (IFTOVT(attrp->ca_mode) == VFIFO) {
		*vpp = NULL;
		return (ENOTSUP);
	}
#endif
	vtype = IFTOVT(attrp->ca_mode);
	issystemfile = (descp->cd_flags & CD_ISMETA) && (vtype == VREG);
	/*
	 * Get a cnode (new or existing)
	 * skip getting the cnode lock if we are getting resource fork (wantrsrc == 2)
	 */
	cp = hfs_chash_getcnode(hfsmp->hfs_raw_dev, attrp->ca_fileid, vpp, wantrsrc, (wantrsrc == 2));
	/* Hardlinks may need an updated catalog descriptor */
	if ((cp->c_flag & C_HARDLINK) && descp->cd_nameptr && descp->cd_namelen > 0) {
		replace_desc(cp, descp);
	}
	/* Check if we found a matching vnode */
	if (*vpp != NULL)
		return (0);
	/*
	 * If this is a new cnode then initialize it.
	 */
	if (ISSET(cp->c_hflag, H_ALLOC)) {
		lck_rw_init(&cp->c_truncatelock, hfs_rwlock_group, hfs_lock_attr);

		/* Make sure it's still valid (i.e. exists on disk). */
		if (!hfs_valid_cnode(hfsmp, dvp, (wantrsrc ? NULL : cnp), cp->c_fileid)) {
			hfs_chash_abort(cp);
			hfs_reclaim_cnode(cp);
			*vpp = NULL;
			return (ENOENT);
		}
		bcopy(attrp, &cp->c_attr, sizeof(struct cat_attr));
		bcopy(descp, &cp->c_desc, sizeof(struct cat_desc));

		/* The name was inherited so clear descriptor state... */
		descp->cd_namelen = 0;
		descp->cd_nameptr = NULL;
		descp->cd_flags &= ~CD_HASBUF;
		/* Tag hardlinks */
		if (IFTOVT(cp->c_mode) == VREG &&
		    (descp->cd_cnid != attrp->ca_fileid)) {
			cp->c_flag |= C_HARDLINK;
		}
		/* Take one dev reference for each non-directory cnode */
		if (IFTOVT(cp->c_mode) != VDIR) {
			cp->c_devvp = hfsmp->hfs_devvp;
			vnode_ref(cp->c_devvp);
		}
#if QUOTA
		for (i = 0; i < MAXQUOTAS; i++)
			cp->c_dquot[i] = NODQUOT;
#endif /* QUOTA */
	}
	if (IFTOVT(cp->c_mode) == VDIR) {
		if (cp->c_vp != NULL)
			panic("hfs_getnewvnode: orphaned vnode (data)");
		cvpp = &cp->c_vp;
	} else {
		if (forkp && attrp->ca_blocks < forkp->cf_blocks)
			panic("hfs_getnewvnode: bad ca_blocks (too small)");
		/*
		 * Allocate and initialize a file fork...
		 */
		MALLOC_ZONE(fp, struct filefork *, sizeof(struct filefork),
			M_HFSFORK, M_WAITOK);
		fp->ff_cp = cp;
		if (forkp)
			bcopy(forkp, &fp->ff_data, sizeof(struct cat_fork));
		else
			bzero(&fp->ff_data, sizeof(struct cat_fork));
		rl_init(&fp->ff_invalidranges);
		fp->ff_sysfileinfo = 0;
		if (wantrsrc) {
			if (cp->c_rsrcfork != NULL)
				panic("hfs_getnewvnode: orphaned rsrc fork");
			if (cp->c_rsrc_vp != NULL)
				panic("hfs_getnewvnode: orphaned vnode (rsrc)");
			cp->c_rsrcfork = fp;
			cvpp = &cp->c_rsrc_vp;
			if ( (tvp = cp->c_vp) != NULLVP )
				cp->c_flag |= C_NEED_DVNODE_PUT;
		} else {
			if (cp->c_datafork != NULL)
				panic("hfs_getnewvnode: orphaned data fork");
			if (cp->c_vp != NULL)
				panic("hfs_getnewvnode: orphaned vnode (data)");
			cp->c_datafork = fp;
			cvpp = &cp->c_vp;
			if ( (tvp = cp->c_rsrc_vp) != NULLVP )
				cp->c_flag |= C_NEED_RVNODE_PUT;
		}
	}
	if (tvp != NULLVP) {
		/*
		 * grab an iocount on the vnode we weren't
		 * interested in (i.e. we want the resource fork
		 * but the cnode already has the data fork)
		 * to prevent it from being
		 * recycled by us when we call vnode_create
		 * which will result in a deadlock when we
		 * try to take the cnode lock in hfs_vnop_fsync or
		 * hfs_vnop_reclaim... vnode_get can be called here
		 * because we already hold the cnode lock which will
		 * prevent the vnode from changing identity until
		 * we drop it... vnode_get will not block waiting for
		 * a change of state... however, it will return an
		 * error if the current iocount == 0 and we've already
		 * started to terminate the vnode... we don't need/want to
		 * grab an iocount in that case since we can't cause
		 * the filesystem to be re-entered on this thread for this vp
		 *
		 * the matching vnode_put will happen in hfs_unlock
		 * after we've dropped the cnode lock
		 */
		if ( vnode_get(tvp) != 0)
			cp->c_flag &= ~(C_NEED_RVNODE_PUT | C_NEED_DVNODE_PUT);
	}
	vfsp.vnfs_mp = mp;
	vfsp.vnfs_vtype = vtype;
	vfsp.vnfs_str = "hfs";
	vfsp.vnfs_dvp = dvp;
	vfsp.vnfs_fsnode = cp;
	vfsp.vnfs_cnp = cnp;
	if (vtype == VFIFO)
		vfsp.vnfs_vops = hfs_fifoop_p;
	else if (vtype == VBLK || vtype == VCHR)
		vfsp.vnfs_vops = hfs_specop_p;
	else
		vfsp.vnfs_vops = hfs_vnodeop_p;

	if (vtype == VBLK || vtype == VCHR)
		vfsp.vnfs_rdev = attrp->ca_rdev;
	else
		vfsp.vnfs_rdev = 0;
	if (forkp)
		vfsp.vnfs_filesize = forkp->cf_size;
	else
		vfsp.vnfs_filesize = 0;
	if (dvp && cnp && (cnp->cn_flags & MAKEENTRY))
		vfsp.vnfs_flags = 0;
	else
		vfsp.vnfs_flags = VNFS_NOCACHE;
	/* Tag system files */
	vfsp.vnfs_marksystem = issystemfile;

	/* Tag root directory */
	if (descp->cd_cnid == kHFSRootFolderID)
		vfsp.vnfs_markroot = 1;
	else
		vfsp.vnfs_markroot = 0;
	if ((retval = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, cvpp))) {
		if (fp) {
			if (fp == cp->c_datafork)
				cp->c_datafork = NULL;
			else
				cp->c_rsrcfork = NULL;

			FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
		}
		/*
		 * If this is a newly created cnode or a vnode reclaim
		 * occurred during the attachment, then cleanup the cnode.
		 */
		if ((cp->c_vp == NULL) && (cp->c_rsrc_vp == NULL)) {
			hfs_chash_abort(cp);
			hfs_reclaim_cnode(cp);
		} else {
			hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
			hfs_unlock(cp);
		}
		*vpp = NULL;
		return (retval);
	}
	vp = *cvpp;
	vnode_settag(vp, VT_HFS);
	if (cp->c_flag & C_HARDLINK)
		vnode_set_hard_link(vp);
	hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);

	/*
	 * Stop tracking an active hot file.
	 */
	if (!vnode_isdir(vp) && !vnode_issystem(vp))
		(void) hfs_removehotfile(vp);

	*vpp = vp;
	return (0);
}
static void
hfs_reclaim_cnode(struct cnode *cp)
{
#if QUOTA
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (cp->c_dquot[i] != NODQUOT) {
			dqreclaim(cp->c_dquot[i]);
			cp->c_dquot[i] = NODQUOT;
		}
	}
#endif /* QUOTA */
	if (cp->c_devvp) {
		struct vnode *tmp_vp = cp->c_devvp;

		cp->c_devvp = NULL;
		vnode_rele(tmp_vp);
	}
	/*
	 * If the descriptor has a name then release it
	 */
	if (cp->c_desc.cd_flags & CD_HASBUF) {
		char *nameptr;

		nameptr = cp->c_desc.cd_nameptr;
		cp->c_desc.cd_nameptr = 0;
		cp->c_desc.cd_flags &= ~CD_HASBUF;
		cp->c_desc.cd_namelen = 0;
		vfs_removename(nameptr);
	}
	lck_rw_destroy(&cp->c_rwlock, hfs_rwlock_group);
	lck_rw_destroy(&cp->c_truncatelock, hfs_rwlock_group);
	bzero(cp, sizeof(struct cnode));
	FREE_ZONE(cp, sizeof(struct cnode), M_HFSNODE);
}
static int
hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, cnid_t cnid)
{
	struct cat_attr attr;
	struct cat_desc cndesc;
	int stillvalid = 0;
	int lockflags;
	/* System files are always valid */
	if (cnid < kHFSFirstUserCatalogNodeID)
		return (1);

	/* XXX optimization: check write count in dvp */

	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
	if (dvp && cnp) {
		bzero(&cndesc, sizeof(cndesc));
		cndesc.cd_nameptr = cnp->cn_nameptr;
		cndesc.cd_namelen = cnp->cn_namelen;
		cndesc.cd_parentcnid = VTOC(dvp)->c_cnid;
		cndesc.cd_hint = VTOC(dvp)->c_childhint;

		if ((cat_lookup(hfsmp, &cndesc, 0, NULL, &attr, NULL, NULL) == 0) &&
		    (cnid == attr.ca_fileid)) {
			stillvalid = 1;
		}
	} else {
		if (cat_idlookup(hfsmp, cnid, NULL, NULL, NULL) == 0) {
			stillvalid = 1;
		}
	}
	hfs_systemfile_unlock(hfsmp, lockflags);

	return (stillvalid);
}
/*
 * Touch cnode times based on c_touch_xxx flags
 *
 * cnode must be locked exclusive
 *
 * This will also update the volume modify time
 */
__private_extern__
void
hfs_touchtimes(struct hfsmount *hfsmp, struct cnode *cp)
{
	/* HFS Standard doesn't support access times */
	if (hfsmp->hfs_flags & HFS_STANDARD) {
		cp->c_touch_acctime = FALSE;
	}
	if (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
		struct timeval tv;
		int touchvol = 0;

		microtime(&tv);
		if (cp->c_touch_acctime) {
			cp->c_atime = tv.tv_sec;
			/*
			 * When the access time is the only thing changing
			 * then make sure it's sufficiently newer before
			 * committing it to disk.
			 */
			if ((((u_int32_t)cp->c_atime - (u_int32_t)(cp)->c_attr.ca_atimeondisk) >
			      ATIME_ONDISK_ACCURACY)) {
				cp->c_flag |= C_MODIFIED;
			}
			cp->c_touch_acctime = FALSE;
		}
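		/*
		 * Example (a sketch): if ATIME_ONDISK_ACCURACY is N seconds,
		 * a burst of reads only bumps the in-memory c_atime.  Setting
		 * C_MODIFIED, and hence rewriting the on-disk record, happens
		 * at most once per N-second window.
		 */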
		if (cp->c_touch_modtime) {
			cp->c_mtime = tv.tv_sec;
			cp->c_touch_modtime = FALSE;
			cp->c_flag |= C_MODIFIED;
			touchvol = 1;
			/*
			 * HFS dates that WE set must be adjusted for DST
			 */
			if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
				cp->c_mtime += 3600;
			}
		}
		if (cp->c_touch_chgtime) {
			cp->c_ctime = tv.tv_sec;
			cp->c_touch_chgtime = FALSE;
			cp->c_flag |= C_MODIFIED;
			touchvol = 1;
		}
		/* Touch the volume modtime if needed */
		if (touchvol) {
			HFSTOVCB(hfsmp)->vcbFlags |= 0xFF00;
			HFSTOVCB(hfsmp)->vcbLsMod = tv.tv_sec;
		}
	}
}
/*
 * Lock a cnode.
 */
__private_extern__
int
hfs_lock(struct cnode *cp, enum hfslocktype locktype)
{
	void * thread = current_thread();
	/* System files need to keep track of owner */
	if ((cp->c_fileid < kHFSFirstUserCatalogNodeID) &&
	    (cp->c_fileid > kHFSRootFolderID) &&
	    (locktype != HFS_SHARED_LOCK)) {
		/*
		 * The extents and bitmap file locks support
		 * recursion and are always taken exclusive.
		 */
		if (cp->c_fileid == kHFSExtentsFileID ||
		    cp->c_fileid == kHFSAllocationFileID) {
			if (cp->c_lockowner == thread) {
				cp->c_syslockcount++;
			} else {
				lck_rw_lock_exclusive(&cp->c_rwlock);
				cp->c_lockowner = thread;
				cp->c_syslockcount = 1;
			}
		} else {
			lck_rw_lock_exclusive(&cp->c_rwlock);
			cp->c_lockowner = thread;
		}
	} else if (locktype == HFS_SHARED_LOCK) {
		lck_rw_lock_shared(&cp->c_rwlock);
		cp->c_lockowner = HFS_SHARED_OWNER;
	} else {
		lck_rw_lock_exclusive(&cp->c_rwlock);
		cp->c_lockowner = thread;
	}
	/*
	 * Skip cnodes that no longer exist (were deleted).
	 */
	if ((locktype != HFS_FORCE_LOCK) &&
	    ((cp->c_desc.cd_flags & CD_ISMETA) == 0) &&
	    (cp->c_flag & C_NOEXISTS)) {
		cp->c_lockowner = NULL;
		lck_rw_done(&cp->c_rwlock);
		return (ENOENT);
	}
	return (0);
}
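
/*
 * Typical caller pattern (a sketch, not code from this file): take the
 * cnode lock around metadata changes and drop it before anything that
 * can block in the cluster layer:
 *
 *	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)))
 *		return (error);
 *	cp->c_touch_modtime = TRUE;	// ... modify the cnode ...
 *	hfs_unlock(cp);
 */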
/*
 * Lock a pair of cnodes.
 */
__private_extern__
int
hfs_lockpair(struct cnode *cp1, struct cnode *cp2, enum hfslocktype locktype)
{
	struct cnode *first, *last;
	int error;

	/*
	 * If cnodes match then just lock one.
	 */
	if (cp1 == cp2) {
		return hfs_lock(cp1, locktype);
	}
	/*
	 * Lock in cnode parent-child order (if there is a relationship);
	 * otherwise lock in cnode address order.
	 */
	if ((IFTOVT(cp1->c_mode) == VDIR) && (cp1->c_fileid == cp2->c_parentcnid)) {
		first = cp1;
		last = cp2;
	} else if (cp1 < cp2) {
		first = cp1;
		last = cp2;
	} else {
		first = cp2;
		last = cp1;
	}

	if ( (error = hfs_lock(first, locktype))) {
		return (error);
	}
	if ( (error = hfs_lock(last, locktype))) {
		hfs_unlock(first);
		return (error);
	}
	return (0);
}
/*
 * Check ordering of two cnodes. Return true if they are in order.
 */
static int
hfs_isordered(struct cnode *cp1, struct cnode *cp2)
{
	if (cp1 == cp2)
		return (0);
	if (cp1 == NULL || cp2 == (struct cnode *)0xffffffff)
		return (1);
	if (cp2 == NULL || cp1 == (struct cnode *)0xffffffff)
		return (0);
	if (cp1->c_fileid == cp2->c_parentcnid)
		return (1);  /* cp1 is the parent and should go first */
	if (cp2->c_fileid == cp1->c_parentcnid)
		return (0);  /* cp1 is the child and should go last */

	return (cp1 < cp2);  /* fall-back is to use address order */
}
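
/*
 * Note on the magic values above: NULL sorts before everything and
 * (struct cnode *)0xffffffff sorts after everything, which lets
 * hfs_lockfour() pad its two-entry arrays with the 0xffffffff sentinel
 * and merge them without bounds checks.
 */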
/*
 * Acquire 4 cnode locks.
 *   - locked in cnode parent-child order (if there is a relationship)
 *     otherwise lock in cnode address order (lesser address first).
 *   - all or none of the locks are taken
 *   - only one lock taken per cnode (dup cnodes are skipped)
 *   - some of the cnode pointers may be null
 */
__private_extern__
int
hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3,
             struct cnode *cp4, enum hfslocktype locktype)
{
	struct cnode * a[3];
	struct cnode * b[3];
	struct cnode * list[4];
	struct cnode * tmp;
	int i, j, k;
	int error;
	if (hfs_isordered(cp1, cp2)) {
		a[0] = cp1; a[1] = cp2;
	} else {
		a[0] = cp2; a[1] = cp1;
	}
	if (hfs_isordered(cp3, cp4)) {
		b[0] = cp3; b[1] = cp4;
	} else {
		b[0] = cp4; b[1] = cp3;
	}
	a[2] = (struct cnode *)0xffffffff;  /* sentinel value */
	b[2] = (struct cnode *)0xffffffff;  /* sentinel value */
	/*
	 * Build the lock list, skipping over duplicates
	 */
	for (i = 0, j = 0, k = 0; (i < 2 || j < 2); ) {
		tmp = hfs_isordered(a[i], b[j]) ? a[i++] : b[j++];
		if (k == 0 || tmp != list[k-1])
			list[k++] = tmp;
	}
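
	/*
	 * Worked example (hypothetical, unrelated cnodes at addresses
	 * 0x10, 0x20, 0x30): with a = {0x10, 0x30} and b = {0x20, 0x30},
	 * the merge visits 0x10, 0x20, 0x30, 0x30 and the duplicate test
	 * leaves list = {0x10, 0x20, 0x30} with k == 3.  The sentinels
	 * ensure an exhausted array always loses the comparison.
	 */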
	/*
	 * Now we can lock using list[0 - k].
	 * Skip over NULL entries.
	 */
	for (i = 0; i < k; ++i) {
		if (list[i])
			if ((error = hfs_lock(list[i], locktype))) {
				/* Drop any locks we acquired. */
				while (--i >= 0) {
					if (list[i])
						hfs_unlock(list[i]);
				}
				return (error);
			}
	}
	return (0);
}
/*
 * Unlock a cnode.
 */
__private_extern__
void
hfs_unlock(struct cnode *cp)
{
	vnode_t rvp = NULLVP;
	vnode_t vp = NULLVP;
	int c_flag;
	/* System files need to keep track of owner */
	if ((cp->c_fileid < kHFSFirstUserCatalogNodeID) &&
	    (cp->c_fileid > kHFSRootFolderID) &&
	    (cp->c_datafork != NULL)) {
		/*
		 * The extents and bitmap file locks support
		 * recursion and are always taken exclusive.
		 */
		if (cp->c_fileid == kHFSExtentsFileID ||
		    cp->c_fileid == kHFSAllocationFileID) {
			if (--cp->c_syslockcount > 0) {
				return;
			}
		}
	}
	c_flag = cp->c_flag;
	cp->c_flag &= ~(C_NEED_DVNODE_PUT | C_NEED_RVNODE_PUT | C_NEED_DATA_SETSIZE | C_NEED_RSRC_SETSIZE);
	if (c_flag & (C_NEED_DVNODE_PUT | C_NEED_DATA_SETSIZE)) {
		vp = cp->c_vp;
	}
	if (c_flag & (C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE)) {
		rvp = cp->c_rsrc_vp;
	}

	cp->c_lockowner = NULL;
	lck_rw_done(&cp->c_rwlock);
	/* Perform any vnode post processing after cnode lock is dropped. */
	if (vp) {
		if (c_flag & C_NEED_DATA_SETSIZE)
			ubc_setsize(vp, 0);
		if (c_flag & C_NEED_DVNODE_PUT)
			vnode_put(vp);
	}
	if (rvp) {
		if (c_flag & C_NEED_RSRC_SETSIZE)
			ubc_setsize(rvp, 0);
		if (c_flag & C_NEED_RVNODE_PUT)
			vnode_put(rvp);
	}
}
/*
 * Unlock a pair of cnodes.
 */
__private_extern__
void
hfs_unlockpair(struct cnode *cp1, struct cnode *cp2)
{
	hfs_unlock(cp1);
	if (cp2 != cp1)
		hfs_unlock(cp2);
}
/*
 * Unlock a group of cnodes.
 */
__private_extern__
void
hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4)
{
	struct cnode * list[4];
	int i, k = 0;

	if (cp1) {
		hfs_unlock(cp1);
		list[k++] = cp1;
	}
	if (cp2) {
		for (i = 0; i < k; ++i) {
			if (list[i] == cp2)
				goto skip2;
		}
		hfs_unlock(cp2);
		list[k++] = cp2;
	}
skip2:
	if (cp3) {
		for (i = 0; i < k; ++i) {
			if (list[i] == cp3)
				goto skip3;
		}
		hfs_unlock(cp3);
		list[k++] = cp3;
	}
skip3:
	if (cp4) {
		for (i = 0; i < k; ++i) {
			if (list[i] == cp4)
				return;
		}
		hfs_unlock(cp4);
	}
}
/*
 * Protect a cnode against a truncation.
 *
 * Used mainly by read/write since they don't hold the
 * cnode lock across calls to the cluster layer.
 *
 * The process doing a truncation must take the lock
 * exclusive. The read/write processes can take it
 * non-exclusive.
 */
__private_extern__
void
hfs_lock_truncate(struct cnode *cp, int exclusive)
{
	if (cp->c_lockowner == current_thread())
		panic("hfs_lock_truncate: cnode 0x%08x locked!", cp);

	if (exclusive)
		lck_rw_lock_exclusive(&cp->c_truncatelock);
	else
		lck_rw_lock_shared(&cp->c_truncatelock);
}
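
/*
 * Intended pairing (a sketch of typical use, not code from this file):
 *
 *	hfs_lock_truncate(cp, FALSE);	// shared: concurrent I/O is fine
 *	// ...cluster_read()/cluster_write() without the cnode lock...
 *	hfs_unlock_truncate(cp);
 *
 * A truncating thread passes TRUE so it first drains all such I/O.
 */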
__private_extern__
void
hfs_unlock_truncate(struct cnode *cp)
{
	lck_rw_done(&cp->c_truncatelock);
}