/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/quota.h>
#include <sys/ubc.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>
#include <vfs/vfs_support.h>
#include <machine/spl.h>

#include <sys/kdebug.h>

#include "hfs.h"
#include "hfs_catalog.h"
#include "hfs_cnode.h"
#include "hfs_lockf.h"
#include "hfs_mount.h"
#include "hfs_quota.h"
#include "hfs_endian.h"

#include "hfscommon/headers/BTreesInternal.h"
#include "hfscommon/headers/FileMgrInternal.h"
57 #define MAKE_DELETED_NAME(NAME,FID) \
58 (void) sprintf((NAME), "%s%d", HFS_DELETE_PREFIX, (FID))
60 #define KNDETACH_VNLOCKED 0x00000001
62 #define CARBON_TEMP_DIR_NAME "Cleanup At Startup"
65 /* Global vfs data structures for hfs */
68 extern unsigned long strtoul(const char *, char **, int);
70 extern int groupmember(gid_t gid
, struct ucred
*cred
);
72 static int hfs_makenode(int mode
, struct vnode
*dvp
, struct vnode
**vpp
,
73 struct componentname
*cnp
);
75 static int hfs_vgetrsrc(struct hfsmount
*hfsmp
, struct vnode
*vp
,
76 struct vnode
**rvpp
, struct proc
*p
);
78 static int hfs_metasync(struct hfsmount
*hfsmp
, daddr_t node
, struct proc
*p
);
80 static int hfs_removedir(struct vnode
*, struct vnode
*, struct componentname
*,
83 static int hfs_removefile(struct vnode
*, struct vnode
*, struct componentname
*,
86 /* Options for hfs_removedir and hfs_removefile */
87 #define HFSRM_PARENT_LOCKED 0x01
88 #define HFSRM_SKIP_RESERVE 0x02
89 #define HFSRM_SAVE_NAME 0x04
90 #define HFSRM_RENAMEOPTS 0x07
93 int hfs_write_access(struct vnode
*vp
, struct ucred
*cred
, struct proc
*p
, Boolean considerFlags
);
95 int hfs_chflags(struct vnode
*vp
, u_long flags
, struct ucred
*cred
,
97 int hfs_chmod(struct vnode
*vp
, int mode
, struct ucred
*cred
,
99 int hfs_chown(struct vnode
*vp
, uid_t uid
, gid_t gid
,
100 struct ucred
*cred
, struct proc
*p
);
102 /*****************************************************************************
104 * Common Operations on vnodes
106 *****************************************************************************/
109 * Create a regular file
114 IN WILLRELE struct vnode *dvp;
115 OUT struct vnode **vpp;
116 IN struct componentname *cnp;
117 IN struct vattr *vap;
119 We are responsible for freeing the namei buffer,
120 it is done in hfs_makenode()
125 struct vop_create_args
/* {
127 struct vnode **a_vpp;
128 struct componentname *a_cnp;
132 struct vattr
*vap
= ap
->a_vap
;
134 return (hfs_makenode(MAKEIMODE(vap
->va_type
, vap
->va_mode
),
135 ap
->a_dvp
, ap
->a_vpp
, ap
->a_cnp
));
146 IN WILLRELE struct vnode *dvp;
147 OUT WILLRELE struct vnode **vpp;
148 IN struct componentname *cnp;
149 IN struct vattr *vap;
155 struct vop_mknod_args
/* {
157 struct vnode **a_vpp;
158 struct componentname *a_cnp;
162 struct vattr
*vap
= ap
->a_vap
;
163 struct vnode
**vpp
= ap
->a_vpp
;
167 if (VTOVCB(ap
->a_dvp
)->vcbSigWord
!= kHFSPlusSigWord
) {
168 VOP_ABORTOP(ap
->a_dvp
, ap
->a_cnp
);
173 /* Create the vnode */
174 error
= hfs_makenode(MAKEIMODE(vap
->va_type
, vap
->va_mode
),
175 ap
->a_dvp
, vpp
, ap
->a_cnp
);
179 cp
->c_flag
|= C_ACCESS
| C_CHANGE
| C_UPDATE
;
180 if ((vap
->va_rdev
!= VNOVAL
) &&
181 (vap
->va_type
== VBLK
|| vap
->va_type
== VCHR
))
182 cp
->c_rdev
= vap
->va_rdev
;
184 * Remove cnode so that it will be reloaded by lookup and
185 * checked to see if it is an alias of an existing vnode.
186 * Note: unlike UFS, we don't bash v_type here.
202 IN struct ucred *cred;
209 struct vop_open_args
/* {
212 struct ucred *a_cred;
216 struct vnode
*vp
= ap
->a_vp
;
217 struct filefork
*fp
= VTOF(vp
);
221 * Files marked append-only must be opened for appending.
223 if ((vp
->v_type
!= VDIR
) && (VTOC(vp
)->c_flags
& APPEND
) &&
224 (ap
->a_mode
& (FWRITE
| O_APPEND
)) == FWRITE
)
227 if (ap
->a_mode
& O_EVTONLY
) {
228 if (vp
->v_type
== VREG
) {
229 ++VTOF(vp
)->ff_evtonly_refs
;
231 ++VTOC(vp
)->c_evtonly_refs
;
236 * On the first (non-busy) open of a fragmented
237 * file attempt to de-frag it (if its less than 20MB).
239 if ((VTOHFS(vp
)->hfs_flags
& HFS_READ_ONLY
) ||
240 !UBCISVALID(vp
) || ubc_isinuse(vp
, 1)) {
245 fp
->ff_extents
[7].blockCount
!= 0 &&
246 fp
->ff_size
<= (20 * 1024 * 1024)) {
248 * Wait until system bootup is done (3 min).
251 if (tv
.tv_sec
< (60 * 3)) {
254 (void) hfs_relocate(vp
, VTOVCB(vp
)->nextAllocation
+ 4096, ap
->a_cred
, ap
->a_p
);
263 * Update the times on the cnode.
269 IN struct ucred *cred;
276 struct vop_close_args
/* {
279 struct ucred *a_cred;
283 register struct vnode
*vp
= ap
->a_vp
;
284 register struct cnode
*cp
= VTOC(vp
);
285 register struct filefork
*fp
= VTOF(vp
);
286 struct proc
*p
= ap
->a_p
;
289 u_long blks
, blocksize
;
293 simple_lock(&vp
->v_interlock
);
294 if ((!UBCISVALID(vp
) && vp
->v_usecount
> 1)
295 || (UBCISVALID(vp
) && ubc_isinuse(vp
, 1))) {
297 CTIMES(cp
, &tv
, &tv
);
299 simple_unlock(&vp
->v_interlock
);
301 if (ap
->a_fflag
& O_EVTONLY
) {
302 if (vp
->v_type
== VREG
) {
303 --VTOF(vp
)->ff_evtonly_refs
;
305 --VTOC(vp
)->c_evtonly_refs
;
310 * VOP_CLOSE can be called with vp locked (from vclean).
311 * We check for this case using VOP_ISLOCKED and bail.
313 * XXX During a force unmount we won't do the cleanup below!
315 if (vp
->v_type
== VDIR
|| VOP_ISLOCKED(vp
))
320 if ((fp
->ff_blocks
> 0) &&
321 !ISSET(cp
->c_flag
, C_DELETED
) &&
322 ((VTOHFS(vp
)->hfs_flags
& HFS_READ_ONLY
) == 0)) {
323 enum vtype our_type
= vp
->v_type
;
324 u_long our_id
= vp
->v_id
;
325 int was_nocache
= ISSET(vp
->v_flag
, VNOCACHE_DATA
);
327 error
= vn_lock(vp
, LK_EXCLUSIVE
| LK_RETRY
, p
);
331 * Since we can context switch in vn_lock our vnode
332 * could get recycled (eg umount -f). Double check
333 * that its still ours.
335 if (vp
->v_type
!= our_type
|| vp
->v_id
!= our_id
336 || cp
!= VTOC(vp
) || !UBCINFOEXISTS(vp
)) {
337 VOP_UNLOCK(vp
, 0, p
);
342 * Last chance to explicitly zero out the areas
343 * that are currently marked invalid:
345 VOP_DEVBLOCKSIZE(cp
->c_devvp
, &devBlockSize
);
346 (void) cluster_push(vp
);
347 SET(vp
->v_flag
, VNOCACHE_DATA
); /* Don't cache zeros */
348 while (!CIRCLEQ_EMPTY(&fp
->ff_invalidranges
)) {
349 struct rl_entry
*invalid_range
= CIRCLEQ_FIRST(&fp
->ff_invalidranges
);
350 off_t start
= invalid_range
->rl_start
;
351 off_t end
= invalid_range
->rl_end
;
353 /* The range about to be written must be validated
354 * first, so that VOP_CMAP() will return the
355 * appropriate mapping for the cluster code:
357 rl_remove(start
, end
, &fp
->ff_invalidranges
);
359 (void) cluster_write(vp
, (struct uio
*) 0, leof
,
360 invalid_range
->rl_end
+ 1, invalid_range
->rl_start
,
361 (off_t
)0, devBlockSize
, IO_HEADZEROFILL
| IO_NOZERODIRTY
);
363 if (ISSET(vp
->v_flag
, VHASDIRTY
))
364 (void) cluster_push(vp
);
366 cp
->c_flag
|= C_MODIFIED
;
368 cp
->c_flag
&= ~C_ZFWANTSYNC
;
370 blocksize
= VTOVCB(vp
)->blockSize
;
371 blks
= leof
/ blocksize
;
372 if (((off_t
)blks
* (off_t
)blocksize
) != leof
)
375 * Shrink the peof to the smallest size neccessary to contain the leof.
377 if (blks
< fp
->ff_blocks
)
378 (void) VOP_TRUNCATE(vp
, leof
, IO_NDELAY
, ap
->a_cred
, p
);
379 (void) cluster_push(vp
);
382 CLR(vp
->v_flag
, VNOCACHE_DATA
);
385 * If the VOP_TRUNCATE didn't happen to flush the vnode's
386 * information out to disk, force it to be updated now that
387 * all invalid ranges have been zero-filled and validated:
389 if (cp
->c_flag
& C_MODIFIED
) {
391 VOP_UPDATE(vp
, &tv
, &tv
, 0);
393 VOP_UNLOCK(vp
, 0, p
);
395 if ((vp
->v_flag
& VSYSTEM
) && (vp
->v_usecount
== 1))
406 IN struct ucred *cred;
413 struct vop_access_args
/* {
416 struct ucred *a_cred;
420 struct vnode
*vp
= ap
->a_vp
;
421 struct cnode
*cp
= VTOC(vp
);
422 struct ucred
*cred
= ap
->a_cred
;
424 mode_t mode
= ap
->a_mode
;
430 * Disallow write attempts on read-only file systems;
431 * unless the file is a socket, fifo, or a block or
432 * character device resident on the file system.
435 switch (vp
->v_type
) {
439 if (VTOHFS(vp
)->hfs_flags
& HFS_READ_ONLY
)
442 if ((error
= hfs_getinoquota(cp
)))
447 /* If immutable bit set, nobody gets to write it. */
448 if (cp
->c_flags
& IMMUTABLE
)
453 /* Otherwise, user id 0 always gets access. */
454 if (cred
->cr_uid
== 0)
459 /* Otherwise, check the owner. */
460 if ( (cp
->c_uid
== cred
->cr_uid
) || (cp
->c_uid
== UNKNOWNUID
) ) {
467 return ((cp
->c_mode
& mask
) == mask
? 0 : EACCES
);
470 /* Otherwise, check the groups. */
471 if (! (VTOVFS(vp
)->mnt_flag
& MNT_UNKNOWNPERMISSIONS
)) {
472 for (i
= 0, gp
= cred
->cr_groups
; i
< cred
->cr_ngroups
; i
++, gp
++)
473 if (cp
->c_gid
== *gp
) {
480 return ((cp
->c_mode
& mask
) == mask
? 0 : EACCES
);
484 /* Otherwise, check everyone else. */
491 return ((cp
->c_mode
& mask
) == mask
? 0 : EACCES
);
501 IN struct vattr *vap;
502 IN struct ucred *cred;
511 struct vop_getattr_args
/* {
514 struct ucred *a_cred;
518 struct vnode
*vp
= ap
->a_vp
;
519 struct cnode
*cp
= VTOC(vp
);
520 struct vattr
*vap
= ap
->a_vap
;
524 CTIMES(cp
, &tv
, &tv
);
526 vap
->va_type
= vp
->v_type
;
527 vap
->va_mode
= cp
->c_mode
;
528 vap
->va_nlink
= cp
->c_nlink
;
530 * [2856576] Since we are dynamically changing the owner, also
531 * effectively turn off the set-user-id and set-group-id bits,
532 * just like chmod(2) would when changing ownership. This prevents
533 * a security hole where set-user-id programs run as whoever is
534 * logged on (or root if nobody is logged in yet!)
536 if (cp
->c_uid
== UNKNOWNUID
) {
537 vap
->va_mode
&= ~(S_ISUID
| S_ISGID
);
538 vap
->va_uid
= ap
->a_cred
->cr_uid
;
540 vap
->va_uid
= cp
->c_uid
;
542 vap
->va_gid
= cp
->c_gid
;
543 vap
->va_fsid
= cp
->c_dev
;
545 * Exporting file IDs from HFS Plus:
547 * For "normal" files the c_fileid is the same value as the
548 * c_cnid. But for hard link files, they are different - the
549 * c_cnid belongs to the active directory entry (ie the link)
550 * and the c_fileid is for the actual inode (ie the data file).
552 * The stat call (getattr) will always return the c_fileid
553 * and Carbon APIs, which are hardlink-ignorant, will always
554 * receive the c_cnid (from getattrlist).
556 vap
->va_fileid
= cp
->c_fileid
;
557 vap
->va_atime
.tv_sec
= cp
->c_atime
;
558 vap
->va_atime
.tv_nsec
= 0;
559 vap
->va_mtime
.tv_sec
= cp
->c_mtime
;
560 vap
->va_mtime
.tv_nsec
= cp
->c_mtime_nsec
;
561 vap
->va_ctime
.tv_sec
= cp
->c_ctime
;
562 vap
->va_ctime
.tv_nsec
= 0;
564 vap
->va_flags
= cp
->c_flags
;
566 vap
->va_blocksize
= VTOVFS(vp
)->mnt_stat
.f_iosize
;
568 if (vp
->v_type
== VDIR
) {
569 vap
->va_size
= cp
->c_nlink
* AVERAGE_HFSDIRENTRY_SIZE
;
572 vap
->va_size
= VTOF(vp
)->ff_size
;
573 vap
->va_bytes
= (u_quad_t
)cp
->c_blocks
*
574 (u_quad_t
)VTOVCB(vp
)->blockSize
;
575 if (vp
->v_type
== VBLK
|| vp
->v_type
== VCHR
)
576 vap
->va_rdev
= cp
->c_rdev
;
582 * Set attribute vnode op. called from several syscalls
587 IN struct vattr *vap;
588 IN struct ucred *cred;
595 struct vop_setattr_args
/* {
598 struct ucred *a_cred;
602 struct vattr
*vap
= ap
->a_vap
;
603 struct vnode
*vp
= ap
->a_vp
;
604 struct cnode
*cp
= VTOC(vp
);
605 struct ucred
*cred
= ap
->a_cred
;
606 struct proc
*p
= ap
->a_p
;
607 struct timeval atimeval
, mtimeval
;
611 * Check for unsettable attributes.
613 if ((vap
->va_type
!= VNON
) || (vap
->va_nlink
!= VNOVAL
) ||
614 (vap
->va_fsid
!= VNOVAL
) || (vap
->va_fileid
!= VNOVAL
) ||
615 (vap
->va_blocksize
!= VNOVAL
) || (vap
->va_rdev
!= VNOVAL
) ||
616 ((int)vap
->va_bytes
!= VNOVAL
) || (vap
->va_gen
!= VNOVAL
)) {
621 // don't allow people to set the attributes of symlinks
622 // (nfs has a bad habit of doing ths and it can cause
623 // problems for journaling).
625 if (vp
->v_type
== VLNK
) {
631 if (vap
->va_flags
!= VNOVAL
) {
632 if (VTOHFS(vp
)->hfs_flags
& HFS_READ_ONLY
)
634 if ((error
= hfs_chflags(vp
, vap
->va_flags
, cred
, p
)))
636 if (vap
->va_flags
& (IMMUTABLE
| APPEND
))
640 if (cp
->c_flags
& (IMMUTABLE
| APPEND
))
643 // XXXdbg - don't allow modification of the journal or journal_info_block
644 if (VTOHFS(vp
)->jnl
&& cp
->c_datafork
) {
645 struct HFSPlusExtentDescriptor
*extd
;
647 extd
= &cp
->c_datafork
->ff_extents
[0];
648 if (extd
->startBlock
== VTOVCB(vp
)->vcbJinfoBlock
|| extd
->startBlock
== VTOHFS(vp
)->jnl_start
) {
654 * Go through the fields and update iff not VNOVAL.
656 if (vap
->va_uid
!= (uid_t
)VNOVAL
|| vap
->va_gid
!= (gid_t
)VNOVAL
) {
657 if (VTOHFS(vp
)->hfs_flags
& HFS_READ_ONLY
)
659 if ((error
= hfs_chown(vp
, vap
->va_uid
, vap
->va_gid
, cred
, p
)))
662 if (vap
->va_size
!= VNOVAL
) {
664 * Disallow write attempts on read-only file systems;
665 * unless the file is a socket, fifo, or a block or
666 * character device resident on the file system.
668 switch (vp
->v_type
) {
673 if (VTOHFS(vp
)->hfs_flags
& HFS_READ_ONLY
)
679 if ((error
= VOP_TRUNCATE(vp
, vap
->va_size
, 0, cred
, p
)))
683 if (vap
->va_atime
.tv_sec
!= VNOVAL
|| vap
->va_mtime
.tv_sec
!= VNOVAL
) {
684 if (VTOHFS(vp
)->hfs_flags
& HFS_READ_ONLY
)
686 if (((error
= hfs_owner_rights(VTOHFS(vp
), cp
->c_uid
, cred
, p
, true)) != 0) &&
687 ((vap
->va_vaflags
& VA_UTIMES_NULL
) == 0 ||
688 (error
= VOP_ACCESS(vp
, VWRITE
, cred
, p
)))) {
691 if (vap
->va_atime
.tv_sec
!= VNOVAL
)
692 cp
->c_flag
|= C_ACCESS
;
693 if (vap
->va_mtime
.tv_sec
!= VNOVAL
) {
694 cp
->c_flag
|= C_CHANGE
| C_UPDATE
;
696 * The utimes system call can reset the modification
697 * time but it doesn't know about HFS create times.
698 * So we need to insure that the creation time is
699 * always at least as old as the modification time.
701 if ((VTOVCB(vp
)->vcbSigWord
== kHFSPlusSigWord
) &&
702 (cp
->c_cnid
!= kRootDirID
) &&
703 (vap
->va_mtime
.tv_sec
< cp
->c_itime
)) {
704 cp
->c_itime
= vap
->va_mtime
.tv_sec
;
707 atimeval
.tv_sec
= vap
->va_atime
.tv_sec
;
708 atimeval
.tv_usec
= 0;
709 mtimeval
.tv_sec
= vap
->va_mtime
.tv_sec
;
710 mtimeval
.tv_usec
= 0;
711 if ((error
= VOP_UPDATE(vp
, &atimeval
, &mtimeval
, 1)))
715 if (vap
->va_mode
!= (mode_t
)VNOVAL
) {
716 if (VTOHFS(vp
)->hfs_flags
& HFS_READ_ONLY
)
718 error
= hfs_chmod(vp
, (int)vap
->va_mode
, cred
, p
);
720 HFS_KNOTE(vp
, NOTE_ATTRIB
);
726 * Change the mode on a file.
727 * cnode must be locked before calling.
731 hfs_chmod(vp
, mode
, cred
, p
)
732 register struct vnode
*vp
;
734 register struct ucred
*cred
;
737 register struct cnode
*cp
= VTOC(vp
);
740 if (VTOVCB(vp
)->vcbSigWord
!= kHFSPlusSigWord
)
743 // XXXdbg - don't allow modification of the journal or journal_info_block
744 if (VTOHFS(vp
)->jnl
&& cp
&& cp
->c_datafork
) {
745 struct HFSPlusExtentDescriptor
*extd
;
747 extd
= &cp
->c_datafork
->ff_extents
[0];
748 if (extd
->startBlock
== VTOVCB(vp
)->vcbJinfoBlock
|| extd
->startBlock
== VTOHFS(vp
)->jnl_start
) {
753 #if OVERRIDE_UNKNOWN_PERMISSIONS
754 if (VTOVFS(vp
)->mnt_flag
& MNT_UNKNOWNPERMISSIONS
) {
758 if ((error
= hfs_owner_rights(VTOHFS(vp
), cp
->c_uid
, cred
, p
, true)) != 0)
761 if (vp
->v_type
!= VDIR
&& (mode
& S_ISTXT
))
763 if (!groupmember(cp
->c_gid
, cred
) && (mode
& S_ISGID
))
766 cp
->c_mode
&= ~ALLPERMS
;
767 cp
->c_mode
|= (mode
& ALLPERMS
);
768 cp
->c_flag
|= C_CHANGE
;
775 hfs_write_access(struct vnode
*vp
, struct ucred
*cred
, struct proc
*p
, Boolean considerFlags
)
777 struct cnode
*cp
= VTOC(vp
);
783 * Disallow write attempts on read-only file systems;
784 * unless the file is a socket, fifo, or a block or
785 * character device resident on the file system.
787 switch (vp
->v_type
) {
791 if (VTOHFS(vp
)->hfs_flags
& HFS_READ_ONLY
)
798 /* If immutable bit set, nobody gets to write it. */
799 if (considerFlags
&& (cp
->c_flags
& IMMUTABLE
))
802 /* Otherwise, user id 0 always gets access. */
803 if (cred
->cr_uid
== 0)
806 /* Otherwise, check the owner. */
807 if ((retval
= hfs_owner_rights(VTOHFS(vp
), cp
->c_uid
, cred
, p
, false)) == 0)
808 return ((cp
->c_mode
& S_IWUSR
) == S_IWUSR
? 0 : EACCES
);
810 /* Otherwise, check the groups. */
811 for (i
= 0, gp
= cred
->cr_groups
; i
< cred
->cr_ngroups
; i
++, gp
++) {
812 if (cp
->c_gid
== *gp
)
813 return ((cp
->c_mode
& S_IWGRP
) == S_IWGRP
? 0 : EACCES
);
816 /* Otherwise, check everyone else. */
817 return ((cp
->c_mode
& S_IWOTH
) == S_IWOTH
? 0 : EACCES
);
823 * Change the flags on a file or directory.
824 * cnode must be locked before calling.
828 hfs_chflags(vp
, flags
, cred
, p
)
829 register struct vnode
*vp
;
830 register u_long flags
;
831 register struct ucred
*cred
;
834 register struct cnode
*cp
= VTOC(vp
);
837 if (VTOVCB(vp
)->vcbSigWord
== kHFSSigWord
) {
838 if ((retval
= hfs_write_access(vp
, cred
, p
, false)) != 0) {
841 } else if ((retval
= hfs_owner_rights(VTOHFS(vp
), cp
->c_uid
, cred
, p
, true)) != 0) {
845 if (cred
->cr_uid
== 0) {
846 if ((cp
->c_flags
& (SF_IMMUTABLE
| SF_APPEND
)) &&
852 if (cp
->c_flags
& (SF_IMMUTABLE
| SF_APPEND
) ||
853 (flags
& UF_SETTABLE
) != flags
) {
856 cp
->c_flags
&= SF_SETTABLE
;
857 cp
->c_flags
|= (flags
& UF_SETTABLE
);
859 cp
->c_flag
|= C_CHANGE
;
866 * Perform chown operation on cnode cp;
867 * code must be locked prior to call.
871 hfs_chown(vp
, uid
, gid
, cred
, p
)
872 register struct vnode
*vp
;
878 register struct cnode
*cp
= VTOC(vp
);
887 if (VTOVCB(vp
)->vcbSigWord
!= kHFSPlusSigWord
)
890 if (VTOVFS(vp
)->mnt_flag
& MNT_UNKNOWNPERMISSIONS
)
893 if (uid
== (uid_t
)VNOVAL
)
895 if (gid
== (gid_t
)VNOVAL
)
898 * If we don't own the file, are trying to change the owner
899 * of the file, or are not a member of the target group,
900 * the caller must be superuser or the call fails.
902 if ((cred
->cr_uid
!= cp
->c_uid
|| uid
!= cp
->c_uid
||
903 (gid
!= cp
->c_gid
&& !groupmember((gid_t
)gid
, cred
))) &&
904 (error
= suser(cred
, &p
->p_acflag
)))
910 if ((error
= hfs_getinoquota(cp
)))
913 dqrele(vp
, cp
->c_dquot
[USRQUOTA
]);
914 cp
->c_dquot
[USRQUOTA
] = NODQUOT
;
917 dqrele(vp
, cp
->c_dquot
[GRPQUOTA
]);
918 cp
->c_dquot
[GRPQUOTA
] = NODQUOT
;
922 * Eventually need to account for (fake) a block per directory
923 *if (vp->v_type == VDIR)
924 *change = VTOVCB(vp)->blockSize;
928 change
= (int64_t)(cp
->c_blocks
) * (int64_t)VTOVCB(vp
)->blockSize
;
929 (void) hfs_chkdq(cp
, -change
, cred
, CHOWN
);
930 (void) hfs_chkiq(cp
, -1, cred
, CHOWN
);
931 for (i
= 0; i
< MAXQUOTAS
; i
++) {
932 dqrele(vp
, cp
->c_dquot
[i
]);
933 cp
->c_dquot
[i
] = NODQUOT
;
939 if ((error
= hfs_getinoquota(cp
)) == 0) {
941 dqrele(vp
, cp
->c_dquot
[USRQUOTA
]);
942 cp
->c_dquot
[USRQUOTA
] = NODQUOT
;
945 dqrele(vp
, cp
->c_dquot
[GRPQUOTA
]);
946 cp
->c_dquot
[GRPQUOTA
] = NODQUOT
;
948 if ((error
= hfs_chkdq(cp
, change
, cred
, CHOWN
)) == 0) {
949 if ((error
= hfs_chkiq(cp
, 1, cred
, CHOWN
)) == 0)
952 (void) hfs_chkdq(cp
, -change
, cred
, CHOWN
|FORCE
);
954 for (i
= 0; i
< MAXQUOTAS
; i
++) {
955 dqrele(vp
, cp
->c_dquot
[i
]);
956 cp
->c_dquot
[i
] = NODQUOT
;
961 if (hfs_getinoquota(cp
) == 0) {
963 dqrele(vp
, cp
->c_dquot
[USRQUOTA
]);
964 cp
->c_dquot
[USRQUOTA
] = NODQUOT
;
967 dqrele(vp
, cp
->c_dquot
[GRPQUOTA
]);
968 cp
->c_dquot
[GRPQUOTA
] = NODQUOT
;
970 (void) hfs_chkdq(cp
, change
, cred
, FORCE
|CHOWN
);
971 (void) hfs_chkiq(cp
, 1, cred
, FORCE
|CHOWN
);
972 (void) hfs_getinoquota(cp
);
976 if (hfs_getinoquota(cp
))
977 panic("hfs_chown: lost quota");
980 if (ouid
!= uid
|| ogid
!= gid
)
981 cp
->c_flag
|= C_CHANGE
;
982 if (ouid
!= uid
&& cred
->cr_uid
!= 0)
983 cp
->c_mode
&= ~S_ISUID
;
984 if (ogid
!= gid
&& cred
->cr_uid
!= 0)
985 cp
->c_mode
&= ~S_ISGID
;
992 #% exchange fvp L L L
993 #% exchange tvp L L L
997 * The hfs_exchange routine swaps the fork data in two files by
998 * exchanging some of the information in the cnode. It is used
999 * to preserve the file ID when updating an existing file, in
1000 * case the file is being tracked through its file ID. Typically
1001 * its used after creating a new file during a safe-save.
1006 struct vop_exchange_args
/* {
1007 struct vnode *a_fvp;
1008 struct vnode *a_tvp;
1009 struct ucred *a_cred;
1013 struct vnode
*from_vp
= ap
->a_fvp
;
1014 struct vnode
*to_vp
= ap
->a_tvp
;
1015 struct cnode
*from_cp
= VTOC(from_vp
);
1016 struct cnode
*to_cp
= VTOC(to_vp
);
1017 struct hfsmount
*hfsmp
= VTOHFS(from_vp
);
1018 struct cat_desc tempdesc
;
1019 struct cat_attr tempattr
;
1020 int error
= 0, started_tr
= 0, grabbed_lock
= 0;
1021 cat_cookie_t cookie
= {0};
1023 /* The files must be on the same volume. */
1024 if (from_vp
->v_mount
!= to_vp
->v_mount
)
1027 /* Only normal files can be exchanged. */
1028 if ((from_vp
->v_type
!= VREG
) || (to_vp
->v_type
!= VREG
) ||
1029 (from_cp
->c_flag
& C_HARDLINK
) || (to_cp
->c_flag
& C_HARDLINK
) ||
1030 VNODE_IS_RSRC(from_vp
) || VNODE_IS_RSRC(to_vp
))
1033 // XXXdbg - don't allow modification of the journal or journal_info_block
1035 struct HFSPlusExtentDescriptor
*extd
;
1037 if (from_cp
->c_datafork
) {
1038 extd
= &from_cp
->c_datafork
->ff_extents
[0];
1039 if (extd
->startBlock
== VTOVCB(from_vp
)->vcbJinfoBlock
|| extd
->startBlock
== hfsmp
->jnl_start
) {
1044 if (to_cp
->c_datafork
) {
1045 extd
= &to_cp
->c_datafork
->ff_extents
[0];
1046 if (extd
->startBlock
== VTOVCB(to_vp
)->vcbJinfoBlock
|| extd
->startBlock
== hfsmp
->jnl_start
) {
1053 hfs_global_shared_lock_acquire(hfsmp
);
1056 if ((error
= journal_start_transaction(hfsmp
->jnl
)) != 0) {
1063 * Reserve some space in the Catalog file.
1065 if ((error
= cat_preflight(hfsmp
, CAT_EXCHANGE
, &cookie
, ap
->a_p
))) {
1069 /* Lock catalog b-tree */
1070 error
= hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_EXCLUSIVE
, ap
->a_p
);
1071 if (error
) goto Err_Exit
;
1073 /* The backend code always tries to delete the virtual
1074 * extent id for exchanging files so we neeed to lock
1075 * the extents b-tree.
1077 error
= hfs_metafilelocking(hfsmp
, kHFSExtentsFileID
, LK_EXCLUSIVE
, ap
->a_p
);
1079 (void) hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_RELEASE
, ap
->a_p
);
1083 /* Do the exchange */
1084 error
= MacToVFSError(ExchangeFileIDs(HFSTOVCB(hfsmp
),
1085 from_cp
->c_desc
.cd_nameptr
, to_cp
->c_desc
.cd_nameptr
,
1086 from_cp
->c_parentcnid
, to_cp
->c_parentcnid
,
1087 from_cp
->c_hint
, to_cp
->c_hint
));
1089 (void) hfs_metafilelocking(hfsmp
, kHFSExtentsFileID
, LK_RELEASE
, ap
->a_p
);
1090 (void) hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_RELEASE
, ap
->a_p
);
1092 if (error
!= E_NONE
) {
1096 /* Purge the vnodes from the name cache */
1098 cache_purge(from_vp
);
1102 /* Save a copy of from attributes before swapping. */
1103 bcopy(&from_cp
->c_desc
, &tempdesc
, sizeof(struct cat_desc
));
1104 bcopy(&from_cp
->c_attr
, &tempattr
, sizeof(struct cat_attr
));
1107 * Swap the descriptors and all non-fork related attributes.
1108 * (except the modify date)
1110 bcopy(&to_cp
->c_desc
, &from_cp
->c_desc
, sizeof(struct cat_desc
));
1112 from_cp
->c_hint
= 0;
1113 from_cp
->c_fileid
= from_cp
->c_cnid
;
1114 from_cp
->c_itime
= to_cp
->c_itime
;
1115 from_cp
->c_btime
= to_cp
->c_btime
;
1116 from_cp
->c_atime
= to_cp
->c_atime
;
1117 from_cp
->c_ctime
= to_cp
->c_ctime
;
1118 from_cp
->c_gid
= to_cp
->c_gid
;
1119 from_cp
->c_uid
= to_cp
->c_uid
;
1120 from_cp
->c_flags
= to_cp
->c_flags
;
1121 from_cp
->c_mode
= to_cp
->c_mode
;
1122 bcopy(to_cp
->c_finderinfo
, from_cp
->c_finderinfo
, 32);
1124 bcopy(&tempdesc
, &to_cp
->c_desc
, sizeof(struct cat_desc
));
1126 to_cp
->c_fileid
= to_cp
->c_cnid
;
1127 to_cp
->c_itime
= tempattr
.ca_itime
;
1128 to_cp
->c_btime
= tempattr
.ca_btime
;
1129 to_cp
->c_atime
= tempattr
.ca_atime
;
1130 to_cp
->c_ctime
= tempattr
.ca_ctime
;
1131 to_cp
->c_gid
= tempattr
.ca_gid
;
1132 to_cp
->c_uid
= tempattr
.ca_uid
;
1133 to_cp
->c_flags
= tempattr
.ca_flags
;
1134 to_cp
->c_mode
= tempattr
.ca_mode
;
1135 bcopy(tempattr
.ca_finderinfo
, to_cp
->c_finderinfo
, 32);
1137 /* Reinsert into the cnode hash under new file IDs*/
1138 hfs_chashremove(from_cp
);
1139 hfs_chashremove(to_cp
);
1141 hfs_chashinsert(from_cp
);
1142 hfs_chashinsert(to_cp
);
1145 * When a file moves out of "Cleanup At Startup"
1146 * we can drop its NODUMP status.
1148 if ((from_cp
->c_flags
& UF_NODUMP
) &&
1149 (from_cp
->c_parentcnid
!= to_cp
->c_parentcnid
)) {
1150 from_cp
->c_flags
&= ~UF_NODUMP
;
1151 from_cp
->c_flag
|= C_CHANGE
;
1153 if ((to_cp
->c_flags
& UF_NODUMP
) &&
1154 (to_cp
->c_parentcnid
!= from_cp
->c_parentcnid
)) {
1155 to_cp
->c_flags
&= ~UF_NODUMP
;
1156 to_cp
->c_flag
|= C_CHANGE
;
1159 HFS_KNOTE(from_vp
, NOTE_ATTRIB
);
1160 HFS_KNOTE(to_vp
, NOTE_ATTRIB
);
1163 cat_postflight(hfsmp
, &cookie
, ap
->a_p
);
1167 journal_end_transaction(hfsmp
->jnl
);
1170 hfs_global_shared_lock_release(hfsmp
);
1182 IN struct vnode *vp;
1183 IN struct ucred *cred;
1190 struct vop_fsync_args
/* {
1192 struct ucred *a_cred;
1197 struct vnode
*vp
= ap
->a_vp
;
1198 struct cnode
*cp
= VTOC(vp
);
1199 struct filefork
*fp
= NULL
;
1201 register struct buf
*bp
;
1204 struct hfsmount
*hfsmp
= VTOHFS(ap
->a_vp
);
1209 wait
= (ap
->a_waitfor
== MNT_WAIT
);
1211 /* HFS directories don't have any data blocks. */
1212 if (vp
->v_type
== VDIR
)
1216 * For system files flush the B-tree header and
1217 * for regular files write out any clusters
1219 if (vp
->v_flag
& VSYSTEM
) {
1220 if (VTOF(vp
)->fcbBTCBPtr
!= NULL
) {
1222 if (hfsmp
->jnl
== NULL
) {
1223 BTFlushPath(VTOF(vp
));
1226 } else if (UBCINFOEXISTS(vp
))
1227 (void) cluster_push(vp
);
1230 * When MNT_WAIT is requested and the zero fill timeout
1231 * has expired then we must explicitly zero out any areas
1232 * that are currently marked invalid (holes).
1234 * Files with NODUMP can bypass zero filling here.
1236 if ((wait
|| (cp
->c_flag
& C_ZFWANTSYNC
)) &&
1237 ((cp
->c_flags
& UF_NODUMP
) == 0) &&
1238 UBCINFOEXISTS(vp
) && (fp
= VTOF(vp
)) &&
1239 cp
->c_zftimeout
!= 0) {
1243 if (time
.tv_sec
< cp
->c_zftimeout
) {
1244 /* Remember that a force sync was requested. */
1245 cp
->c_flag
|= C_ZFWANTSYNC
;
1248 VOP_DEVBLOCKSIZE(cp
->c_devvp
, &devblksize
);
1249 was_nocache
= ISSET(vp
->v_flag
, VNOCACHE_DATA
);
1250 SET(vp
->v_flag
, VNOCACHE_DATA
); /* Don't cache zeros */
1252 while (!CIRCLEQ_EMPTY(&fp
->ff_invalidranges
)) {
1253 struct rl_entry
*invalid_range
= CIRCLEQ_FIRST(&fp
->ff_invalidranges
);
1254 off_t start
= invalid_range
->rl_start
;
1255 off_t end
= invalid_range
->rl_end
;
1257 /* The range about to be written must be validated
1258 * first, so that VOP_CMAP() will return the
1259 * appropriate mapping for the cluster code:
1261 rl_remove(start
, end
, &fp
->ff_invalidranges
);
1263 (void) cluster_write(vp
, (struct uio
*) 0,
1265 invalid_range
->rl_end
+ 1,
1266 invalid_range
->rl_start
,
1267 (off_t
)0, devblksize
,
1268 IO_HEADZEROFILL
| IO_NOZERODIRTY
);
1269 cp
->c_flag
|= C_MODIFIED
;
1271 (void) cluster_push(vp
);
1273 CLR(vp
->v_flag
, VNOCACHE_DATA
);
1274 cp
->c_flag
&= ~C_ZFWANTSYNC
;
1275 cp
->c_zftimeout
= 0;
1279 * Flush all dirty buffers associated with a vnode.
1283 for (bp
= vp
->v_dirtyblkhd
.lh_first
; bp
; bp
= nbp
) {
1284 nbp
= bp
->b_vnbufs
.le_next
;
1285 if ((bp
->b_flags
& B_BUSY
))
1287 if ((bp
->b_flags
& B_DELWRI
) == 0)
1288 panic("hfs_fsync: bp 0x% not dirty (hfsmp 0x%x)", bp
, hfsmp
);
1290 if (hfsmp
->jnl
&& (bp
->b_flags
& B_LOCKED
)) {
1291 if ((bp
->b_flags
& B_META
) == 0) {
1292 panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",
1295 // if journal_active() returns >= 0 then the journal is ok and we
1296 // shouldn't do anything to this locked block (because it is part
1297 // of a transaction). otherwise we'll just go through the normal
1298 // code path and flush the buffer.
1299 if (journal_active(hfsmp
->jnl
) >= 0) {
1305 bp
->b_flags
|= B_BUSY
;
1306 /* Clear B_LOCKED, should only be set on meta files */
1307 bp
->b_flags
&= ~B_LOCKED
;
1311 * Wait for I/O associated with indirect blocks to complete,
1312 * since there is no way to quickly wait for them below.
1314 if (bp
->b_vp
== vp
|| ap
->a_waitfor
== MNT_NOWAIT
)
1317 (void) VOP_BWRITE(bp
);
1322 while (vp
->v_numoutput
) {
1323 vp
->v_flag
|= VBWAIT
;
1324 tsleep((caddr_t
)&vp
->v_numoutput
, PRIBIO
+ 1, "hfs_fsync", 0);
1327 // XXXdbg -- is checking for hfsmp->jnl == NULL the right
1329 if (hfsmp
->jnl
== NULL
&& vp
->v_dirtyblkhd
.lh_first
) {
1330 /* still have some dirty buffers */
1332 vprint("hfs_fsync: dirty", vp
);
1335 * Looks like the requests are not
1336 * getting queued to the driver.
1337 * Retrying here causes a cpu bound loop.
1338 * Yield to the other threads and hope
1341 (void)tsleep((caddr_t
)&vp
->v_numoutput
,
1342 PRIBIO
+ 1, "hfs_fsync", hz
/10);
1355 if (vp
->v_flag
& VSYSTEM
) {
1356 if (VTOF(vp
)->fcbBTCBPtr
!= NULL
)
1357 BTSetLastSync(VTOF(vp
), tv
.tv_sec
);
1358 cp
->c_flag
&= ~(C_ACCESS
| C_CHANGE
| C_MODIFIED
| C_UPDATE
);
1359 } else /* User file */ {
1360 retval
= VOP_UPDATE(ap
->a_vp
, &tv
, &tv
, wait
);
1362 /* When MNT_WAIT is requested push out any delayed meta data */
1363 if ((retval
== 0) && wait
&& cp
->c_hint
&&
1364 !ISSET(cp
->c_flag
, C_DELETED
| C_NOEXISTS
)) {
1365 hfs_metasync(VTOHFS(vp
), cp
->c_hint
, ap
->a_p
);
1368 // make sure that we've really been called from the user
1369 // fsync() and if so push out any pending transactions
1370 // that this file might is a part of (and get them on
1372 if (vp
->v_flag
& VFULLFSYNC
) {
1374 journal_flush(hfsmp
->jnl
);
1376 VOP_IOCTL(hfsmp
->hfs_devvp
, DKIOCSYNCHRONIZECACHE
, NULL
, FWRITE
, NOCRED
, ap
->a_p
);
1384 /* Sync an hfs catalog b-tree node */
1386 hfs_metasync(struct hfsmount
*hfsmp
, daddr_t node
, struct proc
*p
)
1393 vp
= HFSTOVCB(hfsmp
)->catalogRefNum
;
1395 // XXXdbg - don't need to do this on a journaled volume
1400 if (hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_EXCLUSIVE
, p
) != 0)
1404 * Look for a matching node that has been delayed
1405 * but is not part of a set (B_LOCKED).
1408 for (bp
= vp
->v_dirtyblkhd
.lh_first
; bp
; bp
= nbp
) {
1409 nbp
= bp
->b_vnbufs
.le_next
;
1410 if (bp
->b_flags
& B_BUSY
)
1412 if (bp
->b_lblkno
== node
) {
1413 if (bp
->b_flags
& B_LOCKED
)
1417 bp
->b_flags
|= B_BUSY
;
1419 (void) VOP_BWRITE(bp
);
1425 (void) hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_RELEASE
, p
);
1432 hfs_btsync(struct vnode
*vp
, int sync_transaction
)
1434 struct cnode
*cp
= VTOC(vp
);
1435 register struct buf
*bp
;
1438 struct hfsmount
*hfsmp
= VTOHFS(vp
);
1442 * Flush all dirty buffers associated with b-tree.
1447 for (bp
= vp
->v_dirtyblkhd
.lh_first
; bp
; bp
= nbp
) {
1448 nbp
= bp
->b_vnbufs
.le_next
;
1449 if ((bp
->b_flags
& B_BUSY
))
1451 if ((bp
->b_flags
& B_DELWRI
) == 0)
1452 panic("hfs_btsync: not dirty (bp 0x%x hfsmp 0x%x)", bp
, hfsmp
);
1455 if (hfsmp
->jnl
&& (bp
->b_flags
& B_LOCKED
)) {
1456 if ((bp
->b_flags
& B_META
) == 0) {
1457 panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",
1460 // if journal_active() returns >= 0 then the journal is ok and we
1461 // shouldn't do anything to this locked block (because it is part
1462 // of a transaction). otherwise we'll just go through the normal
1463 // code path and flush the buffer.
1464 if (journal_active(hfsmp
->jnl
) >= 0) {
1469 if (sync_transaction
&& !(bp
->b_flags
& B_LOCKED
))
1473 bp
->b_flags
|= B_BUSY
;
1474 bp
->b_flags
&= ~B_LOCKED
;
1485 if ((vp
->v_flag
& VSYSTEM
) && (VTOF(vp
)->fcbBTCBPtr
!= NULL
))
1486 (void) BTSetLastSync(VTOF(vp
), tv
.tv_sec
);
1487 cp
->c_flag
&= ~(C_ACCESS
| C_CHANGE
| C_MODIFIED
| C_UPDATE
);
1493 * Rmdir system call.
1498 IN WILLRELE struct vnode *dvp;
1499 IN WILLRELE struct vnode *vp;
1500 IN struct componentname *cnp;
1505 struct vop_rmdir_args
/* {
1506 struct vnode *a_dvp;
1508 struct componentname *a_cnp;
1511 return (hfs_removedir(ap
->a_dvp
, ap
->a_vp
, ap
->a_cnp
, 0));
1518 hfs_removedir(dvp
, vp
, cnp
, options
)
1521 struct componentname
*cnp
;
1524 struct proc
*p
= cnp
->cn_proc
;
1527 struct hfsmount
* hfsmp
;
1529 cat_cookie_t cookie
= {0};
1530 int error
= 0, started_tr
= 0, grabbed_lock
= 0;
1539 return (EINVAL
); /* cannot remove "." */
1543 (void)hfs_getinoquota(cp
);
1546 hfs_global_shared_lock_acquire(hfsmp
);
1549 if ((error
= journal_start_transaction(hfsmp
->jnl
)) != 0) {
1555 if (!(options
& HFSRM_SKIP_RESERVE
)) {
1557 * Reserve some space in the Catalog file.
1559 if ((error
= cat_preflight(hfsmp
, CAT_DELETE
, &cookie
, p
))) {
1565 * Verify the directory is empty (and valid).
1566 * (Rmdir ".." won't be valid since
1567 * ".." will contain a reference to
1568 * the current directory and thus be
1571 if (cp
->c_entries
!= 0) {
1575 if ((dcp
->c_flags
& APPEND
) || (cp
->c_flags
& (IMMUTABLE
| APPEND
))) {
1580 /* Remove the entry from the namei cache: */
1583 /* Lock catalog b-tree */
1584 error
= hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_EXCLUSIVE
, p
);
1585 if (error
) goto out
;
1587 if (cp
->c_entries
> 0)
1588 panic("hfs_rmdir: attempting to delete a non-empty directory!");
1589 /* Remove entry from catalog */
1590 error
= cat_delete(hfsmp
, &cp
->c_desc
, &cp
->c_attr
);
1592 /* Unlock catalog b-tree */
1593 (void) hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_RELEASE
, p
);
1594 if (error
) goto out
;
1597 (void)hfs_chkiq(cp
, -1, NOCRED
, 0);
1600 /* The parent lost a child */
1601 if (dcp
->c_entries
> 0)
1603 if (dcp
->c_nlink
> 0)
1605 dcp
->c_flag
|= C_CHANGE
| C_UPDATE
;
1607 (void) VOP_UPDATE(dvp
, &tv
, &tv
, 0);
1608 HFS_KNOTE(dvp
, NOTE_WRITE
| NOTE_LINK
);
1610 hfs_volupdate(hfsmp
, VOL_RMDIR
, (dcp
->c_cnid
== kHFSRootFolderID
));
1612 cp
->c_mode
= 0; /* Makes the vnode go away...see inactive */
1613 cp
->c_flag
|= C_NOEXISTS
;
1615 if (!(options
& HFSRM_PARENT_LOCKED
)) {
1618 HFS_KNOTE(vp
, NOTE_DELETE
);
1621 if (!(options
& HFSRM_SKIP_RESERVE
)) {
1622 cat_postflight(hfsmp
, &cookie
, p
);
1626 journal_end_transaction(hfsmp
->jnl
);
1629 hfs_global_shared_lock_release(hfsmp
);
1641 IN WILLRELE struct vnode *dvp;
1642 IN WILLRELE struct vnode *vp;
1643 IN struct componentname *cnp;
1649 struct vop_remove_args
/* {
1650 struct vnode *a_dvp;
1652 struct componentname *a_cnp;
1655 return (hfs_removefile(ap
->a_dvp
, ap
->a_vp
, ap
->a_cnp
, 0));
1663 * Similar to hfs_remove except there are additional options.
1666 hfs_removefile(dvp
, vp
, cnp
, options
)
1669 struct componentname
*cnp
;
1672 struct vnode
*rvp
= NULL
;
1675 struct hfsmount
*hfsmp
;
1676 struct proc
*p
= cnp
->cn_proc
;
1677 int dataforkbusy
= 0;
1678 int rsrcforkbusy
= 0;
1681 cat_cookie_t cookie
= {0};
1683 int started_tr
= 0, grabbed_lock
= 0;
1684 int refcount
, isbigfile
= 0;
1686 /* Directories should call hfs_rmdir! */
1687 if (vp
->v_type
== VDIR
) {
1696 if (cp
->c_parentcnid
!= dcp
->c_cnid
) {
1701 /* Make sure a remove is permitted */
1702 if ((cp
->c_flags
& (IMMUTABLE
| APPEND
)) ||
1703 (VTOC(dvp
)->c_flags
& APPEND
) ||
1704 VNODE_IS_RSRC(vp
)) {
1710 * Aquire a vnode for a non-empty resource fork.
1711 * (needed for VOP_TRUNCATE)
1713 if (cp
->c_blocks
- VTOF(vp
)->ff_blocks
) {
1714 error
= hfs_vgetrsrc(hfsmp
, vp
, &rvp
, p
);
1719 // XXXdbg - don't allow deleting the journal or journal_info_block
1720 if (hfsmp
->jnl
&& cp
->c_datafork
) {
1721 struct HFSPlusExtentDescriptor
*extd
;
1723 extd
= &cp
->c_datafork
->ff_extents
[0];
1724 if (extd
->startBlock
== HFSTOVCB(hfsmp
)->vcbJinfoBlock
|| extd
->startBlock
== hfsmp
->jnl_start
) {
1731 * Check if this file is being used.
1733 * The namei done for the remove took a reference on the
1734 * vnode (vp). And we took a ref on the resource vnode (rvp).
1735 * Hence set 1 in the tookref parameter of ubc_isinuse().
1737 if (VTOC(vp
)->c_flag
& C_VPREFHELD
) {
1742 if (UBCISVALID(vp
) && ubc_isinuse(vp
, refcount
))
1744 if (rvp
&& UBCISVALID(rvp
) && ubc_isinuse(rvp
, 1))
1747 // need this to check if we have to break the deletion
1748 // into multiple pieces
1749 isbigfile
= (VTOC(vp
)->c_datafork
->ff_size
>= HFS_BIGFILE_SIZE
);
1752 * Carbon semantics prohibit deleting busy files.
1753 * (enforced when NODELETEBUSY is requested)
1755 if ((dataforkbusy
|| rsrcforkbusy
) &&
1756 ((cnp
->cn_flags
& NODELETEBUSY
) ||
1757 (hfsmp
->hfs_privdir_desc
.cd_cnid
== 0))) {
1763 (void)hfs_getinoquota(cp
);
1767 hfs_global_shared_lock_acquire(hfsmp
);
1770 if ((error
= journal_start_transaction(hfsmp
->jnl
)) != 0) {
1776 if (!(options
& HFSRM_SKIP_RESERVE
)) {
1778 * Reserve some space in the Catalog file.
1780 if ((error
= cat_preflight(hfsmp
, CAT_DELETE
, &cookie
, p
))) {
1785 /* Remove our entry from the namei cache. */
1788 // XXXdbg - if we're journaled, kill any dirty symlink buffers
1789 if (hfsmp
->jnl
&& vp
->v_type
== VLNK
&& vp
->v_dirtyblkhd
.lh_first
) {
1790 struct buf
*bp
, *nbp
;
1793 for (bp
=vp
->v_dirtyblkhd
.lh_first
; bp
; bp
=nbp
) {
1794 nbp
= bp
->b_vnbufs
.le_next
;
1796 if ((bp
->b_flags
& B_BUSY
)) {
1797 // if it was busy, someone else must be dealing
1798 // with it so just move on.
1802 if (!(bp
->b_flags
& B_META
)) {
1803 panic("hfs: symlink bp @ 0x%x is not marked meta-data!\n", bp
);
1806 // if it's part of the current transaction, kill it.
1807 if (bp
->b_flags
& B_LOCKED
) {
1809 bp
->b_flags
|= B_BUSY
;
1810 journal_kill_block(hfsmp
->jnl
, bp
);
1818 * Truncate any non-busy forks. Busy forks will
1819 * get trucated when their vnode goes inactive.
1821 * (Note: hard links are truncated in VOP_INACTIVE)
1823 if ((cp
->c_flag
& C_HARDLINK
) == 0) {
1824 int mode
= cp
->c_mode
;
1826 if (!dataforkbusy
&& !isbigfile
&& cp
->c_datafork
->ff_blocks
!= 0) {
1827 cp
->c_mode
= 0; /* Suppress VOP_UPDATES */
1828 error
= VOP_TRUNCATE(vp
, (off_t
)0, IO_NDELAY
, NOCRED
, p
);
1834 if (!rsrcforkbusy
&& rvp
) {
1835 cp
->c_mode
= 0; /* Suppress VOP_UPDATES */
1836 error
= VOP_TRUNCATE(rvp
, (off_t
)0, IO_NDELAY
, NOCRED
, p
);
1844 * There are 3 remove cases to consider:
1845 * 1. File is a hardlink ==> remove the link
1846 * 2. File is busy (in use) ==> move/rename the file
1847 * 3. File is not in use ==> remove the file
1850 if (cp
->c_flag
& C_HARDLINK
) {
1851 struct cat_desc desc
;
1853 if ((cnp
->cn_flags
& HASBUF
) == 0 ||
1854 cnp
->cn_nameptr
[0] == '\0') {
1855 error
= ENOENT
; /* name missing! */
1859 /* Setup a descriptor for the link */
1860 bzero(&desc
, sizeof(desc
));
1861 desc
.cd_nameptr
= cnp
->cn_nameptr
;
1862 desc
.cd_namelen
= cnp
->cn_namelen
;
1863 desc
.cd_parentcnid
= dcp
->c_cnid
;
1864 /* XXX - if cnid is out of sync then the wrong thread rec will get deleted. */
1865 desc
.cd_cnid
= cp
->c_cnid
;
1867 /* Lock catalog b-tree */
1868 error
= hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_EXCLUSIVE
, p
);
1872 /* Delete the link record */
1873 error
= cat_delete(hfsmp
, &desc
, &cp
->c_attr
);
1875 if ((error
== 0) && (--cp
->c_nlink
< 1)) {
1878 struct cat_desc to_desc
;
1879 struct cat_desc from_desc
;
1882 * This is now esentially an open deleted file.
1883 * Rename it to reflect this state which makes
1884 * orphan file cleanup easier (see hfs_remove_orphans).
1885 * Note: a rename failure here is not fatal.
1887 MAKE_INODE_NAME(inodename
, cp
->c_rdev
);
1888 bzero(&from_desc
, sizeof(from_desc
));
1889 from_desc
.cd_nameptr
= inodename
;
1890 from_desc
.cd_namelen
= strlen(inodename
);
1891 from_desc
.cd_parentcnid
= hfsmp
->hfs_privdir_desc
.cd_cnid
;
1892 from_desc
.cd_flags
= 0;
1893 from_desc
.cd_cnid
= cp
->c_fileid
;
1895 MAKE_DELETED_NAME(delname
, cp
->c_fileid
);
1896 bzero(&to_desc
, sizeof(to_desc
));
1897 to_desc
.cd_nameptr
= delname
;
1898 to_desc
.cd_namelen
= strlen(delname
);
1899 to_desc
.cd_parentcnid
= hfsmp
->hfs_privdir_desc
.cd_cnid
;
1900 to_desc
.cd_flags
= 0;
1901 to_desc
.cd_cnid
= cp
->c_fileid
;
1903 (void) cat_rename(hfsmp
, &from_desc
, &hfsmp
->hfs_privdir_desc
,
1904 &to_desc
, (struct cat_desc
*)NULL
);
1905 cp
->c_flag
|= C_DELETED
;
1908 /* Unlock the Catalog */
1909 (void) hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_RELEASE
, p
);
1914 cp
->c_flag
|= C_CHANGE
;
1916 (void) VOP_UPDATE(vp
, &tv
, &tv
, 0);
1918 hfs_volupdate(hfsmp
, VOL_RMFILE
, (dcp
->c_cnid
== kHFSRootFolderID
));
1920 } else if (dataforkbusy
|| rsrcforkbusy
|| isbigfile
) {
1922 struct cat_desc to_desc
;
1923 struct cat_desc todir_desc
;
1926 * Orphan this file (move to hidden directory).
1928 bzero(&todir_desc
, sizeof(todir_desc
));
1929 todir_desc
.cd_parentcnid
= 2;
1931 MAKE_DELETED_NAME(delname
, cp
->c_fileid
);
1932 bzero(&to_desc
, sizeof(to_desc
));
1933 to_desc
.cd_nameptr
= delname
;
1934 to_desc
.cd_namelen
= strlen(delname
);
1935 to_desc
.cd_parentcnid
= hfsmp
->hfs_privdir_desc
.cd_cnid
;
1936 to_desc
.cd_flags
= 0;
1937 to_desc
.cd_cnid
= cp
->c_cnid
;
1939 /* Lock catalog b-tree */
1940 error
= hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_EXCLUSIVE
, p
);
1944 error
= cat_rename(hfsmp
, &cp
->c_desc
, &todir_desc
,
1945 &to_desc
, (struct cat_desc
*)NULL
);
1947 // XXXdbg - only bump this count if we were successful
1949 hfsmp
->hfs_privdir_attr
.ca_entries
++;
1951 (void)cat_update(hfsmp
, &hfsmp
->hfs_privdir_desc
,
1952 &hfsmp
->hfs_privdir_attr
, NULL
, NULL
);
1954 /* Unlock the Catalog */
1955 (void) hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_RELEASE
, p
);
1956 if (error
) goto out
;
1958 cp
->c_flag
|= C_CHANGE
| C_DELETED
| C_NOEXISTS
;
1961 (void) VOP_UPDATE(vp
, &tv
, &tv
, 0);
1963 } else /* Not busy */ {
1965 if (cp
->c_blocks
> 0) {
1967 panic("hfs_remove: attempting to delete a non-empty file!");
1969 printf("hfs_remove: attempting to delete a non-empty file %s\n",
1970 cp
->c_desc
.cd_nameptr
);
1976 /* Lock catalog b-tree */
1977 error
= hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_EXCLUSIVE
, p
);
1981 error
= cat_delete(hfsmp
, &cp
->c_desc
, &cp
->c_attr
);
1983 if (error
&& error
!= ENXIO
&& error
!= ENOENT
&& truncated
) {
1984 if ((cp
->c_datafork
&& cp
->c_datafork
->ff_size
!= 0) ||
1985 (cp
->c_rsrcfork
&& cp
->c_rsrcfork
->ff_size
!= 0)) {
1986 panic("hfs: remove: couldn't delete a truncated file! (%d, data sz %lld; rsrc sz %lld)",
1987 error
, cp
->c_datafork
->ff_size
, cp
->c_rsrcfork
->ff_size
);
1989 printf("hfs: remove: strangely enough, deleting truncated file %s (%d) got err %d\n",
1990 cp
->c_desc
.cd_nameptr
, cp
->c_attr
.ca_fileid
, error
);
1994 /* Unlock the Catalog */
1995 (void) hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_RELEASE
, p
);
1996 if (error
) goto out
;
1999 (void)hfs_chkiq(cp
, -1, NOCRED
, 0);
2003 truncated
= 0; // because the catalog entry is gone
2004 cp
->c_flag
|= C_CHANGE
| C_NOEXISTS
;
2006 hfs_volupdate(hfsmp
, VOL_RMFILE
, (dcp
->c_cnid
== kHFSRootFolderID
));
2010 * All done with this cnode's descriptor...
2012 * Note: all future catalog calls for this cnode must be
2013 * by fileid only. This is OK for HFS (which doesn't have
2014 * file thread records) since HFS doesn't support hard
2015 * links or the removal of busy files.
2017 cat_releasedesc(&cp
->c_desc
);
2019 /* In all three cases the parent lost a child */
2020 if (dcp
->c_entries
> 0)
2022 if (dcp
->c_nlink
> 0)
2024 dcp
->c_flag
|= C_CHANGE
| C_UPDATE
;
2026 (void) VOP_UPDATE(dvp
, &tv
, &tv
, 0);
2027 HFS_KNOTE(dvp
, NOTE_WRITE
);
2030 /* All done with component name... */
2031 if ((options
& HFSRM_SAVE_NAME
) == 0 &&
2033 (cnp
->cn_flags
& (HASBUF
| SAVENAME
)) == (HASBUF
| SAVENAME
)) {
2034 char *tmp
= cnp
->cn_pnbuf
;
2035 cnp
->cn_pnbuf
= NULL
;
2036 cnp
->cn_flags
&= ~HASBUF
;
2037 FREE_ZONE(tmp
, cnp
->cn_pnlen
, M_NAMEI
);
2040 if (!(options
& HFSRM_SKIP_RESERVE
)) {
2041 cat_postflight(hfsmp
, &cookie
, p
);
2044 /* Commit the truncation to the catalog record */
2046 cp
->c_flag
|= C_CHANGE
| C_UPDATE
| C_FORCEUPDATE
;
2048 (void) VOP_UPDATE(vp
, &tv
, &tv
, 0);
2053 journal_end_transaction(hfsmp
->jnl
);
2056 hfs_global_shared_lock_release(hfsmp
);
2059 HFS_KNOTE(vp
, NOTE_DELETE
);
2061 HFS_KNOTE(rvp
, NOTE_DELETE
);
2068 VOP_UNLOCK(vp
, 0, p
);
2069 // XXXdbg - try to prevent the lost ubc_info panic
2070 if ((cp
->c_flag
& C_HARDLINK
) == 0 || cp
->c_nlink
== 0) {
2071 (void) ubc_uncache(vp
);
2075 if (!(options
& HFSRM_PARENT_LOCKED
)) {
2083 __private_extern__
void
2084 replace_desc(struct cnode
*cp
, struct cat_desc
*cdp
)
2086 /* First release allocated name buffer */
2087 if (cp
->c_desc
.cd_flags
& CD_HASBUF
&& cp
->c_desc
.cd_nameptr
!= 0) {
2088 char *name
= cp
->c_desc
.cd_nameptr
;
2090 cp
->c_desc
.cd_nameptr
= 0;
2091 cp
->c_desc
.cd_namelen
= 0;
2092 cp
->c_desc
.cd_flags
&= ~CD_HASBUF
;
2095 bcopy(cdp
, &cp
->c_desc
, sizeof(cp
->c_desc
));
2097 /* Cnode now owns the name buffer */
2098 cdp
->cd_nameptr
= 0;
2099 cdp
->cd_namelen
= 0;
2100 cdp
->cd_flags
&= ~CD_HASBUF
;
2106 #% rename fdvp U U U
2108 #% rename tdvp L U U
2115 * The VFS layer guarantees that source and destination will
2116 * either both be directories, or both not be directories.
2118 * When the target is a directory, hfs_rename must ensure
2121 * The rename system call is responsible for freeing
2122 * the pathname buffers (ie no need to call VOP_ABORTOP).
2127 struct vop_rename_args
/* {
2128 struct vnode *a_fdvp;
2129 struct vnode *a_fvp;
2130 struct componentname *a_fcnp;
2131 struct vnode *a_tdvp;
2132 struct vnode *a_tvp;
2133 struct componentname *a_tcnp;
2136 struct vnode
*tvp
= ap
->a_tvp
;
2137 struct vnode
*tdvp
= ap
->a_tdvp
;
2138 struct vnode
*fvp
= ap
->a_fvp
;
2139 struct vnode
*fdvp
= ap
->a_fdvp
;
2140 struct componentname
*tcnp
= ap
->a_tcnp
;
2141 struct componentname
*fcnp
= ap
->a_fcnp
;
2142 struct proc
*p
= fcnp
->cn_proc
;
2143 struct cnode
*fcp
= NULL
;
2144 struct cnode
*fdcp
= NULL
;
2145 struct cnode
*tdcp
= VTOC(tdvp
);
2146 struct cat_desc from_desc
;
2147 struct cat_desc to_desc
;
2148 struct cat_desc out_desc
;
2149 struct hfsmount
*hfsmp
= NULL
;
2151 cat_cookie_t cookie
= {0};
2152 int fdvp_locked
, fvp_locked
, tdvp_locked
, tvp_locked
;
2154 int started_tr
= 0, grabbed_lock
= 0;
2158 /* Establish our vnode lock state. */
2160 tvp_locked
= (tvp
!= 0);
2166 * Check for cross-device rename.
2168 if ((fvp
->v_mount
!= tdvp
->v_mount
) ||
2169 (tvp
&& (fvp
->v_mount
!= tvp
->v_mount
))) {
2175 * When fvp matches tvp they must be case variants
2178 * In some cases tvp will be locked in other cases
2179 * it be unlocked with no reference. Normalize the
2180 * state here (unlocked with a reference) so that
2181 * we can exit in a known state.
2184 if (VOP_ISLOCKED(tvp
) &&
2185 (VTOC(tvp
)->c_lock
.lk_lockholder
== p
->p_pid
) &&
2186 (VTOC(tvp
)->c_lock
.lk_lockthread
== current_thread())) {
2193 * If this a hard link with different parents
2194 * and its not a case variant then keep tvp
2195 * around for removal.
2197 if ((VTOC(fvp
)->c_flag
& C_HARDLINK
) &&
2199 (hfs_namecmp(fcnp
->cn_nameptr
, fcnp
->cn_namelen
,
2200 tcnp
->cn_nameptr
, tcnp
->cn_namelen
) != 0))) {
2207 * The following edge case is caught here:
2208 * (to cannot be a descendent of from)
2221 if (tdcp
->c_parentcnid
== VTOC(fvp
)->c_cnid
) {
2227 * The following two edge cases are caught here:
2228 * (note tvp is not empty)
2241 if (tvp
&& (tvp
->v_type
== VDIR
) && (VTOC(tvp
)->c_entries
!= 0)) {
2247 * The following edge case is caught here:
2248 * (the from child and parent are the same)
2261 * Make sure "from" vnode and its parent are changeable.
2263 if ((VTOC(fvp
)->c_flags
& (IMMUTABLE
| APPEND
)) ||
2264 (VTOC(fdvp
)->c_flags
& APPEND
)) {
2269 hfsmp
= VTOHFS(tdvp
);
2272 * If the destination parent directory is "sticky", then the
2273 * user must own the parent directory, or the destination of
2274 * the rename, otherwise the destination may not be changed
2275 * (except by root). This implements append-only directories.
2277 * Note that checks for immutable and write access are done
2278 * by the call to VOP_REMOVE.
2280 if (tvp
&& (tdcp
->c_mode
& S_ISTXT
) &&
2281 (tcnp
->cn_cred
->cr_uid
!= 0) &&
2282 (tcnp
->cn_cred
->cr_uid
!= tdcp
->c_uid
) &&
2283 (hfs_owner_rights(hfsmp
, VTOC(tvp
)->c_uid
, tcnp
->cn_cred
, p
, false)) ) {
2290 (void)hfs_getinoquota(VTOC(tvp
));
2294 * Lock all the vnodes before starting a journal transaction.
2298 * Simple case (same parent) - just lock child (fvp).
2301 if (error
= vn_lock(fvp
, LK_EXCLUSIVE
| LK_RETRY
, p
))
2308 * If fdvp is the parent of tdvp then we'll need to
2309 * drop tdvp's lock before acquiring a lock on fdvp.
2321 * If the parent directories are unrelated then we'll
2322 * need to aquire their vnode locks in vnode address
2323 * order. Otherwise we can race with another rename
2324 * call that involves the same vnodes except that to
2325 * and from are switched and potentially deadlock.
2326 * [ie rename("a/b", "c/d") vs rename("c/d", "a/b")]
2328 * If its not either of the two above cases then we
2329 * can safely lock fdvp and fvp.
2331 if ((VTOC(fdvp
)->c_cnid
== VTOC(tdvp
)->c_parentcnid
) ||
2332 ((VTOC(tdvp
)->c_cnid
!= VTOC(fdvp
)->c_parentcnid
) &&
2335 /* Drop locks on tvp and tdvp */
2337 VOP_UNLOCK(tvp
, 0, p
);
2340 VOP_UNLOCK(tdvp
, 0, p
);
2343 /* Aquire locks in correct order */
2344 if ((error
= vn_lock(fdvp
, LK_EXCLUSIVE
| LK_RETRY
, p
)))
2347 if ((error
= vn_lock(tdvp
, LK_EXCLUSIVE
| LK_RETRY
, p
)))
2352 * Now that the parents are locked only one thread
2353 * can continue. So the lock order of the children
2354 * doesn't really matter
2357 if ((error
= vn_lock(tvp
, LK_EXCLUSIVE
| LK_RETRY
, p
)))
2362 if ((error
= vn_lock(tvp
, LK_EXCLUSIVE
| LK_RETRY
, p
)))
2366 if ((error
= vn_lock(fvp
, LK_EXCLUSIVE
| LK_RETRY
, p
)))
2371 } else /* OK to lock fdvp and fvp */ {
2372 if ((error
= vn_lock(fdvp
, LK_EXCLUSIVE
| LK_RETRY
, p
)))
2375 if (error
= vn_lock(fvp
, LK_EXCLUSIVE
| LK_RETRY
, p
))
2388 * While fvp is still locked, purge it from the name cache and
2389 * grab it's c_cnid value. Note that the removal of tvp (below)
2390 * can drop fvp's lock when fvp == tvp.
2395 * When a file moves out of "Cleanup At Startup"
2396 * we can drop its NODUMP status.
2398 if ((fcp
->c_flags
& UF_NODUMP
) &&
2399 (fvp
->v_type
== VREG
) &&
2401 (fdcp
->c_desc
.cd_nameptr
!= NULL
) &&
2402 (strcmp(fdcp
->c_desc
.cd_nameptr
, CARBON_TEMP_DIR_NAME
) == 0)) {
2403 fcp
->c_flags
&= ~UF_NODUMP
;
2404 fcp
->c_flag
|= C_CHANGE
;
2406 (void) VOP_UPDATE(fvp
, &tv
, &tv
, 0);
2409 bzero(&from_desc
, sizeof(from_desc
));
2410 from_desc
.cd_nameptr
= fcnp
->cn_nameptr
;
2411 from_desc
.cd_namelen
= fcnp
->cn_namelen
;
2412 from_desc
.cd_parentcnid
= fdcp
->c_cnid
;
2413 from_desc
.cd_flags
= fcp
->c_desc
.cd_flags
& ~(CD_HASBUF
| CD_DECOMPOSED
);
2414 from_desc
.cd_cnid
= fcp
->c_cnid
;
2416 bzero(&to_desc
, sizeof(to_desc
));
2417 to_desc
.cd_nameptr
= tcnp
->cn_nameptr
;
2418 to_desc
.cd_namelen
= tcnp
->cn_namelen
;
2419 to_desc
.cd_parentcnid
= tdcp
->c_cnid
;
2420 to_desc
.cd_flags
= fcp
->c_desc
.cd_flags
& ~(CD_HASBUF
| CD_DECOMPOSED
);
2421 to_desc
.cd_cnid
= fcp
->c_cnid
;
2423 hfs_global_shared_lock_acquire(hfsmp
);
2426 if ((error
= journal_start_transaction(hfsmp
->jnl
)) != 0) {
2433 * Reserve some space in the Catalog file.
2435 if ((error
= cat_preflight(hfsmp
, CAT_RENAME
+ CAT_DELETE
, &cookie
, p
))) {
2440 * If the destination exists then it needs to be removed.
2447 * Note that hfs_removedir and hfs_removefile
2448 * will keep tdvp locked with a reference.
2449 * But tvp will lose its lock and reference.
2451 if (tvp
->v_type
== VDIR
)
2452 error
= hfs_removedir(tdvp
, tvp
, tcnp
, HFSRM_RENAMEOPTS
);
2454 error
= hfs_removefile(tdvp
, tvp
, tcnp
, HFSRM_RENAMEOPTS
);
2466 * All done with tvp and fvp
2469 /* Lock catalog b-tree */
2470 error
= hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_EXCLUSIVE
, p
);
2474 error
= cat_rename(hfsmp
, &from_desc
, &tdcp
->c_desc
, &to_desc
, &out_desc
);
2476 /* Unlock catalog b-tree */
2477 (void) hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_RELEASE
, p
);
2483 /* Update cnode's catalog descriptor */
2485 replace_desc(fcp
, &out_desc
);
2486 fcp
->c_parentcnid
= tdcp
->c_cnid
;
2490 hfs_volupdate(hfsmp
, fvp
->v_type
== VDIR
? VOL_RMDIR
: VOL_RMFILE
,
2491 (fdcp
->c_cnid
== kHFSRootFolderID
));
2492 hfs_volupdate(hfsmp
, fvp
->v_type
== VDIR
? VOL_MKDIR
: VOL_MKFILE
,
2493 (tdcp
->c_cnid
== kHFSRootFolderID
));
2495 /* Update both parent directories. */
2500 if (fdcp
->c_nlink
> 0)
2502 if (fdcp
->c_entries
> 0)
2504 fdcp
->c_flag
|= C_CHANGE
| C_UPDATE
;
2505 (void) VOP_UPDATE(fdvp
, &tv
, &tv
, 0);
2507 tdcp
->c_childhint
= out_desc
.cd_hint
; /* Cache directory's location */
2508 tdcp
->c_flag
|= C_CHANGE
| C_UPDATE
;
2509 (void) VOP_UPDATE(tdvp
, &tv
, &tv
, 0);
2513 cat_postflight(hfsmp
, &cookie
, p
);
2516 journal_end_transaction(hfsmp
->jnl
);
2519 hfs_global_shared_lock_release(hfsmp
);
2522 /* Note that if hfs_removedir or hfs_removefile was invoked above they will already have
2523 generated a NOTE_WRITE for tdvp and a NOTE_DELETE for tvp.
2526 HFS_KNOTE(fvp
, NOTE_RENAME
);
2527 HFS_KNOTE(fdvp
, NOTE_WRITE
);
2528 if (tdvp
!= fdvp
) HFS_KNOTE(tdvp
, NOTE_WRITE
);
2531 VOP_UNLOCK(fvp
, 0, p
);
2534 VOP_UNLOCK(fdvp
, 0, p
);
2537 VOP_UNLOCK(tdvp
, 0, p
);
2540 VOP_UNLOCK(tvp
, 0, p
);
2549 /* After tvp is removed the only acceptable error is EIO */
2550 if (error
&& tvp_deleted
)
2564 IN WILLRELE struct vnode *dvp;
2565 OUT struct vnode **vpp;
2566 IN struct componentname *cnp;
2567 IN struct vattr *vap;
2569 We are responsible for freeing the namei buffer,
2570 it is done in hfs_makenode()
2575 struct vop_mkdir_args
/* {
2576 struct vnode *a_dvp;
2577 struct vnode **a_vpp;
2578 struct componentname *a_cnp;
2579 struct vattr *a_vap;
2582 struct vattr
*vap
= ap
->a_vap
;
2584 return (hfs_makenode(MAKEIMODE(vap
->va_type
, vap
->va_mode
),
2585 ap
->a_dvp
, ap
->a_vpp
, ap
->a_cnp
));
2590 * symlink -- make a symbolic link
2591 #% symlink dvp L U U
2592 #% symlink vpp - U -
2594 # XXX - note that the return vnode has already been VRELE'ed
2595 # by the filesystem layer. To use it you must use vget,
2596 # possibly with a further namei.
2599 IN WILLRELE struct vnode *dvp;
2600 OUT WILLRELE struct vnode **vpp;
2601 IN struct componentname *cnp;
2602 IN struct vattr *vap;
2605 We are responsible for freeing the namei buffer,
2606 it is done in hfs_makenode().
2612 struct vop_symlink_args
/* {
2613 struct vnode *a_dvp;
2614 struct vnode **a_vpp;
2615 struct componentname *a_cnp;
2616 struct vattr *a_vap;
2620 register struct vnode
*vp
, **vpp
= ap
->a_vpp
;
2621 struct hfsmount
*hfsmp
;
2622 struct filefork
*fp
;
2624 struct buf
*bp
= NULL
;
2626 /* HFS standard disks don't support symbolic links */
2627 if (VTOVCB(ap
->a_dvp
)->vcbSigWord
!= kHFSPlusSigWord
) {
2628 VOP_ABORTOP(ap
->a_dvp
, ap
->a_cnp
);
2630 return (EOPNOTSUPP
);
2633 /* Check for empty target name */
2634 if (ap
->a_target
[0] == 0) {
2635 VOP_ABORTOP(ap
->a_dvp
, ap
->a_cnp
);
2641 hfsmp
= VTOHFS(ap
->a_dvp
);
2643 /* Create the vnode */
2644 if ((error
= hfs_makenode(S_IFLNK
| ap
->a_vap
->va_mode
,
2645 ap
->a_dvp
, vpp
, ap
->a_cnp
))) {
2650 len
= strlen(ap
->a_target
);
2654 (void)hfs_getinoquota(VTOC(vp
));
2658 hfs_global_shared_lock_acquire(hfsmp
);
2660 if ((error
= journal_start_transaction(hfsmp
->jnl
)) != 0) {
2661 hfs_global_shared_lock_release(hfsmp
);
2667 /* Allocate space for the link */
2668 error
= VOP_TRUNCATE(vp
, len
, IO_NOZEROFILL
,
2669 ap
->a_cnp
->cn_cred
, ap
->a_cnp
->cn_proc
);
2671 goto out
; /* XXX need to remove link */
2673 /* Write the link to disk */
2674 bp
= getblk(vp
, 0, roundup((int)fp
->ff_size
, VTOHFS(vp
)->hfs_phys_block_size
),
2677 journal_modify_block_start(hfsmp
->jnl
, bp
);
2679 bzero(bp
->b_data
, bp
->b_bufsize
);
2680 bcopy(ap
->a_target
, bp
->b_data
, len
);
2682 journal_modify_block_end(hfsmp
->jnl
, bp
);
2688 journal_end_transaction(hfsmp
->jnl
);
2690 hfs_global_shared_lock_release(hfsmp
);
2697 * Dummy dirents to simulate the "." and ".." entries of the directory
2698 * in a hfs filesystem. HFS doesn't provide these on disk. Note that
2699 * the size of these entries is the smallest needed to represent them
2700 * (only 12 byte each).
2702 static hfsdotentry rootdots
[2] = {
2705 sizeof(struct hfsdotentry
), /* d_reclen */
2706 DT_DIR
, /* d_type */
2712 sizeof(struct hfsdotentry
), /* d_reclen */
2713 DT_DIR
, /* d_type */
2720 * There is some confusion as to what the semantics of uio_offset are.
2721 * In ufs, it represents the actual byte offset within the directory
2722 * "file." HFS, however, just uses it as an entry counter - essentially
2723 * assuming that it has no meaning except to the hfs_readdir function.
2724 * This approach would be more efficient here, but some callers may
2725 * assume the uio_offset acts like a byte offset. NFS in fact
2726 * monkeys around with the offset field a lot between readdir calls.
2728 * The use of the resid uiop->uio_resid and uiop->uio_iov->iov_len
2729 * fields is a mess as well. The libc function readdir() returns
2730 * NULL (indicating the end of a directory) when either
2731 * the getdirentries() syscall (which calls this and returns
2732 * the size of the buffer passed in less the value of uiop->uio_resid)
2733 * returns 0, or a direct record with a d_reclen of zero.
2734 * nfs_server.c:rfs_readdir(), on the other hand, checks for the end
2735 * of the directory by testing uiop->uio_resid == 0. The solution
2736 * is to pad the size of the last struct direct in a given
2737 * block to fill the block if we are not at the end of the directory.
2742 * NOTE: We require a minimal buffer size of DIRBLKSIZ for two reasons. One, it is the same value
2743 * returned be stat() call as the block size. This is mentioned in the man page for getdirentries():
2744 * "Nbytes must be greater than or equal to the block size associated with the file,
2745 * see stat(2)". Might as well settle on the same size of ufs. Second, this makes sure there is enough
2746 * room for the . and .. entries that have to added manually.
2753 IN struct vnode *vp;
2754 INOUT struct uio *uio;
2755 IN struct ucred *cred;
2758 INOUT u_long **cookies;
2762 struct vop_readdir_args
/* {
2771 register struct uio
*uio
= ap
->a_uio
;
2772 struct cnode
*cp
= VTOC(ap
->a_vp
);
2773 struct hfsmount
*hfsmp
= VTOHFS(ap
->a_vp
);
2774 struct proc
*p
= current_proc();
2775 off_t off
= uio
->uio_offset
;
2778 void *user_start
= NULL
;
2782 u_long
*cookies
=NULL
;
2783 u_long
*cookiep
=NULL
;
2785 /* We assume it's all one big buffer... */
2786 if (uio
->uio_iovcnt
> 1 || uio
->uio_resid
< AVERAGE_HFSDIRENTRY_SIZE
)
2790 // We have to lock the user's buffer here so that we won't
2791 // fault on it after we've acquired a shared lock on the
2792 // catalog file. The issue is that you can get a 3-way
2793 // deadlock if someone else starts a transaction and then
2794 // tries to lock the catalog file but can't because we're
2795 // here and we can't service our page fault because VM is
2796 // blocked trying to start a transaction as a result of
2797 // trying to free up pages for our page fault. It's messy
2798 // but it does happen on dual-procesors that are paging
2799 // heavily (see radar 3082639 for more info). By locking
2800 // the buffer up-front we prevent ourselves from faulting
2801 // while holding the shared catalog file lock.
2803 // Fortunately this and hfs_search() are the only two places
2804 // currently (10/30/02) that can fault on user data with a
2805 // shared lock on the catalog file.
2807 if (hfsmp
->jnl
&& uio
->uio_segflg
== UIO_USERSPACE
) {
2808 user_start
= uio
->uio_iov
->iov_base
;
2809 user_len
= uio
->uio_iov
->iov_len
;
2811 if ((retval
= vslock(user_start
, user_len
)) != 0) {
2816 /* Create the entries for . and .. */
2817 if (uio
->uio_offset
< sizeof(rootdots
)) {
2821 rootdots
[0].d_fileno
= cp
->c_cnid
;
2822 rootdots
[1].d_fileno
= cp
->c_parentcnid
;
2824 if (uio
->uio_offset
== 0) {
2825 dep
= (caddr_t
) &rootdots
[0];
2826 dotsize
= 2* sizeof(struct hfsdotentry
);
2827 } else if (uio
->uio_offset
== sizeof(struct hfsdotentry
)) {
2828 dep
= (caddr_t
) &rootdots
[1];
2829 dotsize
= sizeof(struct hfsdotentry
);
2835 retval
= uiomove(dep
, dotsize
, uio
);
2840 if (ap
->a_ncookies
!= NULL
) {
2842 * These cookies are handles that allow NFS to restart
2843 * scanning through a directory. If a directory is large
2844 * enough, NFS will issue a successive readdir() with a
2845 * uio->uio_offset that is equal to one of these cookies.
2847 * The cookies that we generate are synthesized byte-offsets.
2848 * The offset is where the dirent the dirent would be if the
2849 * directory were an array of packed dirent structs. It is
2850 * synthetic because that's not how directories are stored in
2851 * HFS but other code expects that the cookie is a byte offset.
2853 * We have to pre-allocate the cookies because cat_getdirentries()
2854 * is the only one that can properly synthesize the offsets (since
2855 * it may have to skip over entries and only it knows the true
2856 * virtual offset of any particular directory entry). So we allocate
2857 * a cookie table here and pass it in to cat_getdirentries().
2859 * Note that the handling of "." and ".." is mostly done here but
2860 * cat_getdirentries() is aware of.
2862 * Only the NFS server uses cookies so fortunately this code is
2863 * not executed unless the NFS server is issuing the readdir
2866 * Also note that the NFS server is the one responsible for
2867 * free'ing the cookies even though we allocated them. Ick.
2869 * We allocate a reasonable number of entries for the size of
2870 * the buffer that we're going to fill in. cat_getdirentries()
2871 * is smart enough to not overflow if there's more room in the
2872 * buffer but not enough room in the cookie table.
2874 if (uio
->uio_segflg
!= UIO_SYSSPACE
)
2875 panic("hfs_readdir: unexpected uio from NFS server");
2877 ncookies
= uio
->uio_iov
->iov_len
/ (AVERAGE_HFSDIRENTRY_SIZE
/2);
2878 MALLOC(cookies
, u_long
*, ncookies
* sizeof(u_long
), M_TEMP
, M_WAITOK
);
2880 *ap
->a_ncookies
= ncookies
;
2881 *ap
->a_cookies
= cookies
;
2883 /* handle cookies for "." and ".." */
2886 cookies
[1] = sizeof(struct hfsdotentry
);
2887 } else if (off
== sizeof(struct hfsdotentry
)) {
2888 cookies
[0] = sizeof(struct hfsdotentry
);
2892 /* If there are no children then we're done */
2893 if (cp
->c_entries
== 0) {
2898 cookies
[1] = sizeof(struct hfsdotentry
);
2903 /* Lock catalog b-tree */
2904 retval
= hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_SHARED
, p
);
2905 if (retval
) goto Exit
;
2907 retval
= cat_getdirentries(hfsmp
, &cp
->c_desc
, cp
->c_entries
, uio
, &eofflag
, cookies
, ncookies
);
2909 /* Unlock catalog b-tree */
2910 (void) hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_RELEASE
, p
);
2912 if (retval
!= E_NONE
) {
2916 /* were we already past eof ? */
2917 if (uio
->uio_offset
== off
) {
2922 cp
->c_flag
|= C_ACCESS
;
2925 if (hfsmp
->jnl
&& user_start
) {
2926 vsunlock(user_start
, user_len
, TRUE
);
2930 *ap
->a_eofflag
= eofflag
;
2937 * Return target name of a symbolic link
2938 #% readlink vp L L L
2941 IN struct vnode *vp;
2942 INOUT struct uio *uio;
2943 IN struct ucred *cred;
2948 struct vop_readlink_args
/* {
2951 struct ucred *a_cred;
2955 struct vnode
*vp
= ap
->a_vp
;
2957 struct filefork
*fp
;
2959 if (vp
->v_type
!= VLNK
)
2965 /* Zero length sym links are not allowed */
2966 if (fp
->ff_size
== 0 || fp
->ff_size
> MAXPATHLEN
) {
2967 VTOVCB(vp
)->vcbFlags
|= kHFS_DamagedVolume
;
2971 /* Cache the path so we don't waste buffer cache resources */
2972 if (fp
->ff_symlinkptr
== NULL
) {
2973 struct buf
*bp
= NULL
;
2975 MALLOC(fp
->ff_symlinkptr
, char *, fp
->ff_size
, M_TEMP
, M_WAITOK
);
2976 retval
= meta_bread(vp
, 0,
2977 roundup((int)fp
->ff_size
,
2978 VTOHFS(vp
)->hfs_phys_block_size
),
2983 if (fp
->ff_symlinkptr
) {
2984 FREE(fp
->ff_symlinkptr
, M_TEMP
);
2985 fp
->ff_symlinkptr
= NULL
;
2989 bcopy(bp
->b_data
, fp
->ff_symlinkptr
, (size_t)fp
->ff_size
);
2991 if (VTOHFS(vp
)->jnl
&& (bp
->b_flags
& B_LOCKED
) == 0) {
2992 bp
->b_flags
|= B_INVAL
; /* data no longer needed */
2997 retval
= uiomove((caddr_t
)fp
->ff_symlinkptr
, (int)fp
->ff_size
, ap
->a_uio
);
3000 * Keep track blocks read
3002 if ((VTOHFS(vp
)->hfc_stage
== HFC_RECORDING
) && (retval
== 0)) {
3005 * If this file hasn't been seen since the start of
3006 * the current sampling period then start over.
3008 if (cp
->c_atime
< VTOHFS(vp
)->hfc_timebase
)
3009 VTOF(vp
)->ff_bytesread
= fp
->ff_size
;
3011 VTOF(vp
)->ff_bytesread
+= fp
->ff_size
;
3013 // if (VTOF(vp)->ff_bytesread > fp->ff_size)
3014 // cp->c_flag |= C_ACCESS;
3021 * Lock an cnode. If its already locked, set the WANT bit and sleep.
3025 IN struct vnode *vp;
3032 struct vop_lock_args
/* {
3038 struct vnode
*vp
= ap
->a_vp
;
3039 struct cnode
*cp
= VTOC(vp
);
3041 return (lockmgr(&cp
->c_lock
, ap
->a_flags
, &vp
->v_interlock
, ap
->a_p
));
3049 IN struct vnode *vp;
3056 struct vop_unlock_args
/* {
3062 struct vnode
*vp
= ap
->a_vp
;
3063 struct cnode
*cp
= VTOC(vp
);
3065 if (!lockstatus(&cp
->c_lock
)) {
3066 printf("hfs_unlock: vnode %s wasn't locked!\n",
3067 cp
->c_desc
.cd_nameptr
? cp
->c_desc
.cd_nameptr
: "");
3070 return (lockmgr(&cp
->c_lock
, ap
->a_flags
| LK_RELEASE
,
3071 &vp
->v_interlock
, ap
->a_p
));
3076 * Print out the contents of a cnode.
3080 IN struct vnode *vp;
3084 struct vop_print_args
/* {
3088 struct vnode
* vp
= ap
->a_vp
;
3089 struct cnode
*cp
= VTOC(vp
);
3091 printf("tag VT_HFS, cnid %d, on dev %d, %d", cp
->c_cnid
,
3092 major(cp
->c_dev
), minor(cp
->c_dev
));
3094 if (vp
->v_type
== VFIFO
)
3097 lockmgr_printinfo(&cp
->c_lock
);
3104 * Check for a locked cnode.
3105 #% islocked vp = = =
3108 IN struct vnode *vp;
3113 struct vop_islocked_args
/* {
3117 return (lockstatus(&VTOC(ap
->a_vp
)->c_lock
));
3122 #% pathconf vp L L L
3125 IN struct vnode *vp;
3127 OUT register_t *retval;
3132 struct vop_pathconf_args
/* {
3140 switch (ap
->a_name
) {
3142 if (VTOVCB(ap
->a_vp
)->vcbSigWord
== kHFSPlusSigWord
)
3143 *ap
->a_retval
= HFS_LINK_MAX
;
3148 *ap
->a_retval
= kHFSPlusMaxFileNameBytes
; /* max # of characters x max utf8 representation */
3151 *ap
->a_retval
= PATH_MAX
; /* 1024 */
3154 *ap
->a_retval
= PIPE_BUF
;
3156 case _PC_CHOWN_RESTRICTED
:
3162 case _PC_NAME_CHARS_MAX
:
3163 *ap
->a_retval
= kHFSPlusMaxFileNameChars
;
3165 case _PC_CASE_SENSITIVE
:
3166 if (VTOHFS(ap
->a_vp
)->hfs_flags
& HFS_CASE_SENSITIVE
)
3171 case _PC_CASE_PRESERVING
:
3183 * Advisory record locking support
3187 IN struct vnode *vp;
3190 IN struct flock *fl;
3196 struct vop_advlock_args
/* {
3204 struct vnode
*vp
= ap
->a_vp
;
3205 struct flock
*fl
= ap
->a_fl
;
3206 struct hfslockf
*lock
;
3207 struct filefork
*fork
;
3211 /* Only regular files can have locks */
3212 if (vp
->v_type
!= VREG
)
3215 fork
= VTOF(ap
->a_vp
);
3217 * Avoid the common case of unlocking when cnode has no locks.
3219 if (fork
->ff_lockf
== (struct hfslockf
*)0) {
3220 if (ap
->a_op
!= F_SETLK
) {
3221 fl
->l_type
= F_UNLCK
;
3226 * Convert the flock structure into a start and end.
3229 switch (fl
->l_whence
) {
3233 * Caller is responsible for adding any necessary offset
3234 * when SEEK_CUR is used.
3236 start
= fl
->l_start
;
3239 start
= fork
->ff_size
+ fl
->l_start
;
3247 else if (fl
->l_len
> 0)
3248 end
= start
+ fl
->l_len
- 1;
3249 else { /* l_len is negative */
3257 * Create the hfslockf structure
3259 MALLOC(lock
, struct hfslockf
*, sizeof *lock
, M_LOCKF
, M_WAITOK
);
3260 lock
->lf_start
= start
;
3262 lock
->lf_id
= ap
->a_id
;
3263 lock
->lf_fork
= fork
;
3264 lock
->lf_type
= fl
->l_type
;
3265 lock
->lf_next
= (struct hfslockf
*)0;
3266 TAILQ_INIT(&lock
->lf_blkhd
);
3267 lock
->lf_flags
= ap
->a_flags
;
3269 * Do the requested operation.
3273 retval
= hfs_setlock(lock
);
3276 retval
= hfs_clearlock(lock
);
3277 FREE(lock
, M_LOCKF
);
3280 retval
= hfs_getlock(lock
, fl
);
3281 FREE(lock
, M_LOCKF
);
3285 _FREE(lock
, M_LOCKF
);
3295 * Update the access, modified, and node change times as specified
3296 * by the C_ACCESS, C_UPDATE, and C_CHANGE flags respectively. The
3297 * C_MODIFIED flag is used to specify that the node needs to be
3298 * updated but that the times have already been set. The access and
3299 * modified times are input parameters but the node change time is
3300 * always taken from the current time. If waitfor is set, then wait
3301 * for the disk write of the node to complete.
3305 IN struct vnode *vp;
3306 IN struct timeval *access;
3307 IN struct timeval *modify;
3312 struct vop_update_args
/* {
3314 struct timeval *a_access;
3315 struct timeval *a_modify;
3319 struct vnode
*vp
= ap
->a_vp
;
3320 struct cnode
*cp
= VTOC(ap
->a_vp
);
3322 struct cat_fork
*dataforkp
= NULL
;
3323 struct cat_fork
*rsrcforkp
= NULL
;
3324 struct cat_fork datafork
;
3326 struct hfsmount
*hfsmp
;
3331 /* XXX do we really want to clear the sytem cnode flags here???? */
3332 if (((vp
->v_flag
& VSYSTEM
) && (cp
->c_cnid
< kHFSFirstUserCatalogNodeID
))||
3333 (VTOHFS(vp
)->hfs_flags
& HFS_READ_ONLY
) ||
3334 (cp
->c_mode
== 0)) {
3335 cp
->c_flag
&= ~(C_ACCESS
| C_CHANGE
| C_MODIFIED
| C_UPDATE
);
3339 updateflag
= cp
->c_flag
& (C_ACCESS
| C_CHANGE
| C_MODIFIED
| C_UPDATE
| C_FORCEUPDATE
);
3341 /* Nothing to update. */
3342 if (updateflag
== 0) {
3345 /* HFS standard doesn't have access times. */
3346 if ((updateflag
== C_ACCESS
) && (VTOVCB(vp
)->vcbSigWord
== kHFSSigWord
)) {
3349 if (updateflag
& C_ACCESS
) {
3351 * When the access time is the only thing changing
3352 * then make sure its sufficiently newer before
3353 * committing it to disk.
3355 if ((updateflag
== C_ACCESS
) &&
3356 (ap
->a_access
->tv_sec
< (cp
->c_atime
+ ATIME_ONDISK_ACCURACY
))) {
3359 cp
->c_atime
= ap
->a_access
->tv_sec
;
3361 if (updateflag
& C_UPDATE
) {
3362 cp
->c_mtime
= ap
->a_modify
->tv_sec
;
3363 cp
->c_mtime_nsec
= ap
->a_modify
->tv_usec
* 1000;
3365 if (updateflag
& C_CHANGE
) {
3366 cp
->c_ctime
= time
.tv_sec
;
3368 * HFS dates that WE set must be adjusted for DST
3370 if ((VTOVCB(vp
)->vcbSigWord
== kHFSSigWord
) && gTimeZone
.tz_dsttime
) {
3371 cp
->c_ctime
+= 3600;
3372 cp
->c_mtime
= cp
->c_ctime
;
3377 dataforkp
= &cp
->c_datafork
->ff_data
;
3379 rsrcforkp
= &cp
->c_rsrcfork
->ff_data
;
3384 * For delayed allocations updates are
3385 * postponed until an fsync or the file
3386 * gets written to disk.
3388 * Deleted files can defer meta data updates until inactive.
3390 * If we're ever called with the C_FORCEUPDATE flag though
3391 * we have to do the update.
3393 if (ISSET(cp
->c_flag
, C_FORCEUPDATE
) == 0 &&
3394 (ISSET(cp
->c_flag
, C_DELETED
) ||
3395 (dataforkp
&& cp
->c_datafork
->ff_unallocblocks
) ||
3396 (rsrcforkp
&& cp
->c_rsrcfork
->ff_unallocblocks
))) {
3397 if (updateflag
& (C_CHANGE
| C_UPDATE
))
3398 hfs_volupdate(hfsmp
, VOL_UPDATE
, 0);
3399 cp
->c_flag
&= ~(C_ACCESS
| C_CHANGE
| C_UPDATE
);
3400 cp
->c_flag
|= C_MODIFIED
;
3402 HFS_KNOTE(vp
, NOTE_ATTRIB
);
3409 hfs_global_shared_lock_acquire(hfsmp
);
3411 if ((error
= journal_start_transaction(hfsmp
->jnl
)) != 0) {
3412 hfs_global_shared_lock_release(hfsmp
);
3419 * For files with invalid ranges (holes) the on-disk
3420 * field representing the size of the file (cf_size)
3421 * must be no larger than the start of the first hole.
3423 if (dataforkp
&& !CIRCLEQ_EMPTY(&cp
->c_datafork
->ff_invalidranges
)) {
3424 bcopy(dataforkp
, &datafork
, sizeof(datafork
));
3425 datafork
.cf_size
= CIRCLEQ_FIRST(&cp
->c_datafork
->ff_invalidranges
)->rl_start
;
3426 dataforkp
= &datafork
;
3427 } else if (dataforkp
&& (cp
->c_datafork
->ff_unallocblocks
!= 0)) {
3428 // always make sure the block count and the size
3429 // of the file match the number of blocks actually
3430 // allocated to the file on disk
3431 bcopy(dataforkp
, &datafork
, sizeof(datafork
));
3432 // make sure that we don't assign a negative block count
3433 if (cp
->c_datafork
->ff_blocks
< cp
->c_datafork
->ff_unallocblocks
) {
3434 panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
3435 cp
->c_datafork
->ff_blocks
, cp
->c_datafork
->ff_unallocblocks
);
3437 datafork
.cf_blocks
= (cp
->c_datafork
->ff_blocks
- cp
->c_datafork
->ff_unallocblocks
);
3438 datafork
.cf_size
= datafork
.cf_blocks
* HFSTOVCB(hfsmp
)->blockSize
;
3439 dataforkp
= &datafork
;
3443 * Lock the Catalog b-tree file.
3444 * A shared lock is sufficient since an update doesn't change
3445 * the tree and the lock on vp protects the cnode.
3447 error
= hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_SHARED
, p
);
3450 journal_end_transaction(hfsmp
->jnl
);
3452 hfs_global_shared_lock_release(hfsmp
);
3456 /* XXX - waitfor is not enforced */
3457 error
= cat_update(hfsmp
, &cp
->c_desc
, &cp
->c_attr
, dataforkp
, rsrcforkp
);
3459 /* Unlock the Catalog b-tree file. */
3460 (void) hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_RELEASE
, p
);
3462 if (updateflag
& (C_CHANGE
| C_UPDATE
| C_FORCEUPDATE
))
3463 hfs_volupdate(hfsmp
, VOL_UPDATE
, 0);
3465 /* After the updates are finished, clear the flags */
3466 cp
->c_flag
&= ~(C_ACCESS
| C_CHANGE
| C_MODIFIED
| C_UPDATE
| C_FORCEUPDATE
);
3470 journal_end_transaction(hfsmp
->jnl
);
3472 hfs_global_shared_lock_release(hfsmp
);
3474 HFS_KNOTE(vp
, NOTE_ATTRIB
);
3480 * Allocate a new node
3482 * Upon leaving, namei buffer must be freed.
3486 hfs_makenode(mode
, dvp
, vpp
, cnp
)
3490 struct componentname
*cnp
;
3495 struct hfsmount
*hfsmp
;
3498 struct cat_desc in_desc
, out_desc
;
3499 struct cat_attr attr
;
3500 cat_cookie_t cookie
= {0};
3501 int error
, started_tr
= 0, grabbed_lock
= 0;
3502 enum vtype vnodetype
;
3506 hfsmp
= VTOHFS(dvp
);
3509 bzero(&out_desc
, sizeof(out_desc
));
3511 if ((mode
& S_IFMT
) == 0)
3513 vnodetype
= IFTOVT(mode
);
3515 /* Check if unmount in progress */
3516 if (VTOVFS(dvp
)->mnt_kern_flag
& MNTK_UNMOUNT
) {
3520 /* Check if were out of usable disk space. */
3521 if ((suser(cnp
->cn_cred
, NULL
) != 0) && (hfs_freeblks(hfsmp
, 1) <= 0)) {
3526 /* Setup the default attributes */
3527 bzero(&attr
, sizeof(attr
));
3528 attr
.ca_mode
= mode
;
3529 attr
.ca_nlink
= vnodetype
== VDIR
? 2 : 1;
3530 attr
.ca_mtime
= time
.tv_sec
;
3531 attr
.ca_mtime_nsec
= time
.tv_usec
* 1000;
3532 if ((VTOVCB(dvp
)->vcbSigWord
== kHFSSigWord
) && gTimeZone
.tz_dsttime
) {
3533 attr
.ca_mtime
+= 3600; /* Same as what hfs_update does */
3535 attr
.ca_atime
= attr
.ca_ctime
= attr
.ca_itime
= attr
.ca_mtime
;
3536 if (VTOVFS(dvp
)->mnt_flag
& MNT_UNKNOWNPERMISSIONS
) {
3537 attr
.ca_uid
= hfsmp
->hfs_uid
;
3538 attr
.ca_gid
= hfsmp
->hfs_gid
;
3540 if (vnodetype
== VLNK
)
3541 attr
.ca_uid
= dcp
->c_uid
;
3543 attr
.ca_uid
= cnp
->cn_cred
->cr_uid
;
3544 attr
.ca_gid
= dcp
->c_gid
;
3547 * Don't tag as a special file (BLK or CHR) until *after*
3548 * hfs_getnewvnode is called. This insures that any
3549 * alias checking is defered until hfs_mknod completes.
3551 if (vnodetype
== VBLK
|| vnodetype
== VCHR
)
3552 attr
.ca_mode
= (attr
.ca_mode
& ~S_IFMT
) | S_IFREG
;
3554 /* Tag symlinks with a type and creator. */
3555 if (vnodetype
== VLNK
) {
3556 struct FndrFileInfo
*fip
;
3558 fip
= (struct FndrFileInfo
*)&attr
.ca_finderinfo
;
3559 fip
->fdType
= SWAP_BE32(kSymLinkFileType
);
3560 fip
->fdCreator
= SWAP_BE32(kSymLinkCreator
);
3562 if ((attr
.ca_mode
& S_ISGID
) &&
3563 !groupmember(dcp
->c_gid
, cnp
->cn_cred
) &&
3564 suser(cnp
->cn_cred
, NULL
)) {
3565 attr
.ca_mode
&= ~S_ISGID
;
3567 if (cnp
->cn_flags
& ISWHITEOUT
)
3568 attr
.ca_flags
|= UF_OPAQUE
;
3570 /* Setup the descriptor */
3571 bzero(&in_desc
, sizeof(in_desc
));
3572 in_desc
.cd_nameptr
= cnp
->cn_nameptr
;
3573 in_desc
.cd_namelen
= cnp
->cn_namelen
;
3574 in_desc
.cd_parentcnid
= dcp
->c_cnid
;
3575 in_desc
.cd_flags
= S_ISDIR(mode
) ? CD_ISDIR
: 0;
3578 hfs_global_shared_lock_acquire(hfsmp
);
3581 if ((error
= journal_start_transaction(hfsmp
->jnl
)) != 0) {
3588 * Reserve some space in the Catalog file.
3590 * (we also add CAT_DELETE since our getnewvnode
3591 * request can cause an hfs_inactive call to
3592 * delete an unlinked file)
3594 if ((error
= cat_preflight(hfsmp
, CAT_CREATE
| CAT_DELETE
, &cookie
, p
))) {
3598 /* Lock catalog b-tree */
3599 error
= hfs_metafilelocking(VTOHFS(dvp
), kHFSCatalogFileID
, LK_EXCLUSIVE
, p
);
3603 error
= cat_create(hfsmp
, &in_desc
, &attr
, &out_desc
);
3605 /* Unlock catalog b-tree */
3606 (void) hfs_metafilelocking(VTOHFS(dvp
), kHFSCatalogFileID
, LK_RELEASE
, p
);
3610 /* Update the parent directory */
3611 dcp
->c_childhint
= out_desc
.cd_hint
; /* Cache directory's location */
3614 dcp
->c_flag
|= C_CHANGE
| C_UPDATE
;
3616 (void) VOP_UPDATE(dvp
, &tv
, &tv
, 0);
3617 if (vnodetype
== VDIR
) {
3618 HFS_KNOTE(dvp
, NOTE_WRITE
| NOTE_LINK
);
3620 HFS_KNOTE(dvp
, NOTE_WRITE
);
3623 hfs_volupdate(hfsmp
, vnodetype
== VDIR
? VOL_MKDIR
: VOL_MKFILE
,
3624 (dcp
->c_cnid
== kHFSRootFolderID
));
3627 // have to end the transaction here before we call hfs_getnewvnode()
3628 // because that can cause us to try and reclaim a vnode on a different
3629 // file system which could cause us to start a transaction which can
3630 // deadlock with someone on that other file system (since we could be
3631 // holding two transaction locks as well as various vnodes and we did
3632 // not obtain the locks on them in the proper order).
3634 // NOTE: this means that if the quota check fails or we have to update
3635 // the change time on a block-special device that those changes
3636 // will happen as part of independent transactions.
3639 journal_end_transaction(hfsmp
->jnl
);
3643 hfs_global_shared_lock_release(hfsmp
);
3647 /* Create a vnode for the object just created: */
3648 error
= hfs_getnewvnode(hfsmp
, NULL
, &out_desc
, 0, &attr
, NULL
, &tvp
);
3653 cache_enter(dvp
, tvp
, cnp
);
3658 * We call hfs_chkiq with FORCE flag so that if we
3659 * fall through to the rmdir we actually have
3660 * accounted for the inode
3662 if ((error
= hfs_getinoquota(cp
)) ||
3663 (error
= hfs_chkiq(cp
, 1, cnp
->cn_cred
, FORCE
))) {
3664 if (tvp
->v_type
== VDIR
)
3665 VOP_RMDIR(dvp
,tvp
, cnp
);
3667 VOP_REMOVE(dvp
,tvp
, cnp
);
3669 // because VOP_RMDIR and VOP_REMOVE already
3670 // have done the vput()
3677 * restore vtype and mode for VBLK and VCHR
3679 if (vnodetype
== VBLK
|| vnodetype
== VCHR
) {
3684 tvp
->v_type
= IFTOVT(mode
);
3685 cp
->c_flag
|= C_CHANGE
;
3687 if ((error
= VOP_UPDATE(tvp
, &tv
, &tv
, 1))) {
3695 cat_releasedesc(&out_desc
);
3697 cat_postflight(hfsmp
, &cookie
, p
);
3699 if ((cnp
->cn_flags
& (HASBUF
| SAVESTART
)) == HASBUF
) {
3700 char *tmp
= cnp
->cn_pnbuf
;
3701 cnp
->cn_pnbuf
= NULL
;
3702 cnp
->cn_flags
&= ~HASBUF
;
3703 FREE_ZONE(tmp
, cnp
->cn_pnlen
, M_NAMEI
);
3706 * Check if a file is located in the "Cleanup At Startup"
3707 * directory. If it is then tag it as NODUMP so that we
3708 * can be lazy about zero filling data holes.
3710 if ((error
== 0) && dvp
&& (vnodetype
== VREG
) &&
3711 (dcp
->c_desc
.cd_nameptr
!= NULL
) &&
3712 (strcmp(dcp
->c_desc
.cd_nameptr
, CARBON_TEMP_DIR_NAME
) == 0)) {
3716 parid
= dcp
->c_parentcnid
;
3721 * The parent of "Cleanup At Startup" should
3722 * have the ASCII name of the userid.
3724 if (VFS_VGET(HFSTOVFS(hfsmp
), &parid
, &ddvp
) == 0) {
3725 if (VTOC(ddvp
)->c_desc
.cd_nameptr
&&
3726 (cp
->c_uid
== strtoul(VTOC(ddvp
)->c_desc
.cd_nameptr
, 0, 0))) {
3727 cp
->c_flags
|= UF_NODUMP
;
3728 cp
->c_flag
|= C_CHANGE
;
3737 journal_end_transaction(hfsmp
->jnl
);
3741 hfs_global_shared_lock_release(hfsmp
);
3750 hfs_vgetrsrc(struct hfsmount
*hfsmp
, struct vnode
*vp
, struct vnode
**rvpp
, struct proc
*p
)
3753 struct cnode
*cp
= VTOC(vp
);
3756 if ((rvp
= cp
->c_rsrc_vp
)) {
3757 /* Use exising vnode */
3758 error
= vget(rvp
, 0, p
);
3760 char * name
= VTOC(vp
)->c_desc
.cd_nameptr
;
3763 printf("hfs_vgetrsrc: couldn't get"
3764 " resource fork for %s\n", name
);
3768 struct cat_fork rsrcfork
;
3770 /* Lock catalog b-tree */
3771 error
= hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_SHARED
, p
);
3775 /* Get resource fork data */
3776 error
= cat_lookup(hfsmp
, &cp
->c_desc
, 1, (struct cat_desc
*)0,
3777 (struct cat_attr
*)0, &rsrcfork
);
3779 /* Unlock the Catalog */
3780 (void) hfs_metafilelocking(hfsmp
, kHFSCatalogFileID
, LK_RELEASE
, p
);
3784 error
= hfs_getnewvnode(hfsmp
, cp
, &cp
->c_desc
, 1, &cp
->c_attr
,
3796 filt_hfsdetach(struct knote
*kn
)
3800 struct proc
*p
= current_proc();
3802 vp
= (struct vnode
*)kn
->kn_hook
;
3803 if (1) { /* ! KNDETACH_VNLOCKED */
3804 result
= vn_lock(vp
, LK_EXCLUSIVE
| LK_RETRY
, p
);
3808 result
= KNOTE_DETACH(&VTOC(vp
)->c_knotes
, kn
);
3810 if (1) { /* ! KNDETACH_VNLOCKED */
3811 VOP_UNLOCK(vp
, 0, p
);
3817 filt_hfsread(struct knote
*kn
, long hint
)
3819 struct vnode
*vp
= (struct vnode
*)kn
->kn_fp
->f_data
;
3821 if (hint
== NOTE_REVOKE
) {
3823 * filesystem is gone, so set the EOF flag and schedule
3824 * the knote for deletion.
3826 kn
->kn_flags
|= (EV_EOF
| EV_ONESHOT
);
3830 kn
->kn_data
= VTOF(vp
)->ff_size
- kn
->kn_fp
->f_offset
;
3831 return (kn
->kn_data
!= 0);
3836 filt_hfswrite(struct knote
*kn
, long hint
)
3838 if (hint
== NOTE_REVOKE
) {
3840 * filesystem is gone, so set the EOF flag and schedule
3841 * the knote for deletion.
3843 kn
->kn_flags
|= (EV_EOF
| EV_ONESHOT
);
3851 filt_hfsvnode(struct knote
*kn
, long hint
)
3854 if (kn
->kn_sfflags
& hint
)
3855 kn
->kn_fflags
|= hint
;
3856 if (hint
== NOTE_REVOKE
) {
3857 kn
->kn_flags
|= EV_EOF
;
3860 return (kn
->kn_fflags
!= 0);
3863 static struct filterops hfsread_filtops
=
3864 { 1, NULL
, filt_hfsdetach
, filt_hfsread
};
3865 static struct filterops hfswrite_filtops
=
3866 { 1, NULL
, filt_hfsdetach
, filt_hfswrite
};
3867 static struct filterops hfsvnode_filtops
=
3868 { 1, NULL
, filt_hfsdetach
, filt_hfsvnode
};
3872 #% kqfilt_add vp L L L
3875 IN struct vnode *vp;
3876 IN struct knote *kn;
3881 struct vop_kqfilt_add_args
/* {
3887 struct vnode
*vp
= ap
->a_vp
;
3888 struct knote
*kn
= ap
->a_kn
;
3890 switch (kn
->kn_filter
) {
3892 if (vp
->v_type
== VREG
) {
3893 kn
->kn_fop
= &hfsread_filtops
;
3899 if (vp
->v_type
== VREG
) {
3900 kn
->kn_fop
= &hfswrite_filtops
;
3906 kn
->kn_fop
= &hfsvnode_filtops
;
3912 kn
->kn_hook
= (caddr_t
)vp
;
3914 /* simple_lock(&vp->v_pollinfo.vpi_lock); */
3915 KNOTE_ATTACH(&VTOC(vp
)->c_knotes
, kn
);
3916 /* simple_unlock(&vp->v_pollinfo.vpi_lock); */
3923 #% kqfilt_remove vp L L L
3926 IN struct vnode *vp;
3931 hfs_kqfilt_remove(ap
)
3932 struct vop_kqfilt_remove_args
/* {
3938 struct vnode
*vp
= ap
->a_vp
;
3939 uintptr_t ident
= ap
->a_ident
;
3942 result
= ENOTSUP
; /* XXX */
3948 * Wrapper for special device reads
3952 struct vop_read_args
/* {
3956 struct ucred *a_cred;
3962 VTOC(ap
->a_vp
)->c_flag
|= C_ACCESS
;
3963 return (VOCALL (spec_vnodeop_p
, VOFFSET(vop_read
), ap
));
3967 * Wrapper for special device writes
3971 struct vop_write_args
/* {
3975 struct ucred *a_cred;
3979 * Set update and change flags.
3981 VTOC(ap
->a_vp
)->c_flag
|= C_CHANGE
| C_UPDATE
;
3982 return (VOCALL (spec_vnodeop_p
, VOFFSET(vop_write
), ap
));
3986 * Wrapper for special device close
3988 * Update the times on the cnode then do device close.
3992 struct vop_close_args
/* {
3995 struct ucred *a_cred;
3999 struct vnode
*vp
= ap
->a_vp
;
4000 struct cnode
*cp
= VTOC(vp
);
4002 simple_lock(&vp
->v_interlock
);
4003 if (ap
->a_vp
->v_usecount
> 1)
4004 CTIMES(cp
, &time
, &time
);
4005 simple_unlock(&vp
->v_interlock
);
4006 return (VOCALL (spec_vnodeop_p
, VOFFSET(vop_close
), ap
));
4011 * Wrapper for fifo reads
4015 struct vop_read_args
/* {
4019 struct ucred *a_cred;
4022 extern int (**fifo_vnodeop_p
)(void *);
4027 VTOC(ap
->a_vp
)->c_flag
|= C_ACCESS
;
4028 return (VOCALL (fifo_vnodeop_p
, VOFFSET(vop_read
), ap
));
4032 * Wrapper for fifo writes
4036 struct vop_write_args
/* {
4040 struct ucred *a_cred;
4043 extern int (**fifo_vnodeop_p
)(void *);
4046 * Set update and change flags.
4048 VTOC(ap
->a_vp
)->c_flag
|= C_CHANGE
| C_UPDATE
;
4049 return (VOCALL (fifo_vnodeop_p
, VOFFSET(vop_write
), ap
));
4053 * Wrapper for fifo close
4055 * Update the times on the cnode then do device close.
4059 struct vop_close_args
/* {
4062 struct ucred *a_cred;
4066 extern int (**fifo_vnodeop_p
)(void *);
4067 struct vnode
*vp
= ap
->a_vp
;
4068 struct cnode
*cp
= VTOC(vp
);
4070 simple_lock(&vp
->v_interlock
);
4071 if (ap
->a_vp
->v_usecount
> 1)
4072 CTIMES(cp
, &time
, &time
);
4073 simple_unlock(&vp
->v_interlock
);
4074 return (VOCALL (fifo_vnodeop_p
, VOFFSET(vop_close
), ap
));
4078 * kqfilt_add wrapper for fifos.
4080 * Fall through to hfs kqfilt_add routines if needed
4083 hfsfifo_kqfilt_add(ap
)
4084 struct vop_kqfilt_add_args
*ap
;
4086 extern int (**fifo_vnodeop_p
)(void *);
4089 error
= VOCALL(fifo_vnodeop_p
, VOFFSET(vop_kqfilt_add
), ap
);
4091 error
= hfs_kqfilt_add(ap
);
4096 * kqfilt_remove wrapper for fifos.
4098 * Fall through to hfs kqfilt_remove routines if needed
4101 hfsfifo_kqfilt_remove(ap
)
4102 struct vop_kqfilt_remove_args
*ap
;
4104 extern int (**fifo_vnodeop_p
)(void *);
4107 error
= VOCALL(fifo_vnodeop_p
, VOFFSET(vop_kqfilt_remove
), ap
);
4109 error
= hfs_kqfilt_remove(ap
);
/*****************************************************************************
*
*  VOP Tables
*
*****************************************************************************/
int hfs_cache_lookup();	/* in hfs_lookup.c */
int hfs_lookup();	/* in hfs_lookup.c */
int hfs_read();		/* in hfs_readwrite.c */
int hfs_write();	/* in hfs_readwrite.c */
int hfs_ioctl();	/* in hfs_readwrite.c */
int hfs_select();	/* in hfs_readwrite.c */
int hfs_bmap();		/* in hfs_readwrite.c */
int hfs_strategy();	/* in hfs_readwrite.c */
int hfs_truncate();	/* in hfs_readwrite.c */
int hfs_allocate();	/* in hfs_readwrite.c */
int hfs_pagein();	/* in hfs_readwrite.c */
int hfs_pageout();	/* in hfs_readwrite.c */
int hfs_search();	/* in hfs_search.c */
int hfs_bwrite();	/* in hfs_readwrite.c */
int hfs_link();		/* in hfs_link.c */
int hfs_blktooff();	/* in hfs_readwrite.c */
int hfs_offtoblk();	/* in hfs_readwrite.c */
int hfs_cmap();		/* in hfs_readwrite.c */
int hfs_getattrlist();	/* in hfs_attrlist.c */
int hfs_setattrlist();	/* in hfs_attrlist.c */
int hfs_readdirattr();	/* in hfs_attrlist.c */
int hfs_inactive();	/* in hfs_cnode.c */
int hfs_reclaim();	/* in hfs_cnode.c */
4145 int (**hfs_vnodeop_p
)(void *);
4147 #define VOPFUNC int (*)(void *)
4149 struct vnodeopv_entry_desc hfs_vnodeop_entries
[] = {
4150 { &vop_default_desc
, (VOPFUNC
)vn_default_error
},
4151 { &vop_lookup_desc
, (VOPFUNC
)hfs_cache_lookup
}, /* lookup */
4152 { &vop_create_desc
, (VOPFUNC
)hfs_create
}, /* create */
4153 { &vop_mknod_desc
, (VOPFUNC
)hfs_mknod
}, /* mknod */
4154 { &vop_open_desc
, (VOPFUNC
)hfs_open
}, /* open */
4155 { &vop_close_desc
, (VOPFUNC
)hfs_close
}, /* close */
4156 { &vop_access_desc
, (VOPFUNC
)hfs_access
}, /* access */
4157 { &vop_getattr_desc
, (VOPFUNC
)hfs_getattr
}, /* getattr */
4158 { &vop_setattr_desc
, (VOPFUNC
)hfs_setattr
}, /* setattr */
4159 { &vop_read_desc
, (VOPFUNC
)hfs_read
}, /* read */
4160 { &vop_write_desc
, (VOPFUNC
)hfs_write
}, /* write */
4161 { &vop_ioctl_desc
, (VOPFUNC
)hfs_ioctl
}, /* ioctl */
4162 { &vop_select_desc
, (VOPFUNC
)hfs_select
}, /* select */
4163 { &vop_revoke_desc
, (VOPFUNC
)nop_revoke
}, /* revoke */
4164 { &vop_exchange_desc
, (VOPFUNC
)hfs_exchange
}, /* exchange */
4165 { &vop_mmap_desc
, (VOPFUNC
)err_mmap
}, /* mmap */
4166 { &vop_fsync_desc
, (VOPFUNC
)hfs_fsync
}, /* fsync */
4167 { &vop_seek_desc
, (VOPFUNC
)nop_seek
}, /* seek */
4168 { &vop_remove_desc
, (VOPFUNC
)hfs_remove
}, /* remove */
4169 { &vop_link_desc
, (VOPFUNC
)hfs_link
}, /* link */
4170 { &vop_rename_desc
, (VOPFUNC
)hfs_rename
}, /* rename */
4171 { &vop_mkdir_desc
, (VOPFUNC
)hfs_mkdir
}, /* mkdir */
4172 { &vop_rmdir_desc
, (VOPFUNC
)hfs_rmdir
}, /* rmdir */
4173 { &vop_mkcomplex_desc
, (VOPFUNC
)err_mkcomplex
}, /* mkcomplex */
4174 { &vop_getattrlist_desc
, (VOPFUNC
)hfs_getattrlist
}, /* getattrlist */
4175 { &vop_setattrlist_desc
, (VOPFUNC
)hfs_setattrlist
}, /* setattrlist */
4176 { &vop_symlink_desc
, (VOPFUNC
)hfs_symlink
}, /* symlink */
4177 { &vop_readdir_desc
, (VOPFUNC
)hfs_readdir
}, /* readdir */
4178 { &vop_readdirattr_desc
, (VOPFUNC
)hfs_readdirattr
}, /* readdirattr */
4179 { &vop_readlink_desc
, (VOPFUNC
)hfs_readlink
}, /* readlink */
4180 { &vop_abortop_desc
, (VOPFUNC
)nop_abortop
}, /* abortop */
4181 { &vop_inactive_desc
, (VOPFUNC
)hfs_inactive
}, /* inactive */
4182 { &vop_reclaim_desc
, (VOPFUNC
)hfs_reclaim
}, /* reclaim */
4183 { &vop_lock_desc
, (VOPFUNC
)hfs_lock
}, /* lock */
4184 { &vop_unlock_desc
, (VOPFUNC
)hfs_unlock
}, /* unlock */
4185 { &vop_bmap_desc
, (VOPFUNC
)hfs_bmap
}, /* bmap */
4186 { &vop_strategy_desc
, (VOPFUNC
)hfs_strategy
}, /* strategy */
4187 { &vop_print_desc
, (VOPFUNC
)hfs_print
}, /* print */
4188 { &vop_islocked_desc
, (VOPFUNC
)hfs_islocked
}, /* islocked */
4189 { &vop_pathconf_desc
, (VOPFUNC
)hfs_pathconf
}, /* pathconf */
4190 { &vop_advlock_desc
, (VOPFUNC
)hfs_advlock
}, /* advlock */
4191 { &vop_reallocblks_desc
, (VOPFUNC
)err_reallocblks
}, /* reallocblks */
4192 { &vop_truncate_desc
, (VOPFUNC
)hfs_truncate
}, /* truncate */
4193 { &vop_allocate_desc
, (VOPFUNC
)hfs_allocate
}, /* allocate */
4194 { &vop_update_desc
, (VOPFUNC
)hfs_update
}, /* update */
4195 { &vop_searchfs_desc
, (VOPFUNC
)hfs_search
}, /* search fs */
4196 { &vop_bwrite_desc
, (VOPFUNC
)hfs_bwrite
}, /* bwrite */
4197 { &vop_pagein_desc
, (VOPFUNC
)hfs_pagein
}, /* pagein */
4198 { &vop_pageout_desc
,(VOPFUNC
) hfs_pageout
}, /* pageout */
4199 { &vop_copyfile_desc
, (VOPFUNC
)err_copyfile
}, /* copyfile */
4200 { &vop_blktooff_desc
, (VOPFUNC
)hfs_blktooff
}, /* blktooff */
4201 { &vop_offtoblk_desc
, (VOPFUNC
)hfs_offtoblk
}, /* offtoblk */
4202 { &vop_cmap_desc
, (VOPFUNC
)hfs_cmap
}, /* cmap */
4203 { &vop_kqfilt_add_desc
, (VOPFUNC
)hfs_kqfilt_add
}, /* kqfilt_add */
4204 { &vop_kqfilt_remove_desc
, (VOPFUNC
)hfs_kqfilt_remove
}, /* kqfilt_remove */
4205 { NULL
, (VOPFUNC
)NULL
}
4208 struct vnodeopv_desc hfs_vnodeop_opv_desc
=
4209 { &hfs_vnodeop_p
, hfs_vnodeop_entries
};
4211 int (**hfs_specop_p
)(void *);
4212 struct vnodeopv_entry_desc hfs_specop_entries
[] = {
4213 { &vop_default_desc
, (VOPFUNC
)vn_default_error
},
4214 { &vop_lookup_desc
, (VOPFUNC
)spec_lookup
}, /* lookup */
4215 { &vop_create_desc
, (VOPFUNC
)spec_create
}, /* create */
4216 { &vop_mknod_desc
, (VOPFUNC
)spec_mknod
}, /* mknod */
4217 { &vop_open_desc
, (VOPFUNC
)spec_open
}, /* open */
4218 { &vop_close_desc
, (VOPFUNC
)hfsspec_close
}, /* close */
4219 { &vop_access_desc
, (VOPFUNC
)hfs_access
}, /* access */
4220 { &vop_getattr_desc
, (VOPFUNC
)hfs_getattr
}, /* getattr */
4221 { &vop_setattr_desc
, (VOPFUNC
)hfs_setattr
}, /* setattr */
4222 { &vop_read_desc
, (VOPFUNC
)hfsspec_read
}, /* read */
4223 { &vop_write_desc
, (VOPFUNC
)hfsspec_write
}, /* write */
4224 { &vop_lease_desc
, (VOPFUNC
)spec_lease_check
}, /* lease */
4225 { &vop_ioctl_desc
, (VOPFUNC
)spec_ioctl
}, /* ioctl */
4226 { &vop_select_desc
, (VOPFUNC
)spec_select
}, /* select */
4227 { &vop_revoke_desc
, (VOPFUNC
)spec_revoke
}, /* revoke */
4228 { &vop_mmap_desc
, (VOPFUNC
)spec_mmap
}, /* mmap */
4229 { &vop_fsync_desc
, (VOPFUNC
)hfs_fsync
}, /* fsync */
4230 { &vop_seek_desc
, (VOPFUNC
)spec_seek
}, /* seek */
4231 { &vop_remove_desc
, (VOPFUNC
)spec_remove
}, /* remove */
4232 { &vop_link_desc
, (VOPFUNC
)spec_link
}, /* link */
4233 { &vop_rename_desc
, (VOPFUNC
)spec_rename
}, /* rename */
4234 { &vop_mkdir_desc
, (VOPFUNC
)spec_mkdir
}, /* mkdir */
4235 { &vop_rmdir_desc
, (VOPFUNC
)spec_rmdir
}, /* rmdir */
4236 { &vop_getattrlist_desc
, (VOPFUNC
)hfs_getattrlist
},
4237 { &vop_symlink_desc
, (VOPFUNC
)spec_symlink
}, /* symlink */
4238 { &vop_readdir_desc
, (VOPFUNC
)spec_readdir
}, /* readdir */
4239 { &vop_readlink_desc
, (VOPFUNC
)spec_readlink
}, /* readlink */
4240 { &vop_abortop_desc
, (VOPFUNC
)spec_abortop
}, /* abortop */
4241 { &vop_inactive_desc
, (VOPFUNC
)hfs_inactive
}, /* inactive */
4242 { &vop_reclaim_desc
, (VOPFUNC
)hfs_reclaim
}, /* reclaim */
4243 { &vop_lock_desc
, (VOPFUNC
)hfs_lock
}, /* lock */
4244 { &vop_unlock_desc
, (VOPFUNC
)hfs_unlock
}, /* unlock */
4245 { &vop_bmap_desc
, (VOPFUNC
)spec_bmap
}, /* bmap */
4246 { &vop_strategy_desc
, (VOPFUNC
)spec_strategy
}, /* strategy */
4247 { &vop_print_desc
, (VOPFUNC
)hfs_print
}, /* print */
4248 { &vop_islocked_desc
, (VOPFUNC
)hfs_islocked
}, /* islocked */
4249 { &vop_pathconf_desc
, (VOPFUNC
)spec_pathconf
}, /* pathconf */
4250 { &vop_advlock_desc
, (VOPFUNC
)spec_advlock
}, /* advlock */
4251 { &vop_blkatoff_desc
, (VOPFUNC
)spec_blkatoff
}, /* blkatoff */
4252 { &vop_valloc_desc
, (VOPFUNC
)spec_valloc
}, /* valloc */
4253 { &vop_reallocblks_desc
, (VOPFUNC
)spec_reallocblks
}, /* reallocblks */
4254 { &vop_vfree_desc
, (VOPFUNC
)err_vfree
}, /* vfree */
4255 { &vop_truncate_desc
, (VOPFUNC
)spec_truncate
}, /* truncate */
4256 { &vop_update_desc
, (VOPFUNC
)hfs_update
}, /* update */
4257 { &vop_bwrite_desc
, (VOPFUNC
)hfs_bwrite
},
4258 { &vop_devblocksize_desc
, (VOPFUNC
)spec_devblocksize
}, /* devblocksize */
4259 { &vop_pagein_desc
, (VOPFUNC
)hfs_pagein
}, /* Pagein */
4260 { &vop_pageout_desc
, (VOPFUNC
)hfs_pageout
}, /* Pageout */
4261 { &vop_copyfile_desc
, (VOPFUNC
)err_copyfile
}, /* copyfile */
4262 { &vop_blktooff_desc
, (VOPFUNC
)hfs_blktooff
}, /* blktooff */
4263 { &vop_offtoblk_desc
, (VOPFUNC
)hfs_offtoblk
}, /* offtoblk */
4264 { (struct vnodeop_desc
*)NULL
, (VOPFUNC
)NULL
}
4266 struct vnodeopv_desc hfs_specop_opv_desc
=
4267 { &hfs_specop_p
, hfs_specop_entries
};
4270 int (**hfs_fifoop_p
)(void *);
4271 struct vnodeopv_entry_desc hfs_fifoop_entries
[] = {
4272 { &vop_default_desc
, (VOPFUNC
)vn_default_error
},
4273 { &vop_lookup_desc
, (VOPFUNC
)fifo_lookup
}, /* lookup */
4274 { &vop_create_desc
, (VOPFUNC
)fifo_create
}, /* create */
4275 { &vop_mknod_desc
, (VOPFUNC
)fifo_mknod
}, /* mknod */
4276 { &vop_open_desc
, (VOPFUNC
)fifo_open
}, /* open */
4277 { &vop_close_desc
, (VOPFUNC
)hfsfifo_close
}, /* close */
4278 { &vop_access_desc
, (VOPFUNC
)hfs_access
}, /* access */
4279 { &vop_getattr_desc
, (VOPFUNC
)hfs_getattr
}, /* getattr */
4280 { &vop_setattr_desc
, (VOPFUNC
)hfs_setattr
}, /* setattr */
4281 { &vop_read_desc
, (VOPFUNC
)hfsfifo_read
}, /* read */
4282 { &vop_write_desc
, (VOPFUNC
)hfsfifo_write
}, /* write */
4283 { &vop_lease_desc
, (VOPFUNC
)fifo_lease_check
}, /* lease */
4284 { &vop_ioctl_desc
, (VOPFUNC
)fifo_ioctl
}, /* ioctl */
4285 { &vop_select_desc
, (VOPFUNC
)fifo_select
}, /* select */
4286 { &vop_revoke_desc
, (VOPFUNC
)fifo_revoke
}, /* revoke */
4287 { &vop_mmap_desc
, (VOPFUNC
)fifo_mmap
}, /* mmap */
4288 { &vop_fsync_desc
, (VOPFUNC
)hfs_fsync
}, /* fsync */
4289 { &vop_seek_desc
, (VOPFUNC
)fifo_seek
}, /* seek */
4290 { &vop_remove_desc
, (VOPFUNC
)fifo_remove
}, /* remove */
4291 { &vop_link_desc
, (VOPFUNC
)fifo_link
}, /* link */
4292 { &vop_rename_desc
, (VOPFUNC
)fifo_rename
}, /* rename */
4293 { &vop_mkdir_desc
, (VOPFUNC
)fifo_mkdir
}, /* mkdir */
4294 { &vop_rmdir_desc
, (VOPFUNC
)fifo_rmdir
}, /* rmdir */
4295 { &vop_getattrlist_desc
, (VOPFUNC
)hfs_getattrlist
},
4296 { &vop_symlink_desc
, (VOPFUNC
)fifo_symlink
}, /* symlink */
4297 { &vop_readdir_desc
, (VOPFUNC
)fifo_readdir
}, /* readdir */
4298 { &vop_readlink_desc
, (VOPFUNC
)fifo_readlink
}, /* readlink */
4299 { &vop_abortop_desc
, (VOPFUNC
)fifo_abortop
}, /* abortop */
4300 { &vop_inactive_desc
, (VOPFUNC
)hfs_inactive
}, /* inactive */
4301 { &vop_reclaim_desc
, (VOPFUNC
)hfs_reclaim
}, /* reclaim */
4302 { &vop_lock_desc
, (VOPFUNC
)hfs_lock
}, /* lock */
4303 { &vop_unlock_desc
, (VOPFUNC
)hfs_unlock
}, /* unlock */
4304 { &vop_bmap_desc
, (VOPFUNC
)fifo_bmap
}, /* bmap */
4305 { &vop_strategy_desc
, (VOPFUNC
)fifo_strategy
}, /* strategy */
4306 { &vop_print_desc
, (VOPFUNC
)hfs_print
}, /* print */
4307 { &vop_islocked_desc
, (VOPFUNC
)hfs_islocked
}, /* islocked */
4308 { &vop_pathconf_desc
, (VOPFUNC
)fifo_pathconf
}, /* pathconf */
4309 { &vop_advlock_desc
, (VOPFUNC
)fifo_advlock
}, /* advlock */
4310 { &vop_blkatoff_desc
, (VOPFUNC
)fifo_blkatoff
}, /* blkatoff */
4311 { &vop_valloc_desc
, (VOPFUNC
)fifo_valloc
}, /* valloc */
4312 { &vop_reallocblks_desc
, (VOPFUNC
)fifo_reallocblks
}, /* reallocblks */
4313 { &vop_vfree_desc
, (VOPFUNC
)err_vfree
}, /* vfree */
4314 { &vop_truncate_desc
, (VOPFUNC
)fifo_truncate
}, /* truncate */
4315 { &vop_update_desc
, (VOPFUNC
)hfs_update
}, /* update */
4316 { &vop_bwrite_desc
, (VOPFUNC
)hfs_bwrite
},
4317 { &vop_pagein_desc
, (VOPFUNC
)hfs_pagein
}, /* Pagein */
4318 { &vop_pageout_desc
, (VOPFUNC
)hfs_pageout
}, /* Pageout */
4319 { &vop_copyfile_desc
, (VOPFUNC
)err_copyfile
}, /* copyfile */
4320 { &vop_blktooff_desc
, (VOPFUNC
)hfs_blktooff
}, /* blktooff */
4321 { &vop_offtoblk_desc
, (VOPFUNC
)hfs_offtoblk
}, /* offtoblk */
4322 { &vop_cmap_desc
, (VOPFUNC
)hfs_cmap
}, /* cmap */
4323 { &vop_kqfilt_add_desc
, (VOPFUNC
)hfsfifo_kqfilt_add
}, /* kqfilt_add */
4324 { &vop_kqfilt_remove_desc
, (VOPFUNC
)hfsfifo_kqfilt_remove
}, /* kqfilt_remove */
4325 { (struct vnodeop_desc
*)NULL
, (VOPFUNC
)NULL
}
4327 struct vnodeopv_desc hfs_fifoop_opv_desc
=
4328 { &hfs_fifoop_p
, hfs_fifoop_entries
};