/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/dirent.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/quota.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>
#include <vfs/vfs_support.h>
#include <machine/spl.h>

#include <sys/kdebug.h>

#include "hfs_catalog.h"
#include "hfs_cnode.h"
#include "hfs_lockf.h"
#include "hfs_mount.h"
#include "hfs_quota.h"
#include "hfs_endian.h"

#include "hfscommon/headers/BTreesInternal.h"
#include "hfscommon/headers/FileMgrInternal.h"
#define MAKE_DELETED_NAME(NAME, FID) \
    (void) sprintf((NAME), "%s%d", HFS_DELETE_PREFIX, (FID))
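/*
 * Note: MAKE_DELETED_NAME builds the temporary name used for open-deleted
 * files that are parked in the volume's private (hidden) directory until
 * their last reference goes away (see hfs_removefile below).  The NAME
 * buffer must be large enough for HFS_DELETE_PREFIX plus a decimal file ID.
 */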
#define KNDETACH_VNLOCKED	0x00000001

#define CARBON_TEMP_DIR_NAME	"Cleanup At Startup"
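/*
 * Files living in the Carbon "Cleanup At Startup" directory carry the
 * UF_NODUMP flag; hfs_rename and hfs_exchange below clear UF_NODUMP again
 * once a file moves out of that directory.
 */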
/* Global vfs data structures for hfs */

extern unsigned long strtoul(const char *, char **, int);
extern int groupmember(gid_t gid, struct ucred *cred);

static int hfs_makenode(int mode, struct vnode *dvp, struct vnode **vpp,
		struct componentname *cnp);

static int hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp,
		struct vnode **rvpp, struct proc *p);

static int hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p);
static int hfs_removedir(struct vnode *, struct vnode *, struct componentname *,
		int);

static int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
		int);
/* Options for hfs_removedir and hfs_removefile */
#define HFSRM_PARENT_LOCKED	0x01
#define HFSRM_SKIP_RESERVE	0x02
#define HFSRM_SAVE_NAME		0x04

#define HFSRM_RENAMEOPTS	0x07
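/*
 * HFSRM_PARENT_LOCKED:  the caller keeps the parent (dvp) locked, so the
 *                       remove path must not release it on the way out.
 * HFSRM_SKIP_RESERVE:   skip the cat_preflight() catalog space reservation
 *                       (the caller has already reserved space).
 * HFSRM_SAVE_NAME:      do not free the componentname pathname buffer.
 * HFSRM_RENAMEOPTS:     all of the above; used by hfs_rename when removing
 *                       an existing target.
 */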
int hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p,
		Boolean considerFlags);

int hfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
		struct proc *p);

int hfs_chmod(struct vnode *vp, int mode, struct ucred *cred,
		struct proc *p);

int hfs_chown(struct vnode *vp, uid_t uid, gid_t gid,
		struct ucred *cred, struct proc *p);
/*****************************************************************************
 *
 * Common Operations on vnodes
 *
 *****************************************************************************/
/*
 * Create a regular file

     IN WILLRELE struct vnode *dvp;
     OUT struct vnode **vpp;
     IN struct componentname *cnp;
     IN struct vattr *vap;

     We are responsible for freeing the namei buffer,
     it is done in hfs_makenode()
*/
static int
hfs_create(ap)
    struct vop_create_args /* {
        struct vnode **a_vpp;
        struct componentname *a_cnp;
    } */ *ap;
{
    struct vattr *vap = ap->a_vap;

    return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
            ap->a_dvp, ap->a_vpp, ap->a_cnp));
}
/*
     IN WILLRELE struct vnode *dvp;
     OUT WILLRELE struct vnode **vpp;
     IN struct componentname *cnp;
     IN struct vattr *vap;
*/
static int
hfs_mknod(ap)
    struct vop_mknod_args /* {
        struct vnode **a_vpp;
        struct componentname *a_cnp;
    } */ *ap;
{
    struct vattr *vap = ap->a_vap;
    struct vnode **vpp = ap->a_vpp;

    if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) {
        VOP_ABORTOP(ap->a_dvp, ap->a_cnp);

    /* Create the vnode */
    error = hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
            ap->a_dvp, vpp, ap->a_cnp);

    cp->c_flag |= C_ACCESS | C_CHANGE | C_UPDATE;
    if ((vap->va_rdev != VNOVAL) &&
        (vap->va_type == VBLK || vap->va_type == VCHR))
        cp->c_rdev = vap->va_rdev;
    /*
     * Remove cnode so that it will be reloaded by lookup and
     * checked to see if it is an alias of an existing vnode.
     * Note: unlike UFS, we don't bash v_type here.
     */
/*
     IN struct ucred *cred;
*/

static int
hfs_open(ap)
    struct vop_open_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct vnode *vp = ap->a_vp;
    struct filefork *fp = VTOF(vp);

    /*
     * Files marked append-only must be opened for appending.
     */
    if ((vp->v_type != VDIR) && (VTOC(vp)->c_flags & APPEND) &&
        (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)

    if (ap->a_mode & O_EVTONLY) {
        if (vp->v_type == VREG) {
            ++VTOF(vp)->ff_evtonly_refs;
        } else {
            ++VTOC(vp)->c_evtonly_refs;
    /*
     * On the first (non-busy) open of a fragmented
     * file attempt to de-frag it (if it's less than 20MB).
     */
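    /*
     * (The checks below treat the file as "fragmented" when its data fork
     * already fills all eight in-cnode extent slots, i.e.
     * ff_extents[7].blockCount != 0; the relocation attempt is skipped on
     * read-only volumes, for busy files, and during the first three
     * minutes after boot.)
     */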
    if ((VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) ||
        !UBCISVALID(vp) || ubc_isinuse(vp, 1)) {

        fp->ff_extents[7].blockCount != 0 &&
        fp->ff_size <= (20 * 1024 * 1024)) {
        /*
         * Wait until system bootup is done (3 min).
         */
        if (tv.tv_sec < (60 * 3)) {

        (void) hfs_relocate(vp, VTOVCB(vp)->nextAllocation + 4096,
                ap->a_cred, ap->a_p);
/*
 * Update the times on the cnode.

     IN struct ucred *cred;
*/

static int
hfs_close(ap)
    struct vop_close_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    register struct vnode *vp = ap->a_vp;
    register struct cnode *cp = VTOC(vp);
    register struct filefork *fp = VTOF(vp);
    struct proc *p = ap->a_p;
    u_long blks, blocksize;

    simple_lock(&vp->v_interlock);
    if ((!UBCISVALID(vp) && vp->v_usecount > 1)
        || (UBCISVALID(vp) && ubc_isinuse(vp, 1))) {
        CTIMES(cp, &tv, &tv);
    simple_unlock(&vp->v_interlock);

    if (ap->a_fflag & O_EVTONLY) {
        if (vp->v_type == VREG) {
            --VTOF(vp)->ff_evtonly_refs;
        } else {
            --VTOC(vp)->c_evtonly_refs;

    /*
     * VOP_CLOSE can be called with vp locked (from vclean).
     * We check for this case using VOP_ISLOCKED and bail.
     *
     * XXX During a force unmount we won't do the cleanup below!
     */
    if (vp->v_type == VDIR || VOP_ISLOCKED(vp))

    if ((fp->ff_blocks > 0) &&
        !ISSET(cp->c_flag, C_DELETED) &&
        ((VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) == 0)) {
        enum vtype our_type = vp->v_type;
        u_long our_id = vp->v_id;
        int was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA);

        error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);

        /*
         * Since we can context switch in vn_lock our vnode
         * could get recycled (eg umount -f).  Double check
         * that it's still ours.
         */
        if (vp->v_type != our_type || vp->v_id != our_id
            || cp != VTOC(vp) || !UBCINFOEXISTS(vp)) {
            VOP_UNLOCK(vp, 0, p);

        /*
         * Last chance to explicitly zero out the areas
         * that are currently marked invalid:
         */
        VOP_DEVBLOCKSIZE(cp->c_devvp, &devBlockSize);
        (void) cluster_push(vp);
        SET(vp->v_flag, VNOCACHE_DATA);	/* Don't cache zeros */
        while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
            struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
            off_t start = invalid_range->rl_start;
            off_t end = invalid_range->rl_end;

            /* The range about to be written must be validated
             * first, so that VOP_CMAP() will return the
             * appropriate mapping for the cluster code:
             */
            rl_remove(start, end, &fp->ff_invalidranges);

            (void) cluster_write(vp, (struct uio *) 0, leof,
                    invalid_range->rl_end + 1, invalid_range->rl_start,
                    (off_t)0, devBlockSize, IO_HEADZEROFILL | IO_NOZERODIRTY);

            if (ISSET(vp->v_flag, VHASDIRTY))
                (void) cluster_push(vp);

            cp->c_flag |= C_MODIFIED;

        cp->c_flag &= ~C_ZFWANTSYNC;

        blocksize = VTOVCB(vp)->blockSize;
        blks = leof / blocksize;
        if (((off_t)blks * (off_t)blocksize) != leof)

        /*
         * Shrink the peof to the smallest size necessary to contain the leof.
         */
        if (blks < fp->ff_blocks)
            (void) VOP_TRUNCATE(vp, leof, IO_NDELAY, ap->a_cred, p);
        (void) cluster_push(vp);

        CLR(vp->v_flag, VNOCACHE_DATA);

        /*
         * If the VOP_TRUNCATE didn't happen to flush the vnode's
         * information out to disk, force it to be updated now that
         * all invalid ranges have been zero-filled and validated:
         */
        if (cp->c_flag & C_MODIFIED) {
            VOP_UPDATE(vp, &tv, &tv, 0);

        VOP_UNLOCK(vp, 0, p);

    if ((vp->v_flag & VSYSTEM) && (vp->v_usecount == 1))
/*
     IN struct ucred *cred;
*/

static int
hfs_access(ap)
    struct vop_access_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    struct ucred *cred = ap->a_cred;
    mode_t mode = ap->a_mode;

    /*
     * Disallow write attempts on read-only file systems;
     * unless the file is a socket, fifo, or a block or
     * character device resident on the file system.
     */
    switch (vp->v_type) {

        if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)

        if ((error = hfs_getinoquota(cp)))

    /* If immutable bit set, nobody gets to write it. */
    if (cp->c_flags & IMMUTABLE)

    /* Otherwise, user id 0 always gets access. */
    if (cred->cr_uid == 0)

    /* Otherwise, check the owner. */
    if ((cp->c_uid == cred->cr_uid) || (cp->c_uid == UNKNOWNUID)) {

        return ((cp->c_mode & mask) == mask ? 0 : EACCES);

    /* Otherwise, check the groups. */
    if (! (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)) {
        for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++)
            if (cp->c_gid == *gp) {

                return ((cp->c_mode & mask) == mask ? 0 : EACCES);

    /* Otherwise, check everyone else. */
    return ((cp->c_mode & mask) == mask ? 0 : EACCES);
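    /*
     * (Summary of the checks above: the owner class applies when the caller
     * owns the file or the on-disk owner is UNKNOWNUID; group bits are only
     * consulted when the volume is not mounted MNT_UNKNOWNPERMISSIONS; all
     * other callers fall through to the "other" bits.)
     */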
/*
     IN struct vattr *vap;
     IN struct ucred *cred;
*/

static int
hfs_getattr(ap)
    struct vop_getattr_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    struct vattr *vap = ap->a_vap;

    CTIMES(cp, &tv, &tv);

    vap->va_type = vp->v_type;
    vap->va_mode = cp->c_mode;
    vap->va_nlink = cp->c_nlink;
    /*
     * [2856576] Since we are dynamically changing the owner, also
     * effectively turn off the set-user-id and set-group-id bits,
     * just like chmod(2) would when changing ownership.  This prevents
     * a security hole where set-user-id programs run as whoever is
     * logged on (or root if nobody is logged in yet!)
     */
    if (cp->c_uid == UNKNOWNUID) {
        vap->va_mode &= ~(S_ISUID | S_ISGID);
        vap->va_uid = ap->a_cred->cr_uid;

        vap->va_uid = cp->c_uid;

    vap->va_gid = cp->c_gid;
    vap->va_fsid = cp->c_dev;
    /*
     * Exporting file IDs from HFS Plus:
     *
     * For "normal" files the c_fileid is the same value as the
     * c_cnid.  But for hard link files, they are different - the
     * c_cnid belongs to the active directory entry (ie the link)
     * and the c_fileid is for the actual inode (ie the data file).
     *
     * The stat call (getattr) will always return the c_fileid
     * and Carbon APIs, which are hardlink-ignorant, will always
     * receive the c_cnid (from getattrlist).
     */
    vap->va_fileid = cp->c_fileid;
    vap->va_atime.tv_sec = cp->c_atime;
    vap->va_atime.tv_nsec = 0;
    vap->va_mtime.tv_sec = cp->c_mtime;
    vap->va_mtime.tv_nsec = cp->c_mtime_nsec;
    vap->va_ctime.tv_sec = cp->c_ctime;
    vap->va_ctime.tv_nsec = 0;

    vap->va_flags = cp->c_flags;

    vap->va_blocksize = VTOVFS(vp)->mnt_stat.f_iosize;

    if (vp->v_type == VDIR) {
        vap->va_size = cp->c_nlink * AVERAGE_HFSDIRENTRY_SIZE;

        vap->va_size = VTOF(vp)->ff_size;
        vap->va_bytes = (u_quad_t)cp->c_blocks *
                (u_quad_t)VTOVCB(vp)->blockSize;
        if (vp->v_type == VBLK || vp->v_type == VCHR)
            vap->va_rdev = cp->c_rdev;
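    /*
     * (Directories report a synthesized va_size above - HFS directories
     * have no data fork, so the size is estimated as the link count times
     * AVERAGE_HFSDIRENTRY_SIZE.)
     */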
/*
 * Set attribute vnode op. called from several syscalls

     IN struct vattr *vap;
     IN struct ucred *cred;
*/

static int
hfs_setattr(ap)
    struct vop_setattr_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct vattr *vap = ap->a_vap;
    struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    struct ucred *cred = ap->a_cred;
    struct proc *p = ap->a_p;
    struct timeval atimeval, mtimeval;

    /*
     * Check for unsettable attributes.
     */
    if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) ||
        (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) ||
        (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) ||
        ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) {

    // don't allow people to set the attributes of symlinks
    // (nfs has a bad habit of doing this and it can cause
    // problems for journaling).
    if (vp->v_type == VLNK) {

    if (vap->va_flags != VNOVAL) {
        if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)

        if ((error = hfs_chflags(vp, vap->va_flags, cred, p)))

        if (vap->va_flags & (IMMUTABLE | APPEND))

    if (cp->c_flags & (IMMUTABLE | APPEND))

    // XXXdbg - don't allow modification of the journal or journal_info_block
    if (VTOHFS(vp)->jnl && cp->c_datafork) {
        struct HFSPlusExtentDescriptor *extd;

        extd = &cp->c_datafork->ff_extents[0];
        if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {

    /*
     * Go through the fields and update iff not VNOVAL.
     */
    if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
        if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)

        if ((error = hfs_chown(vp, vap->va_uid, vap->va_gid, cred, p)))

    if (vap->va_size != VNOVAL) {
        /*
         * Disallow write attempts on read-only file systems;
         * unless the file is a socket, fifo, or a block or
         * character device resident on the file system.
         */
        switch (vp->v_type) {

            if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)

        if ((error = VOP_TRUNCATE(vp, vap->va_size, 0, cred, p)))

    if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
        if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)

        if (((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) &&
            ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
            (error = VOP_ACCESS(vp, VWRITE, cred, p)))) {

        if (vap->va_atime.tv_sec != VNOVAL)
            cp->c_flag |= C_ACCESS;
        if (vap->va_mtime.tv_sec != VNOVAL) {
            cp->c_flag |= C_CHANGE | C_UPDATE;
            /*
             * The utimes system call can reset the modification
             * time but it doesn't know about HFS create times.
             * So we need to ensure that the creation time is
             * always at least as old as the modification time.
             */
            if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
                (cp->c_cnid != kRootDirID) &&
                (vap->va_mtime.tv_sec < cp->c_itime)) {
                cp->c_itime = vap->va_mtime.tv_sec;

        atimeval.tv_sec = vap->va_atime.tv_sec;
        atimeval.tv_usec = 0;
        mtimeval.tv_sec = vap->va_mtime.tv_sec;
        mtimeval.tv_usec = 0;
        if ((error = VOP_UPDATE(vp, &atimeval, &mtimeval, 1)))

    if (vap->va_mode != (mode_t)VNOVAL) {
        if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)

        error = hfs_chmod(vp, (int)vap->va_mode, cred, p);

    HFS_KNOTE(vp, NOTE_ATTRIB);
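    /*
     * (Attributes are applied in a fixed order above: file flags first,
     * then owner/group, size, access/modification times, and finally the
     * mode.  Each step bails out early on a read-only volume.)
     */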
/*
 * Change the mode on a file.
 * cnode must be locked before calling.
 */
int
hfs_chmod(vp, mode, cred, p)
    register struct vnode *vp;
    register int mode;
    register struct ucred *cred;
    struct proc *p;
{
    register struct cnode *cp = VTOC(vp);

    if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)

    // XXXdbg - don't allow modification of the journal or journal_info_block
    if (VTOHFS(vp)->jnl && cp && cp->c_datafork) {
        struct HFSPlusExtentDescriptor *extd;

        extd = &cp->c_datafork->ff_extents[0];
        if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {

#if OVERRIDE_UNKNOWN_PERMISSIONS
    if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {

    if ((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0)

    if (vp->v_type != VDIR && (mode & S_ISTXT))

    if (!groupmember(cp->c_gid, cred) && (mode & S_ISGID))

    cp->c_mode &= ~ALLPERMS;
    cp->c_mode |= (mode & ALLPERMS);
    cp->c_flag |= C_CHANGE;


int
hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags)
{
    struct cnode *cp = VTOC(vp);

    /*
     * Disallow write attempts on read-only file systems;
     * unless the file is a socket, fifo, or a block or
     * character device resident on the file system.
     */
    switch (vp->v_type) {

        if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)

    /* If immutable bit set, nobody gets to write it. */
    if (considerFlags && (cp->c_flags & IMMUTABLE))

    /* Otherwise, user id 0 always gets access. */
    if (cred->cr_uid == 0)

    /* Otherwise, check the owner. */
    if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
        return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);

    /* Otherwise, check the groups. */
    for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) {
        if (cp->c_gid == *gp)
            return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);

    /* Otherwise, check everyone else. */
    return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);


/*
 * Change the flags on a file or directory.
 * cnode must be locked before calling.
 */
int
hfs_chflags(vp, flags, cred, p)
    register struct vnode *vp;
    register u_long flags;
    register struct ucred *cred;
    struct proc *p;
{
    register struct cnode *cp = VTOC(vp);

    if (VTOVCB(vp)->vcbSigWord == kHFSSigWord) {
        if ((retval = hfs_write_access(vp, cred, p, false)) != 0) {

    } else if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) {

    if (cred->cr_uid == 0) {
        if ((cp->c_flags & (SF_IMMUTABLE | SF_APPEND)) &&

        if (cp->c_flags & (SF_IMMUTABLE | SF_APPEND) ||
            (flags & UF_SETTABLE) != flags) {

    cp->c_flags &= SF_SETTABLE;
    cp->c_flags |= (flags & UF_SETTABLE);

    cp->c_flag |= C_CHANGE;
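    /*
     * (Only the superuser may change the SF_* "super-user" flags; other
     * callers are restricted to the UF_SETTABLE bits and are refused
     * outright when SF_IMMUTABLE or SF_APPEND is already set on the file.)
     */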
/*
 * Perform chown operation on cnode cp;
 * cnode must be locked prior to call.
 */
int
hfs_chown(vp, uid, gid, cred, p)
    register struct vnode *vp;
    uid_t uid;
    gid_t gid;
    struct ucred *cred;
    struct proc *p;
{
    register struct cnode *cp = VTOC(vp);

    if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)

    if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)

    if (uid == (uid_t)VNOVAL)

    if (gid == (gid_t)VNOVAL)

    /*
     * If we don't own the file, are trying to change the owner
     * of the file, or are not a member of the target group,
     * the caller must be superuser or the call fails.
     */
    if ((cred->cr_uid != cp->c_uid || uid != cp->c_uid ||
        (gid != cp->c_gid && !groupmember((gid_t)gid, cred))) &&
        (error = suser(cred, &p->p_acflag)))

    if ((error = hfs_getinoquota(cp)))

        dqrele(vp, cp->c_dquot[USRQUOTA]);
        cp->c_dquot[USRQUOTA] = NODQUOT;

        dqrele(vp, cp->c_dquot[GRPQUOTA]);
        cp->c_dquot[GRPQUOTA] = NODQUOT;

    /*
     * Eventually need to account for (fake) a block per directory
     * if (vp->v_type == VDIR)
     *     change = VTOVCB(vp)->blockSize;
     */
    change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
    (void) hfs_chkdq(cp, -change, cred, CHOWN);
    (void) hfs_chkiq(cp, -1, cred, CHOWN);
    for (i = 0; i < MAXQUOTAS; i++) {
        dqrele(vp, cp->c_dquot[i]);
        cp->c_dquot[i] = NODQUOT;
    }

    if ((error = hfs_getinoquota(cp)) == 0) {
        dqrele(vp, cp->c_dquot[USRQUOTA]);
        cp->c_dquot[USRQUOTA] = NODQUOT;

        dqrele(vp, cp->c_dquot[GRPQUOTA]);
        cp->c_dquot[GRPQUOTA] = NODQUOT;

        if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
            if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)

            (void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);

        for (i = 0; i < MAXQUOTAS; i++) {
            dqrele(vp, cp->c_dquot[i]);
            cp->c_dquot[i] = NODQUOT;
        }

    if (hfs_getinoquota(cp) == 0) {
        dqrele(vp, cp->c_dquot[USRQUOTA]);
        cp->c_dquot[USRQUOTA] = NODQUOT;

        dqrele(vp, cp->c_dquot[GRPQUOTA]);
        cp->c_dquot[GRPQUOTA] = NODQUOT;

        (void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
        (void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
        (void) hfs_getinoquota(cp);

    if (hfs_getinoquota(cp))
        panic("hfs_chown: lost quota");

    if (ouid != uid || ogid != gid)
        cp->c_flag |= C_CHANGE;
    if (ouid != uid && cred->cr_uid != 0)
        cp->c_mode &= ~S_ISUID;
    if (ogid != gid && cred->cr_uid != 0)
        cp->c_mode &= ~S_ISGID;
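    /*
     * (When the owner or group actually changes, the quota usage released
     * above is re-charged against the new IDs, and non-root callers also
     * lose the setuid/setgid bits, matching chown(2) semantics.)
     */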
/*
#% exchange fvp		L L L
#% exchange tvp		L L L
 *
 * The hfs_exchange routine swaps the fork data in two files by
 * exchanging some of the information in the cnode.  It is used
 * to preserve the file ID when updating an existing file, in
 * case the file is being tracked through its file ID. Typically
 * it's used after creating a new file during a safe-save.
 */
static int
hfs_exchange(ap)
    struct vop_exchange_args /* {
        struct vnode *a_fvp;
        struct vnode *a_tvp;
        struct ucred *a_cred;
    } */ *ap;
{
    struct vnode *from_vp = ap->a_fvp;
    struct vnode *to_vp = ap->a_tvp;
    struct cnode *from_cp = VTOC(from_vp);
    struct cnode *to_cp = VTOC(to_vp);
    struct hfsmount *hfsmp = VTOHFS(from_vp);
    struct cat_desc tempdesc;
    struct cat_attr tempattr;
    int error = 0, started_tr = 0, grabbed_lock = 0;
    cat_cookie_t cookie = {0};

    /* The files must be on the same volume. */
    if (from_vp->v_mount != to_vp->v_mount)

    /* Only normal files can be exchanged. */
    if ((from_vp->v_type != VREG) || (to_vp->v_type != VREG) ||
        (from_cp->c_flag & C_HARDLINK) || (to_cp->c_flag & C_HARDLINK) ||
        VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp))

    // XXXdbg - don't allow modification of the journal or journal_info_block
        struct HFSPlusExtentDescriptor *extd;

        if (from_cp->c_datafork) {
            extd = &from_cp->c_datafork->ff_extents[0];
            if (extd->startBlock == VTOVCB(from_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {

        if (to_cp->c_datafork) {
            extd = &to_cp->c_datafork->ff_extents[0];
            if (extd->startBlock == VTOVCB(to_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {

    hfs_global_shared_lock_acquire(hfsmp);

    if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {

    /*
     * Reserve some space in the Catalog file.
     */
    if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, ap->a_p))) {

    /* Lock catalog b-tree */
    error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, ap->a_p);
    if (error) goto Err_Exit;

    /* The backend code always tries to delete the virtual
     * extent id for exchanging files so we need to lock
     * the extents b-tree.
     */
    error = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);

        (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p);

    /* Do the exchange */
    error = MacToVFSError(ExchangeFileIDs(HFSTOVCB(hfsmp),
                from_cp->c_desc.cd_nameptr, to_cp->c_desc.cd_nameptr,
                from_cp->c_parentcnid, to_cp->c_parentcnid,
                from_cp->c_hint, to_cp->c_hint));

    (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, ap->a_p);
    (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p);

    if (error != E_NONE) {

    /* Purge the vnodes from the name cache */

    cache_purge(from_vp);

    /* Save a copy of from attributes before swapping. */
    bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
    bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));

    /*
     * Swap the descriptors and all non-fork related attributes.
     * (except the modify date)
     */
    bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));

    from_cp->c_hint = 0;
    from_cp->c_fileid = from_cp->c_cnid;
    from_cp->c_itime = to_cp->c_itime;
    from_cp->c_btime = to_cp->c_btime;
    from_cp->c_atime = to_cp->c_atime;
    from_cp->c_ctime = to_cp->c_ctime;
    from_cp->c_gid = to_cp->c_gid;
    from_cp->c_uid = to_cp->c_uid;
    from_cp->c_flags = to_cp->c_flags;
    from_cp->c_mode = to_cp->c_mode;
    bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);

    bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));

    to_cp->c_fileid = to_cp->c_cnid;
    to_cp->c_itime = tempattr.ca_itime;
    to_cp->c_btime = tempattr.ca_btime;
    to_cp->c_atime = tempattr.ca_atime;
    to_cp->c_ctime = tempattr.ca_ctime;
    to_cp->c_gid = tempattr.ca_gid;
    to_cp->c_uid = tempattr.ca_uid;
    to_cp->c_flags = tempattr.ca_flags;
    to_cp->c_mode = tempattr.ca_mode;
    bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);

    /* Reinsert into the cnode hash under new file IDs */
    hfs_chashremove(from_cp);
    hfs_chashremove(to_cp);

    hfs_chashinsert(from_cp);
    hfs_chashinsert(to_cp);

    /*
     * When a file moves out of "Cleanup At Startup"
     * we can drop its NODUMP status.
     */
    if ((from_cp->c_flags & UF_NODUMP) &&
        (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
        from_cp->c_flags &= ~UF_NODUMP;
        from_cp->c_flag |= C_CHANGE;

    if ((to_cp->c_flags & UF_NODUMP) &&
        (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
        to_cp->c_flags &= ~UF_NODUMP;
        to_cp->c_flag |= C_CHANGE;

    HFS_KNOTE(from_vp, NOTE_ATTRIB);
    HFS_KNOTE(to_vp, NOTE_ATTRIB);

    cat_postflight(hfsmp, &cookie, ap->a_p);

    journal_end_transaction(hfsmp->jnl);

    hfs_global_shared_lock_release(hfsmp);
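    /*
     * (From user space this operation is typically reached through the
     * exchangedata(2) system call / Carbon FSExchangeObjects, which is why
     * only plain files - no hard links or resource-fork vnodes - are
     * accepted above.)
     */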
/*
     IN struct vnode *vp;
     IN struct ucred *cred;
*/

static int
hfs_fsync(ap)
    struct vop_fsync_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    struct filefork *fp = NULL;
    register struct buf *bp;
    struct hfsmount *hfsmp = VTOHFS(ap->a_vp);

    wait = (ap->a_waitfor == MNT_WAIT);

    /* HFS directories don't have any data blocks. */
    if (vp->v_type == VDIR)

    /*
     * For system files flush the B-tree header and
     * for regular files write out any clusters
     */
    if (vp->v_flag & VSYSTEM) {
        if (VTOF(vp)->fcbBTCBPtr != NULL) {

            if (hfsmp->jnl == NULL) {
                BTFlushPath(VTOF(vp));

    } else if (UBCINFOEXISTS(vp))
        (void) cluster_push(vp);

    /*
     * When MNT_WAIT is requested and the zero fill timeout
     * has expired then we must explicitly zero out any areas
     * that are currently marked invalid (holes).
     *
     * Files with NODUMP can bypass zero filling here.
     */
    if ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
        ((cp->c_flags & UF_NODUMP) == 0) &&
        UBCINFOEXISTS(vp) && (fp = VTOF(vp)) &&
        cp->c_zftimeout != 0) {

        if (time.tv_sec < cp->c_zftimeout) {
            /* Remember that a force sync was requested. */
            cp->c_flag |= C_ZFWANTSYNC;

        VOP_DEVBLOCKSIZE(cp->c_devvp, &devblksize);
        was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA);
        SET(vp->v_flag, VNOCACHE_DATA);	/* Don't cache zeros */

        while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
            struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
            off_t start = invalid_range->rl_start;
            off_t end = invalid_range->rl_end;

            /* The range about to be written must be validated
             * first, so that VOP_CMAP() will return the
             * appropriate mapping for the cluster code:
             */
            rl_remove(start, end, &fp->ff_invalidranges);

            (void) cluster_write(vp, (struct uio *) 0,
                    invalid_range->rl_end + 1,
                    invalid_range->rl_start,
                    (off_t)0, devblksize,
                    IO_HEADZEROFILL | IO_NOZERODIRTY);
            cp->c_flag |= C_MODIFIED;

        (void) cluster_push(vp);

        CLR(vp->v_flag, VNOCACHE_DATA);
        cp->c_flag &= ~C_ZFWANTSYNC;
        cp->c_zftimeout = 0;

    /*
     * Flush all dirty buffers associated with a vnode.
     */

    for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
        nbp = bp->b_vnbufs.le_next;
        if ((bp->b_flags & B_BUSY))

        if ((bp->b_flags & B_DELWRI) == 0)
            panic("hfs_fsync: bp 0x%x not dirty (hfsmp 0x%x)", bp, hfsmp);

        if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) {
            if ((bp->b_flags & B_META) == 0) {
                panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",

            // if journal_active() returns >= 0 then the journal is ok and we
            // shouldn't do anything to this locked block (because it is part
            // of a transaction).  otherwise we'll just go through the normal
            // code path and flush the buffer.
            if (journal_active(hfsmp->jnl) >= 0) {

        bp->b_flags |= B_BUSY;
        /* Clear B_LOCKED, should only be set on meta files */
        bp->b_flags &= ~B_LOCKED;

        /*
         * Wait for I/O associated with indirect blocks to complete,
         * since there is no way to quickly wait for them below.
         */
        if (bp->b_vp == vp || ap->a_waitfor == MNT_NOWAIT)

            (void) VOP_BWRITE(bp);

    while (vp->v_numoutput) {
        vp->v_flag |= VBWAIT;
        tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "hfs_fsync", 0);

    // XXXdbg -- is checking for hfsmp->jnl == NULL the right
    if (hfsmp->jnl == NULL && vp->v_dirtyblkhd.lh_first) {
        /* still have some dirty buffers */

        vprint("hfs_fsync: dirty", vp);

        /*
         * Looks like the requests are not
         * getting queued to the driver.
         * Retrying here causes a cpu bound loop.
         * Yield to the other threads and hope
         */
        (void)tsleep((caddr_t)&vp->v_numoutput,
                PRIBIO + 1, "hfs_fsync", hz/10);

    if (vp->v_flag & VSYSTEM) {
        if (VTOF(vp)->fcbBTCBPtr != NULL)
            BTSetLastSync(VTOF(vp), tv.tv_sec);
        cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
    } else /* User file */ {
        retval = VOP_UPDATE(ap->a_vp, &tv, &tv, wait);

        /* When MNT_WAIT is requested push out any delayed meta data */
        if ((retval == 0) && wait && cp->c_hint &&
            !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
            hfs_metasync(VTOHFS(vp), cp->c_hint, ap->a_p);

    // make sure that we've really been called from the user
    // fsync() and if so push out any pending transactions
    // that this file might be a part of (and get them on
    if (vp->v_flag & VFULLFSYNC) {

        journal_flush(hfsmp->jnl);

        VOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NOCRED, ap->a_p);
/* Sync an hfs catalog b-tree node */
static int
hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p)
{
    vp = HFSTOVCB(hfsmp)->catalogRefNum;

    // XXXdbg - don't need to do this on a journaled volume

    if (hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p) != 0)

    /*
     * Look for a matching node that has been delayed
     * but is not part of a set (B_LOCKED).
     */
    for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
        nbp = bp->b_vnbufs.le_next;
        if (bp->b_flags & B_BUSY)

        if (bp->b_lblkno == node) {
            if (bp->b_flags & B_LOCKED)

            bp->b_flags |= B_BUSY;

            (void) VOP_BWRITE(bp);

    (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
int
hfs_btsync(struct vnode *vp, int sync_transaction)
{
    struct cnode *cp = VTOC(vp);
    register struct buf *bp;
    struct hfsmount *hfsmp = VTOHFS(vp);

    /*
     * Flush all dirty buffers associated with b-tree.
     */
    for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
        nbp = bp->b_vnbufs.le_next;
        if ((bp->b_flags & B_BUSY))

        if ((bp->b_flags & B_DELWRI) == 0)
            panic("hfs_btsync: not dirty (bp 0x%x hfsmp 0x%x)", bp, hfsmp);

        if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) {
            if ((bp->b_flags & B_META) == 0) {
                panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",

            // if journal_active() returns >= 0 then the journal is ok and we
            // shouldn't do anything to this locked block (because it is part
            // of a transaction).  otherwise we'll just go through the normal
            // code path and flush the buffer.
            if (journal_active(hfsmp->jnl) >= 0) {

        if (sync_transaction && !(bp->b_flags & B_LOCKED))

        bp->b_flags |= B_BUSY;
        bp->b_flags &= ~B_LOCKED;

    if ((vp->v_flag & VSYSTEM) && (VTOF(vp)->fcbBTCBPtr != NULL))
        (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
    cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
/*
 * Rmdir system call.

     IN WILLRELE struct vnode *dvp;
     IN WILLRELE struct vnode *vp;
     IN struct componentname *cnp;
*/

static int
hfs_rmdir(ap)
    struct vop_rmdir_args /* {
        struct vnode *a_dvp;
        struct componentname *a_cnp;
    } */ *ap;
{
    return (hfs_removedir(ap->a_dvp, ap->a_vp, ap->a_cnp, 0));
}
static int
hfs_removedir(dvp, vp, cnp, options)
    struct vnode *dvp;
    struct vnode *vp;
    struct componentname *cnp;
    int options;
{
    struct proc *p = cnp->cn_proc;
    struct hfsmount * hfsmp;
    cat_cookie_t cookie = {0};
    int error = 0, started_tr = 0, grabbed_lock = 0;

        return (EINVAL);  /* cannot remove "." */

    (void)hfs_getinoquota(cp);

    hfs_global_shared_lock_acquire(hfsmp);

    if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {

    if (!(options & HFSRM_SKIP_RESERVE)) {
        /*
         * Reserve some space in the Catalog file.
         */
        if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {

    /*
     * Verify the directory is empty (and valid).
     * (Rmdir ".." won't be valid since
     * ".." will contain a reference to
     * the current directory and thus be
     */
    if (cp->c_entries != 0) {

    if ((dcp->c_flags & APPEND) || (cp->c_flags & (IMMUTABLE | APPEND))) {

    /* Remove the entry from the namei cache: */

    /* Lock catalog b-tree */
    error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
    if (error) goto out;

    if (cp->c_entries > 0)
        panic("hfs_rmdir: attempting to delete a non-empty directory!");
    /* Remove entry from catalog */
    error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

    /* Unlock catalog b-tree */
    (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
    if (error) goto out;

    (void)hfs_chkiq(cp, -1, NOCRED, 0);

    /* The parent lost a child */
    if (dcp->c_entries > 0)

    if (dcp->c_nlink > 0)

    dcp->c_flag |= C_CHANGE | C_UPDATE;

    (void) VOP_UPDATE(dvp, &tv, &tv, 0);
    HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);

    hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));

    cp->c_mode = 0;  /* Makes the vnode go away...see inactive */
    cp->c_flag |= C_NOEXISTS;

    if (!(options & HFSRM_PARENT_LOCKED)) {

    HFS_KNOTE(vp, NOTE_DELETE);

    if (!(options & HFSRM_SKIP_RESERVE)) {
        cat_postflight(hfsmp, &cookie, p);

        journal_end_transaction(hfsmp->jnl);

        hfs_global_shared_lock_release(hfsmp);
/*
     IN WILLRELE struct vnode *dvp;
     IN WILLRELE struct vnode *vp;
     IN struct componentname *cnp;
*/

static int
hfs_remove(ap)
    struct vop_remove_args /* {
        struct vnode *a_dvp;
        struct componentname *a_cnp;
    } */ *ap;
{
    return (hfs_removefile(ap->a_dvp, ap->a_vp, ap->a_cnp, 0));
}
/*
 * Similar to hfs_remove except there are additional options.
 */
static int
hfs_removefile(dvp, vp, cnp, options)
    struct vnode *dvp;
    struct vnode *vp;
    struct componentname *cnp;
    int options;
{
    struct vnode *rvp = NULL;
    struct hfsmount *hfsmp;
    struct proc *p = cnp->cn_proc;
    int dataforkbusy = 0;
    int rsrcforkbusy = 0;
    cat_cookie_t cookie = {0};
    int started_tr = 0, grabbed_lock = 0;
    int refcount, isbigfile = 0;

    /* Directories should call hfs_rmdir! */
    if (vp->v_type == VDIR) {

    if (cp->c_parentcnid != dcp->c_cnid) {

    /* Make sure a remove is permitted */
    if ((cp->c_flags & (IMMUTABLE | APPEND)) ||
        (VTOC(dvp)->c_flags & APPEND) ||
        VNODE_IS_RSRC(vp)) {

    /*
     * Acquire a vnode for a non-empty resource fork.
     * (needed for VOP_TRUNCATE)
     */
    if (cp->c_blocks - VTOF(vp)->ff_blocks) {
        error = hfs_vgetrsrc(hfsmp, vp, &rvp, p);

    // XXXdbg - don't allow deleting the journal or journal_info_block
    if (hfsmp->jnl && cp->c_datafork) {
        struct HFSPlusExtentDescriptor *extd;

        extd = &cp->c_datafork->ff_extents[0];
        if (extd->startBlock == HFSTOVCB(hfsmp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {

    /*
     * Check if this file is being used.
     *
     * The namei done for the remove took a reference on the
     * vnode (vp).  And we took a ref on the resource vnode (rvp).
     * Hence set 1 in the tookref parameter of ubc_isinuse().
     */
    if (VTOC(vp)->c_flag & C_VPREFHELD) {

    if (UBCISVALID(vp) && ubc_isinuse(vp, refcount))

    if (rvp && UBCISVALID(rvp) && ubc_isinuse(rvp, 1))

    // need this to check if we have to break the deletion
    // into multiple pieces
    isbigfile = (VTOC(vp)->c_datafork->ff_size >= HFS_BIGFILE_SIZE);

    /*
     * Carbon semantics prohibit deleting busy files.
     * (enforced when NODELETEBUSY is requested)
     */
    if ((dataforkbusy || rsrcforkbusy) &&
        ((cnp->cn_flags & NODELETEBUSY) ||
        (hfsmp->hfs_privdir_desc.cd_cnid == 0))) {

    (void)hfs_getinoquota(cp);

    hfs_global_shared_lock_acquire(hfsmp);

    if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {

    if (!(options & HFSRM_SKIP_RESERVE)) {
        /*
         * Reserve some space in the Catalog file.
         */
        if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {

    /* Remove our entry from the namei cache. */

    // XXXdbg - if we're journaled, kill any dirty symlink buffers
    if (hfsmp->jnl && vp->v_type == VLNK && vp->v_dirtyblkhd.lh_first) {
        struct buf *bp, *nbp;

        for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
            nbp = bp->b_vnbufs.le_next;

            if ((bp->b_flags & B_BUSY)) {
                // if it was busy, someone else must be dealing
                // with it so just move on.

            if (!(bp->b_flags & B_META)) {
                panic("hfs: symlink bp @ 0x%x is not marked meta-data!\n", bp);

            // if it's part of the current transaction, kill it.
            if (bp->b_flags & B_LOCKED) {

                bp->b_flags |= B_BUSY;
                journal_kill_block(hfsmp->jnl, bp);

    /*
     * Truncate any non-busy forks.  Busy forks will
     * get truncated when their vnode goes inactive.
     *
     * (Note: hard links are truncated in VOP_INACTIVE)
     */
    if ((cp->c_flag & C_HARDLINK) == 0) {
        int mode = cp->c_mode;

        if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) {
            cp->c_mode = 0;  /* Suppress VOP_UPDATES */
            error = VOP_TRUNCATE(vp, (off_t)0, IO_NDELAY, NOCRED, p);

        if (!rsrcforkbusy && rvp) {
            cp->c_mode = 0;  /* Suppress VOP_UPDATES */
            error = VOP_TRUNCATE(rvp, (off_t)0, IO_NDELAY, NOCRED, p);

    /*
     * There are 3 remove cases to consider:
     *   1. File is a hardlink    ==> remove the link
     *   2. File is busy (in use) ==> move/rename the file
     *   3. File is not in use    ==> remove the file
     */

    if (cp->c_flag & C_HARDLINK) {
        struct cat_desc desc;

        if ((cnp->cn_flags & HASBUF) == 0 ||
            cnp->cn_nameptr[0] == '\0') {
            error = ENOENT;	/* name missing! */

        /* Setup a descriptor for the link */
        bzero(&desc, sizeof(desc));
        desc.cd_nameptr = cnp->cn_nameptr;
        desc.cd_namelen = cnp->cn_namelen;
        desc.cd_parentcnid = dcp->c_cnid;
        /* XXX - if cnid is out of sync then the wrong thread rec will get deleted. */
        desc.cd_cnid = cp->c_cnid;

        /* Lock catalog b-tree */
        error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);

        /* Delete the link record */
        error = cat_delete(hfsmp, &desc, &cp->c_attr);

        if ((error == 0) && (--cp->c_nlink < 1)) {

            struct cat_desc to_desc;
            struct cat_desc from_desc;

            /*
             * This is now essentially an open deleted file.
             * Rename it to reflect this state which makes
             * orphan file cleanup easier (see hfs_remove_orphans).
             * Note: a rename failure here is not fatal.
             */
            MAKE_INODE_NAME(inodename, cp->c_rdev);
            bzero(&from_desc, sizeof(from_desc));
            from_desc.cd_nameptr = inodename;
            from_desc.cd_namelen = strlen(inodename);
            from_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid;
            from_desc.cd_flags = 0;
            from_desc.cd_cnid = cp->c_fileid;

            MAKE_DELETED_NAME(delname, cp->c_fileid);
            bzero(&to_desc, sizeof(to_desc));
            to_desc.cd_nameptr = delname;
            to_desc.cd_namelen = strlen(delname);
            to_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid;
            to_desc.cd_flags = 0;
            to_desc.cd_cnid = cp->c_fileid;

            (void) cat_rename(hfsmp, &from_desc, &hfsmp->hfs_privdir_desc,
                    &to_desc, (struct cat_desc *)NULL);
            cp->c_flag |= C_DELETED;

        /* Unlock the Catalog */
        (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

        cp->c_flag |= C_CHANGE;

        (void) VOP_UPDATE(vp, &tv, &tv, 0);

        hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));

    } else if (dataforkbusy || rsrcforkbusy || isbigfile) {

        struct cat_desc to_desc;
        struct cat_desc todir_desc;

        /*
         * Orphan this file (move to hidden directory).
         */
        bzero(&todir_desc, sizeof(todir_desc));
        todir_desc.cd_parentcnid = 2;

        MAKE_DELETED_NAME(delname, cp->c_fileid);
        bzero(&to_desc, sizeof(to_desc));
        to_desc.cd_nameptr = delname;
        to_desc.cd_namelen = strlen(delname);
        to_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid;
        to_desc.cd_flags = 0;
        to_desc.cd_cnid = cp->c_cnid;

        /* Lock catalog b-tree */
        error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);

        error = cat_rename(hfsmp, &cp->c_desc, &todir_desc,
                &to_desc, (struct cat_desc *)NULL);

        // XXXdbg - only bump this count if we were successful
            hfsmp->hfs_privdir_attr.ca_entries++;

        (void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc,
                &hfsmp->hfs_privdir_attr, NULL, NULL);

        /* Unlock the Catalog */
        (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
        if (error) goto out;

        cp->c_flag |= C_CHANGE | C_DELETED | C_NOEXISTS;

        (void) VOP_UPDATE(vp, &tv, &tv, 0);

    } else /* Not busy */ {

        if (cp->c_blocks > 0) {

            panic("hfs_remove: attempting to delete a non-empty file!");

            printf("hfs_remove: attempting to delete a non-empty file %s\n",
                    cp->c_desc.cd_nameptr);

        /* Lock catalog b-tree */
        error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);

        error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

        if (error && error != ENXIO && error != ENOENT && truncated) {
            if ((cp->c_datafork && cp->c_datafork->ff_size != 0) ||
                (cp->c_rsrcfork && cp->c_rsrcfork->ff_size != 0)) {
                panic("hfs: remove: couldn't delete a truncated file! (%d, data sz %lld; rsrc sz %lld)",
                        error, cp->c_datafork->ff_size, cp->c_rsrcfork->ff_size);

                printf("hfs: remove: strangely enough, deleting truncated file %s (%d) got err %d\n",
                        cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error);

        /* Unlock the Catalog */
        (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
        if (error) goto out;

        (void)hfs_chkiq(cp, -1, NOCRED, 0);

        truncated = 0;    // because the catalog entry is gone
        cp->c_flag |= C_CHANGE | C_NOEXISTS;

        hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));

    /*
     * All done with this cnode's descriptor...
     *
     * Note: all future catalog calls for this cnode must be
     * by fileid only.  This is OK for HFS (which doesn't have
     * file thread records) since HFS doesn't support hard
     * links or the removal of busy files.
     */
    cat_releasedesc(&cp->c_desc);

    /* In all three cases the parent lost a child */
    if (dcp->c_entries > 0)

    if (dcp->c_nlink > 0)

    dcp->c_flag |= C_CHANGE | C_UPDATE;

    (void) VOP_UPDATE(dvp, &tv, &tv, 0);
    HFS_KNOTE(dvp, NOTE_WRITE);

    /* All done with component name... */
    if ((options & HFSRM_SAVE_NAME) == 0 &&
        (cnp->cn_flags & (HASBUF | SAVENAME)) == (HASBUF | SAVENAME)) {
        char *tmp = cnp->cn_pnbuf;
        cnp->cn_pnbuf = NULL;
        cnp->cn_flags &= ~HASBUF;
        FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI);

    if (!(options & HFSRM_SKIP_RESERVE)) {
        cat_postflight(hfsmp, &cookie, p);

    /* Commit the truncation to the catalog record */
        cp->c_flag |= C_CHANGE | C_UPDATE | C_FORCEUPDATE;

        (void) VOP_UPDATE(vp, &tv, &tv, 0);

        journal_end_transaction(hfsmp->jnl);

        hfs_global_shared_lock_release(hfsmp);

        HFS_KNOTE(vp, NOTE_DELETE);

            HFS_KNOTE(rvp, NOTE_DELETE);

        VOP_UNLOCK(vp, 0, p);
        // XXXdbg - try to prevent the lost ubc_info panic
        if ((cp->c_flag & C_HARDLINK) == 0 || cp->c_nlink == 0) {
            (void) ubc_uncache(vp);

    if (!(options & HFSRM_PARENT_LOCKED)) {
__private_extern__ void
replace_desc(struct cnode *cp, struct cat_desc *cdp)
{
    /* First release allocated name buffer */
    if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
        char *name = cp->c_desc.cd_nameptr;

        cp->c_desc.cd_nameptr = 0;
        cp->c_desc.cd_namelen = 0;
        cp->c_desc.cd_flags &= ~CD_HASBUF;

    bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));

    /* Cnode now owns the name buffer */
    cdp->cd_nameptr = 0;
    cdp->cd_namelen = 0;
    cdp->cd_flags &= ~CD_HASBUF;
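    /*
     * (After replace_desc() the cnode owns the incoming descriptor's name
     * buffer: cdp is cleared above, so the caller must not release or
     * reuse cdp->cd_nameptr.)
     */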
/*
#% rename fdvp		U U U
#% rename tdvp		L U U
 *
 * The VFS layer guarantees that source and destination will
 * either both be directories, or both not be directories.
 *
 * When the target is a directory, hfs_rename must ensure
 * that it is empty.
 *
 * The rename system call is responsible for freeing
 * the pathname buffers (ie no need to call VOP_ABORTOP).
 */
static int
hfs_rename(ap)
    struct vop_rename_args /* {
        struct vnode *a_fdvp;
        struct vnode *a_fvp;
        struct componentname *a_fcnp;
        struct vnode *a_tdvp;
        struct vnode *a_tvp;
        struct componentname *a_tcnp;
    } */ *ap;
{
    struct vnode *tvp = ap->a_tvp;
    struct vnode *tdvp = ap->a_tdvp;
    struct vnode *fvp = ap->a_fvp;
    struct vnode *fdvp = ap->a_fdvp;
    struct componentname *tcnp = ap->a_tcnp;
    struct componentname *fcnp = ap->a_fcnp;
    struct proc *p = fcnp->cn_proc;
    struct cnode *fcp = NULL;
    struct cnode *fdcp = NULL;
    struct cnode *tdcp = VTOC(tdvp);
    struct cat_desc from_desc;
    struct cat_desc to_desc;
    struct cat_desc out_desc;
    struct hfsmount *hfsmp = NULL;
    cat_cookie_t cookie = {0};
    int fdvp_locked, fvp_locked, tdvp_locked, tvp_locked;
    int started_tr = 0, grabbed_lock = 0;

    /* Establish our vnode lock state. */

    tvp_locked = (tvp != 0);

    /*
     * Check for cross-device rename.
     */
    if ((fvp->v_mount != tdvp->v_mount) ||
        (tvp && (fvp->v_mount != tvp->v_mount))) {

    /*
     * When fvp matches tvp they must be case variants
     *
     * In some cases tvp will be locked, in other cases
     * it will be unlocked with no reference.  Normalize the
     * state here (unlocked with a reference) so that
     * we can exit in a known state.
     */
        if (VOP_ISLOCKED(tvp) &&
            (VTOC(tvp)->c_lock.lk_lockholder == p->p_pid) &&
            (VTOC(tvp)->c_lock.lk_lockthread == current_thread())) {

    /*
     * If this is a hard link with different parents
     * and it's not a case variant then keep tvp
     * around for removal.
     */
    if ((VTOC(fvp)->c_flag & C_HARDLINK) &&
        (hfs_namecmp(fcnp->cn_nameptr, fcnp->cn_namelen,
            tcnp->cn_nameptr, tcnp->cn_namelen) != 0))) {

    /*
     * The following edge case is caught here:
     * (to cannot be a descendant of from)
     */
    if (tdcp->c_parentcnid == VTOC(fvp)->c_cnid) {

    /*
     * The following two edge cases are caught here:
     * (note tvp is not empty)
     */
    if (tvp && (tvp->v_type == VDIR) && (VTOC(tvp)->c_entries != 0)) {

    /*
     * The following edge case is caught here:
     * (the from child and parent are the same)
     */

    /*
     * Make sure "from" vnode and its parent are changeable.
     */
    if ((VTOC(fvp)->c_flags & (IMMUTABLE | APPEND)) ||
        (VTOC(fdvp)->c_flags & APPEND)) {

    hfsmp = VTOHFS(tdvp);

    /*
     * If the destination parent directory is "sticky", then the
     * user must own the parent directory, or the destination of
     * the rename, otherwise the destination may not be changed
     * (except by root). This implements append-only directories.
     *
     * Note that checks for immutable and write access are done
     * by the call to VOP_REMOVE.
     */
    if (tvp && (tdcp->c_mode & S_ISTXT) &&
        (tcnp->cn_cred->cr_uid != 0) &&
        (tcnp->cn_cred->cr_uid != tdcp->c_uid) &&
        (hfs_owner_rights(hfsmp, VTOC(tvp)->c_uid, tcnp->cn_cred, p, false)) ) {

    (void)hfs_getinoquota(VTOC(tvp));

    /*
     * Lock all the vnodes before starting a journal transaction.
     */

    /*
     * Simple case (same parent) - just lock child (fvp).
     */
        if (error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p))

    /*
     * If fdvp is the parent of tdvp then we'll need to
     * drop tdvp's lock before acquiring a lock on fdvp.
     *
     * If the parent directories are unrelated then we'll
     * need to acquire their vnode locks in vnode address
     * order.  Otherwise we can race with another rename
     * call that involves the same vnodes except that to
     * and from are switched and potentially deadlock.
     * [ie rename("a/b", "c/d") vs rename("c/d", "a/b")]
     *
     * If it's not either of the two above cases then we
     * can safely lock fdvp and fvp.
     */
    if ((VTOC(fdvp)->c_cnid == VTOC(tdvp)->c_parentcnid) ||
        ((VTOC(tdvp)->c_cnid != VTOC(fdvp)->c_parentcnid) &&

        /* Drop locks on tvp and tdvp */
            VOP_UNLOCK(tvp, 0, p);

            VOP_UNLOCK(tdvp, 0, p);

        /* Acquire locks in correct order */
        if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p)))

        if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p)))

        /*
         * Now that the parents are locked only one thread
         * can continue. So the lock order of the children
         * doesn't really matter
         */
            if ((error = vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p)))

            if ((error = vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p)))

        if ((error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p)))

    } else /* OK to lock fdvp and fvp */ {
        if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p)))

        if (error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p))

    /*
     * While fvp is still locked, purge it from the name cache and
     * grab its c_cnid value.  Note that the removal of tvp (below)
     * can drop fvp's lock when fvp == tvp.
     */

    /*
     * When a file moves out of "Cleanup At Startup"
     * we can drop its NODUMP status.
     */
    if ((fcp->c_flags & UF_NODUMP) &&
        (fvp->v_type == VREG) &&
        (fdcp->c_desc.cd_nameptr != NULL) &&
        (strcmp(fdcp->c_desc.cd_nameptr, CARBON_TEMP_DIR_NAME) == 0)) {
        fcp->c_flags &= ~UF_NODUMP;
        fcp->c_flag |= C_CHANGE;

        (void) VOP_UPDATE(fvp, &tv, &tv, 0);

    bzero(&from_desc, sizeof(from_desc));
    from_desc.cd_nameptr = fcnp->cn_nameptr;
    from_desc.cd_namelen = fcnp->cn_namelen;
    from_desc.cd_parentcnid = fdcp->c_cnid;
    from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
    from_desc.cd_cnid = fcp->c_cnid;

    bzero(&to_desc, sizeof(to_desc));
    to_desc.cd_nameptr = tcnp->cn_nameptr;
    to_desc.cd_namelen = tcnp->cn_namelen;
    to_desc.cd_parentcnid = tdcp->c_cnid;
    to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
    to_desc.cd_cnid = fcp->c_cnid;

    hfs_global_shared_lock_acquire(hfsmp);

    if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {

    /*
     * Reserve some space in the Catalog file.
     */
    if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) {

    /*
     * If the destination exists then it needs to be removed.
     */

        /*
         * Note that hfs_removedir and hfs_removefile
         * will keep tdvp locked with a reference.
         * But tvp will lose its lock and reference.
         */
        if (tvp->v_type == VDIR)
            error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_RENAMEOPTS);

            error = hfs_removefile(tdvp, tvp, tcnp, HFSRM_RENAMEOPTS);

    /*
     * All done with tvp and fvp
     */

    /* Lock catalog b-tree */
    error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);

    error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);

    /* Unlock catalog b-tree */
    (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

    /* Update cnode's catalog descriptor */

        replace_desc(fcp, &out_desc);
        fcp->c_parentcnid = tdcp->c_cnid;

    hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_RMDIR : VOL_RMFILE,
        (fdcp->c_cnid == kHFSRootFolderID));
    hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_MKDIR : VOL_MKFILE,
        (tdcp->c_cnid == kHFSRootFolderID));

    /* Update both parent directories. */

    if (fdcp->c_nlink > 0)

    if (fdcp->c_entries > 0)

    fdcp->c_flag |= C_CHANGE | C_UPDATE;
    (void) VOP_UPDATE(fdvp, &tv, &tv, 0);

    tdcp->c_childhint = out_desc.cd_hint;	/* Cache directory's location */
    tdcp->c_flag |= C_CHANGE | C_UPDATE;
    (void) VOP_UPDATE(tdvp, &tv, &tv, 0);

    cat_postflight(hfsmp, &cookie, p);

    journal_end_transaction(hfsmp->jnl);

    hfs_global_shared_lock_release(hfsmp);

    /* Note that if hfs_removedir or hfs_removefile was invoked above they will already have
       generated a NOTE_WRITE for tdvp and a NOTE_DELETE for tvp.
    */
    HFS_KNOTE(fvp, NOTE_RENAME);
    HFS_KNOTE(fdvp, NOTE_WRITE);
    if (tdvp != fdvp) HFS_KNOTE(tdvp, NOTE_WRITE);

    VOP_UNLOCK(fvp, 0, p);

    VOP_UNLOCK(fdvp, 0, p);

    VOP_UNLOCK(tdvp, 0, p);

    VOP_UNLOCK(tvp, 0, p);

    /* After tvp is removed the only acceptable error is EIO */
    if (error && tvp_deleted)
/*
     IN WILLRELE struct vnode *dvp;
     OUT struct vnode **vpp;
     IN struct componentname *cnp;
     IN struct vattr *vap;

     We are responsible for freeing the namei buffer,
     it is done in hfs_makenode()
*/

static int
hfs_mkdir(ap)
    struct vop_mkdir_args /* {
        struct vnode *a_dvp;
        struct vnode **a_vpp;
        struct componentname *a_cnp;
        struct vattr *a_vap;
    } */ *ap;
{
    struct vattr *vap = ap->a_vap;

    return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
            ap->a_dvp, ap->a_vpp, ap->a_cnp));
}
/*
 * symlink -- make a symbolic link
#% symlink	dvp	L U U
#% symlink	vpp	- U -
#
# XXX - note that the return vnode has already been VRELE'ed
#       by the filesystem layer.  To use it you must use vget,
#       possibly with a further namei.
#
     IN WILLRELE struct vnode *dvp;
     OUT WILLRELE struct vnode **vpp;
     IN struct componentname *cnp;
     IN struct vattr *vap;

     We are responsible for freeing the namei buffer,
     it is done in hfs_makenode().
*/

static int
hfs_symlink(ap)
    struct vop_symlink_args /* {
        struct vnode *a_dvp;
        struct vnode **a_vpp;
        struct componentname *a_cnp;
        struct vattr *a_vap;
    } */ *ap;
{
    register struct vnode *vp, **vpp = ap->a_vpp;
    struct hfsmount *hfsmp;
    struct filefork *fp;
    struct buf *bp = NULL;

    /* HFS standard disks don't support symbolic links */
    if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) {
        VOP_ABORTOP(ap->a_dvp, ap->a_cnp);

        return (EOPNOTSUPP);

    /* Check for empty target name */
    if (ap->a_target[0] == 0) {
        VOP_ABORTOP(ap->a_dvp, ap->a_cnp);

    hfsmp = VTOHFS(ap->a_dvp);

    /* Create the vnode */
    if ((error = hfs_makenode(S_IFLNK | ap->a_vap->va_mode,
            ap->a_dvp, vpp, ap->a_cnp))) {

    len = strlen(ap->a_target);

    (void)hfs_getinoquota(VTOC(vp));

    hfs_global_shared_lock_acquire(hfsmp);

    if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
        hfs_global_shared_lock_release(hfsmp);

    /* Allocate space for the link */
    error = VOP_TRUNCATE(vp, len, IO_NOZEROFILL,
            ap->a_cnp->cn_cred, ap->a_cnp->cn_proc);

        goto out;  /* XXX need to remove link */

    /* Write the link to disk */
    bp = getblk(vp, 0, roundup((int)fp->ff_size, VTOHFS(vp)->hfs_phys_block_size),

        journal_modify_block_start(hfsmp->jnl, bp);

    bzero(bp->b_data, bp->b_bufsize);
    bcopy(ap->a_target, bp->b_data, len);

        journal_modify_block_end(hfsmp->jnl, bp);

        journal_end_transaction(hfsmp->jnl);

    hfs_global_shared_lock_release(hfsmp);
/*
 * Dummy dirents to simulate the "." and ".." entries of the directory
 * in an HFS filesystem.  HFS doesn't provide these on disk.  Note that
 * the size of these entries is the smallest needed to represent them
 * (only 12 bytes each).
 */
static hfsdotentry rootdots[2] = {
    {
        sizeof(struct hfsdotentry),	/* d_reclen */
        DT_DIR,				/* d_type */
    },
    {
        sizeof(struct hfsdotentry),	/* d_reclen */
        DT_DIR,				/* d_type */
    }
};
/*
 * There is some confusion as to what the semantics of uio_offset are.
 * In ufs, it represents the actual byte offset within the directory
 * "file."  HFS, however, just uses it as an entry counter - essentially
 * assuming that it has no meaning except to the hfs_readdir function.
 * This approach would be more efficient here, but some callers may
 * assume the uio_offset acts like a byte offset.  NFS in fact
 * monkeys around with the offset field a lot between readdir calls.
 *
 * The use of the resid uiop->uio_resid and uiop->uio_iov->iov_len
 * fields is a mess as well.  The libc function readdir() returns
 * NULL (indicating the end of a directory) when either
 * the getdirentries() syscall (which calls this and returns
 * the size of the buffer passed in less the value of uiop->uio_resid)
 * returns 0, or a direct record with a d_reclen of zero.
 * nfs_server.c:rfs_readdir(), on the other hand, checks for the end
 * of the directory by testing uiop->uio_resid == 0.  The solution
 * is to pad the size of the last struct direct in a given
 * block to fill the block if we are not at the end of the directory.
 */

/*
 * NOTE: We require a minimal buffer size of DIRBLKSIZ for two reasons.  One, it is the same value
 * returned by the stat() call as the block size.  This is mentioned in the man page for getdirentries():
 * "Nbytes must be greater than or equal to the block size associated with the file,
 * see stat(2)".  Might as well settle on the same size as ufs.  Second, this makes sure there is enough
 * room for the . and .. entries that have to be added manually.
 */
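
/*
 * Editor's note: a minimal user-space sketch (kept under #if 0, not part of
 * this driver) showing how a caller consumes the entries and synthetic
 * offsets produced by hfs_readdir() via the getdirentries(2) syscall.
 * The directory path is hypothetical; error handling is abbreviated.
 */
#if 0
#include <sys/types.h>
#include <sys/dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void
list_dir(const char *path)	/* e.g. "/Volumes/MyHFS" (hypothetical) */
{
	char buf[4096];		/* >= DIRBLKSIZ, per the NOTE above */
	long base;
	int fd, nbytes;

	fd = open(path, O_RDONLY, 0);
	if (fd < 0)
		return;
	/* A return of 0 means end-of-directory; entries are walked by d_reclen. */
	while ((nbytes = getdirentries(fd, buf, sizeof(buf), &base)) > 0) {
		char *cp = buf;

		while (cp < buf + nbytes) {
			struct dirent *dp = (struct dirent *)cp;

			printf("%lu %s\n", (unsigned long)dp->d_fileno, dp->d_name);
			cp += dp->d_reclen;
		}
	}
	close(fd);
}
#endif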
/*
	IN struct vnode *vp;
	INOUT struct uio *uio;
	IN struct ucred *cred;
	INOUT u_long **cookies;
*/
static int
hfs_readdir(ap)
	struct vop_readdir_args /* {
		...
	} */ *ap;
{
	register struct uio *uio = ap->a_uio;
	struct cnode *cp = VTOC(ap->a_vp);
	struct hfsmount *hfsmp = VTOHFS(ap->a_vp);
	struct proc *p = current_proc();
	off_t off = uio->uio_offset;
	void *user_start = NULL;
	u_long *cookies = NULL;
	u_long *cookiep = NULL;
	/* ... */

	/* We assume it's all one big buffer... */
	if (uio->uio_iovcnt > 1 || uio->uio_resid < AVERAGE_HFSDIRENTRY_SIZE)
		return (EINVAL);

	//
	// We have to lock the user's buffer here so that we won't
	// fault on it after we've acquired a shared lock on the
	// catalog file.  The issue is that you can get a 3-way
	// deadlock if someone else starts a transaction and then
	// tries to lock the catalog file but can't because we're
	// here and we can't service our page fault because VM is
	// blocked trying to start a transaction as a result of
	// trying to free up pages for our page fault.  It's messy
	// but it does happen on dual-processors that are paging
	// heavily (see radar 3082639 for more info).  By locking
	// the buffer up-front we prevent ourselves from faulting
	// while holding the shared catalog file lock.
	//
	// Fortunately this and hfs_search() are the only two places
	// currently (10/30/02) that can fault on user data with a
	// shared lock on the catalog file.
	//
	if (hfsmp->jnl && uio->uio_segflg == UIO_USERSPACE) {
		user_start = uio->uio_iov->iov_base;
		user_len = uio->uio_iov->iov_len;

		if ((retval = vslock(user_start, user_len)) != 0) {
			/* ... */
		}
	}

	/* Create the entries for . and .. */
	if (uio->uio_offset < sizeof(rootdots)) {
		/* ... */
		rootdots[0].d_fileno = cp->c_cnid;
		rootdots[1].d_fileno = cp->c_parentcnid;

		if (uio->uio_offset == 0) {
			dep = (caddr_t) &rootdots[0];
			dotsize = 2 * sizeof(struct hfsdotentry);
		} else if (uio->uio_offset == sizeof(struct hfsdotentry)) {
			dep = (caddr_t) &rootdots[1];
			dotsize = sizeof(struct hfsdotentry);
		}
		/* ... */
		retval = uiomove(dep, dotsize, uio);
	}

	if (ap->a_ncookies != NULL) {
		/*
		 * These cookies are handles that allow NFS to restart
		 * scanning through a directory.  If a directory is large
		 * enough, NFS will issue a successive readdir() with a
		 * uio->uio_offset that is equal to one of these cookies.
		 *
		 * The cookies that we generate are synthesized byte-offsets.
		 * The offset is where the dirent would be if the
		 * directory were an array of packed dirent structs.  It is
		 * synthetic because that's not how directories are stored in
		 * HFS but other code expects that the cookie is a byte offset.
		 *
		 * We have to pre-allocate the cookies because cat_getdirentries()
		 * is the only one that can properly synthesize the offsets (since
		 * it may have to skip over entries and only it knows the true
		 * virtual offset of any particular directory entry).  So we allocate
		 * a cookie table here and pass it in to cat_getdirentries().
		 *
		 * Note that the handling of "." and ".." is mostly done here but
		 * cat_getdirentries() is aware of it.
		 *
		 * Only the NFS server uses cookies so fortunately this code is
		 * not executed unless the NFS server is issuing the readdir
		 * request.
		 *
		 * Also note that the NFS server is the one responsible for
		 * free'ing the cookies even though we allocated them.  Ick.
		 *
		 * We allocate a reasonable number of entries for the size of
		 * the buffer that we're going to fill in.  cat_getdirentries()
		 * is smart enough to not overflow if there's more room in the
		 * buffer but not enough room in the cookie table.
		 */
		if (uio->uio_segflg != UIO_SYSSPACE)
			panic("hfs_readdir: unexpected uio from NFS server");

		ncookies = uio->uio_iov->iov_len / (AVERAGE_HFSDIRENTRY_SIZE/2);
		MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP, M_WAITOK);

		*ap->a_ncookies = ncookies;
		*ap->a_cookies = cookies;
	}

	/* If there are no children then we're done */
	if (cp->c_entries == 0) {
		/* ... */
		cookies[1] = sizeof(struct hfsdotentry);
		/* ... */
	}

	/* Lock catalog b-tree */
	retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
	if (retval) goto Exit;

	retval = cat_getdirentries(hfsmp, &cp->c_desc, cp->c_entries, uio, &eofflag, cookies, ncookies);

	/* Unlock catalog b-tree */
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

	if (retval != E_NONE) {
		/* ... */
	}

	/* were we already past eof ? */
	if (uio->uio_offset == off) {
		/* ... */
	}

	cp->c_flag |= C_ACCESS;

Exit:
	if (hfsmp->jnl && user_start) {
		vsunlock(user_start, user_len, TRUE);
	}

	*ap->a_eofflag = eofflag;
	/* ... */
}
/*
 * Return target name of a symbolic link
#% readlink	vp	L L L

	IN struct vnode *vp;
	INOUT struct uio *uio;
	IN struct ucred *cred;
*/
static int
hfs_readlink(ap)
	struct vop_readlink_args /* {
		...
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct filefork *fp;
	/* ... */

	if (vp->v_type != VLNK)
		return (EINVAL);
	/* ... */

	/* Zero length sym links are not allowed */
	if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
		VTOVCB(vp)->vcbFlags |= kHFS_DamagedVolume;
		/* ... */
	}

	/* Cache the path so we don't waste buffer cache resources */
	if (fp->ff_symlinkptr == NULL) {
		struct buf *bp = NULL;

		MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
		retval = meta_bread(vp, 0,
				roundup((int)fp->ff_size,
					VTOHFS(vp)->hfs_phys_block_size),
				NOCRED, &bp);
		if (retval) {
			/* ... */
			if (fp->ff_symlinkptr) {
				FREE(fp->ff_symlinkptr, M_TEMP);
				fp->ff_symlinkptr = NULL;
			}
			/* ... */
		}
		bcopy(bp->b_data, fp->ff_symlinkptr, (size_t)fp->ff_size);

		if (VTOHFS(vp)->jnl && (bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_INVAL;		/* data no longer needed */
		}
		/* ... */
	}
	retval = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);

	/*
	 * Keep track of blocks read.
	 */
	if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (retval == 0)) {
		/*
		 * If this file hasn't been seen since the start of
		 * the current sampling period then start over.
		 */
		if (cp->c_atime < VTOHFS(vp)->hfc_timebase)
			VTOF(vp)->ff_bytesread = fp->ff_size;
		else
			VTOF(vp)->ff_bytesread += fp->ff_size;

	//	if (VTOF(vp)->ff_bytesread > fp->ff_size)
	//		cp->c_flag |= C_ACCESS;
	}
	return (retval);
}
/*
 * Lock a cnode.  If it's already locked, set the WANT bit and sleep.

	IN struct vnode *vp;
*/
static int
hfs_lock(ap)
	struct vop_lock_args /* {
		...
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);

	return (lockmgr(&cp->c_lock, ap->a_flags, &vp->v_interlock, ap->a_p));
}

/*
	IN struct vnode *vp;
*/
static int
hfs_unlock(ap)
	struct vop_unlock_args /* {
		...
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);

	if (!lockstatus(&cp->c_lock)) {
		printf("hfs_unlock: vnode %s wasn't locked!\n",
			cp->c_desc.cd_nameptr ? cp->c_desc.cd_nameptr : "");
	}
	return (lockmgr(&cp->c_lock, ap->a_flags | LK_RELEASE,
		&vp->v_interlock, ap->a_p));
}

/*
 * Print out the contents of a cnode.

	IN struct vnode *vp;
*/
static int
hfs_print(ap)
	struct vop_print_args /* {
		...
	} */ *ap;
{
	struct vnode * vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);

	printf("tag VT_HFS, cnid %d, on dev %d, %d", cp->c_cnid,
		major(cp->c_dev), minor(cp->c_dev));
	if (vp->v_type == VFIFO)
		/* ... */
	lockmgr_printinfo(&cp->c_lock);
	/* ... */
}

/*
 * Check for a locked cnode.
#% islocked	vp	= = =

	IN struct vnode *vp;
*/
static int
hfs_islocked(ap)
	struct vop_islocked_args /* {
		...
	} */ *ap;
{
	return (lockstatus(&VTOC(ap->a_vp)->c_lock));
}

/*
#% pathconf	vp	L L L

	IN struct vnode *vp;
	OUT register_t *retval;
*/
static int
hfs_pathconf(ap)
	struct vop_pathconf_args /* {
		...
	} */ *ap;
{
	switch (ap->a_name) {
	case _PC_LINK_MAX:
		if (VTOVCB(ap->a_vp)->vcbSigWord == kHFSPlusSigWord)
			*ap->a_retval = HFS_LINK_MAX;
		/* ... */
		break;
	case _PC_NAME_MAX:
		*ap->a_retval = kHFSPlusMaxFileNameBytes;	/* max # of characters x max utf8 representation */
		break;
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;	/* 1024 */
		break;
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		break;
	case _PC_CHOWN_RESTRICTED:
		/* ... */
		break;
	case _PC_NAME_CHARS_MAX:
		*ap->a_retval = kHFSPlusMaxFileNameChars;
		break;
	case _PC_CASE_SENSITIVE:
		if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
			/* ... */
		break;
	case _PC_CASE_PRESERVING:
		/* ... */
		break;
	}
	/* ... */
}
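
/*
 * Editor's note: a small user-space sketch (kept under #if 0, not part of
 * this driver) of querying the limits reported by hfs_pathconf() above via
 * pathconf(2).  The volume path is hypothetical.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

static void
show_limits(const char *path)	/* e.g. "/Volumes/MyHFS" (hypothetical) */
{
	/* On an HFS Plus volume these come from the cases above,
	 * e.g. _PC_NAME_MAX -> kHFSPlusMaxFileNameBytes. */
	printf("NAME_MAX = %ld\n", pathconf(path, _PC_NAME_MAX));
	printf("PATH_MAX = %ld\n", pathconf(path, _PC_PATH_MAX));
	printf("LINK_MAX = %ld\n", pathconf(path, _PC_LINK_MAX));
	printf("PIPE_BUF = %ld\n", pathconf(path, _PC_PIPE_BUF));
}
#endif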
/*
 * Advisory record locking support

	IN struct vnode *vp;
	IN struct flock *fl;
*/
static int
hfs_advlock(ap)
	struct vop_advlock_args /* {
		...
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct flock *fl = ap->a_fl;
	struct hfslockf *lock;
	struct filefork *fork;
	/* ... */

	/* Only regular files can have locks */
	if (vp->v_type != VREG)
		/* ... */

	fork = VTOF(ap->a_vp);
	/*
	 * Avoid the common case of unlocking when cnode has no locks.
	 */
	if (fork->ff_lockf == (struct hfslockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			/* ... */
		}
	}
	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;
	case SEEK_END:
		start = fork->ff_size + fl->l_start;
		break;
	/* ... */
	}

	/* ... */
	else if (fl->l_len > 0)
		end = start + fl->l_len - 1;
	else { /* l_len is negative */
		/* ... */
	}

	/*
	 * Create the hfslockf structure
	 */
	MALLOC(lock, struct hfslockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	/* ... */
	lock->lf_id = ap->a_id;
	lock->lf_fork = fork;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct hfslockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {
	case F_SETLK:
		retval = hfs_setlock(lock);
		break;
	case F_UNLCK:
		retval = hfs_clearlock(lock);
		FREE(lock, M_LOCKF);
		break;
	case F_GETLK:
		retval = hfs_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		break;
	default:
		/* ... */
		_FREE(lock, M_LOCKF);
		break;
	}
	/* ... */
}
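
/*
 * Editor's note: a brief user-space sketch (kept under #if 0, not part of
 * this driver) of taking and releasing an advisory record lock, which
 * reaches the hfs_advlock() path above through fcntl(2).  The file name is
 * hypothetical.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
lock_first_bytes(const char *path)	/* e.g. "/Volumes/MyHFS/db" (hypothetical) */
{
	struct flock fl;
	int fd = open(path, O_RDWR, 0);

	if (fd < 0)
		return (-1);
	fl.l_whence = SEEK_SET;	/* start/end computed as in the switch above */
	fl.l_start = 0;
	fl.l_len = 128;		/* lock bytes 0..127 */
	fl.l_type = F_WRLCK;
	if (fcntl(fd, F_SETLK, &fl) == -1) {	/* non-blocking lock request */
		close(fd);
		return (-1);
	}
	fl.l_type = F_UNLCK;			/* release the record lock */
	(void) fcntl(fd, F_SETLK, &fl);
	close(fd);
	return (0);
}
#endif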
/*
 * Update the access, modified, and node change times as specified
 * by the C_ACCESS, C_UPDATE, and C_CHANGE flags respectively.  The
 * C_MODIFIED flag is used to specify that the node needs to be
 * updated but that the times have already been set.  The access and
 * modified times are input parameters but the node change time is
 * always taken from the current time.  If waitfor is set, then wait
 * for the disk write of the node to complete.

	IN struct vnode *vp;
	IN struct timeval *access;
	IN struct timeval *modify;
*/
static int
hfs_update(ap)
	struct vop_update_args /* {
		struct vnode *a_vp;
		struct timeval *a_access;
		struct timeval *a_modify;
		...
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(ap->a_vp);
	struct cat_fork *dataforkp = NULL;
	struct cat_fork *rsrcforkp = NULL;
	struct cat_fork datafork;
	struct hfsmount *hfsmp;
	/* ... */

	/* XXX do we really want to clear the system cnode flags here???? */
	if (((vp->v_flag & VSYSTEM) && (cp->c_cnid < kHFSFirstUserCatalogNodeID)) ||
	    (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) ||
	    (cp->c_mode == 0)) {
		cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
		/* ... */
	}

	updateflag = cp->c_flag & (C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE | C_FORCEUPDATE);

	/* Nothing to update. */
	if (updateflag == 0) {
		/* ... */
	}
	/* HFS standard doesn't have access times. */
	if ((updateflag == C_ACCESS) && (VTOVCB(vp)->vcbSigWord == kHFSSigWord)) {
		/* ... */
	}
	if (updateflag & C_ACCESS) {
		/*
		 * When the access time is the only thing changing
		 * then make sure it's sufficiently newer before
		 * committing it to disk.
		 */
		if ((updateflag == C_ACCESS) &&
		    (ap->a_access->tv_sec < (cp->c_atime + ATIME_ONDISK_ACCURACY))) {
			/* ... */
		}
		cp->c_atime = ap->a_access->tv_sec;
	}
	if (updateflag & C_UPDATE) {
		cp->c_mtime = ap->a_modify->tv_sec;
		cp->c_mtime_nsec = ap->a_modify->tv_usec * 1000;
	}
	if (updateflag & C_CHANGE) {
		cp->c_ctime = time.tv_sec;
		/*
		 * HFS dates that WE set must be adjusted for DST
		 */
		if ((VTOVCB(vp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) {
			cp->c_ctime += 3600;
			cp->c_mtime = cp->c_ctime;
		}
	}

	if (cp->c_datafork)
		dataforkp = &cp->c_datafork->ff_data;
	if (cp->c_rsrcfork)
		rsrcforkp = &cp->c_rsrcfork->ff_data;
	/* ... */

	/*
	 * For delayed allocations updates are
	 * postponed until an fsync or the file
	 * gets written to disk.
	 *
	 * Deleted files can defer meta data updates until inactive.
	 *
	 * If we're ever called with the C_FORCEUPDATE flag though
	 * we have to do the update.
	 */
	if (ISSET(cp->c_flag, C_FORCEUPDATE) == 0 &&
	    (ISSET(cp->c_flag, C_DELETED) ||
	     (dataforkp && cp->c_datafork->ff_unallocblocks) ||
	     (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks))) {
		if (updateflag & (C_CHANGE | C_UPDATE))
			hfs_volupdate(hfsmp, VOL_UPDATE, 0);
		cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE);
		cp->c_flag |= C_MODIFIED;

		HFS_KNOTE(vp, NOTE_ATTRIB);
		/* ... */
	}

	hfs_global_shared_lock_acquire(hfsmp);
	if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
		hfs_global_shared_lock_release(hfsmp);
		/* ... */
	}

	/*
	 * For files with invalid ranges (holes) the on-disk
	 * field representing the size of the file (cf_size)
	 * must be no larger than the start of the first hole.
	 */
	if (dataforkp && !CIRCLEQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
		bcopy(dataforkp, &datafork, sizeof(datafork));
		datafork.cf_size = CIRCLEQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
		dataforkp = &datafork;
	} else if (dataforkp && (cp->c_datafork->ff_unallocblocks != 0)) {
		// always make sure the block count and the size
		// of the file match the number of blocks actually
		// allocated to the file on disk
		bcopy(dataforkp, &datafork, sizeof(datafork));
		// make sure that we don't assign a negative block count
		if (cp->c_datafork->ff_blocks < cp->c_datafork->ff_unallocblocks) {
			panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
				cp->c_datafork->ff_blocks, cp->c_datafork->ff_unallocblocks);
		}
		datafork.cf_blocks = (cp->c_datafork->ff_blocks - cp->c_datafork->ff_unallocblocks);
		datafork.cf_size = datafork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
		dataforkp = &datafork;
	}

	/*
	 * Lock the Catalog b-tree file.
	 * A shared lock is sufficient since an update doesn't change
	 * the tree and the lock on vp protects the cnode.
	 */
	error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
	if (error) {
		journal_end_transaction(hfsmp->jnl);
		hfs_global_shared_lock_release(hfsmp);
		/* ... */
	}

	/* XXX - waitfor is not enforced */
	error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);

	/* Unlock the Catalog b-tree file. */
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

	if (updateflag & (C_CHANGE | C_UPDATE | C_FORCEUPDATE))
		hfs_volupdate(hfsmp, VOL_UPDATE, 0);

	/* After the updates are finished, clear the flags */
	cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE | C_FORCEUPDATE);

	journal_end_transaction(hfsmp->jnl);

	hfs_global_shared_lock_release(hfsmp);

	HFS_KNOTE(vp, NOTE_ATTRIB);
	/* ... */
}
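
/*
 * Editor's note: for reference, the typical caller pattern for the update
 * path above, as seen elsewhere in this file (e.g. the rename and makenode
 * paths): mark which times changed on the cnode, then push them with
 * VOP_UPDATE using the current time.  Sketch only, kept under #if 0.
 */
#if 0
	struct timeval tv;

	cp->c_flag |= C_CHANGE | C_UPDATE;	/* ctime and mtime need updating */
	tv = time;				/* global kernel time */
	(void) VOP_UPDATE(vp, &tv, &tv, 0);	/* waitfor == 0: don't block on I/O */
#endif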
/*
 * Allocate a new node
 *
 * Upon leaving, namei buffer must be freed.
 */
static int
hfs_makenode(mode, dvp, vpp, cnp)
	int mode;
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	struct hfsmount *hfsmp;
	struct cat_desc in_desc, out_desc;
	struct cat_attr attr;
	cat_cookie_t cookie = {0};
	int error, started_tr = 0, grabbed_lock = 0;
	enum vtype vnodetype;
	/* ... */

	hfsmp = VTOHFS(dvp);

	bzero(&out_desc, sizeof(out_desc));

	if ((mode & S_IFMT) == 0)
		mode |= S_IFREG;
	vnodetype = IFTOVT(mode);

	/* Check if unmount in progress */
	if (VTOVFS(dvp)->mnt_kern_flag & MNTK_UNMOUNT) {
		/* ... */
	}
	/* Check if we're out of usable disk space. */
	if ((suser(cnp->cn_cred, NULL) != 0) && (hfs_freeblks(hfsmp, 1) <= 0)) {
		/* ... */
	}

	/* Setup the default attributes */
	bzero(&attr, sizeof(attr));
	attr.ca_mode = mode;
	attr.ca_nlink = vnodetype == VDIR ? 2 : 1;
	attr.ca_mtime = time.tv_sec;
	attr.ca_mtime_nsec = time.tv_usec * 1000;
	if ((VTOVCB(dvp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) {
		attr.ca_mtime += 3600;	/* Same as what hfs_update does */
	}
	attr.ca_atime = attr.ca_ctime = attr.ca_itime = attr.ca_mtime;
	if (VTOVFS(dvp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {
		attr.ca_uid = hfsmp->hfs_uid;
		attr.ca_gid = hfsmp->hfs_gid;
	} else {
		if (vnodetype == VLNK)
			attr.ca_uid = dcp->c_uid;
		else
			attr.ca_uid = cnp->cn_cred->cr_uid;
		attr.ca_gid = dcp->c_gid;
	}
	/*
	 * Don't tag as a special file (BLK or CHR) until *after*
	 * hfs_getnewvnode is called.  This ensures that any
	 * alias checking is deferred until hfs_mknod completes.
	 */
	if (vnodetype == VBLK || vnodetype == VCHR)
		attr.ca_mode = (attr.ca_mode & ~S_IFMT) | S_IFREG;

	/* Tag symlinks with a type and creator. */
	if (vnodetype == VLNK) {
		struct FndrFileInfo *fip;

		fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
		fip->fdType = SWAP_BE32(kSymLinkFileType);
		fip->fdCreator = SWAP_BE32(kSymLinkCreator);
	}
	if ((attr.ca_mode & S_ISGID) &&
	    !groupmember(dcp->c_gid, cnp->cn_cred) &&
	    suser(cnp->cn_cred, NULL)) {
		attr.ca_mode &= ~S_ISGID;
	}
	if (cnp->cn_flags & ISWHITEOUT)
		attr.ca_flags |= UF_OPAQUE;

	/* Setup the descriptor */
	bzero(&in_desc, sizeof(in_desc));
	in_desc.cd_nameptr = cnp->cn_nameptr;
	in_desc.cd_namelen = cnp->cn_namelen;
	in_desc.cd_parentcnid = dcp->c_cnid;
	in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;

	hfs_global_shared_lock_acquire(hfsmp);

	if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
		/* ... */
	}

	/*
	 * Reserve some space in the Catalog file.
	 *
	 * (we also add CAT_DELETE since our getnewvnode
	 *  request can cause an hfs_inactive call to
	 *  delete an unlinked file)
	 */
	if ((error = cat_preflight(hfsmp, CAT_CREATE | CAT_DELETE, &cookie, p))) {
		/* ... */
	}

	/* Lock catalog b-tree */
	error = hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_EXCLUSIVE, p);
	/* ... */

	error = cat_create(hfsmp, &in_desc, &attr, &out_desc);

	/* Unlock catalog b-tree */
	(void) hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_RELEASE, p);
	/* ... */

	/* Update the parent directory */
	dcp->c_childhint = out_desc.cd_hint;	/* Cache directory's location */
	/* ... */
	dcp->c_flag |= C_CHANGE | C_UPDATE;
	/* ... */
	(void) VOP_UPDATE(dvp, &tv, &tv, 0);
	if (vnodetype == VDIR) {
		HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
	} else {
		HFS_KNOTE(dvp, NOTE_WRITE);
	}

	hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
		(dcp->c_cnid == kHFSRootFolderID));

	// have to end the transaction here before we call hfs_getnewvnode()
	// because that can cause us to try and reclaim a vnode on a different
	// file system which could cause us to start a transaction which can
	// deadlock with someone on that other file system (since we could be
	// holding two transaction locks as well as various vnodes and we did
	// not obtain the locks on them in the proper order).
	//
	// NOTE: this means that if the quota check fails or we have to update
	//       the change time on a block-special device that those changes
	//       will happen as part of independent transactions.
	//
	journal_end_transaction(hfsmp->jnl);

	hfs_global_shared_lock_release(hfsmp);

	/* Create a vnode for the object just created: */
	error = hfs_getnewvnode(hfsmp, NULL, &out_desc, 0, &attr, NULL, &tvp);
	/* ... */

	cache_enter(dvp, tvp, cnp);

	/*
	 * We call hfs_chkiq with FORCE flag so that if we
	 * fall through to the rmdir we actually have
	 * accounted for the inode
	 */
	if ((error = hfs_getinoquota(cp)) ||
	    (error = hfs_chkiq(cp, 1, cnp->cn_cred, FORCE))) {
		if (tvp->v_type == VDIR)
			VOP_RMDIR(dvp, tvp, cnp);
		else
			VOP_REMOVE(dvp, tvp, cnp);

		// because VOP_RMDIR and VOP_REMOVE already
		// have done the vput()
		/* ... */
	}

	/*
	 * restore vtype and mode for VBLK and VCHR
	 */
	if (vnodetype == VBLK || vnodetype == VCHR) {
		/* ... */
		tvp->v_type = IFTOVT(mode);
		cp->c_flag |= C_CHANGE;
		/* ... */
		if ((error = VOP_UPDATE(tvp, &tv, &tv, 1))) {
			/* ... */
		}
	}
	/* ... */
exit:
	cat_releasedesc(&out_desc);

	cat_postflight(hfsmp, &cookie, p);

	if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) {
		char *tmp = cnp->cn_pnbuf;
		cnp->cn_pnbuf = NULL;
		cnp->cn_flags &= ~HASBUF;
		FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI);
	}
	/*
	 * Check if a file is located in the "Cleanup At Startup"
	 * directory.  If it is then tag it as NODUMP so that we
	 * can be lazy about zero filling data holes.
	 */
	if ((error == 0) && dvp && (vnodetype == VREG) &&
	    (dcp->c_desc.cd_nameptr != NULL) &&
	    (strcmp(dcp->c_desc.cd_nameptr, CARBON_TEMP_DIR_NAME) == 0)) {
		/* ... */
		parid = dcp->c_parentcnid;

		/*
		 * The parent of "Cleanup At Startup" should
		 * have the ASCII name of the userid.
		 */
		if (VFS_VGET(HFSTOVFS(hfsmp), &parid, &ddvp) == 0) {
			if (VTOC(ddvp)->c_desc.cd_nameptr) {
				uid = strtoul(VTOC(ddvp)->c_desc.cd_nameptr, 0, 0);
				if (uid == cp->c_uid || uid == cnp->cn_cred->cr_uid) {
					cp->c_flags |= UF_NODUMP;
					cp->c_flag |= C_CHANGE;
				}
			}
			/* ... */
		}
	}

	journal_end_transaction(hfsmp->jnl);

	hfs_global_shared_lock_release(hfsmp);

	return (error);
}
static int
hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, struct proc *p)
{
	struct vnode *rvp;
	struct cnode *cp = VTOC(vp);
	int error;

	if ((rvp = cp->c_rsrc_vp)) {
		/* Use existing vnode */
		error = vget(rvp, 0, p);
		if (error) {
			char * name = VTOC(vp)->c_desc.cd_nameptr;

			if (name)
				printf("hfs_vgetrsrc: couldn't get"
					" resource fork for %s\n", name);
			return (error);
		}
	} else {
		struct cat_fork rsrcfork;

		/* Lock catalog b-tree */
		error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
		/* ... */

		/* Get resource fork data */
		error = cat_lookup(hfsmp, &cp->c_desc, 1, (struct cat_desc *)0,
				(struct cat_attr *)0, &rsrcfork);

		/* Unlock the Catalog */
		(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
		/* ... */

		error = hfs_getnewvnode(hfsmp, cp, &cp->c_desc, 1, &cp->c_attr,
				&rsrcfork, &rvp);
		/* ... */
	}

	*rvpp = rvp;
	return (0);
}
static void
filt_hfsdetach(struct knote *kn)
{
	struct vnode *vp;
	int result;
	struct proc *p = current_proc();

	vp = (struct vnode *)kn->kn_hook;
	if (1) {	/* ! KNDETACH_VNLOCKED */
		result = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		/* ... */
	}

	result = KNOTE_DETACH(&VTOC(vp)->c_knotes, kn);

	if (1) {	/* ! KNDETACH_VNLOCKED */
		VOP_UNLOCK(vp, 0, p);
	}
}

static int
filt_hfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_fp->f_data;

	if (hint == NOTE_REVOKE) {
		/*
		 * filesystem is gone, so set the EOF flag and schedule
		 * the knote for deletion.
		 */
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	kn->kn_data = VTOF(vp)->ff_size - kn->kn_fp->f_offset;
	return (kn->kn_data != 0);
}

static int
filt_hfswrite(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE) {
		/*
		 * filesystem is gone, so set the EOF flag and schedule
		 * the knote for deletion.
		 */
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
	}
	/* ... */
	return (1);
}

static int
filt_hfsvnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_fflags != 0);
}

static struct filterops hfsread_filtops =
	{ 1, NULL, filt_hfsdetach, filt_hfsread };
static struct filterops hfswrite_filtops =
	{ 1, NULL, filt_hfsdetach, filt_hfswrite };
static struct filterops hfsvnode_filtops =
	{ 1, NULL, filt_hfsdetach, filt_hfsvnode };
/*
#% kqfilt_add	vp	L L L

	IN struct vnode *vp;
	IN struct knote *kn;
*/
static int
hfs_kqfilt_add(ap)
	struct vop_kqfilt_add_args /* {
		...
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (vp->v_type == VREG) {
			kn->kn_fop = &hfsread_filtops;
		}
		/* ... */
		break;
	case EVFILT_WRITE:
		if (vp->v_type == VREG) {
			kn->kn_fop = &hfswrite_filtops;
		}
		/* ... */
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hfsvnode_filtops;
		break;
	default:
		/* ... */
	}

	kn->kn_hook = (caddr_t)vp;

	/* simple_lock(&vp->v_pollinfo.vpi_lock); */
	KNOTE_ATTACH(&VTOC(vp)->c_knotes, kn);
	/* simple_unlock(&vp->v_pollinfo.vpi_lock); */

	return (0);
}
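
/*
 * Editor's note: a user-space sketch (kept under #if 0, not part of this
 * driver) of watching a file with kqueue(2)/EVFILT_VNODE, which is what
 * ends up in hfs_kqfilt_add() above.  The path is hypothetical; error
 * checks are trimmed.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <stdio.h>

static void
watch_file(const char *path)	/* e.g. "/Volumes/MyHFS/file" (hypothetical) */
{
	struct kevent change, event;
	int kq = kqueue();
	int fd = open(path, O_RDONLY, 0);

	/* Ask for the same notes this file posts via HFS_KNOTE(). */
	EV_SET(&change, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	       NOTE_WRITE | NOTE_DELETE | NOTE_RENAME | NOTE_ATTRIB, 0, NULL);
	if (kevent(kq, &change, 1, &event, 1, NULL) > 0)
		printf("fflags 0x%x\n", event.fflags);
}
#endif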
/*
#% kqfilt_remove	vp	L L L

	IN struct vnode *vp;
*/
static int
hfs_kqfilt_remove(ap)
	struct vop_kqfilt_remove_args /* {
		...
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	uintptr_t ident = ap->a_ident;
	int result;

	result = ENOTSUP; /* XXX */

	return (result);
}
/*
 * Wrapper for special device reads
 */
static int
hfsspec_read(ap)
	struct vop_read_args /* {
		...
		struct ucred *a_cred;
	} */ *ap;
{
	VTOC(ap->a_vp)->c_flag |= C_ACCESS;
	return (VOCALL (spec_vnodeop_p, VOFFSET(vop_read), ap));
}

/*
 * Wrapper for special device writes
 */
static int
hfsspec_write(ap)
	struct vop_write_args /* {
		...
		struct ucred *a_cred;
	} */ *ap;
{
	/*
	 * Set update and change flags.
	 */
	VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE;
	return (VOCALL (spec_vnodeop_p, VOFFSET(vop_write), ap));
}

/*
 * Wrapper for special device close
 *
 * Update the times on the cnode then do device close.
 */
static int
hfsspec_close(ap)
	struct vop_close_args /* {
		...
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);

	simple_lock(&vp->v_interlock);
	if (ap->a_vp->v_usecount > 1)
		CTIMES(cp, &time, &time);
	simple_unlock(&vp->v_interlock);
	return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
}

/*
 * Wrapper for fifo reads
 */
static int
hfsfifo_read(ap)
	struct vop_read_args /* {
		...
		struct ucred *a_cred;
	} */ *ap;
{
	extern int (**fifo_vnodeop_p)(void *);

	VTOC(ap->a_vp)->c_flag |= C_ACCESS;
	return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_read), ap));
}

/*
 * Wrapper for fifo writes
 */
static int
hfsfifo_write(ap)
	struct vop_write_args /* {
		...
		struct ucred *a_cred;
	} */ *ap;
{
	extern int (**fifo_vnodeop_p)(void *);

	/*
	 * Set update and change flags.
	 */
	VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE;
	return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_write), ap));
}

/*
 * Wrapper for fifo close
 *
 * Update the times on the cnode then do device close.
 */
static int
hfsfifo_close(ap)
	struct vop_close_args /* {
		...
		struct ucred *a_cred;
	} */ *ap;
{
	extern int (**fifo_vnodeop_p)(void *);
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);

	simple_lock(&vp->v_interlock);
	if (ap->a_vp->v_usecount > 1)
		CTIMES(cp, &time, &time);
	simple_unlock(&vp->v_interlock);
	return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
}

/*
 * kqfilt_add wrapper for fifos.
 *
 * Fall through to hfs kqfilt_add routines if needed
 */
int
hfsfifo_kqfilt_add(ap)
	struct vop_kqfilt_add_args *ap;
{
	extern int (**fifo_vnodeop_p)(void *);
	int error;

	error = VOCALL(fifo_vnodeop_p, VOFFSET(vop_kqfilt_add), ap);
	if (error)
		error = hfs_kqfilt_add(ap);
	return (error);
}

/*
 * kqfilt_remove wrapper for fifos.
 *
 * Fall through to hfs kqfilt_remove routines if needed
 */
int
hfsfifo_kqfilt_remove(ap)
	struct vop_kqfilt_remove_args *ap;
{
	extern int (**fifo_vnodeop_p)(void *);
	int error;

	error = VOCALL(fifo_vnodeop_p, VOFFSET(vop_kqfilt_remove), ap);
	if (error)
		error = hfs_kqfilt_remove(ap);
	return (error);
}
/*****************************************************************************
*
*****************************************************************************/
int hfs_cache_lookup();	/* in hfs_lookup.c */
int hfs_lookup();	/* in hfs_lookup.c */
int hfs_read();		/* in hfs_readwrite.c */
int hfs_write();	/* in hfs_readwrite.c */
int hfs_ioctl();	/* in hfs_readwrite.c */
int hfs_select();	/* in hfs_readwrite.c */
int hfs_bmap();		/* in hfs_readwrite.c */
int hfs_strategy();	/* in hfs_readwrite.c */
int hfs_truncate();	/* in hfs_readwrite.c */
int hfs_allocate();	/* in hfs_readwrite.c */
int hfs_pagein();	/* in hfs_readwrite.c */
int hfs_pageout();	/* in hfs_readwrite.c */
int hfs_search();	/* in hfs_search.c */
int hfs_bwrite();	/* in hfs_readwrite.c */
int hfs_link();		/* in hfs_link.c */
int hfs_blktooff();	/* in hfs_readwrite.c */
int hfs_offtoblk();	/* in hfs_readwrite.c */
int hfs_cmap();		/* in hfs_readwrite.c */
int hfs_getattrlist();	/* in hfs_attrlist.c */
int hfs_setattrlist();	/* in hfs_attrlist.c */
int hfs_readdirattr();	/* in hfs_attrlist.c */
int hfs_inactive();	/* in hfs_cnode.c */
int hfs_reclaim();	/* in hfs_cnode.c */
int (**hfs_vnodeop_p)(void *);

#define VOPFUNC int (*)(void *)

struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
	{ &vop_default_desc, (VOPFUNC)vn_default_error },
	{ &vop_lookup_desc, (VOPFUNC)hfs_cache_lookup },	/* lookup */
	{ &vop_create_desc, (VOPFUNC)hfs_create },		/* create */
	{ &vop_mknod_desc, (VOPFUNC)hfs_mknod },		/* mknod */
	{ &vop_open_desc, (VOPFUNC)hfs_open },			/* open */
	{ &vop_close_desc, (VOPFUNC)hfs_close },		/* close */
	{ &vop_access_desc, (VOPFUNC)hfs_access },		/* access */
	{ &vop_getattr_desc, (VOPFUNC)hfs_getattr },		/* getattr */
	{ &vop_setattr_desc, (VOPFUNC)hfs_setattr },		/* setattr */
	{ &vop_read_desc, (VOPFUNC)hfs_read },			/* read */
	{ &vop_write_desc, (VOPFUNC)hfs_write },		/* write */
	{ &vop_ioctl_desc, (VOPFUNC)hfs_ioctl },		/* ioctl */
	{ &vop_select_desc, (VOPFUNC)hfs_select },		/* select */
	{ &vop_revoke_desc, (VOPFUNC)nop_revoke },		/* revoke */
	{ &vop_exchange_desc, (VOPFUNC)hfs_exchange },		/* exchange */
	{ &vop_mmap_desc, (VOPFUNC)err_mmap },			/* mmap */
	{ &vop_fsync_desc, (VOPFUNC)hfs_fsync },		/* fsync */
	{ &vop_seek_desc, (VOPFUNC)nop_seek },			/* seek */
	{ &vop_remove_desc, (VOPFUNC)hfs_remove },		/* remove */
	{ &vop_link_desc, (VOPFUNC)hfs_link },			/* link */
	{ &vop_rename_desc, (VOPFUNC)hfs_rename },		/* rename */
	{ &vop_mkdir_desc, (VOPFUNC)hfs_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, (VOPFUNC)hfs_rmdir },		/* rmdir */
	{ &vop_mkcomplex_desc, (VOPFUNC)err_mkcomplex },	/* mkcomplex */
	{ &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist },	/* getattrlist */
	{ &vop_setattrlist_desc, (VOPFUNC)hfs_setattrlist },	/* setattrlist */
	{ &vop_symlink_desc, (VOPFUNC)hfs_symlink },		/* symlink */
	{ &vop_readdir_desc, (VOPFUNC)hfs_readdir },		/* readdir */
	{ &vop_readdirattr_desc, (VOPFUNC)hfs_readdirattr },	/* readdirattr */
	{ &vop_readlink_desc, (VOPFUNC)hfs_readlink },		/* readlink */
	{ &vop_abortop_desc, (VOPFUNC)nop_abortop },		/* abortop */
	{ &vop_inactive_desc, (VOPFUNC)hfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, (VOPFUNC)hfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, (VOPFUNC)hfs_lock },			/* lock */
	{ &vop_unlock_desc, (VOPFUNC)hfs_unlock },		/* unlock */
	{ &vop_bmap_desc, (VOPFUNC)hfs_bmap },			/* bmap */
	{ &vop_strategy_desc, (VOPFUNC)hfs_strategy },		/* strategy */
	{ &vop_print_desc, (VOPFUNC)hfs_print },		/* print */
	{ &vop_islocked_desc, (VOPFUNC)hfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, (VOPFUNC)hfs_pathconf },		/* pathconf */
	{ &vop_advlock_desc, (VOPFUNC)hfs_advlock },		/* advlock */
	{ &vop_reallocblks_desc, (VOPFUNC)err_reallocblks },	/* reallocblks */
	{ &vop_truncate_desc, (VOPFUNC)hfs_truncate },		/* truncate */
	{ &vop_allocate_desc, (VOPFUNC)hfs_allocate },		/* allocate */
	{ &vop_update_desc, (VOPFUNC)hfs_update },		/* update */
	{ &vop_searchfs_desc, (VOPFUNC)hfs_search },		/* search fs */
	{ &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },		/* bwrite */
	{ &vop_pagein_desc, (VOPFUNC)hfs_pagein },		/* pagein */
	{ &vop_pageout_desc, (VOPFUNC)hfs_pageout },		/* pageout */
	{ &vop_copyfile_desc, (VOPFUNC)err_copyfile },		/* copyfile */
	{ &vop_blktooff_desc, (VOPFUNC)hfs_blktooff },		/* blktooff */
	{ &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk },		/* offtoblk */
	{ &vop_cmap_desc, (VOPFUNC)hfs_cmap },			/* cmap */
	{ &vop_kqfilt_add_desc, (VOPFUNC)hfs_kqfilt_add },	/* kqfilt_add */
	{ &vop_kqfilt_remove_desc, (VOPFUNC)hfs_kqfilt_remove },	/* kqfilt_remove */
	{ NULL, (VOPFUNC)NULL }
};

struct vnodeopv_desc hfs_vnodeop_opv_desc =
	{ &hfs_vnodeop_p, hfs_vnodeop_entries };

int (**hfs_specop_p)(void *);
struct vnodeopv_entry_desc hfs_specop_entries[] = {
	{ &vop_default_desc, (VOPFUNC)vn_default_error },
	{ &vop_lookup_desc, (VOPFUNC)spec_lookup },		/* lookup */
	{ &vop_create_desc, (VOPFUNC)spec_create },		/* create */
	{ &vop_mknod_desc, (VOPFUNC)spec_mknod },		/* mknod */
	{ &vop_open_desc, (VOPFUNC)spec_open },			/* open */
	{ &vop_close_desc, (VOPFUNC)hfsspec_close },		/* close */
	{ &vop_access_desc, (VOPFUNC)hfs_access },		/* access */
	{ &vop_getattr_desc, (VOPFUNC)hfs_getattr },		/* getattr */
	{ &vop_setattr_desc, (VOPFUNC)hfs_setattr },		/* setattr */
	{ &vop_read_desc, (VOPFUNC)hfsspec_read },		/* read */
	{ &vop_write_desc, (VOPFUNC)hfsspec_write },		/* write */
	{ &vop_lease_desc, (VOPFUNC)spec_lease_check },		/* lease */
	{ &vop_ioctl_desc, (VOPFUNC)spec_ioctl },		/* ioctl */
	{ &vop_select_desc, (VOPFUNC)spec_select },		/* select */
	{ &vop_revoke_desc, (VOPFUNC)spec_revoke },		/* revoke */
	{ &vop_mmap_desc, (VOPFUNC)spec_mmap },			/* mmap */
	{ &vop_fsync_desc, (VOPFUNC)hfs_fsync },		/* fsync */
	{ &vop_seek_desc, (VOPFUNC)spec_seek },			/* seek */
	{ &vop_remove_desc, (VOPFUNC)spec_remove },		/* remove */
	{ &vop_link_desc, (VOPFUNC)spec_link },			/* link */
	{ &vop_rename_desc, (VOPFUNC)spec_rename },		/* rename */
	{ &vop_mkdir_desc, (VOPFUNC)spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, (VOPFUNC)spec_rmdir },		/* rmdir */
	{ &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist },
	{ &vop_symlink_desc, (VOPFUNC)spec_symlink },		/* symlink */
	{ &vop_readdir_desc, (VOPFUNC)spec_readdir },		/* readdir */
	{ &vop_readlink_desc, (VOPFUNC)spec_readlink },		/* readlink */
	{ &vop_abortop_desc, (VOPFUNC)spec_abortop },		/* abortop */
	{ &vop_inactive_desc, (VOPFUNC)hfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, (VOPFUNC)hfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, (VOPFUNC)hfs_lock },			/* lock */
	{ &vop_unlock_desc, (VOPFUNC)hfs_unlock },		/* unlock */
	{ &vop_bmap_desc, (VOPFUNC)spec_bmap },			/* bmap */
	{ &vop_strategy_desc, (VOPFUNC)spec_strategy },		/* strategy */
	{ &vop_print_desc, (VOPFUNC)hfs_print },		/* print */
	{ &vop_islocked_desc, (VOPFUNC)hfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, (VOPFUNC)spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, (VOPFUNC)spec_advlock },		/* advlock */
	{ &vop_blkatoff_desc, (VOPFUNC)spec_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, (VOPFUNC)spec_valloc },		/* valloc */
	{ &vop_reallocblks_desc, (VOPFUNC)spec_reallocblks },	/* reallocblks */
	{ &vop_vfree_desc, (VOPFUNC)err_vfree },		/* vfree */
	{ &vop_truncate_desc, (VOPFUNC)spec_truncate },		/* truncate */
	{ &vop_update_desc, (VOPFUNC)hfs_update },		/* update */
	{ &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },
	{ &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize },	/* devblocksize */
	{ &vop_pagein_desc, (VOPFUNC)hfs_pagein },		/* Pagein */
	{ &vop_pageout_desc, (VOPFUNC)hfs_pageout },		/* Pageout */
	{ &vop_copyfile_desc, (VOPFUNC)err_copyfile },		/* copyfile */
	{ &vop_blktooff_desc, (VOPFUNC)hfs_blktooff },		/* blktooff */
	{ &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk },		/* offtoblk */
	{ (struct vnodeop_desc *)NULL, (VOPFUNC)NULL }
};

struct vnodeopv_desc hfs_specop_opv_desc =
	{ &hfs_specop_p, hfs_specop_entries };

int (**hfs_fifoop_p)(void *);
struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
	{ &vop_default_desc, (VOPFUNC)vn_default_error },
	{ &vop_lookup_desc, (VOPFUNC)fifo_lookup },		/* lookup */
	{ &vop_create_desc, (VOPFUNC)fifo_create },		/* create */
	{ &vop_mknod_desc, (VOPFUNC)fifo_mknod },		/* mknod */
	{ &vop_open_desc, (VOPFUNC)fifo_open },			/* open */
	{ &vop_close_desc, (VOPFUNC)hfsfifo_close },		/* close */
	{ &vop_access_desc, (VOPFUNC)hfs_access },		/* access */
	{ &vop_getattr_desc, (VOPFUNC)hfs_getattr },		/* getattr */
	{ &vop_setattr_desc, (VOPFUNC)hfs_setattr },		/* setattr */
	{ &vop_read_desc, (VOPFUNC)hfsfifo_read },		/* read */
	{ &vop_write_desc, (VOPFUNC)hfsfifo_write },		/* write */
	{ &vop_lease_desc, (VOPFUNC)fifo_lease_check },		/* lease */
	{ &vop_ioctl_desc, (VOPFUNC)fifo_ioctl },		/* ioctl */
	{ &vop_select_desc, (VOPFUNC)fifo_select },		/* select */
	{ &vop_revoke_desc, (VOPFUNC)fifo_revoke },		/* revoke */
	{ &vop_mmap_desc, (VOPFUNC)fifo_mmap },			/* mmap */
	{ &vop_fsync_desc, (VOPFUNC)hfs_fsync },		/* fsync */
	{ &vop_seek_desc, (VOPFUNC)fifo_seek },			/* seek */
	{ &vop_remove_desc, (VOPFUNC)fifo_remove },		/* remove */
	{ &vop_link_desc, (VOPFUNC)fifo_link },			/* link */
	{ &vop_rename_desc, (VOPFUNC)fifo_rename },		/* rename */
	{ &vop_mkdir_desc, (VOPFUNC)fifo_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, (VOPFUNC)fifo_rmdir },		/* rmdir */
	{ &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist },
	{ &vop_symlink_desc, (VOPFUNC)fifo_symlink },		/* symlink */
	{ &vop_readdir_desc, (VOPFUNC)fifo_readdir },		/* readdir */
	{ &vop_readlink_desc, (VOPFUNC)fifo_readlink },		/* readlink */
	{ &vop_abortop_desc, (VOPFUNC)fifo_abortop },		/* abortop */
	{ &vop_inactive_desc, (VOPFUNC)hfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, (VOPFUNC)hfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, (VOPFUNC)hfs_lock },			/* lock */
	{ &vop_unlock_desc, (VOPFUNC)hfs_unlock },		/* unlock */
	{ &vop_bmap_desc, (VOPFUNC)fifo_bmap },			/* bmap */
	{ &vop_strategy_desc, (VOPFUNC)fifo_strategy },		/* strategy */
	{ &vop_print_desc, (VOPFUNC)hfs_print },		/* print */
	{ &vop_islocked_desc, (VOPFUNC)hfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, (VOPFUNC)fifo_pathconf },		/* pathconf */
	{ &vop_advlock_desc, (VOPFUNC)fifo_advlock },		/* advlock */
	{ &vop_blkatoff_desc, (VOPFUNC)fifo_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, (VOPFUNC)fifo_valloc },		/* valloc */
	{ &vop_reallocblks_desc, (VOPFUNC)fifo_reallocblks },	/* reallocblks */
	{ &vop_vfree_desc, (VOPFUNC)err_vfree },		/* vfree */
	{ &vop_truncate_desc, (VOPFUNC)fifo_truncate },		/* truncate */
	{ &vop_update_desc, (VOPFUNC)hfs_update },		/* update */
	{ &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },
	{ &vop_pagein_desc, (VOPFUNC)hfs_pagein },		/* Pagein */
	{ &vop_pageout_desc, (VOPFUNC)hfs_pageout },		/* Pageout */
	{ &vop_copyfile_desc, (VOPFUNC)err_copyfile },		/* copyfile */
	{ &vop_blktooff_desc, (VOPFUNC)hfs_blktooff },		/* blktooff */
	{ &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk },		/* offtoblk */
	{ &vop_cmap_desc, (VOPFUNC)hfs_cmap },			/* cmap */
	{ &vop_kqfilt_add_desc, (VOPFUNC)hfsfifo_kqfilt_add },	/* kqfilt_add */
	{ &vop_kqfilt_remove_desc, (VOPFUNC)hfsfifo_kqfilt_remove },	/* kqfilt_remove */
	{ (struct vnodeop_desc *)NULL, (VOPFUNC)NULL }
};

struct vnodeopv_desc hfs_fifoop_opv_desc =
	{ &hfs_fifoop_p, hfs_fifoop_entries };