/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/dirent.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/quota.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>
#include <vfs/vfs_support.h>
#include <machine/spl.h>

#include <sys/kdebug.h>
#include "hfs_catalog.h"
#include "hfs_cnode.h"
#include "hfs_lockf.h"
#include "hfs_mount.h"
#include "hfs_quota.h"
#include "hfs_endian.h"

#include "hfscommon/headers/BTreesInternal.h"
#include "hfscommon/headers/FileMgrInternal.h"
#define MAKE_DELETED_NAME(NAME,FID) \
	    (void) sprintf((NAME), "%s%d", HFS_DELETE_PREFIX, (FID))
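/*
 * Illustrative note (not part of the original source): MAKE_DELETED_NAME
 * builds the temporary name used for open-but-deleted files, e.g.
 *
 *	char delname[32];			 // buffer size shown here is an assumption
 *	MAKE_DELETED_NAME(delname, cp->c_fileid);
 *
 * which yields "<HFS_DELETE_PREFIX><fileid>" (see its use in hfs_remove below).
 */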
extern uid_t console_user;

extern unsigned long strtoul(const char *, char **, int);
/* Global vfs data structures for hfs */

extern int groupmember(gid_t gid, struct ucred *cred);

static int hfs_makenode(int mode, struct vnode *dvp, struct vnode **vpp,
			struct componentname *cnp);

static int hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp,
			struct vnode **rvpp, struct proc *p);

static int hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p);

int hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags);

int hfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
		struct proc *p);

int hfs_chmod(struct vnode *vp, int mode, struct ucred *cred,
		struct proc *p);

int hfs_chown(struct vnode *vp, uid_t uid, gid_t gid,
		struct ucred *cred, struct proc *p);
/*****************************************************************************
*
* Common Operations on vnodes
*
*****************************************************************************/

/*
 * Create a regular file
 *
     IN WILLRELE struct vnode *dvp;
     OUT struct vnode **vpp;
     IN struct componentname *cnp;
     IN struct vattr *vap;

	We are responsible for freeing the namei buffer,
	it is done in hfs_makenode()
*/
static int
hfs_create(ap)
	struct vop_create_args /* {
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vattr *vap = ap->a_vap;

	return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
			ap->a_dvp, ap->a_vpp, ap->a_cnp));
}
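/*
 * Note added for clarity (not in the original source): hfs_create, hfs_mknod,
 * hfs_mkdir and hfs_symlink below all funnel into the common helper
 * hfs_makenode(), which creates the new node and, as noted above, frees the
 * namei buffer on behalf of its callers.
 */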
/*
 * Mknod
 *
     IN WILLRELE struct vnode *dvp;
     OUT WILLRELE struct vnode **vpp;
     IN struct componentname *cnp;
     IN struct vattr *vap;
 */
static int
hfs_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vattr *vap = ap->a_vap;
	struct vnode **vpp = ap->a_vpp;

	if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) {
		VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
	}

	/* Create the vnode */
	error = hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
			ap->a_dvp, vpp, ap->a_cnp);

	cp->c_flag |= C_ACCESS | C_CHANGE | C_UPDATE;
	if ((vap->va_rdev != VNOVAL) &&
	    (vap->va_type == VBLK || vap->va_type == VCHR))
		cp->c_rdev = vap->va_rdev;

	/*
	 * Remove cnode so that it will be reloaded by lookup and
	 * checked to see if it is an alias of an existing vnode.
	 * Note: unlike UFS, we don't bash v_type here.
	 */
/*
 * Open
 *
     IN struct ucred *cred;
 */
static int
hfs_open(ap)
	struct vop_open_args /* {
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	/*
	 * Files marked append-only must be opened for appending.
	 */
	if ((vp->v_type != VDIR) && (VTOC(vp)->c_flags & APPEND) &&
	    (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
/*
 * Close
 *
 * Update the times on the cnode.
 *
     IN struct ucred *cred;
 */
static int
hfs_close(ap)
	struct vop_close_args /* {
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct cnode *cp = VTOC(vp);
	register struct filefork *fp = VTOF(vp);
	struct proc *p = ap->a_p;
	u_long blks, blocksize;

	simple_lock(&vp->v_interlock);
	if ((!UBCISVALID(vp) && vp->v_usecount > 1)
	    || (UBCISVALID(vp) && ubc_isinuse(vp, 1))) {
		CTIMES(cp, &tv, &tv);
	}
	simple_unlock(&vp->v_interlock);

	/*
	 * VOP_CLOSE can be called with vp locked (from vclean).
	 * We check for this case using VOP_ISLOCKED and bail.
	 *
	 * XXX During a force unmount we won't do the cleanup below!
	 */
	if (vp->v_type == VDIR || VOP_ISLOCKED(vp))

	if ((fp->ff_blocks > 0) && !ISSET(cp->c_flag, C_DELETED)) {
		enum vtype our_type = vp->v_type;
		u_long our_id = vp->v_id;
		int was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA);

		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);

		/*
		 * Since we can context switch in vn_lock our vnode
		 * could get recycled (eg umount -f).  Double check
		 * that its still ours.
		 */
		if (vp->v_type != our_type || vp->v_id != our_id
		    || cp != VTOC(vp) || !UBCINFOEXISTS(vp)) {
			VOP_UNLOCK(vp, 0, p);
		}

		/*
		 * Last chance to explicitly zero out the areas
		 * that are currently marked invalid:
		 */
		VOP_DEVBLOCKSIZE(cp->c_devvp, &devBlockSize);
		(void) cluster_push(vp);
		SET(vp->v_flag, VNOCACHE_DATA);	/* Don't cache zeros */
		while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
			struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
			off_t start = invalid_range->rl_start;
			off_t end = invalid_range->rl_end;

			/* The range about to be written must be validated
			 * first, so that VOP_CMAP() will return the
			 * appropriate mapping for the cluster code:
			 */
			rl_remove(start, end, &fp->ff_invalidranges);

			(void) cluster_write(vp, (struct uio *) 0, leof,
					invalid_range->rl_end + 1, invalid_range->rl_start,
					(off_t)0, devBlockSize, IO_HEADZEROFILL | IO_NOZERODIRTY);
		}
		if (ISSET(vp->v_flag, VHASDIRTY))
			(void) cluster_push(vp);
		cp->c_flag |= C_MODIFIED;
		cp->c_flag &= ~C_ZFWANTSYNC;

		blocksize = VTOVCB(vp)->blockSize;
		blks = leof / blocksize;
		if (((off_t)blks * (off_t)blocksize) != leof)

		/*
		 * Shrink the peof to the smallest size necessary to contain the leof.
		 */
		if (blks < fp->ff_blocks)
			(void) VOP_TRUNCATE(vp, leof, IO_NDELAY, ap->a_cred, p);
		(void) cluster_push(vp);

		CLR(vp->v_flag, VNOCACHE_DATA);

		/*
		 * If the VOP_TRUNCATE didn't happen to flush the vnode's
		 * information out to disk, force it to be updated now that
		 * all invalid ranges have been zero-filled and validated:
		 */
		if (cp->c_flag & C_MODIFIED) {
			VOP_UPDATE(vp, &tv, &tv, 0);
		}
		VOP_UNLOCK(vp, 0, p);
/*
 * Access
 *
     IN struct ucred *cred;
 */
static int
hfs_access(ap)
	struct vop_access_args /* {
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct ucred *cred = ap->a_cred;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only file systems;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	switch (vp->v_type) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
		if ((error = hfs_getinoquota(cp)))

	/* If immutable bit set, nobody gets to write it. */
	if ((mode & VWRITE) && (cp->c_flags & IMMUTABLE))

	/* Otherwise, user id 0 always gets access. */
	if (ap->a_cred->cr_uid == 0)

	/* Otherwise, check the owner. */
	if (hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, ap->a_p, false) == 0) {
		return ((cp->c_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	if (! (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)) {
		for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++)
			if (cp->c_gid == *gp) {
				return ((cp->c_mode & mask) == mask ? 0 : EACCES);
			}
	}

	/* Otherwise, check everyone else. */
	return ((cp->c_mode & mask) == mask ? 0 : EACCES);
}
/*
 * Getattr
 *
     IN struct vattr *vap;
     IN struct ucred *cred;
 */
static int
hfs_getattr(ap)
	struct vop_getattr_args /* {
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct vattr *vap = ap->a_vap;

	CTIMES(cp, &tv, &tv);

	vap->va_type = vp->v_type;
	/*
	 * [2856576] Since we are dynamically changing the owner, also
	 * effectively turn off the set-user-id and set-group-id bits,
	 * just like chmod(2) would when changing ownership.  This prevents
	 * a security hole where set-user-id programs run as whoever is
	 * logged on (or root if nobody is logged in yet!)
	 */
	vap->va_mode = (cp->c_uid == UNKNOWNUID) ?
		cp->c_mode & ~(S_ISUID | S_ISGID) : cp->c_mode;
	vap->va_nlink = cp->c_nlink;
	vap->va_uid = (cp->c_uid == UNKNOWNUID) ? console_user : cp->c_uid;
	vap->va_gid = cp->c_gid;
	vap->va_fsid = cp->c_dev;
	/*
	 * Exporting file IDs from HFS Plus:
	 *
	 * For "normal" files the c_fileid is the same value as the
	 * c_cnid.  But for hard link files, they are different - the
	 * c_cnid belongs to the active directory entry (ie the link)
	 * and the c_fileid is for the actual inode (ie the data file).
	 *
	 * The stat call (getattr) will always return the c_fileid
	 * and Carbon APIs, which are hardlink-ignorant, will always
	 * receive the c_cnid (from getattrlist).
	 */
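	/*
	 * Illustration added for clarity (not in the original source): if "a"
	 * and "b" are two hard links to the same file, stat("a") and stat("b")
	 * both report the shared c_fileid, while getattrlist() reports each
	 * link's own c_cnid.
	 */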
	vap->va_fileid = cp->c_fileid;
	vap->va_atime.tv_sec = cp->c_atime;
	vap->va_atime.tv_nsec = 0;
	vap->va_mtime.tv_sec = cp->c_mtime;
	vap->va_mtime.tv_nsec = cp->c_mtime_nsec;
	vap->va_ctime.tv_sec = cp->c_ctime;
	vap->va_ctime.tv_nsec = 0;
	vap->va_flags = cp->c_flags;
	vap->va_blocksize = VTOVFS(vp)->mnt_stat.f_iosize;

	if (vp->v_type == VDIR) {
		vap->va_size = cp->c_nlink * AVERAGE_HFSDIRENTRY_SIZE;
	} else {
		vap->va_size = VTOF(vp)->ff_size;
		vap->va_bytes = (u_quad_t)cp->c_blocks *
				(u_quad_t)VTOVCB(vp)->blockSize;
		if (vp->v_type == VBLK || vp->v_type == VCHR)
			vap->va_rdev = cp->c_rdev;
	}
}
/*
 * Set attribute vnode op. called from several syscalls
 *
     IN struct vattr *vap;
     IN struct ucred *cred;
 */
static int
hfs_setattr(ap)
	struct vop_setattr_args /* {
		struct ucred *a_cred;
	} */ *ap;
{
	struct vattr *vap = ap->a_vap;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	struct timeval atimeval, mtimeval;

	/*
	 * Check for unsettable attributes.
	 */
	if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) {

	if (vap->va_flags != VNOVAL) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
		if ((error = hfs_chflags(vp, vap->va_flags, cred, p)))
		if (vap->va_flags & (IMMUTABLE | APPEND))

	if (cp->c_flags & (IMMUTABLE | APPEND))

	// XXXdbg - don't allow modification of the journal or journal_info_block
	if (VTOHFS(vp)->jnl && cp->c_datafork) {
		struct HFSPlusExtentDescriptor *extd;

		extd = &cp->c_datafork->ff_data.cf_extents[0];
		if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {

	/*
	 * Go through the fields and update iff not VNOVAL.
	 */
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
		if ((error = hfs_chown(vp, vap->va_uid, vap->va_gid, cred, p)))

	if (vap->va_size != VNOVAL) {
		/*
		 * Disallow write attempts on read-only file systems;
		 * unless the file is a socket, fifo, or a block or
		 * character device resident on the file system.
		 */
		switch (vp->v_type) {
			if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)

		if ((error = VOP_TRUNCATE(vp, vap->va_size, 0, cred, p)))

	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
		if (((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) &&
		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
		    (error = VOP_ACCESS(vp, VWRITE, cred, p)))) {

		if (vap->va_atime.tv_sec != VNOVAL)
			cp->c_flag |= C_ACCESS;
		if (vap->va_mtime.tv_sec != VNOVAL) {
			cp->c_flag |= C_CHANGE | C_UPDATE;
			/*
			 * The utimes system call can reset the modification
			 * time but it doesn't know about HFS create times.
			 * So we need to ensure that the creation time is
			 * always at least as old as the modification time.
			 */
			if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
			    (cp->c_cnid != kRootDirID) &&
			    (vap->va_mtime.tv_sec < cp->c_itime)) {
				cp->c_itime = vap->va_mtime.tv_sec;
			}
		}
		atimeval.tv_sec = vap->va_atime.tv_sec;
		atimeval.tv_usec = 0;
		mtimeval.tv_sec = vap->va_mtime.tv_sec;
		mtimeval.tv_usec = 0;
		if ((error = VOP_UPDATE(vp, &atimeval, &mtimeval, 1)))

	if (vap->va_mode != (mode_t)VNOVAL) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
		error = hfs_chmod(vp, (int)vap->va_mode, cred, p);
	}
/*
 * Change the mode on a file.
 * cnode must be locked before calling.
 */
int
hfs_chmod(vp, mode, cred, p)
	register struct vnode *vp;
	int mode;
	register struct ucred *cred;
	struct proc *p;
{
	register struct cnode *cp = VTOC(vp);

	if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)

	// XXXdbg - don't allow modification of the journal or journal_info_block
	if (VTOHFS(vp)->jnl && cp && cp->c_datafork) {
		struct HFSPlusExtentDescriptor *extd;

		extd = &cp->c_datafork->ff_data.cf_extents[0];
		if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {

#if OVERRIDE_UNKNOWN_PERMISSIONS
	if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {

	if ((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0)
	if (vp->v_type != VDIR && (mode & S_ISTXT))
	if (!groupmember(cp->c_gid, cred) && (mode & S_ISGID))

	cp->c_mode &= ~ALLPERMS;
	cp->c_mode |= (mode & ALLPERMS);
	cp->c_flag |= C_CHANGE;
int
hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags)
{
	struct cnode *cp = VTOC(vp);

	/*
	 * Disallow write attempts on read-only file systems;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	switch (vp->v_type) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)

	/* If immutable bit set, nobody gets to write it. */
	if (considerFlags && (cp->c_flags & IMMUTABLE))

	/* Otherwise, user id 0 always gets access. */
	if (cred->cr_uid == 0)

	/* Otherwise, check the owner. */
	if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
		return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);

	/* Otherwise, check the groups. */
	for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) {
		if (cp->c_gid == *gp)
			return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
}
/*
 * Change the flags on a file or directory.
 * cnode must be locked before calling.
 */
int
hfs_chflags(vp, flags, cred, p)
	register struct vnode *vp;
	register u_long flags;
	register struct ucred *cred;
	struct proc *p;
{
	register struct cnode *cp = VTOC(vp);

	if (VTOVCB(vp)->vcbSigWord == kHFSSigWord) {
		if ((retval = hfs_write_access(vp, cred, p, false)) != 0) {
		}
	} else if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) {
	}

	if (cred->cr_uid == 0) {
		if ((cp->c_flags & (SF_IMMUTABLE | SF_APPEND)) &&

		if (cp->c_flags & (SF_IMMUTABLE | SF_APPEND) ||
		    (flags & UF_SETTABLE) != flags) {

		cp->c_flags &= SF_SETTABLE;
		cp->c_flags |= (flags & UF_SETTABLE);

	cp->c_flag |= C_CHANGE;
/*
 * Perform chown operation on cnode cp;
 * cnode must be locked prior to call.
 */
int
hfs_chown(vp, uid, gid, cred, p)
	register struct vnode *vp;
	uid_t uid;
	gid_t gid;
	struct ucred *cred;
	struct proc *p;
{
	register struct cnode *cp = VTOC(vp);

	if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)

	if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)

	if (uid == (uid_t)VNOVAL)
	if (gid == (gid_t)VNOVAL)

	/*
	 * If we don't own the file, are trying to change the owner
	 * of the file, or are not a member of the target group,
	 * the caller must be superuser or the call fails.
	 */
	if ((cred->cr_uid != cp->c_uid || uid != cp->c_uid ||
	    (gid != cp->c_gid && !groupmember((gid_t)gid, cred))) &&
	    (error = suser(cred, &p->p_acflag)))

	if ((error = hfs_getinoquota(cp)))

	dqrele(vp, cp->c_dquot[USRQUOTA]);
	cp->c_dquot[USRQUOTA] = NODQUOT;

	dqrele(vp, cp->c_dquot[GRPQUOTA]);
	cp->c_dquot[GRPQUOTA] = NODQUOT;

	/*
	 * Eventually need to account for (fake) a block per directory
	 *  if (vp->v_type == VDIR)
	 *	change = VTOVCB(vp)->blockSize;
	 */
	change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
	(void) hfs_chkdq(cp, -change, cred, CHOWN);
	(void) hfs_chkiq(cp, -1, cred, CHOWN);
	for (i = 0; i < MAXQUOTAS; i++) {
		dqrele(vp, cp->c_dquot[i]);
		cp->c_dquot[i] = NODQUOT;
	}

	if ((error = hfs_getinoquota(cp)) == 0) {
		dqrele(vp, cp->c_dquot[USRQUOTA]);
		cp->c_dquot[USRQUOTA] = NODQUOT;

		dqrele(vp, cp->c_dquot[GRPQUOTA]);
		cp->c_dquot[GRPQUOTA] = NODQUOT;

		if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
			if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)

			(void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dqrele(vp, cp->c_dquot[i]);
			cp->c_dquot[i] = NODQUOT;
		}
	}
	if (hfs_getinoquota(cp) == 0) {
		dqrele(vp, cp->c_dquot[USRQUOTA]);
		cp->c_dquot[USRQUOTA] = NODQUOT;

		dqrele(vp, cp->c_dquot[GRPQUOTA]);
		cp->c_dquot[GRPQUOTA] = NODQUOT;

		(void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
		(void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
		(void) hfs_getinoquota(cp);
	}
	if (hfs_getinoquota(cp))
		panic("hfs_chown: lost quota");

	if (ouid != uid || ogid != gid)
		cp->c_flag |= C_CHANGE;
	if (ouid != uid && cred->cr_uid != 0)
		cp->c_mode &= ~S_ISUID;
	if (ogid != gid && cred->cr_uid != 0)
		cp->c_mode &= ~S_ISGID;
/*
#% exchange fvp		L L L
#% exchange tvp		L L L
#
 *
 * The hfs_exchange routine swaps the fork data in two files by
 * exchanging some of the information in the cnode.  It is used
 * to preserve the file ID when updating an existing file, in
 * case the file is being tracked through its file ID.  Typically
 * it's used after creating a new file during a safe-save.
 */
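/*
 * Added note (not in the original source): this is the operation behind the
 * user-level exchangedata(2)-style "safe save" pattern described above --
 * an application writes a complete new copy of a document, exchanges it with
 * the original so the file ID stays stable, and then deletes the old copy.
 */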
static int
hfs_exchange(ap)
	struct vop_exchange_args /* {
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *from_vp = ap->a_fvp;
	struct vnode *to_vp = ap->a_tvp;
	struct vnode *from_rvp = NULL;
	struct vnode *to_rvp = NULL;
	struct cnode *from_cp = VTOC(from_vp);
	struct cnode *to_cp = VTOC(to_vp);
	struct hfsmount *hfsmp = VTOHFS(from_vp);
	struct cat_desc tempdesc;
	struct cat_attr tempattr;
	int error = 0, started_tr = 0, grabbed_lock = 0;

	/* The files must be on the same volume. */
	if (from_vp->v_mount != to_vp->v_mount)

	/* Only normal files can be exchanged. */
	if ((from_vp->v_type != VREG) || (to_vp->v_type != VREG) ||
	    (from_cp->c_flag & C_HARDLINK) || (to_cp->c_flag & C_HARDLINK) ||
	    VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp))

	// XXXdbg - don't allow modification of the journal or journal_info_block
	{
		struct HFSPlusExtentDescriptor *extd;

		if (from_cp->c_datafork) {
			extd = &from_cp->c_datafork->ff_data.cf_extents[0];
			if (extd->startBlock == VTOVCB(from_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {

		if (to_cp->c_datafork) {
			extd = &to_cp->c_datafork->ff_data.cf_extents[0];
			if (extd->startBlock == VTOVCB(to_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {

	from_rvp = from_cp->c_rsrc_vp;
	to_rvp = to_cp->c_rsrc_vp;

	/* If one of the resource forks is open then get the other one. */
	if (from_rvp || to_rvp) {
		error = hfs_vgetrsrc(hfsmp, from_vp, &from_rvp, ap->a_p);
		error = hfs_vgetrsrc(hfsmp, to_vp, &to_rvp, ap->a_p);
	}

	/* Ignore any errors, we are doing a 'best effort' on flushing */
	(void) vinvalbuf(from_vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
	(void) vinvalbuf(to_vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
	(void) vinvalbuf(from_rvp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
	(void) vinvalbuf(to_rvp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);

	hfs_global_shared_lock_acquire(hfsmp);
	if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {

	/* Lock catalog b-tree */
	error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, ap->a_p);
	if (error) goto Err_Exit;

	/* The backend code always tries to delete the virtual
	 * extent id for exchanging files so we need to lock
	 * the extents b-tree.
	 */
	error = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p);

	/* Do the exchange */
	error = MacToVFSError(ExchangeFileIDs(HFSTOVCB(hfsmp),
				from_cp->c_desc.cd_nameptr, to_cp->c_desc.cd_nameptr,
				from_cp->c_parentcnid, to_cp->c_parentcnid,
				from_cp->c_hint, to_cp->c_hint));

	(void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, ap->a_p);
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p);

	if (error != E_NONE) {

	/* Purge the vnodes from the name cache */
	cache_purge(from_vp);

	/* Save a copy of from attributes before swapping. */
	bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
	bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));

	/*
	 * Swap the descriptors and all non-fork related attributes.
	 * (except the modify date)
	 */
	bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));
	from_cp->c_hint = 0;
	from_cp->c_fileid = from_cp->c_cnid;
	from_cp->c_itime = to_cp->c_itime;
	from_cp->c_btime = to_cp->c_btime;
	from_cp->c_atime = to_cp->c_atime;
	from_cp->c_ctime = to_cp->c_ctime;
	from_cp->c_gid = to_cp->c_gid;
	from_cp->c_uid = to_cp->c_uid;
	from_cp->c_flags = to_cp->c_flags;
	from_cp->c_mode = to_cp->c_mode;
	bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);

	bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
	to_cp->c_fileid = to_cp->c_cnid;
	to_cp->c_itime = tempattr.ca_itime;
	to_cp->c_btime = tempattr.ca_btime;
	to_cp->c_atime = tempattr.ca_atime;
	to_cp->c_ctime = tempattr.ca_ctime;
	to_cp->c_gid = tempattr.ca_gid;
	to_cp->c_uid = tempattr.ca_uid;
	to_cp->c_flags = tempattr.ca_flags;
	to_cp->c_mode = tempattr.ca_mode;
	bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);

	/* Reinsert into the cnode hash under new file IDs */
	hfs_chashremove(from_cp);
	hfs_chashremove(to_cp);
	hfs_chashinsert(from_cp);
	hfs_chashinsert(to_cp);

	/*
	 * When a file moves out of "Cleanup At Startup"
	 * we can drop its NODUMP status.
	 */
	if ((from_cp->c_flags & UF_NODUMP) &&
	    (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
		from_cp->c_flags &= ~UF_NODUMP;
		from_cp->c_flag |= C_CHANGE;
	}
	if ((to_cp->c_flags & UF_NODUMP) &&
	    (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
		to_cp->c_flags &= ~UF_NODUMP;
		to_cp->c_flag |= C_CHANGE;
	}

	journal_end_transaction(hfsmp->jnl);
	hfs_global_shared_lock_release(hfsmp);
/*
 * Fsync
 *
     IN struct vnode *vp;
     IN struct ucred *cred;
 */
static int
hfs_fsync(ap)
	struct vop_fsync_args /* {
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct filefork *fp = NULL;
	register struct buf *bp;
	struct hfsmount *hfsmp = VTOHFS(ap->a_vp);

	wait = (ap->a_waitfor == MNT_WAIT);

	/* HFS directories don't have any data blocks. */
	if (vp->v_type == VDIR)

	/*
	 * For system files flush the B-tree header and
	 * for regular files write out any clusters
	 */
	if (vp->v_flag & VSYSTEM) {
		if (VTOF(vp)->fcbBTCBPtr != NULL) {
			if (BTIsDirty(VTOF(vp))) {
				panic("hfs: system file vp 0x%x has dirty blocks (jnl 0x%x)\n",
			}
			BTFlushPath(VTOF(vp));
		}
	} else if (UBCINFOEXISTS(vp))
		(void) cluster_push(vp);

	/*
	 * When MNT_WAIT is requested and the zero fill timeout
	 * has expired then we must explicitly zero out any areas
	 * that are currently marked invalid (holes).
	 *
	 * Files with NODUMP can bypass zero filling here.
	 */
	if ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
	    ((cp->c_flags & UF_NODUMP) == 0) &&
	    UBCINFOEXISTS(vp) && (fp = VTOF(vp)) &&
	    cp->c_zftimeout != 0) {
		if (time.tv_sec < cp->c_zftimeout) {
			/* Remember that a force sync was requested. */
			cp->c_flag |= C_ZFWANTSYNC;
		}
		VOP_DEVBLOCKSIZE(cp->c_devvp, &devblksize);
		was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA);
		SET(vp->v_flag, VNOCACHE_DATA);	/* Don't cache zeros */

		while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
			struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
			off_t start = invalid_range->rl_start;
			off_t end = invalid_range->rl_end;

			/* The range about to be written must be validated
			 * first, so that VOP_CMAP() will return the
			 * appropriate mapping for the cluster code:
			 */
			rl_remove(start, end, &fp->ff_invalidranges);

			(void) cluster_write(vp, (struct uio *) 0,
					invalid_range->rl_end + 1,
					invalid_range->rl_start,
					(off_t)0, devblksize,
					IO_HEADZEROFILL | IO_NOZERODIRTY);
			cp->c_flag |= C_MODIFIED;
		}
		(void) cluster_push(vp);

		CLR(vp->v_flag, VNOCACHE_DATA);
		cp->c_flag &= ~C_ZFWANTSYNC;
		cp->c_zftimeout = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if ((bp->b_flags & B_BUSY))
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("hfs_fsync: bp 0x%x not dirty (hfsmp 0x%x)", bp, hfsmp);

		if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) {
			if ((bp->b_flags & B_META) == 0) {
				panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",
			}
			// if journal_active() returns >= 0 then the journal is ok and we
			// shouldn't do anything to this locked block (because it is part
			// of a transaction).  otherwise we'll just go through the normal
			// code path and flush the buffer.
			if (journal_active(hfsmp->jnl) >= 0) {
			}
		}

		bp->b_flags |= B_BUSY;
		/* Clear B_LOCKED, should only be set on meta files */
		bp->b_flags &= ~B_LOCKED;

		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || ap->a_waitfor == MNT_NOWAIT)
			(void) VOP_BWRITE(bp);
	}

	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "hfs_fsync", 0);
	}

	// XXXdbg -- is checking for hfsmp->jnl == NULL the right
	if (hfsmp->jnl == NULL && vp->v_dirtyblkhd.lh_first) {
		/* still have some dirty buffers */
		vprint("hfs_fsync: dirty", vp);
		/*
		 * Looks like the requests are not
		 * getting queued to the driver.
		 * Retrying here causes a cpu bound loop.
		 * Yield to the other threads and hope
		 */
		(void)tsleep((caddr_t)&vp->v_numoutput,
				PRIBIO + 1, "hfs_fsync", hz/10);
	}

	if (vp->v_flag & VSYSTEM) {
		if (VTOF(vp)->fcbBTCBPtr != NULL)
			BTSetLastSync(VTOF(vp), tv.tv_sec);
		cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
	} else /* User file */ {
		retval = VOP_UPDATE(ap->a_vp, &tv, &tv, wait);

		/* When MNT_WAIT is requested push out any delayed meta data */
		if ((retval == 0) && wait && cp->c_hint &&
		    !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
			hfs_metasync(VTOHFS(vp), cp->c_hint, ap->a_p);
		}
	}
/* Sync an hfs catalog b-tree node */
static int
hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p)
{
	vp = HFSTOVCB(hfsmp)->catalogRefNum;

	// XXXdbg - don't need to do this on a journaled volume
	if (hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p) != 0)

	/*
	 * Look for a matching node that has been delayed
	 * but is not part of a set (B_LOCKED).
	 */
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if (bp->b_flags & B_BUSY)
		if (bp->b_lblkno == node) {
			if (bp->b_flags & B_LOCKED)

			bp->b_flags |= B_BUSY;
			(void) VOP_BWRITE(bp);
		}
	}

	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
int
hfs_btsync(struct vnode *vp, int sync_transaction)
{
	struct cnode *cp = VTOC(vp);
	register struct buf *bp;
	struct hfsmount *hfsmp = VTOHFS(vp);

	/*
	 * Flush all dirty buffers associated with b-tree.
	 */
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if ((bp->b_flags & B_BUSY))
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("hfs_btsync: not dirty (bp 0x%x hfsmp 0x%x)", bp, hfsmp);

		if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) {
			if ((bp->b_flags & B_META) == 0) {
				panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",
			}
			// if journal_active() returns >= 0 then the journal is ok and we
			// shouldn't do anything to this locked block (because it is part
			// of a transaction).  otherwise we'll just go through the normal
			// code path and flush the buffer.
			if (journal_active(hfsmp->jnl) >= 0) {
			}
		}

		if (sync_transaction && !(bp->b_flags & B_LOCKED))

		bp->b_flags |= B_BUSY;
		bp->b_flags &= ~B_LOCKED;
	}

	if ((vp->v_flag & VSYSTEM) && (VTOF(vp)->fcbBTCBPtr != NULL))
		(void) BTSetLastSync(VTOF(vp), tv.tv_sec);
	cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
/*
 * Rmdir system call.
 *
     IN WILLRELE struct vnode *dvp;
     IN WILLRELE struct vnode *vp;
     IN struct componentname *cnp;
 */
static int
hfs_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct proc *p = ap->a_cnp->cn_proc;
	struct hfsmount * hfsmp;
	int error = 0, started_tr = 0, grabbed_lock = 0;

		return (EINVAL);  /* cannot remove "." */

	(void)hfs_getinoquota(cp);

	hfs_global_shared_lock_acquire(hfsmp);
	if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {

	/*
	 * Verify the directory is empty (and valid).
	 * (Rmdir ".." won't be valid since
	 *  ".." will contain a reference to
	 *  the current directory and thus be
	 */
	if (cp->c_entries != 0) {

	if ((dcp->c_flags & APPEND) || (cp->c_flags & (IMMUTABLE | APPEND))) {

	/* Remove the entry from the namei cache: */

	/* Lock catalog b-tree */
	error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
	if (error) goto out;

	if (cp->c_entries > 0)
		panic("hfs_rmdir: attempting to delete a non-empty directory!");
	/* Remove entry from catalog */
	error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

	/* Unlock catalog b-tree */
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
	if (error) goto out;

	(void)hfs_chkiq(cp, -1, NOCRED, 0);

	/* The parent lost a child */
	if (dcp->c_entries > 0)
	if (dcp->c_nlink > 0)
	dcp->c_flag |= C_CHANGE | C_UPDATE;
	(void) VOP_UPDATE(dvp, &tv, &tv, 0);

	hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));

	cp->c_mode = 0;  /* Makes the vnode go away...see inactive */
	cp->c_flag |= C_NOEXISTS;

	journal_end_transaction(hfsmp->jnl);
	hfs_global_shared_lock_release(hfsmp);
/*
 * Remove
 *
     IN WILLRELE struct vnode *dvp;
     IN WILLRELE struct vnode *vp;
     IN struct componentname *cnp;
 */
static int
hfs_remove(ap)
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode *rvp = NULL;
	struct hfsmount *hfsmp;
	struct proc *p = current_proc();
	int dataforkbusy = 0;
	int rsrcforkbusy = 0;
	int started_tr = 0, grabbed_lock = 0;

	/* Redirect directories to rmdir */
	if (vp->v_type == VDIR)
		return (hfs_rmdir(ap));

	if (cp->c_parentcnid != dcp->c_cnid) {

	/* Make sure a remove is permitted */
	if ((cp->c_flags & (IMMUTABLE | APPEND)) ||
	    (VTOC(dvp)->c_flags & APPEND) ||
	    VNODE_IS_RSRC(vp)) {

	/*
	 * Acquire a vnode for a non-empty resource fork.
	 * (needed for VOP_TRUNCATE)
	 */
	if (cp->c_blocks - VTOF(vp)->ff_blocks) {
		error = hfs_vgetrsrc(hfsmp, vp, &rvp, p);
	}

	// XXXdbg - don't allow deleting the journal or journal_info_block
	if (hfsmp->jnl && cp->c_datafork) {
		struct HFSPlusExtentDescriptor *extd;

		extd = &cp->c_datafork->ff_data.cf_extents[0];
		if (extd->startBlock == HFSTOVCB(hfsmp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {

	/*
	 * Check if this file is being used.
	 *
	 * The namei done for the remove took a reference on the
	 * vnode (vp).  And we took a ref on the resource vnode (rvp).
	 * Hence set 1 in the tookref parameter of ubc_isinuse().
	 */
	if (UBCISVALID(vp) && ubc_isinuse(vp, 1))
	if (rvp && UBCISVALID(rvp) && ubc_isinuse(rvp, 1))

	/*
	 * Carbon semantics prohibit deleting busy files.
	 * (enforced when NODELETEBUSY is requested)
	 */
	if ((dataforkbusy || rsrcforkbusy) &&
	    ((ap->a_cnp->cn_flags & NODELETEBUSY) ||
	    (hfsmp->hfs_private_metadata_dir == 0))) {

	(void)hfs_getinoquota(cp);

	hfs_global_shared_lock_acquire(hfsmp);
	if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {

	/* Remove our entry from the namei cache. */

	// XXXdbg - if we're journaled, kill any dirty symlink buffers
	if (hfsmp->jnl && vp->v_type == VLNK && vp->v_dirtyblkhd.lh_first) {
		struct buf *bp, *nbp;

		for (bp=vp->v_dirtyblkhd.lh_first; bp; bp=nbp) {
			nbp = bp->b_vnbufs.le_next;

			if ((bp->b_flags & B_BUSY)) {
				// if it was busy, someone else must be dealing
				// with it so just move on.
			}

			if (!(bp->b_flags & B_META)) {
				panic("hfs: symlink bp @ 0x%x is not marked meta-data!\n", bp);
			}

			// if it's part of the current transaction, kill it.
			if (bp->b_flags & B_LOCKED) {
				bp->b_flags |= B_BUSY;
				journal_kill_block(hfsmp->jnl, bp);
			}
		}
	}

	/*
	 * Truncate any non-busy forks.  Busy forks will
	 * get truncated when their vnode goes inactive.
	 *
	 * (Note: hard links are truncated in VOP_INACTIVE)
	 */
	if ((cp->c_flag & C_HARDLINK) == 0) {
		int mode = cp->c_mode;

		if (!dataforkbusy && cp->c_datafork->ff_blocks != 0) {
			cp->c_mode = 0;  /* Suppress VOP_UPDATES */
			error = VOP_TRUNCATE(vp, (off_t)0, IO_NDELAY, NOCRED, p);
		}
		if (!rsrcforkbusy && rvp) {
			cp->c_mode = 0;  /* Suppress VOP_UPDATES */
			error = VOP_TRUNCATE(rvp, (off_t)0, IO_NDELAY, NOCRED, p);
		}
	}

	/*
	 * There are 3 remove cases to consider:
	 *  1. File is a hardlink   ==> remove the link
	 *  2. File is busy (in use) ==> move/rename the file
	 *  3. File is not in use    ==> remove the file
	 */
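	/*
	 * Note added for clarity (not in the original source): the three cases
	 * above correspond to the branches that follow -- the C_HARDLINK branch
	 * deletes just the link record, the busy-fork branch renames the file
	 * into the private metadata ("orphan") directory, and the final branch
	 * deletes the catalog record outright.
	 */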
	if (cp->c_flag & C_HARDLINK) {
		struct cat_desc desc;

		if ((ap->a_cnp->cn_flags & HASBUF) == 0 ||
		    ap->a_cnp->cn_nameptr[0] == '\0') {
			error = ENOENT;	/* name missing! */
		}

		/* Setup a descriptor for the link */
		bzero(&desc, sizeof(desc));
		desc.cd_nameptr = ap->a_cnp->cn_nameptr;
		desc.cd_namelen = ap->a_cnp->cn_namelen;
		desc.cd_parentcnid = dcp->c_cnid;
		/* XXX - if cnid is out of sync then the wrong thread rec will get deleted. */
		desc.cd_cnid = cp->c_cnid;

		/* Lock catalog b-tree */
		error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);

		/* Delete the link record */
		error = cat_delete(hfsmp, &desc, &cp->c_attr);

		if ((error == 0) && (--cp->c_nlink < 1)) {
			struct cat_desc to_desc;
			struct cat_desc from_desc;

			/*
			 * This is now essentially an open deleted file.
			 * Rename it to reflect this state which makes
			 * orphan file cleanup easier (see hfs_remove_orphans).
			 * Note: a rename failure here is not fatal.
			 */
			MAKE_INODE_NAME(inodename, cp->c_rdev);
			bzero(&from_desc, sizeof(from_desc));
			from_desc.cd_nameptr = inodename;
			from_desc.cd_namelen = strlen(inodename);
			from_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir;
			from_desc.cd_flags = 0;
			from_desc.cd_cnid = cp->c_fileid;

			MAKE_DELETED_NAME(delname, cp->c_fileid);
			bzero(&to_desc, sizeof(to_desc));
			to_desc.cd_nameptr = delname;
			to_desc.cd_namelen = strlen(delname);
			to_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir;
			to_desc.cd_flags = 0;
			to_desc.cd_cnid = cp->c_fileid;

			(void) cat_rename(hfsmp, &from_desc, &hfsmp->hfs_privdir_desc,
					&to_desc, (struct cat_desc *)NULL);
			cp->c_flag |= C_DELETED;
		}

		/* Unlock the Catalog */
		(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

		/* All done with component name... */
		if ((ap->a_cnp->cn_flags & (HASBUF | SAVENAME)) == (HASBUF | SAVENAME))
			FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI);

		cp->c_flag |= C_CHANGE;
		(void) VOP_UPDATE(vp, &tv, &tv, 0);

		hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));

	} else if (dataforkbusy || rsrcforkbusy) {
		struct cat_desc to_desc;
		struct cat_desc todir_desc;

		/*
		 * Orphan this file (move to hidden directory).
		 */
		bzero(&todir_desc, sizeof(todir_desc));
		todir_desc.cd_parentcnid = 2;

		MAKE_DELETED_NAME(delname, cp->c_fileid);
		bzero(&to_desc, sizeof(to_desc));
		to_desc.cd_nameptr = delname;
		to_desc.cd_namelen = strlen(delname);
		to_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir;
		to_desc.cd_flags = 0;
		to_desc.cd_cnid = cp->c_cnid;

		/* Lock catalog b-tree */
		error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);

		error = cat_rename(hfsmp, &cp->c_desc, &todir_desc,
				&to_desc, (struct cat_desc *)NULL);

		// XXXdbg - only bump this count if we were successful
		hfsmp->hfs_privdir_attr.ca_entries++;
		(void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc,
				&hfsmp->hfs_privdir_attr, NULL, NULL);

		/* Unlock the Catalog */
		(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
		if (error) goto out;

		cp->c_flag |= C_CHANGE | C_DELETED | C_NOEXISTS;
		(void) VOP_UPDATE(vp, &tv, &tv, 0);

	} else /* Not busy */ {

		if (cp->c_blocks > 0) {
			printf("hfs_remove: attempting to delete a non-empty file!");
		}

		/* Lock catalog b-tree */
		error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);

		error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

		if (error && error != ENXIO && error != ENOENT && truncated) {
			if ((cp->c_datafork && cp->c_datafork->ff_data.cf_size != 0) ||
			    (cp->c_rsrcfork && cp->c_rsrcfork->ff_data.cf_size != 0)) {
				panic("hfs: remove: couldn't delete a truncated file! (%d, data sz %lld; rsrc sz %lld)",
					error, cp->c_datafork->ff_data.cf_size, cp->c_rsrcfork->ff_data.cf_size);
			}
			printf("hfs: remove: strangely enough, deleting truncated file %s (%d) got err %d\n",
				cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error);
		}

		/* Unlock the Catalog */
		(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
		if (error) goto out;

		(void)hfs_chkiq(cp, -1, NOCRED, 0);

		cp->c_flag |= C_CHANGE | C_NOEXISTS;

		hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
	}

	/*
	 * All done with this cnode's descriptor...
	 *
	 * Note: all future catalog calls for this cnode must be
	 * by fileid only.  This is OK for HFS (which doesn't have
	 * file thread records) since HFS doesn't support hard
	 * links or the removal of busy files.
	 */
	cat_releasedesc(&cp->c_desc);

	/* In all three cases the parent lost a child */
	if (dcp->c_entries > 0)
	if (dcp->c_nlink > 0)
	dcp->c_flag |= C_CHANGE | C_UPDATE;
	(void) VOP_UPDATE(dvp, &tv, &tv, 0);

	journal_end_transaction(hfsmp->jnl);
	hfs_global_shared_lock_release(hfsmp);

	VOP_UNLOCK(vp, 0, p);
	// XXXdbg - try to prevent the lost ubc_info panic
	if ((cp->c_flag & C_HARDLINK) == 0 || cp->c_nlink == 0) {
		(void) ubc_uncache(vp);
	}

	/* Commit the truncation to the catalog record */
	cp->c_flag |= C_CHANGE | C_UPDATE;
	(void) VOP_UPDATE(vp, &tv, &tv, 0);

	journal_end_transaction(hfsmp->jnl);
	hfs_global_shared_lock_release(hfsmp);
__private_extern__ void
replace_desc(struct cnode *cp, struct cat_desc *cdp)
{
	/* First release allocated name buffer */
	if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
		char *name = cp->c_desc.cd_nameptr;

		cp->c_desc.cd_nameptr = 0;
		cp->c_desc.cd_namelen = 0;
		cp->c_desc.cd_flags &= ~CD_HASBUF;
	}
	bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));

	/* Cnode now owns the name buffer */
	cdp->cd_nameptr = 0;
	cdp->cd_namelen = 0;
	cdp->cd_flags &= ~CD_HASBUF;
}
/*
#% rename fdvp		U U U
#% rename tdvp		L U U
#
     IN WILLRELE struct vnode *fdvp;
     IN WILLRELE struct vnode *fvp;
     IN struct componentname *fcnp;
     IN WILLRELE struct vnode *tdvp;
     IN WILLRELE struct vnode *tvp;
     IN struct componentname *tcnp;
 *
 * The VFS layer guarantees that source and destination will
 * either both be directories, or both not be directories.
 *
 * When the target is a directory, hfs_rename must ensure
 *
 * The rename system call is responsible for freeing
 * the pathname buffers (ie no need to call VOP_ABORTOP).
 */
static int
hfs_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	struct vnode *tvp = ap->a_tvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct componentname *tcnp = ap->a_tcnp;
	struct componentname *fcnp = ap->a_fcnp;
	struct proc *p = fcnp->cn_proc;
	struct cnode *fcp = NULL;
	struct cnode *fdcp = NULL;
	struct cnode *tdcp = VTOC(tdvp);
	struct cat_desc from_desc;
	struct cat_desc to_desc;
	struct cat_desc out_desc;
	struct hfsmount *hfsmp;
	int fdvp_locked, fvp_locked, tdvp_locked;
	int started_tr = 0, grabbed_lock = 0;

	hfsmp = VTOHFS(tdvp);

	/* Establish our vnode lock state. */

	/*
	 * When fvp matches tvp they must be case variants
	 *
	 * For the hardlink case there can be an extra ref on fvp.
	 */
	if (VOP_ISLOCKED(fvp) &&
	    (VTOC(fvp)->c_lock.lk_lockholder == p->p_pid) &&
	    (VTOC(fvp)->c_lock.lk_lockthread == current_thread())) {
		vrele(fvp);	/* drop the extra ref */
	}

	/*
	 * If this is a hard link and it's not a case
	 * variant then keep tvp around for removal.
	 */
	if ((VTOC(fvp)->c_flag & C_HARDLINK) &&
	    (hfs_namecmp(fcnp->cn_nameptr, fcnp->cn_namelen,
	    tcnp->cn_nameptr, tcnp->cn_namelen) != 0))) {

	/*
	 * Check for cross-device rename.
	 */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {

	/*
	 * Make sure "from" vnode and its parent are changeable.
	 */
	if ((VTOC(fvp)->c_flags & (IMMUTABLE | APPEND)) ||
	    (VTOC(fdvp)->c_flags & APPEND)) {

	/*
	 * Be sure we are not renaming ".", "..", or an alias of ".".
	 */
	if ((fvp->v_type == VDIR) &&
	    (((fcnp->cn_namelen == 1) && (fcnp->cn_nameptr[0] == '.')) ||
	    (fcnp->cn_flags&ISDOTDOT))) {

	/*
	 * If the destination parent directory is "sticky", then the
	 * user must own the parent directory, or the destination of
	 * the rename, otherwise the destination may not be changed
	 * (except by root).  This implements append-only directories.
	 *
	 * Note that checks for immutable, write access, and a non-empty
	 * target are done by the call to VOP_REMOVE.
	 */
	if (tvp && (tdcp->c_mode & S_ISTXT) &&
	    (tcnp->cn_cred->cr_uid != 0) &&
	    (tcnp->cn_cred->cr_uid != tdcp->c_uid) &&
	    (hfs_owner_rights(hfsmp, VTOC(tvp)->c_uid, tcnp->cn_cred, p, false)) ) {

	/*
	 * All done with preflighting.
	 *
	 * We now break the call into two transactions:
	 * 1 - Remove the destination (if any) using VOP_REMOVE,
	 *     which in itself is a complete transaction.
	 *
	 * 2 - Rename source to destination.
	 *
	 * Since all the preflighting is done, we assume that a
	 * rename failure is unlikely once part 1 is complete.
	 * Breaking rename into two transactions buys us a much
	 * simpler implementation with respect to the locking
	 * protocol.  There are only 3 vnodes to worry about
	 * locking in the correct order (instead of 4).
	 */

	/*
	 * Part 1 - If the destination exists then it needs to be removed.
	 */

		/*
		 * VOP_REMOVE will vput tdvp so we better bump its
		 * ref count and relock it, always set tvp to NULL
		 * afterwards to indicate that we're done with it.
		 */
		error = vget(fvp, LK_EXCLUSIVE | LK_RETRY, p);

		/* Clear SAVENAME to keep VOP_REMOVE from smashing tcnp. */
		tcnp->cn_flags &= ~SAVENAME;

		if (tvp->v_type == VDIR)
			error = VOP_RMDIR(tdvp, tvp, tcnp);
		else
			error = VOP_REMOVE(tdvp, tvp, tcnp);

		/* Get lock states back in sync. */

		tvp = NULL;	/* all done with tvp */

			goto out;  /* couldn't remove destination! */
	/*
	 * All done with tvp.
	 *
	 * For POSIX compliance, if tvp was removed the only
	 * error we can return from this point on is EIO.
	 */

	/*
	 * Part 2 - rename source to destination
	 */

	/*
	 * Lock the vnodes before starting a journal transaction.
	 *
	 * fvp is a child and must be locked last.
	 */
		VOP_UNLOCK(fvp, 0, p);

	/*
	 * If fdvp is the parent of tdvp then it needs to be locked first.
	 */
	if ((VTOC(fdvp)->c_cnid == VTOC(tdvp)->c_parentcnid)) {
			VOP_UNLOCK(tdvp, 0, p);
		if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p)))
		if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p)))
	} else /* Lock tdvp then fdvp */ {
		if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p)))
		if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p)))
	} else if (!tdvp_locked) {
		/*
		 * fvp is a child and must be locked last.
		 */
			VOP_UNLOCK(fvp, 0, p);
		if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p)))
	}

	/* Now it's safe to lock fvp */
	if (error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p))

	/*
	 * When a file moves out of "Cleanup At Startup"
	 * we can drop its NODUMP status.
	 */
	if ((fcp->c_flags & UF_NODUMP) &&
	    (fvp->v_type == VREG) &&
	    (fdcp->c_desc.cd_nameptr != NULL) &&
	    (strcmp(fdcp->c_desc.cd_nameptr, "Cleanup At Startup") == 0)) {
		fcp->c_flags &= ~UF_NODUMP;
		fcp->c_flag |= C_CHANGE;
		(void) VOP_UPDATE(fvp, &tv, &tv, 0);
	}

	hfs_global_shared_lock_acquire(hfsmp);
	if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {

	bzero(&from_desc, sizeof(from_desc));
	from_desc.cd_nameptr = fcnp->cn_nameptr;
	from_desc.cd_namelen = fcnp->cn_namelen;
	from_desc.cd_parentcnid = fdcp->c_cnid;
	from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
	from_desc.cd_cnid = fcp->c_cnid;

	bzero(&to_desc, sizeof(to_desc));
	to_desc.cd_nameptr = tcnp->cn_nameptr;
	to_desc.cd_namelen = tcnp->cn_namelen;
	to_desc.cd_parentcnid = tdcp->c_cnid;
	to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
	to_desc.cd_cnid = fcp->c_cnid;

	/* Lock catalog b-tree */
	error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);

	error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);

	/* Unlock catalog b-tree */
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

	/* Update cnode's catalog descriptor */
	replace_desc(fcp, &out_desc);

	hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_RMDIR : VOL_RMFILE,
			(fdcp->c_cnid == kHFSRootFolderID));
	hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_MKDIR : VOL_MKFILE,
			(tdcp->c_cnid == kHFSRootFolderID));

	VOP_UNLOCK(fvp, 0, p);
	/* All done with fvp. */

	/* Update both parent directories. */
	if (fdcp->c_nlink > 0)
	if (fdcp->c_entries > 0)
	fdcp->c_flag |= C_CHANGE | C_UPDATE;
	(void) VOP_UPDATE(fdvp, &tv, &tv, 0);

	tdcp->c_childhint = out_desc.cd_hint;	/* Cache directory's location */
	tdcp->c_flag |= C_CHANGE | C_UPDATE;
	(void) VOP_UPDATE(tdvp, &tv, &tv, 0);

	journal_end_transaction(hfsmp->jnl);
	hfs_global_shared_lock_release(hfsmp);

	VOP_UNLOCK(fvp, 0, p);
	VOP_UNLOCK(fdvp, 0, p);
	VOP_UNLOCK(tdvp, 0, p);
	if (tvp && (tvp != fvp)) {
		VOP_UNLOCK(tvp, 0, p);
	}

	/* After tvp is removed the only acceptable error is EIO */
	if ((error == ENOSPC) && tvp_deleted)
/*
 * Mkdir
 *
     IN WILLRELE struct vnode *dvp;
     OUT struct vnode **vpp;
     IN struct componentname *cnp;
     IN struct vattr *vap;

	We are responsible for freeing the namei buffer,
	it is done in hfs_makenode()
 */
static int
hfs_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct vattr *vap = ap->a_vap;

	return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
			ap->a_dvp, ap->a_vpp, ap->a_cnp));
}
/*
 * symlink -- make a symbolic link
#% symlink	dvp	L U U
#% symlink	vpp	- U -
#
# XXX - note that the return vnode has already been VRELE'ed
#       by the filesystem layer.  To use it you must use vget,
#       possibly with a further namei.
#
     IN WILLRELE struct vnode *dvp;
     OUT WILLRELE struct vnode **vpp;
     IN struct componentname *cnp;
     IN struct vattr *vap;

	We are responsible for freeing the namei buffer,
	it is done in hfs_makenode().
 */
static int
hfs_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	register struct vnode *vp, **vpp = ap->a_vpp;
	struct hfsmount *hfsmp;
	struct filefork *fp;
	struct buf *bp = NULL;

	/* HFS standard disks don't support symbolic links */
	if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) {
		VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
		return (EOPNOTSUPP);
	}

	/* Check for empty target name */
	if (ap->a_target[0] == 0) {
		VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
	}

	hfsmp = VTOHFS(ap->a_dvp);

	/* Create the vnode */
	if ((error = hfs_makenode(S_IFLNK | ap->a_vap->va_mode,
			ap->a_dvp, vpp, ap->a_cnp))) {
	}

	len = strlen(ap->a_target);
	fp->ff_clumpsize = VTOVCB(vp)->blockSize;

	(void)hfs_getinoquota(VTOC(vp));

	hfs_global_shared_lock_acquire(hfsmp);
	if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
		hfs_global_shared_lock_release(hfsmp);
	}

	/* Allocate space for the link */
	error = VOP_TRUNCATE(vp, len, IO_NOZEROFILL,
			ap->a_cnp->cn_cred, ap->a_cnp->cn_proc);
		goto out;	/* XXX need to remove link */

	/* Write the link to disk */
	bp = getblk(vp, 0, roundup((int)fp->ff_size, VTOHFS(vp)->hfs_phys_block_size),

	journal_modify_block_start(hfsmp->jnl, bp);

	bzero(bp->b_data, bp->b_bufsize);
	bcopy(ap->a_target, bp->b_data, len);

	journal_modify_block_end(hfsmp->jnl, bp);

	journal_end_transaction(hfsmp->jnl);
	hfs_global_shared_lock_release(hfsmp);
2490 * Dummy dirents to simulate the "." and ".." entries of the directory
2491 * in a hfs filesystem. HFS doesn't provide these on disk. Note that
2492 * the size of these entries is the smallest needed to represent them
2493 * (only 12 byte each).
2495 static hfsdotentry rootdots
[2] = {
2498 sizeof(struct hfsdotentry
), /* d_reclen */
2499 DT_DIR
, /* d_type */
2505 sizeof(struct hfsdotentry
), /* d_reclen */
2506 DT_DIR
, /* d_type */
2513 * There is some confusion as to what the semantics of uio_offset are.
2514 * In ufs, it represents the actual byte offset within the directory
2515 * "file." HFS, however, just uses it as an entry counter - essentially
2516 * assuming that it has no meaning except to the hfs_readdir function.
2517 * This approach would be more efficient here, but some callers may
2518 * assume the uio_offset acts like a byte offset. NFS in fact
2519 * monkeys around with the offset field a lot between readdir calls.
2521 * The use of the resid uiop->uio_resid and uiop->uio_iov->iov_len
2522 * fields is a mess as well. The libc function readdir() returns
2523 * NULL (indicating the end of a directory) when either
2524 * the getdirentries() syscall (which calls this and returns
2525 * the size of the buffer passed in less the value of uiop->uio_resid)
2526 * returns 0, or a direct record with a d_reclen of zero.
2527 * nfs_server.c:rfs_readdir(), on the other hand, checks for the end
2528 * of the directory by testing uiop->uio_resid == 0. The solution
2529 * is to pad the size of the last struct direct in a given
2530 * block to fill the block if we are not at the end of the directory.
2535 * NOTE: We require a minimal buffer size of DIRBLKSIZ for two reasons. One, it is the same value
2536 * returned be stat() call as the block size. This is mentioned in the man page for getdirentries():
2537 * "Nbytes must be greater than or equal to the block size associated with the file,
2538 * see stat(2)". Might as well settle on the same size of ufs. Second, this makes sure there is enough
2539 * room for the . and .. entries that have to added manually.
2546 IN struct vnode *vp;
2547 INOUT struct uio *uio;
2548 IN struct ucred *cred;
2551 INOUT u_long **cookies;
2555 struct vop_readdir_args
/* {
2564 register struct uio
*uio
= ap
->a_uio
;
2565 struct cnode
*cp
= VTOC(ap
->a_vp
);
2566 struct hfsmount
*hfsmp
= VTOHFS(ap
->a_vp
);
2567 struct proc
*p
= current_proc();
2568 off_t off
= uio
->uio_offset
;
2571 void *user_start
= NULL
;
2574 /* We assume it's all one big buffer... */
2575 if (uio
->uio_iovcnt
> 1 || uio
->uio_resid
< AVERAGE_HFSDIRENTRY_SIZE
)
2579 // We have to lock the user's buffer here so that we won't
2580 // fault on it after we've acquired a shared lock on the
2581 // catalog file. The issue is that you can get a 3-way
2582 // deadlock if someone else starts a transaction and then
2583 // tries to lock the catalog file but can't because we're
2584 // here and we can't service our page fault because VM is
2585 // blocked trying to start a transaction as a result of
2586 // trying to free up pages for our page fault. It's messy
2587 // but it does happen on dual-procesors that are paging
2588 // heavily (see radar 3082639 for more info). By locking
2589 // the buffer up-front we prevent ourselves from faulting
2590 // while holding the shared catalog file lock.
2592 // Fortunately this and hfs_search() are the only two places
2593 // currently (10/30/02) that can fault on user data with a
2594 // shared lock on the catalog file.
    if (hfsmp->jnl && uio->uio_segflg == UIO_USERSPACE) {
        user_start = uio->uio_iov->iov_base;
        user_len = uio->uio_iov->iov_len;

        if ((retval = vslock(user_start, user_len)) != 0) {
    /* Create the entries for . and .. */
    if (uio->uio_offset < sizeof(rootdots)) {
        rootdots[0].d_fileno = cp->c_cnid;
        rootdots[1].d_fileno = cp->c_parentcnid;

        if (uio->uio_offset == 0) {
            dep = (caddr_t) &rootdots[0];
            dotsize = 2 * sizeof(struct hfsdotentry);
        } else if (uio->uio_offset == sizeof(struct hfsdotentry)) {
            dep = (caddr_t) &rootdots[1];
            dotsize = sizeof(struct hfsdotentry);
        }

        retval = uiomove(dep, dotsize, uio);
    /* If there are no children then we're done */
    if (cp->c_entries == 0) {

    /* Lock catalog b-tree */
    retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
    if (retval) goto Exit;

    retval = cat_getdirentries(hfsmp, &cp->c_desc, uio, &eofflag);

    /* Unlock catalog b-tree */
    (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

    if (retval != E_NONE) {

    /* were we already past eof ? */
    if (uio->uio_offset == off) {
    cp->c_flag |= C_ACCESS;
    /* Bake any cookies */
    if (!retval && ap->a_ncookies != NULL) {
        struct dirent* dpStart;
        struct dirent* dpEnd;

        /*
         * Only the NFS server uses cookies, and it loads the
         * directory block into system space, so we can just look at
         * it directly.
         */
        if (uio->uio_segflg != UIO_SYSSPACE)
            panic("hfs_readdir: unexpected uio from NFS server");
        dpStart = (struct dirent *)(uio->uio_iov->iov_base - (uio->uio_offset - off));
        dpEnd = (struct dirent *) uio->uio_iov->iov_base;
        for (dp = dpStart, ncookies = 0;
             dp < dpEnd && dp->d_reclen != 0;
             dp = (struct dirent *)((caddr_t)dp + dp->d_reclen))
            ncookies++;
        MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP, M_WAITOK);
        for (dp = dpStart, cookiep = cookies;
             dp < dpEnd && dp->d_reclen != 0;
             dp = (struct dirent *)((caddr_t) dp + dp->d_reclen)) {
            off += dp->d_reclen;
            *cookiep++ = (u_long) off;
        }
        *ap->a_ncookies = ncookies;
        *ap->a_cookies = cookies;
    }
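    /*
     * Each cookie recorded above is the byte offset immediately past the
     * corresponding dirent (off accumulates d_reclen); the NFS server hands
     * a cookie back on a later call to resume the directory scan from that
     * entry.
     */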
    if (hfsmp->jnl && user_start) {
        vsunlock(user_start, user_len, TRUE);
    }

    *ap->a_eofflag = eofflag;
/*
 * Return target name of a symbolic link
#% readlink	vp	L L L
     IN struct vnode *vp;
     INOUT struct uio *uio;
     IN struct ucred *cred;
     */
    struct vop_readlink_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct vnode *vp = ap->a_vp;
    struct filefork *fp;

    if (vp->v_type != VLNK)

    /* Zero length sym links are not allowed */
    if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
        VTOVCB(vp)->vcbFlags |= kHFS_DamagedVolume;

    /* Cache the path so we don't waste buffer cache resources */
    if (fp->ff_symlinkptr == NULL) {
        struct buf *bp = NULL;

        MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
        retval = meta_bread(vp, 0,
                            roundup((int)fp->ff_size,
                                    VTOHFS(vp)->hfs_phys_block_size),

            if (fp->ff_symlinkptr) {
                FREE(fp->ff_symlinkptr, M_TEMP);
                fp->ff_symlinkptr = NULL;
            }

        bcopy(bp->b_data, fp->ff_symlinkptr, (size_t)fp->ff_size);

        if (VTOHFS(vp)->jnl && (bp->b_flags & B_LOCKED) == 0) {
            bp->b_flags |= B_INVAL;   /* data no longer needed */
        }

    retval = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
/*
 * hfs abort op, called after namei() when a CREATE/DELETE isn't actually
 * done.  If a buffer has been saved in anticipation of a CREATE, delete it.
#% abortop	dvp	= = =
     IN struct vnode *dvp;
     IN struct componentname *cnp;
     */
    struct vop_abortop_args /* {
        struct vnode *a_dvp;
        struct componentname *a_cnp;
    } */ *ap;
{
    if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) {
        FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI);
        ap->a_cnp->cn_flags &= ~HASBUF;
    }
/*
 * Lock a cnode.  If it's already locked, set the WANT bit and sleep.
     IN struct vnode *vp;
     */
    struct vop_lock_args /* {
    } */ *ap;
{
    struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);

        panic("hfs_lock: cnode in vnode is null\n");

    return (lockmgr(&cp->c_lock, ap->a_flags, &vp->v_interlock, ap->a_p));
/*
     IN struct vnode *vp;
     */
    struct vop_unlock_args /* {
    } */ *ap;
{
    struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);

        panic("hfs_unlock: cnode in vnode is null\n");

    return (lockmgr(&cp->c_lock, ap->a_flags | LK_RELEASE,
        &vp->v_interlock, ap->a_p));
/*
 * Print out the contents of a cnode.
     IN struct vnode *vp;
     */
    struct vop_print_args /* {
    } */ *ap;
{
    struct vnode * vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);

    printf("tag VT_HFS, cnid %d, on dev %d, %d", cp->c_cnid,
        major(cp->c_dev), minor(cp->c_dev));

    if (vp->v_type == VFIFO)

    lockmgr_printinfo(&cp->c_lock);
/*
 * Check for a locked cnode.
#% islocked	vp	= = =
     IN struct vnode *vp;
     */
    struct vop_islocked_args /* {
    } */ *ap;
{
    return (lockstatus(&VTOC(ap->a_vp)->c_lock));
/*
#% pathconf	vp	L L L
     IN struct vnode *vp;
     OUT register_t *retval;
     */
    struct vop_pathconf_args /* {
    } */ *ap;
{
    switch (ap->a_name) {
        if (VTOVCB(ap->a_vp)->vcbSigWord == kHFSPlusSigWord)
            *ap->a_retval = HFS_LINK_MAX;

        *ap->a_retval = kHFSPlusMaxFileNameBytes;  /* max # of characters x max utf8 representation */

        *ap->a_retval = PATH_MAX;  /* 1024 */

    case _PC_CHOWN_RESTRICTED:

    case _PC_NAME_CHARS_MAX:
        *ap->a_retval = kHFSPlusMaxFileNameChars;

    case _PC_CASE_SENSITIVE:

    case _PC_CASE_PRESERVING:
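/*
 * Illustrative sketch (not from the original source): userland reaches these
 * values through pathconf(2)/fpathconf(2), e.g.
 *
 *    long namemax = pathconf("/Volumes/MyHFS", _PC_NAME_MAX);
 *
 * where "/Volumes/MyHFS" is a hypothetical mount point; the HFS Plus limits
 * above are reported in bytes (kHFSPlusMaxFileNameBytes) or in characters
 * (_PC_NAME_CHARS_MAX).
 */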
/*
 * Advisory record locking support
     IN struct vnode *vp;
     IN struct flock *fl;
     */
    struct vop_advlock_args /* {
    } */ *ap;
{
    struct vnode *vp = ap->a_vp;
    struct flock *fl = ap->a_fl;
    struct hfslockf *lock;
    struct filefork *fork;

    /* Only regular files can have locks */
    if (vp->v_type != VREG)

    fork = VTOF(ap->a_vp);
    /*
     * Avoid the common case of unlocking when cnode has no locks.
     */
    if (fork->ff_lockf == (struct hfslockf *)0) {
        if (ap->a_op != F_SETLK) {
            fl->l_type = F_UNLCK;

    /*
     * Convert the flock structure into a start and end.
     */
    switch (fl->l_whence) {
        /*
         * Caller is responsible for adding any necessary offset
         * when SEEK_CUR is used.
         */
        start = fl->l_start;

        start = fork->ff_size + fl->l_start;

    end = start + fl->l_len - 1;
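    /*
     * Worked example (not from the original source): if start resolves to
     * 100 and l_len == 50, the byte range locked is [100, 149], i.e.
     * end = 100 + 50 - 1.  A SEEK_END based lock instead measures start
     * from fork->ff_size + l_start.
     */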
    /*
     * Create the hfslockf structure
     */
    MALLOC(lock, struct hfslockf *, sizeof *lock, M_LOCKF, M_WAITOK);
    lock->lf_start = start;
    lock->lf_id = ap->a_id;
    lock->lf_fork = fork;
    lock->lf_type = fl->l_type;
    lock->lf_next = (struct hfslockf *)0;
    TAILQ_INIT(&lock->lf_blkhd);
    lock->lf_flags = ap->a_flags;
    /*
     * Do the requested operation.
     */
        retval = hfs_setlock(lock);

        retval = hfs_clearlock(lock);
        FREE(lock, M_LOCKF);

        retval = hfs_getlock(lock, fl);
        FREE(lock, M_LOCKF);

        _FREE(lock, M_LOCKF);
/*
 * Update the access, modified, and node change times as specified
 * by the C_ACCESS, C_UPDATE, and C_CHANGE flags respectively.  The
 * C_MODIFIED flag is used to specify that the node needs to be
 * updated but that the times have already been set.  The access and
 * modified times are input parameters but the node change time is
 * always taken from the current time.  If waitfor is set, then wait
 * for the disk write of the node to complete.
     IN struct vnode *vp;
     IN struct timeval *access;
     IN struct timeval *modify;
     */
    struct vop_update_args /* {
        struct timeval *a_access;
        struct timeval *a_modify;
    } */ *ap;
{
    struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(ap->a_vp);
    struct cat_fork *dataforkp = NULL;
    struct cat_fork *rsrcforkp = NULL;
    struct cat_fork datafork;
    struct hfsmount *hfsmp;
    /* XXX do we really want to clear the system cnode flags here???? */
    if ((vp->v_flag & VSYSTEM) ||
        (VTOVFS(vp)->mnt_flag & MNT_RDONLY) ||
        (cp->c_mode == 0)) {
        cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);

    updateflag = cp->c_flag & (C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);

    /* Nothing to update. */
    if (updateflag == 0) {

    /* HFS standard doesn't have access times. */
    if ((updateflag == C_ACCESS) && (VTOVCB(vp)->vcbSigWord == kHFSSigWord)) {

    if (updateflag & C_ACCESS) {
        /*
         * If only the access time is changing then defer
         * updating it on-disk until later (in hfs_inactive).
         * If it was recently updated then skip the update.
         */
        if (updateflag == C_ACCESS) {
            cp->c_flag &= ~C_ACCESS;

            /* It's going to disk or it's sufficiently newer... */
            if ((cp->c_flag & C_ATIMEMOD) ||
                (ap->a_access->tv_sec > (cp->c_atime + ATIME_ACCURACY))) {
                cp->c_atime = ap->a_access->tv_sec;
                cp->c_flag |= C_ATIMEMOD;

            cp->c_atime = ap->a_access->tv_sec;

    if (updateflag & C_UPDATE) {
        cp->c_mtime = ap->a_modify->tv_sec;
        cp->c_mtime_nsec = ap->a_modify->tv_usec * 1000;

    if (updateflag & C_CHANGE) {
        cp->c_ctime = time.tv_sec;
        /*
         * HFS dates that WE set must be adjusted for DST
         */
        if ((VTOVCB(vp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) {
            cp->c_ctime += 3600;
            cp->c_mtime = cp->c_ctime;
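            /*
             * Illustrative note (an assumption of this edit): plain HFS
             * volumes store dates in local time (unlike HFS Plus, which
             * uses GMT), so when DST is in effect the value recorded here
             * is shifted forward one hour; hfs_makenode() below applies
             * the same +3600 adjustment to ca_mtime.
             */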
        dataforkp = &cp->c_datafork->ff_data;

        rsrcforkp = &cp->c_rsrcfork->ff_data;

    /*
     * For delayed allocations updates are
     * postponed until an fsync or the file
     * gets written to disk.
     *
     * Deleted files can defer meta data updates until inactive.
     */
    if (ISSET(cp->c_flag, C_DELETED) ||
        (dataforkp && cp->c_datafork->ff_unallocblocks) ||
        (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks)) {
        if (updateflag & (C_CHANGE | C_UPDATE))
            hfs_volupdate(hfsmp, VOL_UPDATE, 0);
        cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE);
        cp->c_flag |= C_MODIFIED;
    hfs_global_shared_lock_acquire(hfsmp);

    if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
        hfs_global_shared_lock_release(hfsmp);

    /*
     * For files with invalid ranges (holes) the on-disk
     * field representing the size of the file (cf_size)
     * must be no larger than the start of the first hole.
     */
    if (dataforkp && !CIRCLEQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
        bcopy(dataforkp, &datafork, sizeof(datafork));
        datafork.cf_size = CIRCLEQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
        dataforkp = &datafork;

    /*
     * Lock the Catalog b-tree file.
     * A shared lock is sufficient since an update doesn't change
     * the tree and the lock on vp protects the cnode.
     */
    error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);

        journal_end_transaction(hfsmp->jnl);

        hfs_global_shared_lock_release(hfsmp);

    /* XXX - waitfor is not enforced */
    error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);

    /* Unlock the Catalog b-tree file. */
    (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

    if (updateflag & (C_CHANGE | C_UPDATE))
        hfs_volupdate(hfsmp, VOL_UPDATE, 0);

        journal_end_transaction(hfsmp->jnl);

    hfs_global_shared_lock_release(hfsmp);

    /* After the updates are finished, clear the flags */
    cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE | C_ATIMEMOD);
/*
 * Allocate a new node
 *
 * Upon leaving, namei buffer must be freed.
 */
static int
hfs_makenode(mode, dvp, vpp, cnp)
    struct componentname *cnp;
{
    struct hfsmount *hfsmp;
    struct cat_desc in_desc, out_desc;
    struct cat_attr attr;
    int error, started_tr = 0, grabbed_lock = 0;
    enum vtype vnodetype;

    hfsmp = VTOHFS(dvp);

    bzero(&out_desc, sizeof(out_desc));

    if ((mode & S_IFMT) == 0)

    vnodetype = IFTOVT(mode);

    /* Check if unmount in progress */
    if (VTOVFS(dvp)->mnt_kern_flag & MNTK_UNMOUNT) {

    /* Check if we're out of usable disk space. */
    if ((suser(cnp->cn_cred, NULL) != 0) && (hfs_freeblks(hfsmp, 1) <= 0)) {
    /* Setup the default attributes */
    bzero(&attr, sizeof(attr));
    attr.ca_mode = mode;
    attr.ca_nlink = vnodetype == VDIR ? 2 : 1;
    attr.ca_mtime = time.tv_sec;
    attr.ca_mtime_nsec = time.tv_usec * 1000;
    if ((VTOVCB(dvp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) {
        attr.ca_mtime += 3600;   /* Same as what hfs_update does */
    }
    attr.ca_atime = attr.ca_ctime = attr.ca_itime = attr.ca_mtime;
    if (VTOVFS(dvp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {
        attr.ca_uid = hfsmp->hfs_uid;
        attr.ca_gid = hfsmp->hfs_gid;

        if (vnodetype == VLNK)
            attr.ca_uid = dcp->c_uid;

            attr.ca_uid = cnp->cn_cred->cr_uid;
        attr.ca_gid = dcp->c_gid;

    /*
     * Don't tag as a special file (BLK or CHR) until *after*
     * hfs_getnewvnode is called.  This ensures that any
     * alias checking is deferred until hfs_mknod completes.
     */
    if (vnodetype == VBLK || vnodetype == VCHR)
        attr.ca_mode = (attr.ca_mode & ~S_IFMT) | S_IFREG;

    /* Tag symlinks with a type and creator. */
    if (vnodetype == VLNK) {
        struct FndrFileInfo *fip;

        fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
        fip->fdType = SWAP_BE32(kSymLinkFileType);
        fip->fdCreator = SWAP_BE32(kSymLinkCreator);
    }
    if ((attr.ca_mode & S_ISGID) &&
        !groupmember(dcp->c_gid, cnp->cn_cred) &&
        suser(cnp->cn_cred, NULL)) {
        attr.ca_mode &= ~S_ISGID;
    }
    if (cnp->cn_flags & ISWHITEOUT)
        attr.ca_flags |= UF_OPAQUE;
    /* Setup the descriptor */
    bzero(&in_desc, sizeof(in_desc));
    in_desc.cd_nameptr = cnp->cn_nameptr;
    in_desc.cd_namelen = cnp->cn_namelen;
    in_desc.cd_parentcnid = dcp->c_cnid;
    in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;

    hfs_global_shared_lock_acquire(hfsmp);

    if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {

    /* Lock catalog b-tree */
    error = hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_EXCLUSIVE, p);

    error = cat_create(hfsmp, &in_desc, &attr, &out_desc);

    /* Unlock catalog b-tree */
    (void) hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_RELEASE, p);

    /* Update the parent directory */
    dcp->c_childhint = out_desc.cd_hint;   /* Cache directory's location */

    dcp->c_flag |= C_CHANGE | C_UPDATE;

    (void) VOP_UPDATE(dvp, &tv, &tv, 0);

    hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
        (dcp->c_cnid == kHFSRootFolderID));
    // have to end the transaction here before we call hfs_getnewvnode()
    // because that can cause us to try and reclaim a vnode on a different
    // file system which could cause us to start a transaction which can
    // deadlock with someone on that other file system (since we could be
    // holding two transaction locks as well as various vnodes and we did
    // not obtain the locks on them in the proper order).
    //
    // NOTE: this means that if the quota check fails or we have to update
    //       the change time on a block-special device that those changes
    //       will happen as part of independent transactions.
    //
        journal_end_transaction(hfsmp->jnl);

        hfs_global_shared_lock_release(hfsmp);
    /* Create a vnode for the object just created: */
    error = hfs_getnewvnode(hfsmp, NULL, &out_desc, 0, &attr, NULL, &tvp);

    /*
     * We call hfs_chkiq with FORCE flag so that if we
     * fall through to the rmdir we actually have
     * accounted for the inode
     */
    if ((error = hfs_getinoquota(cp)) ||
        (error = hfs_chkiq(cp, 1, cnp->cn_cred, FORCE))) {
        if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) {
            FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
            cnp->cn_flags &= ~HASBUF;
        }
        if (tvp->v_type == VDIR)
            VOP_RMDIR(dvp, tvp, cnp);

            VOP_REMOVE(dvp, tvp, cnp);

    /*
     * restore vtype and mode for VBLK and VCHR
     */
    if (vnodetype == VBLK || vnodetype == VCHR) {
        tvp->v_type = IFTOVT(mode);
        cp->c_flag |= C_CHANGE;

        if ((error = VOP_UPDATE(tvp, &tv, &tv, 1))) {

    cat_releasedesc(&out_desc);

    if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
        FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
    /*
     * Check if a file is located in the "Cleanup At Startup"
     * directory.  If it is then tag it as NODUMP so that we
     * can be lazy about zero filling data holes.
     */
    if ((error == 0) && (vnodetype == VREG) &&
        (dcp->c_desc.cd_nameptr != NULL) &&
        (strcmp(dcp->c_desc.cd_nameptr, "Cleanup At Startup") == 0)) {

        parid = dcp->c_parentcnid;

        /*
         * The parent of "Cleanup At Startup" should
         * have the ASCII name of the userid.
         */
        if (VFS_VGET(HFSTOVFS(hfsmp), &parid, &ddvp) == 0) {
            if (VTOC(ddvp)->c_desc.cd_nameptr &&
                (cp->c_uid == strtoul(VTOC(ddvp)->c_desc.cd_nameptr, 0, 0))) {
                cp->c_flags |= UF_NODUMP;
                cp->c_flag |= C_CHANGE;
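                /*
                 * Example (not from the original source): for a hypothetical
                 * path ".../501/Cleanup At Startup/scratchfile", the parent
                 * directory name "501" parsed by strtoul() matches a file
                 * owner whose uid is 501, so the new file gets UF_NODUMP.
                 */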
        journal_end_transaction(hfsmp->jnl);

    hfs_global_shared_lock_release(hfsmp);
static int
hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, struct proc *p)
{
    struct cnode *cp = VTOC(vp);

    if ((rvp = cp->c_rsrc_vp)) {
        /* Use existing vnode */
        error = vget(rvp, 0, p);

            char * name = VTOC(vp)->c_desc.cd_nameptr;

            printf("hfs_vgetrsrc: couldn't get"
                " resource fork for %s\n", name);

        struct cat_fork rsrcfork;

        /* Lock catalog b-tree */
        error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);

        /* Get resource fork data */
        error = cat_lookup(hfsmp, &cp->c_desc, 1, (struct cat_desc *)0,
                (struct cat_attr *)0, &rsrcfork);

        /* Unlock the Catalog */
        (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

        error = hfs_getnewvnode(hfsmp, cp, &cp->c_desc, 1, &cp->c_attr,
/*
 * Wrapper for special device reads
 */
    struct vop_read_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    VTOC(ap->a_vp)->c_flag |= C_ACCESS;
    return (VOCALL (spec_vnodeop_p, VOFFSET(vop_read), ap));

/*
 * Wrapper for special device writes
 */
    struct vop_write_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    /*
     * Set update and change flags.
     */
    VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE;
    return (VOCALL (spec_vnodeop_p, VOFFSET(vop_write), ap));

/*
 * Wrapper for special device close
 *
 * Update the times on the cnode then do device close.
 */
    struct vop_close_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);

    simple_lock(&vp->v_interlock);
    if (ap->a_vp->v_usecount > 1)
        CTIMES(cp, &time, &time);
    simple_unlock(&vp->v_interlock);
    return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
/*
 * Wrapper for fifo reads
 */
    struct vop_read_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    extern int (**fifo_vnodeop_p)(void *);

    VTOC(ap->a_vp)->c_flag |= C_ACCESS;
    return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_read), ap));

/*
 * Wrapper for fifo writes
 */
    struct vop_write_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    extern int (**fifo_vnodeop_p)(void *);

    /*
     * Set update and change flags.
     */
    VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE;
    return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_write), ap));

/*
 * Wrapper for fifo close
 *
 * Update the times on the cnode then do device close.
 */
    struct vop_close_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    extern int (**fifo_vnodeop_p)(void *);
    struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);

    simple_lock(&vp->v_interlock);
    if (ap->a_vp->v_usecount > 1)
        CTIMES(cp, &time, &time);
    simple_unlock(&vp->v_interlock);
    return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
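/*
 * Note: each wrapper above only touches the cnode's access/change flags and
 * then forwards the call, via VOCALL() and the operation's VOFFSET() slot,
 * to the generic specfs or fifofs vnode operation vector; the actual I/O is
 * handled entirely by those layers.
 */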
/*****************************************************************************
*****************************************************************************/
int hfs_cache_lookup();    /* in hfs_lookup.c */
int hfs_lookup();          /* in hfs_lookup.c */
int hfs_read();            /* in hfs_readwrite.c */
int hfs_write();           /* in hfs_readwrite.c */
int hfs_ioctl();           /* in hfs_readwrite.c */
int hfs_select();          /* in hfs_readwrite.c */
int hfs_bmap();            /* in hfs_readwrite.c */
int hfs_strategy();        /* in hfs_readwrite.c */
int hfs_truncate();        /* in hfs_readwrite.c */
int hfs_allocate();        /* in hfs_readwrite.c */
int hfs_pagein();          /* in hfs_readwrite.c */
int hfs_pageout();         /* in hfs_readwrite.c */
int hfs_search();          /* in hfs_search.c */
int hfs_bwrite();          /* in hfs_readwrite.c */
int hfs_link();            /* in hfs_link.c */
int hfs_blktooff();        /* in hfs_readwrite.c */
int hfs_offtoblk();        /* in hfs_readwrite.c */
int hfs_cmap();            /* in hfs_readwrite.c */
int hfs_getattrlist();     /* in hfs_attrlist.c */
int hfs_setattrlist();     /* in hfs_attrlist.c */
int hfs_readdirattr();     /* in hfs_attrlist.c */
int hfs_inactive();        /* in hfs_cnode.c */
int hfs_reclaim();         /* in hfs_cnode.c */
int (**hfs_vnodeop_p)(void *);

#define VOPFUNC int (*)(void *)

struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
    { &vop_default_desc, (VOPFUNC)vn_default_error },
    { &vop_lookup_desc, (VOPFUNC)hfs_cache_lookup },        /* lookup */
    { &vop_create_desc, (VOPFUNC)hfs_create },              /* create */
    { &vop_mknod_desc, (VOPFUNC)hfs_mknod },                /* mknod */
    { &vop_open_desc, (VOPFUNC)hfs_open },                  /* open */
    { &vop_close_desc, (VOPFUNC)hfs_close },                /* close */
    { &vop_access_desc, (VOPFUNC)hfs_access },              /* access */
    { &vop_getattr_desc, (VOPFUNC)hfs_getattr },            /* getattr */
    { &vop_setattr_desc, (VOPFUNC)hfs_setattr },            /* setattr */
    { &vop_read_desc, (VOPFUNC)hfs_read },                  /* read */
    { &vop_write_desc, (VOPFUNC)hfs_write },                /* write */
    { &vop_ioctl_desc, (VOPFUNC)hfs_ioctl },                /* ioctl */
    { &vop_select_desc, (VOPFUNC)hfs_select },              /* select */
    { &vop_exchange_desc, (VOPFUNC)hfs_exchange },          /* exchange */
    { &vop_mmap_desc, (VOPFUNC)err_mmap },                  /* mmap */
    { &vop_fsync_desc, (VOPFUNC)hfs_fsync },                /* fsync */
    { &vop_seek_desc, (VOPFUNC)nop_seek },                  /* seek */
    { &vop_remove_desc, (VOPFUNC)hfs_remove },              /* remove */
    { &vop_link_desc, (VOPFUNC)hfs_link },                  /* link */
    { &vop_rename_desc, (VOPFUNC)hfs_rename },              /* rename */
    { &vop_mkdir_desc, (VOPFUNC)hfs_mkdir },                /* mkdir */
    { &vop_rmdir_desc, (VOPFUNC)hfs_rmdir },                /* rmdir */
    { &vop_mkcomplex_desc, (VOPFUNC)err_mkcomplex },        /* mkcomplex */
    { &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist },    /* getattrlist */
    { &vop_setattrlist_desc, (VOPFUNC)hfs_setattrlist },    /* setattrlist */
    { &vop_symlink_desc, (VOPFUNC)hfs_symlink },            /* symlink */
    { &vop_readdir_desc, (VOPFUNC)hfs_readdir },            /* readdir */
    { &vop_readdirattr_desc, (VOPFUNC)hfs_readdirattr },    /* readdirattr */
    { &vop_readlink_desc, (VOPFUNC)hfs_readlink },          /* readlink */
    { &vop_abortop_desc, (VOPFUNC)hfs_abortop },            /* abortop */
    { &vop_inactive_desc, (VOPFUNC)hfs_inactive },          /* inactive */
    { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim },            /* reclaim */
    { &vop_lock_desc, (VOPFUNC)hfs_lock },                  /* lock */
    { &vop_unlock_desc, (VOPFUNC)hfs_unlock },              /* unlock */
    { &vop_bmap_desc, (VOPFUNC)hfs_bmap },                  /* bmap */
    { &vop_strategy_desc, (VOPFUNC)hfs_strategy },          /* strategy */
    { &vop_print_desc, (VOPFUNC)hfs_print },                /* print */
    { &vop_islocked_desc, (VOPFUNC)hfs_islocked },          /* islocked */
    { &vop_pathconf_desc, (VOPFUNC)hfs_pathconf },          /* pathconf */
    { &vop_advlock_desc, (VOPFUNC)hfs_advlock },            /* advlock */
    { &vop_reallocblks_desc, (VOPFUNC)err_reallocblks },    /* reallocblks */
    { &vop_truncate_desc, (VOPFUNC)hfs_truncate },          /* truncate */
    { &vop_allocate_desc, (VOPFUNC)hfs_allocate },          /* allocate */
    { &vop_update_desc, (VOPFUNC)hfs_update },              /* update */
    { &vop_searchfs_desc, (VOPFUNC)hfs_search },            /* search fs */
    { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },              /* bwrite */
    { &vop_pagein_desc, (VOPFUNC)hfs_pagein },              /* pagein */
    { &vop_pageout_desc, (VOPFUNC)hfs_pageout },            /* pageout */
    { &vop_copyfile_desc, (VOPFUNC)err_copyfile },          /* copyfile */
    { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff },          /* blktooff */
    { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk },          /* offtoblk */
    { &vop_cmap_desc, (VOPFUNC)hfs_cmap },                  /* cmap */
    { NULL, (VOPFUNC)NULL }
};

struct vnodeopv_desc hfs_vnodeop_opv_desc =
    { &hfs_vnodeop_p, hfs_vnodeop_entries };
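/*
 * Note: each vnodeopv_desc pairs an operations vector pointer (here
 * hfs_vnodeop_p) with its entry table; the VFS layer builds the actual
 * dispatch vector from these tables at VFS initialization, and calls such
 * as the VOCALL()/VOFFSET() wrappers earlier in this file index into
 * vectors of this same form.
 */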
int (**hfs_specop_p)(void *);
struct vnodeopv_entry_desc hfs_specop_entries[] = {
    { &vop_default_desc, (VOPFUNC)vn_default_error },
    { &vop_lookup_desc, (VOPFUNC)spec_lookup },             /* lookup */
    { &vop_create_desc, (VOPFUNC)spec_create },             /* create */
    { &vop_mknod_desc, (VOPFUNC)spec_mknod },               /* mknod */
    { &vop_open_desc, (VOPFUNC)spec_open },                 /* open */
    { &vop_close_desc, (VOPFUNC)hfsspec_close },            /* close */
    { &vop_access_desc, (VOPFUNC)hfs_access },              /* access */
    { &vop_getattr_desc, (VOPFUNC)hfs_getattr },            /* getattr */
    { &vop_setattr_desc, (VOPFUNC)hfs_setattr },            /* setattr */
    { &vop_read_desc, (VOPFUNC)hfsspec_read },              /* read */
    { &vop_write_desc, (VOPFUNC)hfsspec_write },            /* write */
    { &vop_lease_desc, (VOPFUNC)spec_lease_check },         /* lease */
    { &vop_ioctl_desc, (VOPFUNC)spec_ioctl },               /* ioctl */
    { &vop_select_desc, (VOPFUNC)spec_select },             /* select */
    { &vop_revoke_desc, (VOPFUNC)spec_revoke },             /* revoke */
    { &vop_mmap_desc, (VOPFUNC)spec_mmap },                 /* mmap */
    { &vop_fsync_desc, (VOPFUNC)hfs_fsync },                /* fsync */
    { &vop_seek_desc, (VOPFUNC)spec_seek },                 /* seek */
    { &vop_remove_desc, (VOPFUNC)spec_remove },             /* remove */
    { &vop_link_desc, (VOPFUNC)spec_link },                 /* link */
    { &vop_rename_desc, (VOPFUNC)spec_rename },             /* rename */
    { &vop_mkdir_desc, (VOPFUNC)spec_mkdir },               /* mkdir */
    { &vop_rmdir_desc, (VOPFUNC)spec_rmdir },               /* rmdir */
    { &vop_symlink_desc, (VOPFUNC)spec_symlink },           /* symlink */
    { &vop_readdir_desc, (VOPFUNC)spec_readdir },           /* readdir */
    { &vop_readlink_desc, (VOPFUNC)spec_readlink },         /* readlink */
    { &vop_abortop_desc, (VOPFUNC)spec_abortop },           /* abortop */
    { &vop_inactive_desc, (VOPFUNC)hfs_inactive },          /* inactive */
    { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim },            /* reclaim */
    { &vop_lock_desc, (VOPFUNC)hfs_lock },                  /* lock */
    { &vop_unlock_desc, (VOPFUNC)hfs_unlock },              /* unlock */
    { &vop_bmap_desc, (VOPFUNC)spec_bmap },                 /* bmap */
    { &vop_strategy_desc, (VOPFUNC)spec_strategy },         /* strategy */
    { &vop_print_desc, (VOPFUNC)hfs_print },                /* print */
    { &vop_islocked_desc, (VOPFUNC)hfs_islocked },          /* islocked */
    { &vop_pathconf_desc, (VOPFUNC)spec_pathconf },         /* pathconf */
    { &vop_advlock_desc, (VOPFUNC)spec_advlock },           /* advlock */
    { &vop_blkatoff_desc, (VOPFUNC)spec_blkatoff },         /* blkatoff */
    { &vop_valloc_desc, (VOPFUNC)spec_valloc },             /* valloc */
    { &vop_reallocblks_desc, (VOPFUNC)spec_reallocblks },   /* reallocblks */
    { &vop_vfree_desc, (VOPFUNC)err_vfree },                /* vfree */
    { &vop_truncate_desc, (VOPFUNC)spec_truncate },         /* truncate */
    { &vop_update_desc, (VOPFUNC)hfs_update },              /* update */
    { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },
    { &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize }, /* devblocksize */
    { &vop_pagein_desc, (VOPFUNC)hfs_pagein },              /* Pagein */
    { &vop_pageout_desc, (VOPFUNC)hfs_pageout },            /* Pageout */
    { &vop_copyfile_desc, (VOPFUNC)err_copyfile },          /* copyfile */
    { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff },          /* blktooff */
    { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk },          /* offtoblk */
    { (struct vnodeop_desc *)NULL, (VOPFUNC)NULL }
};
struct vnodeopv_desc hfs_specop_opv_desc =
    { &hfs_specop_p, hfs_specop_entries };
int (**hfs_fifoop_p)(void *);
struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
    { &vop_default_desc, (VOPFUNC)vn_default_error },
    { &vop_lookup_desc, (VOPFUNC)fifo_lookup },             /* lookup */
    { &vop_create_desc, (VOPFUNC)fifo_create },             /* create */
    { &vop_mknod_desc, (VOPFUNC)fifo_mknod },               /* mknod */
    { &vop_open_desc, (VOPFUNC)fifo_open },                 /* open */
    { &vop_close_desc, (VOPFUNC)hfsfifo_close },            /* close */
    { &vop_access_desc, (VOPFUNC)hfs_access },              /* access */
    { &vop_getattr_desc, (VOPFUNC)hfs_getattr },            /* getattr */
    { &vop_setattr_desc, (VOPFUNC)hfs_setattr },            /* setattr */
    { &vop_read_desc, (VOPFUNC)hfsfifo_read },              /* read */
    { &vop_write_desc, (VOPFUNC)hfsfifo_write },            /* write */
    { &vop_lease_desc, (VOPFUNC)fifo_lease_check },         /* lease */
    { &vop_ioctl_desc, (VOPFUNC)fifo_ioctl },               /* ioctl */
    { &vop_select_desc, (VOPFUNC)fifo_select },             /* select */
    { &vop_revoke_desc, (VOPFUNC)fifo_revoke },             /* revoke */
    { &vop_mmap_desc, (VOPFUNC)fifo_mmap },                 /* mmap */
    { &vop_fsync_desc, (VOPFUNC)hfs_fsync },                /* fsync */
    { &vop_seek_desc, (VOPFUNC)fifo_seek },                 /* seek */
    { &vop_remove_desc, (VOPFUNC)fifo_remove },             /* remove */
    { &vop_link_desc, (VOPFUNC)fifo_link },                 /* link */
    { &vop_rename_desc, (VOPFUNC)fifo_rename },             /* rename */
    { &vop_mkdir_desc, (VOPFUNC)fifo_mkdir },               /* mkdir */
    { &vop_rmdir_desc, (VOPFUNC)fifo_rmdir },               /* rmdir */
    { &vop_symlink_desc, (VOPFUNC)fifo_symlink },           /* symlink */
    { &vop_readdir_desc, (VOPFUNC)fifo_readdir },           /* readdir */
    { &vop_readlink_desc, (VOPFUNC)fifo_readlink },         /* readlink */
    { &vop_abortop_desc, (VOPFUNC)fifo_abortop },           /* abortop */
    { &vop_inactive_desc, (VOPFUNC)hfs_inactive },          /* inactive */
    { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim },            /* reclaim */
    { &vop_lock_desc, (VOPFUNC)hfs_lock },                  /* lock */
    { &vop_unlock_desc, (VOPFUNC)hfs_unlock },              /* unlock */
    { &vop_bmap_desc, (VOPFUNC)fifo_bmap },                 /* bmap */
    { &vop_strategy_desc, (VOPFUNC)fifo_strategy },         /* strategy */
    { &vop_print_desc, (VOPFUNC)hfs_print },                /* print */
    { &vop_islocked_desc, (VOPFUNC)hfs_islocked },          /* islocked */
    { &vop_pathconf_desc, (VOPFUNC)fifo_pathconf },         /* pathconf */
    { &vop_advlock_desc, (VOPFUNC)fifo_advlock },           /* advlock */
    { &vop_blkatoff_desc, (VOPFUNC)fifo_blkatoff },         /* blkatoff */
    { &vop_valloc_desc, (VOPFUNC)fifo_valloc },             /* valloc */
    { &vop_reallocblks_desc, (VOPFUNC)fifo_reallocblks },   /* reallocblks */
    { &vop_vfree_desc, (VOPFUNC)err_vfree },                /* vfree */
    { &vop_truncate_desc, (VOPFUNC)fifo_truncate },         /* truncate */
    { &vop_update_desc, (VOPFUNC)hfs_update },              /* update */
    { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },
    { &vop_pagein_desc, (VOPFUNC)hfs_pagein },              /* Pagein */
    { &vop_pageout_desc, (VOPFUNC)hfs_pageout },            /* Pageout */
    { &vop_copyfile_desc, (VOPFUNC)err_copyfile },          /* copyfile */
    { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff },          /* blktooff */
    { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk },          /* offtoblk */
    { &vop_cmap_desc, (VOPFUNC)hfs_cmap },                  /* cmap */
    { (struct vnodeop_desc *)NULL, (VOPFUNC)NULL }
};
struct vnodeopv_desc hfs_fifoop_opv_desc =
    { &hfs_fifoop_p, hfs_fifoop_entries };