/* from bsd/hfs/hfs_vfsops.c */
extern int hfs_vfs_vget (struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context);
-static int hfs_clonelink(struct vnode *, int, kauth_cred_t, struct proc *);
static int hfs_clonefile(struct vnode *, int, int, int);
static int hfs_clonesysfile(struct vnode *, int, int, int, kauth_cred_t, struct proc *);
static int hfs_minorupdate(struct vnode *vp);
static int do_hfs_truncate(struct vnode *vp, off_t length, int flags, int skip, vfs_context_t context);
+/* from bsd/hfs/hfs_vnops.c */
+extern decmpfs_cnode* hfs_lazy_init_decmpfs_cnode (struct cnode *cp);
+
+
int flush_cache_on_write = 0;
SYSCTL_INT (_kern, OID_AUTO, flush_cache_on_write, CTLFLAG_RW | CTLFLAG_LOCKED, &flush_cache_on_write, 0, "always flush the drive cache on writes to uncached files");
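/*
 * For reference, a sketch of the lock arguments the reworked calls below expect:
 * hfs_lock(), hfs_lock_truncate() and hfs_unlock_truncate() now take an explicit
 * lock type plus a flags word in place of HFS_FORCE_LOCK and the old
 * "been_recursed" integer. Based on bsd/hfs/hfs_cnode.h; treat the exact values
 * here as assumptions for illustration.
 */
enum hfs_locktype {
	HFS_SHARED_LOCK    = 1,		/* take the cnode rwlock shared */
	HFS_EXCLUSIVE_LOCK = 2		/* take the cnode rwlock exclusive */
};

enum hfs_lockflags {
	HFS_LOCK_DEFAULT           = 0x0,	/* no special semantics */
	HFS_LOCK_ALLOW_NOEXISTS    = 0x1,	/* lock even if C_NOEXISTS is set (old HFS_FORCE_LOCK) */
	HFS_LOCK_SKIP_IF_EXCLUSIVE = 0x2	/* skip if this thread already holds the lock exclusive (old recursive truncate-lock path) */
};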
if (offset < 0)
return (EINVAL); /* can't read from a negative offset */
+
+
#if HFS_COMPRESSION
if (VNODE_IS_RSRC(vp)) {
if (hfs_hides_rsrc(ap->a_context, VTOC(vp), 1)) { /* 1 == don't take the cnode lock */
/*
* If this read request originated from a syscall (as opposed to
* an in-kernel page fault or something), then set it up for
- * throttle checks. For example, large EAs may cause a VNOP_READ
- * to occur, and we wouldn't want to throttle I/O while holding the
- * EA B-Tree lock.
+ * throttle checks
*/
if (ap->a_ioflag & IO_SYSCALL_DISPATCH) {
io_throttle = IO_RETURN_ON_THROTTLE;
read_again:
/* Protect against a size change. */
- hfs_lock_truncate(cp, HFS_SHARED_LOCK);
+ hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
took_truncate_lock = 1;
filesize = fp->ff_size;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 12)) | DBG_FUNC_START,
(int)uio_offset(uio), uio_resid(uio), (int)filesize, (int)filebytes, 0);
- retval = cluster_read(vp, uio, filesize, ap->a_ioflag | (io_throttle));
+ retval = cluster_read(vp, uio, filesize, ap->a_ioflag | io_throttle);
cp->c_touch_acctime = TRUE;
/* When ff_bytesread exceeds 32-bits, update it behind the cnode lock. */
if ((fp->ff_bytesread + bytesread) > 0x00000000ffffffff) {
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
took_cnode_lock = 1;
}
/*
}
exit:
if (took_truncate_lock) {
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
}
if (retval == EAGAIN) {
throttle_lowpri_io(1);
#endif
- // LP64todo - fix this! uio_resid may be 64-bit value
resid = uio_resid(uio);
offset = uio_offset(uio);
}
#endif /* HFS_SPARSE_DEV */
- if ((ioflag & (IO_SINGLE_WRITER | IO_RETURN_ON_THROTTLE)) ==
- (IO_SINGLE_WRITER | IO_RETURN_ON_THROTTLE)) {
+ if ((ioflag & (IO_SINGLE_WRITER | IO_SYSCALL_DISPATCH)) ==
+ (IO_SINGLE_WRITER | IO_SYSCALL_DISPATCH)) {
io_return_on_throttle = IO_RETURN_ON_THROTTLE;
}
+
again:
/* Protect against a size change. */
/*
* start.
*/
if (ioflag & IO_APPEND || took_truncate_lock) {
- hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
+ hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
}
else {
- hfs_lock_truncate(cp, HFS_SHARED_LOCK);
+ hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
}
took_truncate_lock = 1;
}
}
- if ( (retval = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) {
+ if ( (retval = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
goto exit;
}
cnode_locked = 1;
+ if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
+ hfs_incr_gencount (cp);
+ }
+
/*
* Now that we have the cnode lock, see if there are delayed zero fill ranges
* overlapping our write. If so, we need the truncate lock exclusive (see above).
*/
hfs_unlock(cp);
cnode_locked = 0;
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
goto again;
}
fp->ff_size, inval_start,
zero_off, (off_t)0,
lflag | IO_HEADZEROFILL | IO_NOZERODIRTY);
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
cnode_locked = 1;
if (retval) goto ioerr_exit;
offset = uio_offset(uio);
cred = vfs_context_ucred(ap->a_context);
if (resid > uio_resid(uio) && cred && suser(cred, NULL)) {
if (!cnode_locked) {
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
cnode_locked = 1;
}
cp->c_mode &= ~(S_ISUID | S_ISGID);
if (retval) {
if (ioflag & IO_UNIT) {
if (!cnode_locked) {
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
cnode_locked = 1;
}
(void)hfs_truncate(vp, origFileSize, ioflag & IO_SYNC,
0, 0, ap->a_context);
- // LP64todo - fix this! resid needs to by user_ssize_t
uio_setoffset(uio, (uio_offset(uio) - (resid - uio_resid(uio))));
uio_setresid(uio, resid);
filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;
}
} else if ((ioflag & IO_SYNC) && (resid > uio_resid(uio))) {
if (!cnode_locked) {
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
cnode_locked = 1;
}
retval = hfs_update(vp, TRUE);
hfs_unlock(cp);
if (took_truncate_lock) {
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
}
if (retval == EAGAIN) {
throttle_lowpri_io(1);
}
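/*
 * For context on the IO_RETURN_ON_THROTTLE plumbing above: when the cluster
 * layer hands back EAGAIN, both the read and write paths drop their locks,
 * sleep in the throttle mechanism, and loop back to the read_again/again
 * label. The full retry idiom is roughly the following sketch:
 */
	if (retval == EAGAIN) {
		/* Yield to the I/O throttle with no cnode locks held, then retry. */
		throttle_lowpri_io(1);

		retval = 0;
		goto again;		/* read_again in hfs_vnop_read() */
	}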
if (cache->numcached > NUM_CACHE_ENTRIES) {
- /*printf("hfs: EGAD! numcached is %d... cut our losses and trim to %d\n",
- cache->numcached, NUM_CACHE_ENTRIES);*/
cache->numcached = NUM_CACHE_ENTRIES;
}
/* if the cache is full, do a replace rather than an insert */
if (cache->numcached >= NUM_CACHE_ENTRIES) {
- //printf("hfs: cache is full (%d). replace at index %d\n", cache->numcached, index);
cache->numcached = NUM_CACHE_ENTRIES-1;
if (index > cache->numcached) {
- // printf("hfs: index %d pinned to %d\n", index, cache->numcached);
index = cache->numcached;
}
}
err_exit_bulk_access:
- //printf("hfs: on exit (err %d), numfiles/numcached/cachehits/lookups is %d/%d/%d/%d\n", error, num_files, cache.numcached, cache.cachehits, cache.lookups);
-
if (file_ids)
kfree(file_ids, sizeof(int) * num_files);
if (parents)
int outlen;
char *bufptr;
int error;
+ int flags = 0;
/* Caller must be owner of file system. */
vfsp = vfs_statfs(HFSTOVFS(hfsmp));
}
bufptr = (char *)ap->a_data;
cnid = strtoul(bufptr, NULL, 10);
+ if (ap->a_fflag & HFS_GETPATH_VOLUME_RELATIVE) {
+ flags |= BUILDPATH_VOLUME_RELATIVE;
+ }
/* We need to call hfs_vfs_vget to leverage the code that will
* fix the origin list for us if needed, as opposed to calling
if ((error = hfs_vfs_vget(HFSTOVFS(hfsmp), cnid, &file_vp, context))) {
return (error);
}
- error = build_path(file_vp, bufptr, sizeof(pathname_t), &outlen, 0, context);
+ error = build_path(file_vp, bufptr, sizeof(pathname_t), &outlen, flags, context);
vnode_put(file_vp);
return (error);
}
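/*
 * A user-space sketch of the new volume-relative variant: HFS_GETPATH goes
 * through fsctl(2), reusing one MAXPATHLEN buffer that carries the CNID in
 * (as the decimal string strtoul() parses above) and the resolved path out.
 * The fsctl() "options" argument is what arrives here as ap->a_fflag. Assumes
 * HFS_GETPATH and HFS_GETPATH_VOLUME_RELATIVE are visible via
 * <hfs/hfs_fsctl.h>; illustrative only, not a supported interface.
 */
#include <sys/param.h>
#include <sys/fsctl.h>
#include <hfs/hfs_fsctl.h>
#include <stdio.h>

static int
cnid_to_volume_relative_path(const char *volpath, unsigned long cnid,
                             char pathbuf[MAXPATHLEN])
{
	/* The buffer carries the CNID in and the path back out. */
	snprintf(pathbuf, MAXPATHLEN, "%lu", cnid);
	return fsctl(volpath, HFS_GETPATH, pathbuf, HFS_GETPATH_VOLUME_RELATIVE);
}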
+ case HFS_GET_WRITE_GEN_COUNTER:
+ {
+ struct cnode *cp = NULL;
+ int error;
+ u_int32_t *counter = (u_int32_t *)ap->a_data;
+
+ cp = VTOC(vp);
+
+ if (vnode_isdir (vp)) {
+ error = EISDIR;
+ *counter = 0;
+ return error;
+ }
+
+ error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ if (error == 0) {
+ struct ubc_info *uip;
+ int is_mapped = 0;
+
+ if (UBCINFOEXISTS(vp)) {
+ uip = vp->v_ubcinfo;
+ if (uip->ui_flags & UI_ISMAPPED) {
+ is_mapped = 1;
+ }
+ }
+
+
+ if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
+ uint32_t gcount = hfs_get_gencount(cp);
+ //
+ // Even though we return EBUSY for files that are mmap'ed
+ // we also want to bump the value so that the write-gen
+ // counter will always be different once the file is unmapped
+ // (since the file may be unmapped but the pageouts have not
+ // yet happened).
+ //
+ if (is_mapped) {
+ hfs_incr_gencount (cp);
+ gcount = hfs_get_gencount(cp);
+ }
+
+ *counter = gcount;
+
+ }
+ else {
+ /* not a regular file or symlink; silently return a zero counter */
+ *counter = 0;
+ }
+ hfs_unlock (cp);
+
+ if (is_mapped) {
+ error = EBUSY;
+ }
+ }
+
+ return error;
+ }
+
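/*
 * User-space sketch of reading the new counter: a change tracker samples the
 * generation count, and a later, different value means the file's data was
 * modified (or the file was mmapped) in between. Assumes the
 * HFS_GET_WRITE_GEN_COUNTER fsctl constant is visible via <hfs/hfs_fsctl.h>;
 * illustrative only.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/fsctl.h>
#include <hfs/hfs_fsctl.h>

static int
get_write_gen(const char *path, uint32_t *gen)
{
	*gen = 0;
	if (fsctl(path, HFS_GET_WRITE_GEN_COUNTER, gen, 0) == -1)
		return errno;	/* EISDIR for directories, EBUSY while the file is mapped */
	return 0;
}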
case HFS_PREV_LINK:
case HFS_NEXT_LINK:
{
if (!vnode_isvroot(vp)) {
return (EINVAL);
}
- HFS_MOUNT_LOCK(hfsmp, TRUE);
+ hfs_lock_mount(hfsmp);
location = *(u_int32_t *)ap->a_data;
if ((location >= hfsmp->allocLimit) &&
(location != HFS_NO_UPDATE_NEXT_ALLOCATION)) {
}
MarkVCBDirty(hfsmp);
fail_change_next_allocation:
- HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+ hfs_unlock_mount(hfsmp);
return (error);
}
hfsmp->hfs_sparsebandblks = bsdata->bandsize / HFSTOVCB(hfsmp)->blockSize;
hfsmp->hfs_sparsebandblks *= 4;
- vfs_markdependency(hfsmp->hfs_mp);
+ /* The MNTK_VIRTUALDEV bit is checked instead of recording the dependent process via vfs_markdependency() */
/*
* If the sparse image is on a sparse image file (as opposed to a sparse
/* Must have catalog lock excl. to advance the CNID pointer */
lockflags = hfs_systemfile_lock (hfsmp, SFL_CATALOG , HFS_EXCLUSIVE_LOCK);
- HFS_MOUNT_LOCK(hfsmp, TRUE);
-
+ hfs_lock_mount(hfsmp);
+
/* If it is less than the current next CNID, force the wraparound bit to be set */
if (fileid < hfsmp->vcbNxtCNID) {
wraparound=1;
}
MarkVCBDirty(hfsmp);
- HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+ hfs_unlock_mount(hfsmp);
hfs_systemfile_unlock (hfsmp, lockflags);
return (error);
* are enabled by default, so any change will be transient only
* till the volume is remounted.
*/
- if (!is_suser()) {
+ if (!kauth_cred_issuser(kauth_cred_get())) {
return (EPERM);
}
if (state == 0 || state == 1)
}
cp = VTOC(vp);
- error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK);
+ error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
if (error == 0) {
if (enable_static) {
cp->c_flag |= C_SSD_STATIC;
return error;
}
+ case F_SET_GREEDY_MODE: {
+ int error;
+ int enable_greedy_mode = 0;
+ struct cnode *cp = NULL;
+ /*
+ * lock the cnode, decorate the cnode flag, and bail out.
+ * VFS should have already authenticated the caller for us.
+ */
+
+ if (ap->a_data) {
+ /*
+ * Note that even though ap->a_data is of type caddr_t,
+ * the fcntl layer at the syscall handler will pass in NULL
+ * or 1 depending on what the argument supplied to the fcntl
+ * was. So it is in fact correct to check the ap->a_data
+ * argument for zero or non-zero value when deciding whether or not
+ * to enable the greedy mode bit in the cnode.
+ */
+ enable_greedy_mode = 1;
+ }
+ if (hfsmp->hfs_flags & HFS_READ_ONLY) {
+ return EROFS;
+ }
+ cp = VTOC(vp);
+
+ error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ if (error == 0) {
+ if (enable_greedy_mode) {
+ cp->c_flag |= C_SSD_GREEDY_MODE;
+ }
+ else {
+ cp->c_flag &= ~C_SSD_GREEDY_MODE;
+ }
+ hfs_unlock (cp);
+ }
+ return error;
+ }
+
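/*
 * User-space sketch of the new fcntl: any non-zero argument sets
 * C_SSD_GREEDY_MODE on the cnode, zero clears it (see the a_data comment
 * above). Assumes the F_SET_GREEDY_MODE constant is exported to user space in
 * this OS release; illustrative only.
 */
#include <errno.h>
#include <fcntl.h>

static int
set_greedy_mode(int fd, int enable)
{
	if (fcntl(fd, F_SET_GREEDY_MODE, enable ? 1 : 0) == -1)
		return errno;	/* e.g. EROFS on a read-only volume */
	return 0;
}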
+ case F_MAKECOMPRESSED: {
+ int error = 0;
+ uint32_t gen_counter;
+ struct cnode *cp = NULL;
+ int reset_decmp = 0;
+
+ if (hfsmp->hfs_flags & HFS_READ_ONLY) {
+ return EROFS;
+ }
+
+ /*
+ * acquire & lock the cnode.
+ * VFS should have already authenticated the caller for us.
+ */
+
+ if (ap->a_data) {
+ /*
+ * Cast the pointer into a uint32_t so we can extract the
+ * supplied generation counter.
+ */
+ gen_counter = *((uint32_t*)ap->a_data);
+ }
+ else {
+ return EINVAL;
+ }
+
+#if HFS_COMPRESSION
+ cp = VTOC(vp);
+ /* Grab truncate lock first; we may truncate the file */
+ hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+
+ error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ if (error) {
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
+ return error;
+ }
+
+ /* Are there any other usecounts/FDs? */
+ if (vnode_isinuse(vp, 1)) {
+ hfs_unlock(cp);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
+ return EBUSY;
+ }
+
+
+ /* now we have the cnode locked down; Validate arguments */
+ if (cp->c_attr.ca_flags & (UF_IMMUTABLE | UF_COMPRESSED)) {
+ /* EINVAL if you are trying to manipulate an IMMUTABLE file */
+ hfs_unlock(cp);
+ hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
+ return EINVAL;
+ }
+
+ if ((hfs_get_gencount (cp)) == gen_counter) {
+ /*
+ * OK, the gen_counter matched. Go for it:
+ * Toggle state bits, truncate file, and suppress mtime update
+ */
+ reset_decmp = 1;
+ cp->c_bsdflags |= UF_COMPRESSED;
+
+ error = hfs_truncate(vp, 0, IO_NDELAY, 0, (HFS_TRUNCATE_SKIPTIMES), ap->a_context);
+ }
+ else {
+ error = ESTALE;
+ }
+
+ /* Unlock the cnode before calling into decmpfs; it may need to fetch an EA */
+ hfs_unlock(cp);
+
+ /*
+ * Reset the decmp state while still holding the truncate lock. We need to
+ * serialize here against a listxattr on this node which may occur at any
+ * time.
+ *
+ * Even if '0/skiplock' is passed in 2nd argument to hfs_file_is_compressed,
+ * that will still potentially require getting the com.apple.decmpfs EA. If the
+ * EA is required, then we can't hold the cnode lock, because the getxattr call is
+ * generic(through VFS), and can't pass along any info telling it that we're already
+ * holding it (the lock). If we don't serialize, then we risk listxattr stopping
+ * and trying to fill in the hfs_file_is_compressed info during the callback
+ * operation, which will result in deadlock against the b-tree node.
+ *
+ * So, to serialize against listxattr (which will grab buf_t meta references on
+ * the b-tree blocks), we hold the truncate lock as we're manipulating the
+ * decmpfs payload.
+ */
+ if ((reset_decmp) && (error == 0)) {
+ decmpfs_cnode *dp = VTOCMP (vp);
+ if (dp != NULL) {
+ decmpfs_cnode_set_vnode_state(dp, FILE_TYPE_UNKNOWN, 0);
+ }
+
+ /* Initialize the decmpfs node as needed */
+ (void) hfs_file_is_compressed (cp, 0); /* ok to take lock */
+ }
+
+ hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
+
+#endif
+ return error;
+ }
+
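/*
 * Sketch of how the two new pieces combine: a compression tool samples the
 * write generation counter, writes the decmpfs payload, then asks for the
 * UF_COMPRESSED flip with F_MAKECOMPRESSED, which fails with ESTALE if the
 * file changed underneath it. Assumes the fcntl layer forwards the integer
 * argument as the uint32_t this VNOP dereferences, and that the constants are
 * visible to user space; illustrative only.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>

static int
make_compressed_if_unchanged(int fd, uint32_t sampled_gen)
{
	/* sampled_gen was read with HFS_GET_WRITE_GEN_COUNTER before the
	 * com.apple.decmpfs EA was written for this file. */
	if (fcntl(fd, F_MAKECOMPRESSED, sampled_gen) == -1)
		return errno;	/* ESTALE: data changed; EBUSY: other opens; EINVAL: immutable or already compressed */
	return 0;
}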
case F_SETBACKINGSTORE: {
int error = 0;
if (hfsmp->hfs_flags & HFS_READ_ONLY) {
return (EROFS);
}
- error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK);
+ error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
if (error == 0) {
error = hfs_fsync(vp, MNT_WAIT, TRUE, p);
hfs_unlock(VTOC(vp));
if (!vnode_isreg(vp))
return EINVAL;
- error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK);
+ error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
if (error == 0) {
cp = VTOC(vp);
/*
fp = VTOF(vp);
/* Protect against a size change. */
- hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK);
+ hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
#if HFS_COMPRESSION
if (compressed && (uncompressed_size == -1)) {
error = advisory_read(vp, fp->ff_size, ra->ra_offset, ra->ra_count);
}
- hfs_unlock_truncate(VTOC(vp), 0);
+ hfs_unlock_truncate(VTOC(vp), HFS_LOCK_DEFAULT);
return (error);
}
if (hfsmp->hfs_flags & HFS_READ_ONLY) {
return (EROFS);
}
- HFS_MOUNT_LOCK(hfsmp, TRUE);
+ hfs_lock_mount (hfsmp);
bcopy(ap->a_data, &hfsmp->vcbFndrInfo, sizeof(hfsmp->vcbFndrInfo));
- HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+ hfs_unlock_mount (hfsmp);
(void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
break;
case HFS_GET_BOOT_INFO:
if (!vnode_isvroot(vp))
return(EINVAL);
- HFS_MOUNT_LOCK(hfsmp, TRUE);
+ hfs_lock_mount (hfsmp);
bcopy(&hfsmp->vcbFndrInfo, ap->a_data, sizeof(hfsmp->vcbFndrInfo));
- HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+ hfs_unlock_mount(hfsmp);
break;
case HFS_MARK_BOOT_CORRUPT:
* kHFSVolumeInconsistentBit in the volume header. This will
* force fsck_hfs on next mount.
*/
- if (!is_suser()) {
+ if (!kauth_cred_issuser(kauth_cred_get())) {
return EACCES;
}
case HFS_DISABLE_METAZONE: {
/* Only root can disable metadata zone */
- if (!is_suser()) {
+ if (!kauth_cred_issuser(kauth_cred_get())) {
return EACCES;
}
if (hfsmp->hfs_flags & HFS_READ_ONLY) {
if ( !vnode_issystem(vp) && !vnode_islnk(vp) && !vnode_isswap(vp)) {
if (VTOC(vp)->c_lockowner != current_thread()) {
- hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
+ hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
tooklock = 1;
}
}
cp->c_blocks += loanedBlocks;
fp->ff_blocks += loanedBlocks;
- HFS_MOUNT_LOCK(hfsmp, TRUE);
+ hfs_lock_mount (hfsmp);
hfsmp->loanedBlocks += loanedBlocks;
- HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+ hfs_unlock_mount (hfsmp);
hfs_systemfile_unlock(hfsmp, lockflags);
cp->c_flag |= C_MODIFIED;
/* Validate if the start offset is within logical file size */
if (ap->a_foffset >= fp->ff_size) {
- goto exit;
+ goto exit;
}
/*
buf_markstatic(bp);
}
+ /* Mark the buffer for greedy-mode writes if the cnode flag is set */
+ if (VTOC(vp)->c_flag & C_SSD_GREEDY_MODE) {
+ bufattr_markgreedymode((bufattr_t)(&bp->b_attr));
+ }
+
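/*
 * Downstream consumers (e.g. a storage driver's strategy routine) would read
 * the hint back off the buffer; a sketch, assuming a bufattr_greedymode()
 * accessor exists alongside bufattr_markgreedymode() and that buf_attr()
 * returns the buffer's bufattr_t (illustrative only):
 */
static int
io_policy_is_greedy(buf_t bp)
{
	return bufattr_greedymode(buf_attr(bp)) != 0;
}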
#if CONFIG_PROTECT
cnode_t *cp = NULL;
}
int
-do_hfs_truncate(struct vnode *vp, off_t length, int flags, int skipupdate, vfs_context_t context)
+do_hfs_truncate(struct vnode *vp, off_t length, int flags, int truncateflags, vfs_context_t context)
{
register struct cnode *cp = VTOC(vp);
struct filefork *fp = VTOF(vp);
int blksize;
struct hfsmount *hfsmp;
int lockflags;
-
+ int skipupdate = (truncateflags & HFS_TRUNCATE_SKIPUPDATE);
+ int suppress_times = (truncateflags & HFS_TRUNCATE_SKIPTIMES);
+
blksize = VTOVCB(vp)->blockSize;
fileblocks = fp->ff_blocks;
filebytes = (off_t)fileblocks * (off_t)blksize;
if (skipupdate) {
(void) hfs_minorupdate(vp);
}
- else {
+ else {
(void) hfs_update(vp, TRUE);
(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
}
retval = cluster_write(vp, (struct uio *) 0, fp->ff_size, zero_limit,
fp->ff_size, (off_t)0,
(flags & IO_SYNC) | IO_HEADZEROFILL | IO_NOZERODIRTY);
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
if (retval) goto Err_Exit;
/* Merely invalidate the remaining area, if necessary: */
panic("hfs_truncate: invoked on non-UBC object?!");
};
}
- cp->c_touch_modtime = TRUE;
+ if (suppress_times == 0) {
+ cp->c_touch_modtime = TRUE;
+ }
fp->ff_size = length;
} else { /* Shorten the size of the file */
u_int32_t finalblks;
u_int32_t loanedBlocks;
- HFS_MOUNT_LOCK(hfsmp, TRUE);
-
+ hfs_lock_mount(hfsmp);
loanedBlocks = fp->ff_unallocblocks;
cp->c_blocks -= loanedBlocks;
fp->ff_blocks -= loanedBlocks;
cp->c_blocks += loanedBlocks;
fp->ff_blocks += loanedBlocks;
}
- HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+ hfs_unlock_mount (hfsmp);
}
/*
(void) hfs_chkdq(cp, (int64_t)-(savedbytes - filebytes), NOCRED, 0);
#endif /* QUOTA */
}
- /* Only set update flag if the logical length changes */
- if ((off_t)fp->ff_size != length)
+ /*
+ * Only set update flag if the logical length changes & we aren't
+ * suppressing modtime updates.
+ */
+ if (((off_t)fp->ff_size != length) && (suppress_times == 0)) {
cp->c_touch_modtime = TRUE;
+ }
fp->ff_size = length;
}
if (cp->c_mode & (S_ISUID | S_ISGID)) {
}
else {
cp->c_touch_chgtime = TRUE; /* status changed */
- cp->c_touch_modtime = TRUE; /* file data was modified */
+ if (suppress_times == 0) {
+ cp->c_touch_modtime = TRUE; /* file data was modified */
+
+ /*
+ * If we are not suppressing the modtime update, then
+ * update the gen count as well.
+ */
+ if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK (cp->c_attr.ca_mode)) {
+ hfs_incr_gencount(cp);
+ }
+ }
+
retval = hfs_update(vp, MNT_WAIT);
}
if (retval) {
if (fp->ff_unallocblocks > 0) {
u_int32_t loanedBlocks;
- HFS_MOUNT_LOCK(hfsmp, TRUE);
-
+ hfs_lock_mount (hfsmp);
loanedBlocks = fp->ff_unallocblocks;
cp->c_blocks -= loanedBlocks;
fp->ff_blocks -= loanedBlocks;
hfsmp->loanedBlocks -= loanedBlocks;
- HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+ hfs_unlock_mount (hfsmp);
}
return 0;
*/
int
hfs_truncate(struct vnode *vp, off_t length, int flags, int skipsetsize,
- int skipupdate, vfs_context_t context)
+ int truncateflags, vfs_context_t context)
{
struct filefork *fp = VTOF(vp);
off_t filebytes;
filebytes = length;
}
cp->c_flag |= C_FORCEUPDATE;
- error = do_hfs_truncate(vp, filebytes, flags, skipupdate, context);
+ error = do_hfs_truncate(vp, filebytes, flags, truncateflags, context);
if (error)
break;
}
filebytes = length;
}
cp->c_flag |= C_FORCEUPDATE;
- error = do_hfs_truncate(vp, filebytes, flags, skipupdate, context);
+ error = do_hfs_truncate(vp, filebytes, flags, truncateflags, context);
if (error)
break;
}
} else /* Same logical size */ {
- error = do_hfs_truncate(vp, length, flags, skipupdate, context);
+ error = do_hfs_truncate(vp, length, flags, truncateflags, context);
}
/* Files that are changing size are not hot file candidates. */
if (VTOHFS(vp)->hfc_stage == HFC_RECORDING) {
check_for_tracked_file(vp, orig_ctime, ap->a_length == 0 ? NAMESPACE_HANDLER_TRUNCATE_OP|NAMESPACE_HANDLER_DELETE_OP : NAMESPACE_HANDLER_TRUNCATE_OP, NULL);
- hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
+ hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
- if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK))) {
+ if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
goto Err_Exit;
}
hfs_unlock(cp);
ubc_setsize(vp, fp->ff_size);
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
}
}
if (retval == 0)
retval = retval2;
Err_Exit:
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
hfs_unlock(cp);
return (retval);
}
#if CONFIG_PROTECT
if ((error = cp_handle_vnop(vp, CP_READ_ACCESS | CP_WRITE_ACCESS, 0)) != 0) {
+ /*
+ * If we errored here, then this means that one of two things occurred:
+ * 1. there was a problem with the decryption of the key.
+ * 2. the device is locked and we are not allowed to access this particular file.
+ *
+ * Either way, this means that we need to shut down this upl now. As long as
+ * the pl pointer is NULL (meaning that we're supposed to create the UPL ourselves)
+ * then we create a upl and immediately abort it.
+ */
+ if (ap->a_pl == NULL) {
+ /* create the upl */
+ ubc_create_upl (vp, ap->a_f_offset, ap->a_size, &upl, &pl,
+ UPL_UBC_PAGEIN | UPL_RET_ONLY_ABSENT);
+ /* mark the range as needed so it doesn't immediately get discarded upon abort */
+ ubc_upl_range_needed (upl, ap->a_pl_offset / PAGE_SIZE, 1);
+
+ /* Abort the range */
+ ubc_upl_abort_range (upl, 0, ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
+ }
+
+
return error;
}
#endif /* CONFIG_PROTECT */
if (vfs_isforce(vp->v_mount)) {
if (cp->c_flag & C_DELETED) {
/* If we don't get it, then just go ahead and operate without the lock */
- truncate_lock_held = hfs_try_trunclock(cp, HFS_RECURSE_TRUNCLOCK);
+ truncate_lock_held = hfs_try_trunclock(cp, HFS_SHARED_LOCK, HFS_LOCK_SKIP_IF_EXCLUSIVE);
}
}
else {
- hfs_lock_truncate(cp, HFS_RECURSE_TRUNCLOCK);
+ hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_SKIP_IF_EXCLUSIVE);
truncate_lock_held = TRUE;
}
* takes the lock shared, we can deadlock if some other thread
* tries to grab the lock exclusively in between.
*/
- hfs_unlock_truncate(cp, 1);
+ hfs_unlock_truncate(cp, HFS_LOCK_SKIP_IF_EXCLUSIVE);
truncate_lock_held = FALSE;
}
ap->a_pl = upl;
/* When ff_bytesread exceeds 32-bits, update it behind the cnode lock. */
if ((fp->ff_bytesread + bytesread) > 0x00000000ffffffff && cp->c_lockowner != current_thread()) {
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
took_cnode_lock = 1;
}
/*
pagein_done:
if (truncate_lock_held == TRUE) {
/* Note HFS_LOCK_SKIP_IF_EXCLUSIVE is passed to hfs_unlock_truncate to match the lock above */
- hfs_unlock_truncate(cp, 1);
+ hfs_unlock_truncate(cp, HFS_LOCK_SKIP_IF_EXCLUSIVE);
}
return (error);
a_flags = ap->a_flags;
a_pl_offset = ap->a_pl_offset;
+ if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
+ hfs_incr_gencount (cp);
+ }
+
/*
* we can tell if we're getting the new or old behavior from the UPL
*/
* because we may be already holding the truncate lock exclusive to force any other
* IOs to have blocked behind us.
*/
- hfs_lock_truncate(cp, HFS_RECURSE_TRUNCLOCK);
+ hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_SKIP_IF_EXCLUSIVE);
if (a_flags & UPL_MSYNC) {
request_flags = UPL_UBC_MSYNC | UPL_RET_ONLY_DIRTY;
tooklock = 0;
if (cp->c_lockowner != current_thread()) {
- if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK))) {
+ if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
/*
* we're in the v2 path, so we are the
* owner of the UPL... we may have already
int tooklock = 0;
if (cp->c_lockowner != current_thread()) {
- if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK))) {
+ if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
if (!(a_flags & UPL_NOCOMMIT)) {
ubc_upl_abort_range(upl,
a_pl_offset,
cp->c_touch_chgtime = TRUE;
if ((cp->c_mode & (S_ISUID | S_ISGID)) &&
(vfs_context_suser(ap->a_context) != 0)) {
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
cp->c_mode &= ~(S_ISUID | S_ISGID);
hfs_unlock(cp);
}
* being invoked via ubc_msync due to lockdown,
* we should release it recursively, too.
*/
- hfs_unlock_truncate(cp, 1);
+ hfs_unlock_truncate(cp, HFS_LOCK_SKIP_IF_EXCLUSIVE);
}
return (retval);
}
vnodetype = vnode_vtype(vp);
if (vnodetype != VREG) {
- /* Note symlinks are not allowed to be relocated */
+ /* Not allowed to move symlinks. */
return (EPERM);
}
if (blockHint == 0)
blockHint = hfsmp->nextAllocation;
- if ((fp->ff_size > 0x7fffffff)) {
+ if (fp->ff_size > 0x7fffffff) {
return (EFBIG);
}
if (!vnode_issystem(vp) && (vnodetype != VLNK)) {
hfs_unlock(cp);
- hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
+ hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
/* Force lock since callers expects lock to be held. */
- if ((retval = hfs_lock(cp, HFS_FORCE_LOCK))) {
- hfs_unlock_truncate(cp, 0);
+ if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS))) {
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
return (retval);
}
/* No need to continue if file was removed. */
if (cp->c_flag & C_NOEXISTS) {
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
return (ENOENT);
}
took_trunc_lock = 1;
if (hfs_start_transaction(hfsmp) != 0) {
if (took_trunc_lock)
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
return (EINVAL);
}
started_tr = 1;
nextallocsave = hfsmp->nextAllocation;
retval = ExtendFileC(hfsmp, (FCB*)fp, growsize, blockHint, eflags, &newbytes);
if (eflags & kEFMetadataMask) {
- HFS_MOUNT_LOCK(hfsmp, TRUE);
+ hfs_lock_mount(hfsmp);
HFS_UPDATE_NEXT_ALLOCATION(hfsmp, nextallocsave);
MarkVCBDirty(hfsmp);
- HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+ hfs_unlock_mount(hfsmp);
}
retval = MacToVFSError(retval);
retval = ENOSPC;
goto restore;
} else if (fp->ff_blocks < (headblks + datablks)) {
- printf("hfs_relocate: allocation failed");
+ printf("hfs_relocate: allocation failed id=%u, vol=%s\n", cp->c_cnid, hfsmp->vcbVN);
retval = ENOSPC;
goto restore;
}
*/
if (vnodetype == VLNK)
- retval = hfs_clonelink(vp, blksize, cred, p);
+ retval = EPERM;
else if (vnode_issystem(vp))
retval = hfs_clonesysfile(vp, headblks, datablks, blksize, cred, p);
else
goto restore;
out:
if (took_trunc_lock)
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
if (lockflags) {
hfs_systemfile_unlock(hfsmp, lockflags);
restore:
if (fp->ff_blocks == headblks) {
if (took_trunc_lock)
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
goto exit;
}
/*
lockflags = 0;
if (took_trunc_lock)
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
goto exit;
}
-/*
- * Clone a symlink.
- *
- */
-static int
-hfs_clonelink(struct vnode *vp, int blksize, kauth_cred_t cred, __unused struct proc *p)
-{
- struct buf *head_bp = NULL;
- struct buf *tail_bp = NULL;
- int error;
-
-
- error = (int)buf_meta_bread(vp, (daddr64_t)0, blksize, cred, &head_bp);
- if (error)
- goto out;
-
- tail_bp = buf_getblk(vp, (daddr64_t)1, blksize, 0, 0, BLK_META);
- if (tail_bp == NULL) {
- error = EIO;
- goto out;
- }
- bcopy((char *)buf_dataptr(head_bp), (char *)buf_dataptr(tail_bp), blksize);
- error = (int)buf_bwrite(tail_bp);
-out:
- if (head_bp) {
- buf_markinvalid(head_bp);
- buf_brelse(head_bp);
- }
- (void) buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
-
- return (error);
-}
-
/*
* Clone a file's data within the file.
*
#if CONFIG_PROTECT
if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
- hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
+ hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
return (error);
}
#endif /* CONFIG_PROTECT */
if (kmem_alloc(kernel_map, (vm_offset_t *)&bufp, bufsize)) {
- hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
+ hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
return (ENOMEM);
}
}
kmem_free(kernel_map, (vm_offset_t)bufp, bufsize);
- hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
+ hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
return (error);
}