switch (ap->a_command) {
+ case HFS_RESIZE_PROGRESS: {
+
+ vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+ if (suser(cred, NULL) &&
+ kauth_cred_getuid(cred) != vfsp->f_owner) {
+ return (EACCES); /* must be owner of file system */
+ }
+ if (!vnode_isvroot(vp)) {
+ return (EINVAL);
+ }
+ return hfs_resize_progress(hfsmp, (u_int32_t *)ap->a_data);
+ }
case HFS_RESIZE_VOLUME: {
u_int64_t newsize;
u_int64_t cursize;
dev_t dev = VTOC(vp)->c_dev;
short flags;
- struct ucred myucred; /* XXX ILLEGAL */
+ struct ucred myucred;
int num_files;
int *file_ids = NULL;
short *access = NULL;
struct cnode *skip_cp = VTOC(vp);
struct vfs_context my_context;
+ /* set up front for common exit code */
+ my_context.vc_ucred = NOCRED;
+
/* first, return error if not run as root */
if (cred->cr_ruid != 0) {
return EPERM;
check_leaf = false;
}
+ /*
+ * Create a templated credential; this credential may *NOT*
+ * be used unless instantiated with a kauth_cred_create();
+ * there must be a corresponding kauth_cred_unref() when it
+ * is no longer in use (i.e. before it goes out of scope).
+ */
memset(&myucred, 0, sizeof(myucred));
myucred.cr_ref = 1;
myucred.cr_uid = myucred.cr_ruid = myucred.cr_svuid = user_access_structp->uid;
myucred.cr_gmuid = myucred.cr_uid;
my_context.vc_proc = p;
- my_context.vc_ucred = &myucred;
+ my_context.vc_ucred = kauth_cred_create(&myucred);
/* Check access to each file_id passed in */
for (i = 0; i < num_files; i++) {
cnid = (cnid_t) file_ids[i];
/* root always has access */
- if (!suser(&myucred, NULL)) {
+ if (!suser(my_context.vc_ucred, NULL)) {
access[i] = 0;
continue;
}
/* before calling CheckAccess(), check the target file for read access */
myPerms = DerivePermissionSummary(cnattr.ca_uid, cnattr.ca_gid,
- cnattr.ca_mode, hfsmp->hfs_mp, &myucred, p );
+ cnattr.ca_mode, hfsmp->hfs_mp, my_context.vc_ucred, p );
/* fail fast if no access */
}
myaccess = do_access_check(hfsmp, &error, &cache, catkey.hfsPlus.parentID,
- skip_cp, p, &myucred, dev);
+ skip_cp, p, my_context.vc_ucred, dev);
if ( myaccess ) {
access[i] = 0; // have access.. no errors to report
hfs_unlock(VTOC(vp));
if (vnode_vtype(vp) == VDIR) {
+ /*
+ * XXX This code assumes that none of the
+ * XXX callbacks from vnode_authorize() will
+ * XXX take a persistent ref on the context
+ * XXX credential, which is a bad assumption.
+ */
myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), &my_context);
} else {
myErr = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, &my_context);
release_pathbuff((char *) cache.haveaccess);
release_pathbuff((char *) file_ids);
release_pathbuff((char *) access);
+ /* clean up local context, if needed */
+ if (IS_VALID_CRED(my_context.vc_ucred))
+ kauth_cred_unref(&my_context.vc_ucred);
return (error);
} /* HFS_BULKACCESS */
if (length < filebytes) {
while (filebytes > length) {
- if ((filebytes - length) > HFS_BIGFILE_SIZE) {
+ if ((filebytes - length) > HFS_BIGFILE_SIZE && overflow_extents(fp)) {
filebytes -= HFS_BIGFILE_SIZE;
} else {
filebytes = length;
}
} else if (length > filebytes) {
while (filebytes < length) {
- if ((length - filebytes) > HFS_BIGFILE_SIZE) {
+ if ((length - filebytes) > HFS_BIGFILE_SIZE && overflow_extents(fp)) {
filebytes += HFS_BIGFILE_SIZE;
} else {
filebytes = length;
/* Trap B-Tree writes */
if ((VTOC(vp)->c_fileid == kHFSExtentsFileID) ||
(VTOC(vp)->c_fileid == kHFSCatalogFileID) ||
- (VTOC(vp)->c_fileid == kHFSAttributesFileID)) {
+ (VTOC(vp)->c_fileid == kHFSAttributesFileID) ||
+ (vp == VTOHFS(vp)->hfc_filevp)) {
/*
* Swap and validate the node if it is in native byte order.
lockflags = 0;
}
- // See comment up above about calls to hfs_fsync()
- //
- //if (retval == 0)
- // retval = hfs_fsync(vp, MNT_WAIT, 0, p);
+ /* Push cnode's new extent data to disk. */
+ if (retval == 0) {
+ (void) hfs_update(vp, MNT_WAIT);
+ }
if (hfsmp->jnl) {
if (cp->c_cnid < kHFSFirstUserCatalogNodeID)
filesize = VTOF(vp)->ff_blocks * blksize; /* virtual file size */
writebase = blkstart * blksize;
copysize = blkcnt * blksize;
- iosize = bufsize = MIN(copysize, 4096 * 16);
+ iosize = bufsize = MIN(copysize, 128 * 1024);
offset = 0;
if (kmem_alloc(kernel_map, (vm_offset_t *)&bufp, bufsize)) {