+ case F_FREEZE_FS: {
+ struct mount *mp;
+ task_t task;
+
+ if (!is_suser())
+ return (EACCES);
+
+ mp = vnode_mount(vp);
+ hfsmp = VFSTOHFS(mp);
+
+ if (!(hfsmp->jnl))
+ return (ENOTSUP);
+
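+ // take hfs_insync exclusive so sync() and any other freeze
+ // attempts block until this volume is thawed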
+ lck_rw_lock_exclusive(&hfsmp->hfs_insync);
+
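+ // disable working-set profiling for this task so it won't
+ // try to touch the disk while the volume is frozen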
+ task = current_task();
+ task_working_set_disable(task);
+
+ // flush things before we get started, to try to prevent
+ // dirty data from being paged out while we're frozen.
+ // note: we can't do this after taking the lock, as that
+ // would deadlock against ourselves.
+ vnode_iterate(mp, 0, hfs_freezewrite_callback, NULL);
+ hfs_global_exclusive_lock_acquire(hfsmp);
+ journal_flush(hfsmp->jnl);
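+ // with the global lock held exclusive and the journal flushed,
+ // no new metadata changes can be started against the volume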
+
+ // we don't need to iterate over all vnodes; we just need to
+ // wait for writes to the system files and the device vnode
+ if (HFSTOVCB(hfsmp)->extentsRefNum)
+ vnode_waitforwrites(HFSTOVCB(hfsmp)->extentsRefNum, 0, 0, 0, "hfs freeze");
+ if (HFSTOVCB(hfsmp)->catalogRefNum)
+ vnode_waitforwrites(HFSTOVCB(hfsmp)->catalogRefNum, 0, 0, 0, "hfs freeze");
+ if (HFSTOVCB(hfsmp)->allocationsRefNum)
+ vnode_waitforwrites(HFSTOVCB(hfsmp)->allocationsRefNum, 0, 0, 0, "hfs freeze");
+ if (hfsmp->hfs_attribute_vp)
+ vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs freeze");
+ vnode_waitforwrites(hfsmp->hfs_devvp, 0, 0, 0, "hfs freeze");
+
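+ // record who froze the volume; F_THAW_FS checks this, and
+ // hfs_vnop_close() uses it to thaw if the freezer goes away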
+ hfsmp->hfs_freezing_proc = current_proc();
+
+ return (0);
+ }
+
+ case F_THAW_FS: {
+ if (!is_suser())
+ return (EACCES);
+
+ // if we're not the one who froze the fs then we
+ // can't thaw it.
+ if (hfsmp->hfs_freezing_proc != current_proc()) {
+ return EPERM;
+ }
+
+ // NOTE: if you add code here, also go check the
+ // code that "thaws" the fs in hfs_vnop_close()
+ //
+ hfsmp->hfs_freezing_proc = NULL;
+ hfs_global_exclusive_lock_release(hfsmp);
+ lck_rw_unlock_exclusive(&hfsmp->hfs_insync);
+
+ return (0);
+ }
+
+#define HFSIOC_BULKACCESS _IOW('h', 9, struct access_t)
+#define HFS_BULKACCESS_FSCTL IOCBASECMD(HFSIOC_BULKACCESS)
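+ // HFS_BULKACCESS_FSCTL is the base command of the ioctl, as
+ // it arrives through the fsctl() dispatch path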
+
+ case HFS_BULKACCESS_FSCTL:
+ case HFS_BULKACCESS: {
+ /*
+ * NOTE: on entry, the vnode is locked. In case this vnode
+ * happens to be in our list of file_ids, we note it and
+ * avoid calling hfs_chashget_nowait() on that id, as that
+ * would cause a "locking against myself" panic.
+ */
+ Boolean check_leaf = true;
+
+ struct user_access_t *user_access_structp;
+ struct user_access_t tmp_user_access_t;
+ struct access_cache cache;
+
+ int error = 0, i;
+
+ dev_t dev = VTOC(vp)->c_dev;
+
+ short flags;
+ struct ucred myucred; /* XXX ILLEGAL */
+ int num_files;
+ int *file_ids = NULL;
+ short *access = NULL;
+
+ cnid_t cnid;
+ cnid_t prevParent_cnid = 0;
+ unsigned long myPerms;
+ short myaccess = 0;
+ struct cat_attr cnattr;
+ CatalogKey catkey;
+ struct cnode *skip_cp = VTOC(vp);
+ struct vfs_context my_context;
+
+ /* first, return error if not run as root */
+ if (cred->cr_ruid != 0) {
+ return EPERM;
+ }
+
+ /* initialize the local cache and buffers */
+ cache.numcached = 0;
+ cache.cachehits = 0;
+ cache.lookups = 0;
+
+ file_ids = (int *) get_pathbuff();
+ access = (short *) get_pathbuff();
+ cache.acache = (int *) get_pathbuff();
+ cache.haveaccess = (Boolean *) get_pathbuff();
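+ // path buffers are borrowed as scratch space for the arrays;
+ // the 256-file cap below keeps them within buffer size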
+
+ if (file_ids == NULL || access == NULL || cache.acache == NULL || cache.haveaccess == NULL) {
+ release_pathbuff((char *) file_ids);
+ release_pathbuff((char *) access);
+ release_pathbuff((char *) cache.acache);
+ release_pathbuff((char *) cache.haveaccess);
+
+ return ENOMEM;
+ }
+
+ /* the struct itself was copied in during dispatch; the file_id array must be copied in separately */
+ if (ap->a_data == NULL) {
+ error = EINVAL;
+ goto err_exit_bulk_access;
+ }
+
+ if (is64bit) {
+ user_access_structp = (struct user_access_t *)ap->a_data;
+ }
+ else {
+ struct access_t * accessp = (struct access_t *)ap->a_data;
+ tmp_user_access_t.uid = accessp->uid;
+ tmp_user_access_t.flags = accessp->flags;
+ tmp_user_access_t.num_groups = accessp->num_groups;
+ tmp_user_access_t.num_files = accessp->num_files;
+ tmp_user_access_t.file_ids = CAST_USER_ADDR_T(accessp->file_ids);
+ tmp_user_access_t.groups = CAST_USER_ADDR_T(accessp->groups);
+ tmp_user_access_t.access = CAST_USER_ADDR_T(accessp->access);
+ user_access_structp = &tmp_user_access_t;
+ }
+
+ num_files = user_access_structp->num_files;
+ if (num_files < 1 || num_files > 256) {
+ error = EINVAL;
+ goto err_exit_bulk_access;
+ }
+
+ if ((error = copyin(user_access_structp->file_ids, (caddr_t)file_ids,
+ num_files * sizeof(int)))) {
+ goto err_exit_bulk_access;
+ }
+
+ /* default to a read-access check if no mode bits were passed */
+ flags = user_access_structp->flags;
+ if ((flags & (F_OK | R_OK | W_OK | X_OK)) == 0) {
+ flags = R_OK;
+ }
+
+ /* check if we've been passed leaf node ids or parent ids */
+ if (flags & PARENT_IDS_FLAG) {
+ check_leaf = false;
+ }
+
+ /* fill in the ucred structure for the target user */
+ memset(&myucred, 0, sizeof(myucred));
+ myucred.cr_ref = 1;
+ myucred.cr_uid = myucred.cr_ruid = myucred.cr_svuid = user_access_structp->uid;
+ myucred.cr_ngroups = user_access_structp->num_groups;
+ if (myucred.cr_ngroups < 1 || myucred.cr_ngroups > 16) {
+ myucred.cr_ngroups = 0;
+ } else if ((error = copyin(user_access_structp->groups, (caddr_t)myucred.cr_groups,
+ myucred.cr_ngroups * sizeof(gid_t)))) {
+ goto err_exit_bulk_access;
+ }
+ myucred.cr_rgid = myucred.cr_svgid = myucred.cr_groups[0];
+ myucred.cr_gmuid = myucred.cr_uid;
+
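+ // build a vfs_context for the target user so the authorization
+ // calls below run with that user's credentials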
+ my_context.vc_proc = p;
+ my_context.vc_ucred = &myucred;
+
+ /* Check access to each file_id passed in */
+ for (i = 0; i < num_files; i++) {
+#if 0
+ cnid = (cnid_t) file_ids[i];
+
+ /* root always has access */
+ if (!suser(&myucred, NULL)) {
+ access[i] = 0;
+ continue;
+ }
+
+ if (check_leaf) {
+
+ /* do the lookup (checks the cnode hash, then the catalog) */
+ error = do_attr_lookup(hfsmp, &cache, dev, cnid, skip_cp, &catkey, &cnattr, p);
+ if (error) {
+ access[i] = (short) error;
+ continue;
+ }
+
+ /* before calling CheckAccess(), check the target file for read access */
+ myPerms = DerivePermissionSummary(cnattr.ca_uid, cnattr.ca_gid,
+ cnattr.ca_mode, hfsmp->hfs_mp, &myucred, p );
+
+
+ /* fail fast if no access */
+ if ((myPerms & flags) == 0) {
+ access[i] = EACCES;
+ continue;
+ }
+ } else {
+ /* we were passed an array of parent ids */
+ catkey.hfsPlus.parentID = cnid;
+ }
+
+ /* if the previous file had the same parent and had access, we're done */
+ if (i > 0 && catkey.hfsPlus.parentID == prevParent_cnid && access[i-1] == 0) {
+ cache.cachehits++;
+ access[i] = 0;
+ continue;
+ }
+
+ myaccess = do_access_check(hfsmp, &error, &cache, catkey.hfsPlus.parentID,
+ skip_cp, p, &myucred, dev);
+
+ if ( myaccess ) {
+ access[i] = 0; // have access; no errors to report
+ } else {
+ access[i] = (error != 0 ? (short) error : EACCES);
+ }
+
+ prevParent_cnid = catkey.hfsPlus.parentID;
+#else
+ int myErr;
+
+ cnid = (cnid_t)file_ids[i];
+
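+ // walk from this id up to the root, checking read access on
+ // files and search access on each directory along the way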
+ while (cnid >= kRootDirID) {
+ /* get the vnode for this cnid (note: this reuses the local vp) */
+ myErr = hfs_vget(hfsmp, cnid, &vp, 0);
+ if ( myErr ) {
+ access[i] = EACCES;
+ break;
+ }
+
+ cnid = VTOC(vp)->c_parentcnid;
+
+ hfs_unlock(VTOC(vp));
+ if (vnode_vtype(vp) == VDIR) {
+ myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), &my_context);
+ } else {
+ myErr = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, &my_context);
+ }
+ vnode_put(vp);
+ access[i] = myErr;
+ if (myErr) {
+ break;
+ }
+ }
+#endif
+ }
+
+ /* copyout the access array */
+ if ((error = copyout((caddr_t)access, user_access_structp->access,
+ num_files * sizeof (short)))) {
+ goto err_exit_bulk_access;
+ }
+
+ err_exit_bulk_access:
+
+ //printf("on exit (err %d), numfiles/numcached/cachehits/lookups is %d/%d/%d/%d\n", error, num_files, cache.numcached, cache.cachehits, cache.lookups);
+
+ release_pathbuff((char *) cache.acache);
+ release_pathbuff((char *) cache.haveaccess);
+ release_pathbuff((char *) file_ids);
+ release_pathbuff((char *) access);
+
+ return (error);
+ } /* HFS_BULKACCESS */
+
+ case HFS_SETACLSTATE: {
+ int state;
+
+ if (ap->a_data == NULL) {
+ return (EINVAL);
+ }
+
+ vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+ state = *(int *)ap->a_data;
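+ // a state of 1 enables extended security (ACLs); 0 disables it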
+
+ // the super-user can enable or disable ACLs on a volume;
+ // the volume owner can only enable them.
+ if (!is_suser() && (state == 0 || kauth_cred_getuid(cred) != vfsp->f_owner)) {
+ return (EPERM);
+ }
+ if (state == 0 || state == 1)
+ return hfs_setextendedsecurity(hfsmp, state);
+ else
+ return (EINVAL);
+ }
+
+ case F_FULLFSYNC: {