#include <machine/spl.h>
#include <sys/kdebug.h>
+#include <sys/sysctl.h>
#include "hfs.h"
#include "hfs_catalog.h"
/* Global vfs data structures for hfs */
+/* Always do F_FULLFSYNC when fsync(2) is called?  1 = yes, 0 = no (default is 'no', since forcing a full flush on every fsync is expensive) */
+int always_do_fullfsync = 0;
+SYSCTL_INT(_kern, OID_AUTO, always_do_fullfsync, CTLFLAG_RW, &always_do_fullfsync, 0, "always F_FULLFSYNC when fsync is called");
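/*
 * Hedged example (not part of the patch): a userland tool could flip the
 * new kern.always_do_fullfsync knob with sysctlbyname(3).  The sysctl name
 * comes from the SYSCTL_INT above; the helper itself is hypothetical.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

static int
set_always_fullfsync(int enable)
{
	int old = 0;
	size_t oldlen = sizeof(old);

	/* Read the current value and install the new one in a single call. */
	if (sysctlbyname("kern.always_do_fullfsync", &old, &oldlen,
	                 &enable, sizeof(enable)) == -1) {
		perror("sysctlbyname");
		return (-1);
	}
	printf("always_do_fullfsync: %d -> %d\n", old, enable);
	return (0);
}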
extern unsigned long strtoul(const char *, char **, int);
if (hfsmp->hfs_freezing_proc == p && proc_exiting(p)) {
hfsmp->hfs_freezing_proc = NULL;
hfs_global_exclusive_lock_release(hfsmp);
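+ /* Also drop the hfs_insync lock taken when the volume was frozen. */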
+ lck_rw_unlock_exclusive(&hfsmp->hfs_insync);
}
busy = vnode_isinuse(vp, 1);
if (vnode_isvroot(vp)) {
if (hfsmp->hfs_privdir_desc.cd_cnid != 0)
--entries; /* hide private dir */
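+ /* Hide the journal files even when a journaled volume is mounted
+  * read-only and the journal was never opened (hfsmp->jnl is NULL). */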
- if (hfsmp->jnl)
+ if (hfsmp->jnl || ((HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
entries -= 2; /* hide the journal files */
}
VATTR_RETURN(vap, va_nlink, (uint64_t)entries);
from_cp->c_uid = to_cp->c_uid;
from_cp->c_flags = to_cp->c_flags;
from_cp->c_mode = to_cp->c_mode;
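+ /* The catalog record flags travel with the rest of the swapped attributes. */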
+ from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags;
bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);
bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
to_cp->c_uid = tempattr.ca_uid;
to_cp->c_flags = tempattr.ca_flags;
to_cp->c_mode = tempattr.ca_mode;
+ to_cp->c_attr.ca_recflags = tempattr.ca_recflags;
bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);
/* Rehash the cnodes using their new file IDs */
cp->c_touch_acctime = FALSE;
cp->c_touch_chgtime = FALSE;
cp->c_touch_modtime = FALSE;
- } else /* User file */ {
+ } else if ( !(vp->v_flag & VSWAP) ) /* User file */ {
retval = hfs_update(vp, wait);
/* When MNT_WAIT is requested, push out any delayed metadata. */
// If this was a real user-level fsync(), also push out any pending
// transactions that this file might be a part of (and get them
// onto stable storage).
- if (fullsync) {
+ if (fullsync || always_do_fullfsync) {
if (hfsmp->jnl) {
journal_flush(hfsmp->jnl);
} else {
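/*
 * Hedged aside (example only, not part of the patch): on Darwin an
 * application requests the stronger flush with fcntl(F_FULLFSYNC), which
 * drives the `fullsync` path above; with kern.always_do_fullfsync set,
 * the journal is flushed on every plain fsync(2) as well.  The helper
 * below is a hypothetical sketch.
 */
#include <fcntl.h>
#include <unistd.h>

static int
flush_to_stable_storage(int fd)
{
	/* Ask the driver to flush its write cache, not just the file's dirty pages. */
	if (fcntl(fd, F_FULLFSYNC) == -1) {
		/* Fall back to a plain fsync if the fs or device can't do it. */
		return fsync(fd);
	}
	return 0;
}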
__private_extern__ void
replace_desc(struct cnode *cp, struct cat_desc *cdp)
{
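+ /* Nothing to do if the new descriptor is the one already installed. */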
+ if (&cp->c_desc == cdp) {
+ return;
+ }
+
/* First release allocated name buffer */
if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
char *name = cp->c_desc.cd_nameptr;
if (nfs_cookies) {
cnid_hint = (cnid_t)(uio_offset(uio) >> 32);
uio_setoffset(uio, uio_offset(uio) & 0x00000000ffffffffLL);
+ if (cnid_hint == INT_MAX) { /* searching past the last item */
+ eofflag = 1;
+ goto out;
+ }
}
/*
* Synthesize entries for "." and ".."
}
/* Pack the buffer with dirent entries. */
- error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, extended, &items);
+ error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, extended, &items, &eofflag);
hfs_systemfile_unlock(hfsmp, lockflags);
struct componentname cn;
int lockflags;
+ /*
+ * Make sure the cnode lock is held exclusive; if not, upgrade it.
+ *
+ * We assume that we were called from a read-only VNOP (getattr)
+ * and that it's safe to have the cnode lock dropped and reacquired.
+ */
+ if (cp->c_lockowner != current_thread()) {
+ /*
+ * If the upgrade fails we lose the lock and
+ * have to take the exclusive lock on our own.
+ */
+ if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) != 0)
+ lck_rw_lock_exclusive(&cp->c_rwlock);
+ cp->c_lockowner = current_thread();
+ }
+
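/*
 * Hedged aside (example only, not part of the patch): the same
 * upgrade-or-reacquire pattern in userland, where POSIX rwlocks have no
 * try-upgrade primitive, so the shared lock is dropped and the exclusive
 * lock is taken from scratch.  The helper name is hypothetical.
 */
#include <pthread.h>

static void
upgrade_to_exclusive(pthread_rwlock_t *rw)
{
	/* POSIX rwlocks cannot upgrade in place: release, then re-lock. */
	pthread_rwlock_unlock(rw);
	pthread_rwlock_wrlock(rw);
	/* Caller must revalidate any state it read under the shared lock. */
}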
lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
/* Get resource fork data */