/*
- * Copyright (c) 2002-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2002-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
};
typedef struct filefork filefork_t;
+
+#define HFS_TEMPLOOKUP_NAMELEN 32
+
+/*
+ * Catalog Lookup struct (runtime)
+ *
+ * This is used so that when we need to malloc a container for a catalog
+ * lookup operation, we can acquire memory for everything in one fell swoop
+ * as opposed to putting many of these objects on the stack. The cat_fork
+ * data structure can take up 100+ bytes easily, and that can add to stack
+ * overhead.
+ *
+ * As a result, we use this to easily pass around the memory needed for a
+ * lookup operation.
+ */
+struct cat_lookup_buffer {
+ struct cat_desc lookup_desc;
+ struct cat_attr lookup_attr;
+ struct filefork lookup_fork;
+ struct componentname lookup_cn;
+ char lookup_name[HFS_TEMPLOOKUP_NAMELEN]; /* for open-unlinked paths only */
+};
+
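+/*
+ * Illustrative sketch (assumption, not prescribed by this header): a caller is
+ * expected to heap-allocate one cat_lookup_buffer and hand its embedded members
+ * to the catalog layer rather than placing a cat_desc, cat_attr and filefork on
+ * the stack.  The variable name below is hypothetical:
+ *
+ *     struct cat_lookup_buffer *cl_bufp = NULL;
+ *
+ *     MALLOC(cl_bufp, struct cat_lookup_buffer *, sizeof(*cl_bufp), M_TEMP, M_WAITOK);
+ *     bzero(cl_bufp, sizeof(*cl_bufp));
+ *     // ... perform the lookup using &cl_bufp->lookup_desc, &cl_bufp->lookup_attr
+ *     //     and &cl_bufp->lookup_fork; lookup_name backs lookup_cn when needed ...
+ *     FREE(cl_bufp, M_TEMP);
+ */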
+
/* Aliases for common fields */
#define ff_size ff_data.cf_size
#define ff_new_size ff_data.cf_new_size
#define c_ctime c_attr.ca_ctime
#define c_itime c_attr.ca_itime
#define c_btime c_attr.ca_btime
-#define c_flags c_attr.ca_flags
+#define c_bsdflags c_attr.ca_flags
#define c_finderinfo c_attr.ca_finderinfo
#define c_blocks c_attr.ca_union2.cau_blocks
#define c_entries c_attr.ca_union2.cau_entries
#define C_FORCEUPDATE 0x00100 /* force the catalog entry update */
#define C_HASXATTRS 0x00200 /* cnode has extended attributes */
#define C_NEG_ENTRIES 0x00400 /* directory has negative name entries */
-#define C_SWAPINPROGRESS 0x00800 /* cnode's data is about to be swapped. Issue synchronous cluster io */
+/*
+ * For C_SSD_STATIC: SSDs may want to handle the file payload data differently,
+ * knowing that the content is not likely to be modified. This is purely advisory
+ * at the HFS level, and is not maintained after the cnode goes out of core.
+ */
+#define C_SSD_STATIC 0x00800 /* Assume future writes contain static content */
#define C_NEED_DATA_SETSIZE 0x01000 /* Do a ubc_setsize(0) on c_rsrc_vp after the unlock */
#define C_NEED_RSRC_SETSIZE 0x02000 /* Do a ubc_setsize(0) on c_vp after the unlock */
#define C_RENAMED 0x10000 /* cnode was deleted as part of rename; C_DELETED should also be set */
#define C_NEEDS_DATEADDED 0x20000 /* cnode needs date-added written to the finderinfo bit */
#define C_BACKINGSTORE 0x40000 /* cnode is a backing store for an existing or currently-mounting filesystem */
+#define C_SWAPINPROGRESS 0x80000 /* cnode's data is about to be swapped. Issue synchronous cluster io */
+
+/*
+ * For C_SSD_GREEDY_MODE: SSDs may want to write the file payload data in greedy mode,
+ * knowing that the content needs to reach the disk more quickly than normal, at the
+ * expense of storage efficiency. This is purely advisory at the HFS level, and is
+ * not maintained after the cnode goes out of core.
+ */
+#define C_SSD_GREEDY_MODE 0x100000 /* Assume future writes are recommended to be written in SLC mode */
+
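+/*
+ * Illustrative sketch (assumption, not part of this change): code that honors a
+ * caller's hint would set or test these advisory bits in the cnode's runtime flag
+ * word under the cnode lock; they are dropped once the cnode goes out of core:
+ *
+ *     if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
+ *         cp->c_flag |= C_SSD_STATIC;            // advisory hint only
+ *         if (cp->c_flag & C_SSD_GREEDY_MODE) {
+ *             // lower layers may prefer SLC-style writes for this file
+ *         }
+ *         hfs_unlock(cp);
+ *     }
+ */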
#define ZFTIMELIMIT (5 * 60)
/*
extern void hfs_write_dateadded (struct cat_attr *cattrp, u_int32_t dateadded);
extern u_int32_t hfs_get_dateadded (struct cnode *cp);
+/* Gen counter methods */
+extern void hfs_write_gencount(struct cat_attr *cattrp, uint32_t gencount);
+extern uint32_t hfs_get_gencount(struct cnode *cp);
+extern uint32_t hfs_get_gencount_from_blob (const uint8_t *finfoblob, mode_t mode);
+extern uint32_t hfs_incr_gencount (struct cnode *cp);
+
+/* Document id methods */
+extern uint32_t hfs_get_document_id(struct cnode * /* cp */);
+extern uint32_t hfs_get_document_id_from_blob(const uint8_t * /* finderinfo */, mode_t /* mode */);
+
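+/*
+ * Illustrative sketch (assumption): the generation counter and document id are
+ * carried in the extended Finder info of the cat_attr, so a typical caller bumps
+ * the gencount on a content-modifying operation and reads either value back from
+ * the cnode (or from a raw Finder-info blob) later:
+ *
+ *     uint32_t gen = hfs_incr_gencount(cp);        // bump on content change
+ *     uint32_t doc = hfs_get_document_id(cp);
+ *     ...
+ *     if (hfs_get_gencount(cp) != gen) {
+ *         // the file was modified again after we bumped the counter
+ *     }
+ */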
/* Zero-fill file and push regions out to disk */
extern int hfs_filedone(struct vnode *vp, vfs_context_t context);
extern void hfs_chashwakeup(struct hfsmount *hfsmp, struct cnode *cp, int flags);
extern void hfs_chash_mark_in_transit(struct hfsmount *hfsmp, struct cnode *cp);
-extern struct vnode * hfs_chash_getvnode(struct hfsmount *hfsmp, ino_t inum, int wantrsrc,
- int skiplock, int allow_deleted);
+extern struct vnode * hfs_chash_getvnode(struct hfsmount *hfsmp, ino_t inum, int wantrsrc,
+ int skiplock, int allow_deleted);
extern struct cnode * hfs_chash_getcnode(struct hfsmount *hfsmp, ino_t inum, struct vnode **vpp,
int wantrsrc, int skiplock, int *out_flags, int *hflags);
-extern int hfs_chash_snoop(struct hfsmount *, ino_t, int (*)(const struct cat_desc *,
+extern int hfs_chash_snoop(struct hfsmount *, ino_t, int, int (*)(const struct cat_desc *,
const struct cat_attr *, void *), void *);
extern int hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp,
cnid_t cnid, struct cat_attr *cattr, int *error);
* HFS Locking Order:
*
* 1. cnode truncate lock (if needed)
+ * hfs_vnop_pagein/out can skip grabbing this lock by passing the
+ * HFS_LOCK_SKIP_IF_EXCLUSIVE flag option if the truncate lock is already
+ * held exclusive by the current thread from an earlier vnop.
* 2. cnode lock (in parent-child order if related, otherwise by address order)
* 3. journal (if needed)
* 4. system files (as needed)
* E. Overflow Extents B-tree file (always exclusive, supports recursion)
* 5. hfs mount point (always last)
*
+ *
+ * I. HFS cnode hash lock (must not acquire any new locks while holding this lock, always taken last)
*/
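+/*
+ * Illustrative sketch (assumption, not normative): a vnop that needs both the
+ * truncate lock and the cnode lock takes them in the order above and releases
+ * them in reverse, e.g.:
+ *
+ *     hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);        // 1.
+ *     if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) { // 2.
+ *         hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
+ *         return (error);
+ *     }
+ *     // ... journal transaction (3.), system files (4.), hfs mount point (5.) ...
+ *     hfs_unlock(cp);
+ *     hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
+ */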
-enum hfslocktype {HFS_SHARED_LOCK = 1, HFS_EXCLUSIVE_LOCK = 2, HFS_FORCE_LOCK = 3, HFS_RECURSE_TRUNCLOCK = 4};
+
+
+enum hfs_locktype {
+ HFS_SHARED_LOCK = 1,
+ HFS_EXCLUSIVE_LOCK = 2
+};
+
+/* Option flags for cnode and truncate lock functions */
+enum hfs_lockflags {
+ HFS_LOCK_DEFAULT = 0x0, /* Default flag, no options provided */
+ HFS_LOCK_ALLOW_NOEXISTS = 0x1, /* Allow locking of all cnodes, including cnodes marked deleted with no catalog entry */
+ HFS_LOCK_SKIP_IF_EXCLUSIVE = 0x2 /* Skip locking if the current thread already holds the lock exclusive */
+};
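+/*
+ * Illustrative sketch (assumption): a pagein/pageout style path that may already
+ * hold the truncate lock exclusive from an earlier vnop passes
+ * HFS_LOCK_SKIP_IF_EXCLUSIVE so the shared acquisition is skipped when this
+ * thread already owns the lock, and passes the same flag back on unlock:
+ *
+ *     hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_SKIP_IF_EXCLUSIVE);
+ *     // ... issue the paging I/O ...
+ *     hfs_unlock_truncate(cp, HFS_LOCK_SKIP_IF_EXCLUSIVE);
+ */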
#define HFS_SHARED_OWNER (void *)0xffffffff
-int hfs_lock(struct cnode *, enum hfslocktype);
-int hfs_lockpair(struct cnode *, struct cnode *, enum hfslocktype);
+int hfs_lock(struct cnode *, enum hfs_locktype, enum hfs_lockflags);
+int hfs_lockpair(struct cnode *, struct cnode *, enum hfs_locktype);
int hfs_lockfour(struct cnode *, struct cnode *, struct cnode *, struct cnode *,
- enum hfslocktype, struct cnode **);
+ enum hfs_locktype, struct cnode **);
void hfs_unlock(struct cnode *);
void hfs_unlockpair(struct cnode *, struct cnode *);
void hfs_unlockfour(struct cnode *, struct cnode *, struct cnode *, struct cnode *);
-void hfs_lock_truncate(struct cnode *, enum hfslocktype);
-void hfs_unlock_truncate(struct cnode *, int been_recursed);
-
-int hfs_try_trunclock(struct cnode *, enum hfslocktype);
+void hfs_lock_truncate(struct cnode *, enum hfs_locktype, enum hfs_lockflags);
+void hfs_unlock_truncate(struct cnode *, enum hfs_lockflags);
+int hfs_try_trunclock(struct cnode *, enum hfs_locktype, enum hfs_lockflags);
#endif /* __APPLE_API_PRIVATE */
#endif /* KERNEL */