- u_int32_t mnt_maxreadcnt; /* Max. byte count for read */
- u_int32_t mnt_maxwritecnt; /* Max. byte count for write */
- u_int32_t mnt_segreadcnt; /* Max. segment count for read */
- u_int32_t mnt_segwritecnt; /* Max. segment count for write */
- u_int32_t mnt_maxsegreadsize; /* Max. segment read size */
- u_int32_t mnt_maxsegwritesize; /* Max. segment write size */
- u_int32_t mnt_devblocksize; /* the underlying device block size */
- lck_rw_t mnt_rwlock; /* mutex readwrite lock */
- lck_mtx_t mnt_renamelock; /* mutex that serializes renames that change shape of tree */
- vnode_t mnt_devvp; /* the device mounted on for local file systems */
- int32_t mnt_crossref; /* refernces to cover lookups crossing into mp */
- int32_t mnt_iterref; /* refernces to cover iterations; drained makes it -ve */
-
- /* XXX 3762912 hack to support HFS filesystem 'owner' */
- uid_t mnt_fsowner;
- gid_t mnt_fsgroup;
+ uint32_t mnt_maxreadcnt; /* Max. byte count for read */
+ uint32_t mnt_maxwritecnt; /* Max. byte count for write */
+ uint32_t mnt_segreadcnt; /* Max. segment count for read */
+ uint32_t mnt_segwritecnt; /* Max. segment count for write */
+ uint32_t mnt_maxsegreadsize; /* Max. segment read size */
+ uint32_t mnt_maxsegwritesize; /* Max. segment write size */
+ uint32_t mnt_alignmentmask; /* Mask of bits that aren't addressable via DMA */
+ uint32_t mnt_devblocksize; /* the underlying device block size */
+ uint32_t mnt_ioqueue_depth; /* the maximum number of commands a device can accept */
+ uint32_t mnt_ioscale; /* scale the various throttles/limits imposed on the amount of I/O in flight */
+ uint32_t mnt_ioflags; /* flags for underlying device */
+ uint32_t mnt_minsaturationbytecount; /* if non-zero, minimum amount of writes (in bytes) needed to max out throughput */
+ pending_io_t mnt_pending_write_size __attribute__((aligned(sizeof(pending_io_t)))); /* byte count of pending writes */
+ pending_io_t mnt_pending_read_size __attribute__((aligned(sizeof(pending_io_t)))); /* byte count of pending reads */
+ struct timeval mnt_last_write_issued_timestamp;
+ struct timeval mnt_last_write_completed_timestamp;
+ int64_t mnt_max_swappin_available;
+
+ lck_rw_t mnt_rwlock; /* mutex readwrite lock */
+ lck_mtx_t mnt_renamelock; /* mutex that serializes renames that change shape of tree */
+ vnode_t mnt_devvp; /* the device mounted on for local file systems */
+ uint32_t mnt_devbsdunit; /* the BSD unit number of the device */
+ uint64_t mnt_throttle_mask; /* the throttle mask of what devices will be affected by I/O from this mnt */
+ void *mnt_throttle_info; /* used by the throttle code */
+ int32_t mnt_crossref; /* references to cover lookups crossing into mp */
+ int32_t mnt_iterref; /* references to cover iterations; drained makes it -ve */
+#if CONFIG_TRIGGERS
+ int32_t mnt_numtriggers; /* num of trigger vnodes for this mount */
+ vfs_trigger_callback_t *mnt_triggercallback;
+ void *mnt_triggerdata;
+#endif
+ /* XXX 3762912 hack to support HFS filesystem 'owner' */
+ uid_t mnt_fsowner;
+ gid_t mnt_fsgroup;
+
+ struct label *mnt_mntlabel; /* MAC mount label */
+ struct label *mnt_fslabel; /* MAC default fs label */
+
+ /*
+ * cache the rootvp of the last mount point
+ * in the chain in the mount struct pointed
+ * to by the vnode sitting in '/'
+ * this cache is used to short-circuit the
+ * mount chain traversal and allows us
+ * to traverse to the true underlying rootvp
+ * in 1 easy step inside of 'cache_lookup_path'
+ *
+ * make sure to validate against the cached vid
+ * in case the rootvp gets stolen away since
+ * we don't take an explicit long term reference
+ * on it when we mount it
+ */
+ vnode_t mnt_realrootvp;
+ uint32_t mnt_realrootvp_vid;
+ /*
+ * bumped each time a mount or unmount
+ * occurs... it's used to invalidate
+ * 'mnt_realrootvp' from the cache
+ */
+ uint32_t mnt_generation;
+ /*
+ * if 'MNTK_AUTH_CACHE_TIMEOUT' is
+ * set, then 'mnt_authcache_ttl' is
+ * the time-to-live for the per-vnode authentication cache
+ * on this mount... if zero, no cache is maintained...
+ * if 'MNTK_AUTH_CACHE_TIMEOUT' isn't set, it's the
+ * time-to-live for the cached lookup right for
+ * volumes marked 'MNTK_AUTH_OPAQUE'.
+ */
+ int mnt_authcache_ttl;
+ char fstypename_override[MFSTYPENAMELEN];
+
+ uint32_t mnt_iobufinuse;
+
+ void *mnt_disk_conditioner_info;
+
+ lck_mtx_t mnt_iter_lock; /* mutex that protects iteration of vnodes */