- struct kqueue *uu_kqueue_bound; /* kqueue we are bound to service */
- unsigned int uu_kqueue_qos_index; /* qos index we are bound to service */
- unsigned int uu_kqueue_flags; /* the flags we are using */
- boolean_t uu_kqueue_override_is_sync; /* sync qos override applied to servicer */
+ lck_spin_t uu_rethrottle_lock; /* locks was_rethrottled and is_throttled */
+ TAILQ_ENTRY(uthread) uu_throttlelist; /* List of uthreads currently throttled */
+ void *uu_throttle_info; /* pointer to throttled I/Os info */
+ int uu_on_throttlelist; /* throttle list (level) this thread is queued on, if any */
+ int uu_lowpri_window; /* nonzero while a low-priority I/O throttle window is in effect */
+ /* These boolean fields are protected by different locks */
+ bool uu_was_rethrottled;
+ bool uu_is_throttled;
+ bool uu_throttle_bc;
+
+ u_int32_t uu_network_marks; /* network control flow marks */
+
+ struct kern_sigaltstack uu_sigstk; /* signal alternate stack */
+ vnode_t uu_vreclaims; /* list of vnodes with deferred reclaims */
+ vnode_t uu_cdir; /* per thread CWD */
+ int uu_dupfd; /* fd in fdesc_open/dupfdopen */
+ int uu_defer_reclaims; /* nonzero while vnode reclaims are being deferred */
+
+ /*
+ * Bound kqueue request. This field is only cleared by the current thread,
+ * hence can be dereferenced safely by the current thread without locks.
+ */
+ struct kqrequest *uu_kqr_bound;
+ TAILQ_ENTRY(uthread) uu_workq_entry; /* linkage on the workqueue's thread lists */
+ mach_vm_offset_t uu_workq_stackaddr; /* base address of this workq thread's stack */
+ mach_port_name_t uu_workq_thport; /* Mach port name for this workq thread */
+ struct uu_workq_policy {
+ uint16_t qos_req : 4; /* requested QoS */
+ uint16_t qos_max : 4; /* current acked max QoS */
+ uint16_t qos_override : 4; /* received async override */
+ uint16_t qos_bucket : 4; /* current acked bucket */
+ } uu_workq_pri;
+ uint8_t uu_workq_flags; /* UT_WORKQ_* state flags */
+ kq_index_t uu_kqueue_override; /* kqueue QoS override applied to this servicer */
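
The comment above uu_kqr_bound documents a single-clearer invariant: other threads may establish the binding (under the kqueue's own locking), but only the bound thread ever clears it, so that thread can safely dereference its own binding without taking a lock. The following is a minimal, self-contained sketch of that pattern, not xnu code; the struct and function names here are hypothetical stand-ins.

#include <stdatomic.h>
#include <stdio.h>

struct kqrequest_sketch { int kqr_qos; };

struct uthread_sketch {
	/* set by other threads under external locking; cleared only by the owner */
	_Atomic(struct kqrequest_sketch *) uu_kqr_bound;
};

/* Owning thread only: lock-free read of its own binding. This is safe
 * because no other thread ever transitions the field back to NULL. */
static struct kqrequest_sketch *
kqr_bound_get(struct uthread_sketch *ut)
{
	return atomic_load_explicit(&ut->uu_kqr_bound, memory_order_acquire);
}

/* Owning thread only: the one place the field goes back to NULL. */
static void
kqr_unbind(struct uthread_sketch *ut)
{
	atomic_store_explicit(&ut->uu_kqr_bound, NULL, memory_order_release);
}

int main(void)
{
	static struct kqrequest_sketch req = { .kqr_qos = 4 };
	struct uthread_sketch ut = { .uu_kqr_bound = &req };

	struct kqrequest_sketch *kqr = kqr_bound_get(&ut);
	if (kqr != NULL)
		printf("bound request qos: %d\n", kqr->kqr_qos);
	kqr_unbind(&ut);
	return 0;
}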
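The new uu_workq_policy packs four 4-bit QoS values into a single 16-bit word. A standalone sketch of that packing is below; the main() driver and the sample tier values are illustrative only and not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct uu_workq_policy {
	uint16_t qos_req : 4;      /* requested QoS */
	uint16_t qos_max : 4;      /* current acked max QoS */
	uint16_t qos_override : 4; /* received async override */
	uint16_t qos_bucket : 4;   /* current acked bucket */
};

int main(void)
{
	struct uu_workq_policy pri = {0};

	/* All four fields share one 16-bit storage unit. */
	assert(sizeof(pri) == sizeof(uint16_t));

	pri.qos_req = 4;      /* e.g. a mid-tier requested QoS */
	pri.qos_override = 5; /* an async override above the request */

	/* The effective tier is the higher of request and override. */
	uint16_t effective = pri.qos_override > pri.qos_req
	    ? pri.qos_override : pri.qos_req;
	printf("effective qos tier: %u\n", (unsigned)effective);
	return 0;
}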