- /* Counters for how many requests we have outstanding. The invariants here:
- * - reqcount == SUM(requests) + (event manager ? 1 : 0)
- * - SUM(ocrequests) + SUM(kevent_requests) + SUM(kevent_ocrequests) <= SUM(requests)
- * - # of constrained requests is difference between quantities above
- * i.e. a kevent+overcommit request will increment reqcount, requests and
- * kevent_ocrequests only.
- */
- uint32_t wq_reqcount;
- uint16_t wq_requests[WORKQUEUE_NUM_BUCKETS];
- uint16_t wq_ocrequests[WORKQUEUE_NUM_BUCKETS];
- uint16_t wq_kevent_requests[WORKQUEUE_NUM_BUCKETS];
- uint16_t wq_kevent_ocrequests[WORKQUEUE_NUM_BUCKETS];
-
- uint16_t wq_reqconc[WORKQUEUE_NUM_BUCKETS]; /* requested concurrency for each priority level */
- uint16_t wq_thscheduled_count[WORKQUEUE_NUM_BUCKETS];
- uint32_t wq_thactive_count[WORKQUEUE_NUM_BUCKETS] __attribute__((aligned(4))); /* must be uint32_t since we OSAddAtomic on these */
- uint64_t wq_lastblocked_ts[WORKQUEUE_NUM_BUCKETS] __attribute__((aligned(8))); /* XXX: why per bucket? */
+ uint32_t wq_reqcount; /* number of elements on the following lists */
+ struct threadreq_head wq_overcommit_reqlist[WORKQUEUE_EVENT_MANAGER_BUCKET];
+ struct threadreq_head wq_reqlist[WORKQUEUE_EVENT_MANAGER_BUCKET];
+ struct threadreq wq_event_manager_threadreq;
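
A minimal sketch of how the new list-based layout might be exercised, assuming struct threadreq_head is a TAILQ_HEAD of struct threadreq linked through a tr_entry field. The bucket count, the TR_FLAG_OVERCOMMIT bit, and the wq_enqueue_threadreq helper below are illustrative assumptions, not declarations from this header:

    #include <stdint.h>
    #include <sys/queue.h>

    #define WORKQUEUE_EVENT_MANAGER_BUCKET 5   /* assumed value: index of the event manager bucket */
    #define TR_FLAG_OVERCOMMIT 0x01            /* hypothetical flag bit marking an overcommit request */

    /* Assumed shape of a thread request; the real struct threadreq may differ. */
    struct threadreq {
        TAILQ_ENTRY(threadreq) tr_entry;       /* linkage on one of the per-bucket lists */
        uint8_t tr_priority;                   /* QoS bucket index, below the event manager bucket */
        uint8_t tr_flags;
    };
    TAILQ_HEAD(threadreq_head, threadreq);

    struct workqueue {
        uint32_t wq_reqcount;                  /* number of elements on the following lists */
        struct threadreq_head wq_overcommit_reqlist[WORKQUEUE_EVENT_MANAGER_BUCKET];
        struct threadreq_head wq_reqlist[WORKQUEUE_EVENT_MANAGER_BUCKET];
        struct threadreq wq_event_manager_threadreq;
    };

    /* Queue a request on the list matching its bucket and overcommit-ness,
     * keeping wq_reqcount equal to the total number of queued requests. */
    static void
    wq_enqueue_threadreq(struct workqueue *wq, struct threadreq *req)
    {
        struct threadreq_head *head;

        if (req->tr_flags & TR_FLAG_OVERCOMMIT) {
            head = &wq->wq_overcommit_reqlist[req->tr_priority];
        } else {
            head = &wq->wq_reqlist[req->tr_priority];
        }
        TAILQ_INSERT_TAIL(head, req, tr_entry);
        wq->wq_reqcount++;
    }

Each list head would need TAILQ_INIT before first use, and the sketch omits the workqueue lock the real code holds around these updates.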