/*
 * Copyright (c) 2014 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _WORKQUEUE_INTERNAL_H_
#define _WORKQUEUE_INTERNAL_H_

// Sometimes something gets passed a bucket number and we need a way to express
// that it's actually the event manager.  Use the (0)th bucket for that.
#define WORKQ_THREAD_QOS_MIN        (THREAD_QOS_MAINTENANCE)
#define WORKQ_THREAD_QOS_MAX        (THREAD_QOS_LAST)
#define WORKQ_THREAD_QOS_CLEANUP    (THREAD_QOS_LEGACY)
#define WORKQ_THREAD_QOS_ABOVEUI    (THREAD_QOS_LAST)
#define WORKQ_THREAD_QOS_MANAGER    (THREAD_QOS_LAST + 1) // outside of MIN/MAX

#define WORKQ_NUM_QOS_BUCKETS       (WORKQ_THREAD_QOS_MAX - 1)  // MT/BG shared
#define WORKQ_NUM_BUCKETS           (WORKQ_NUM_QOS_BUCKETS + 1) // + mgr

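/*
 * Illustrative sketch (not part of the original header): because
 * THREAD_QOS_MAINTENANCE and THREAD_QOS_BACKGROUND share a bucket, there are
 * THREAD_QOS_LAST - 1 QoS buckets, plus one extra bucket for the event
 * manager.  Assuming the THREAD_QOS_* values are contiguous starting at
 * THREAD_QOS_MAINTENANCE == 1, a QoS-to-bucket mapping could look roughly
 * like the hypothetical helper below (the real mapping lives in the
 * workqueue implementation, not in this header).
 */
#if 0 /* example only -- not compiled */
static inline unsigned int
example_wq_bucket(thread_qos_t qos)
{
	if (qos == WORKQ_THREAD_QOS_MANAGER) {
		/* the manager gets the extra (last) bucket */
		return WORKQ_NUM_QOS_BUCKETS;
	}
	if (qos == THREAD_QOS_MAINTENANCE) {
		/* MT shares bucket 0 with BG, hence the "- 1" above */
		return 0;
	}
	return (unsigned int)qos - 2;
}
#endif
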
/* These definitions are only available to the kext, to avoid bleeding
 * constants and types across the boundary to the userspace library.
 */
#ifdef KERNEL
#pragma mark wq structs

/* These defines come from kern/thread.h but are XNU_KERNEL_PRIVATE so do not get
 * exported to kernel extensions.
 */
#define SCHED_CALL_BLOCK     0x1
#define SCHED_CALL_UNBLOCK   0x2

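/*
 * Illustrative sketch (not part of the original header): these values are the
 * `type` argument delivered to a scheduler callback when a workqueue thread
 * blocks or unblocks.  Assuming the sched_call_t callback shape from
 * kern/thread.h (void (*)(int, thread_t)), a callback skeleton might look
 * like this hypothetical example:
 */
#if 0 /* example only -- not compiled */
static void
example_sched_callback(int type, thread_t thread)
{
	(void)thread;
	switch (type) {
	case SCHED_CALL_BLOCK:
		/* a workqueue thread just blocked; maybe request more threads */
		break;
	case SCHED_CALL_UNBLOCK:
		/* a workqueue thread became runnable again */
		break;
	}
}
#endif
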
/* old workq priority scheme */

#define WORKQUEUE_HIGH_PRIOQUEUE     0  /* high priority queue */
#define WORKQUEUE_DEFAULT_PRIOQUEUE  1  /* default priority queue */
#define WORKQUEUE_LOW_PRIOQUEUE      2  /* low priority queue */
#define WORKQUEUE_BG_PRIOQUEUE       3  /* background priority queue */

/* wq_max_constrained_threads = max(64, N_CPU * WORKQUEUE_CONSTRAINED_FACTOR)
 * This used to be WORKQ_NUM_BUCKETS + 1 when NUM_BUCKETS was 4, yielding
 * N_CPU * 5. When NUM_BUCKETS changed, we decided that the limit should
 * not change. So the factor is now always 5.
 */
#define WORKQUEUE_CONSTRAINED_FACTOR 5

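/*
 * Illustrative sketch (not part of the original header): the formula from the
 * comment above, written out.  `ncpu` is a placeholder for the CPU count the
 * workqueue implementation actually uses; the helper name is hypothetical.
 */
#if 0 /* example only -- not compiled */
static uint32_t
example_constrained_limit(uint32_t ncpu)
{
	uint32_t limit = ncpu * WORKQUEUE_CONSTRAINED_FACTOR;
	return (limit > 64) ? limit : 64; /* max(64, N_CPU * 5) */
}
#endif
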
#if BSD_KERNEL_PRIVATE
#include <kern/mpsc_queue.h>
#include <kern/priority_queue.h>
#include <kern/thread_call.h>
#include <kern/turnstile.h>
#include <mach/kern_return.h>
#include <sys/queue.h>
#include <sys/kernel_types.h>

/* struct uthread::uu_workq_flags */
#define UT_WORKQ_NEW            0x01 /* First return to userspace */
#define UT_WORKQ_RUNNING        0x02 /* On thrunlist, not parked. */
#define UT_WORKQ_DYING          0x04 /* Thread is being killed */
#define UT_WORKQ_OVERCOMMIT     0x08 /* Overcommit thread. */
#define UT_WORKQ_OUTSIDE_QOS    0x10 /* Thread should avoid sending QoS changes to the kernel */
#define UT_WORKQ_IDLE_CLEANUP   0x20 /* Thread is removing its voucher or stack */
#define UT_WORKQ_EARLY_BOUND    0x40 /* Thread has been bound early */
#define UT_WORKQ_CPUPERCENT     0x80 /* Thread has CPU percent policy active */

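/*
 * Illustrative sketch (not part of the original header): these are bit flags
 * kept in struct uthread::uu_workq_flags.  For example, a freshly set up
 * overcommit thread could carry UT_WORKQ_NEW | UT_WORKQ_OVERCOMMIT until its
 * first return to userspace clears UT_WORKQ_NEW.  The accessor below is a
 * hypothetical example, not an existing helper.
 */
#if 0 /* example only -- not compiled */
static inline bool
example_uth_is_overcommit(const struct uthread *uth)
{
	return (uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) != 0;
}
#endif
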
typedef union workq_threadreq_param_s {
	struct {
		uint16_t trp_flags;
		uint8_t trp_pri;
		uint8_t trp_pol;
		uint32_t trp_cpupercent: 8,
		    trp_refillms: 24;
	};
	uint64_t trp_value;
} workq_threadreq_param_t;

#define TRP_PRIORITY    0x1
#define TRP_POLICY      0x2
#define TRP_CPUPERCENT  0x4
#define TRP_RELEASED    0x8000
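
/*
 * Illustrative sketch (not part of the original header): the union above
 * packs the per-request scheduling parameters into a single 64-bit trp_value,
 * with trp_flags recording which fields are meaningful.  A hypothetical
 * encoding of a priority/policy pair could look like this:
 */
#if 0 /* example only -- not compiled */
static inline uint64_t
example_encode_trp(uint8_t pri, uint8_t pol)
{
	workq_threadreq_param_t trp = { .trp_value = 0 };
	trp.trp_flags = TRP_PRIORITY | TRP_POLICY;
	trp.trp_pri   = pri;
	trp.trp_pol   = pol;
	return trp.trp_value; /* travels as one word */
}
#endif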
/*!
 * @enum workq_tr_state_t
 *
 * @brief
 * This enum represents the state of a workq thread request.
 *
 * @discussion
 * The states are used and set by both kevent and the workq subsystem under very
 * precise locking domains.
 *
 * For kevent requests, this structure is embedded in the kqueue itself;
 * for non-kevent thread requests, it is allocated separately.
 *
 * Only the BINDING state isn't set under the kqlock; while it is set, the
 * kqueue side can only observe QUEUED in its stead.
 *
 * @const WORKQ_TR_STATE_IDLE
 * This thread request is idle.
 * The state is only transient for non-kevent thread requests.
 * Set under the kqlock (kevent) or after allocation (workq).
 *
 * tr_entry/tr_thread are unused.
 *
 * @const WORKQ_TR_STATE_NEW
 * This thread request is being initialized. This state is transient.
 * Set under the workq lock for all kinds, and also under the kqlock for
 * kevent requests.
 *
 * tr_entry is initialized, tr_thread is unused.
 *
 * @const WORKQ_TR_STATE_QUEUED
 * This thread request has been pended, waiting for a thread to be bound.
 * Set under the workq lock for all kinds, and also under the kqlock for
 * kevent requests.
 *
 * tr_entry is used as linkage in a workq priority queue, tr_thread is unused.
 *
 * @const WORKQ_TR_STATE_CANCELED
 * When the process exits, queued thread requests are marked canceled.
 * This happens under the workqueue lock.
 *
 * @const WORKQ_TR_STATE_BINDING (kevent only)
 * A thread was found to bind to the thread request.
 * The bind is preposted this way under the workq lock and will be
 * acknowledged by the kevent subsystem.
 *
 * tr_entry is unused, tr_thread is the thread we're binding to.
 *
 * @const WORKQ_TR_STATE_BOUND (kevent only)
 * A thread bind has been acknowledged by the kevent subsystem.
 * This is always set under the kqlock, sometimes also under the workq lock.
 *
 * tr_entry is unused, tr_thread is the thread we're bound to.
 */
__enum_decl(workq_tr_state_t, uint8_t, {
	WORKQ_TR_STATE_IDLE      = 0, /* request isn't in flight       */
	WORKQ_TR_STATE_NEW       = 1, /* request is being initiated    */
	WORKQ_TR_STATE_QUEUED    = 2, /* request is being queued       */
	WORKQ_TR_STATE_CANCELED  = 3, /* request is canceled           */
	WORKQ_TR_STATE_BINDING   = 4, /* request is preposted for bind */
	WORKQ_TR_STATE_BOUND     = 5, /* request is bound to a thread  */
});

__options_decl(workq_tr_flags_t, uint8_t, {
	WORKQ_TR_FLAG_KEVENT         = 0x01,
	WORKQ_TR_FLAG_WORKLOOP       = 0x02,
	WORKQ_TR_FLAG_OVERCOMMIT     = 0x04,
	WORKQ_TR_FLAG_WL_PARAMS      = 0x08,
	WORKQ_TR_FLAG_WL_OUTSIDE_QOS = 0x10,
});

typedef struct workq_threadreq_s {
	union {
		struct priority_queue_entry tr_entry;
		thread_t tr_thread;
	};
	uint16_t          tr_count;
	workq_tr_flags_t  tr_flags;
	workq_tr_state_t  tr_state;
	thread_qos_t      tr_qos;               /* qos for the thread request */

	/* kqueue states, modified under the kqlock */
	kq_index_t        tr_kq_override_index; /* highest wakeup override index */
	kq_index_t        tr_kq_qos_index;      /* QoS for the servicer */
	bool              tr_kq_wakeup;         /* an event has fired */
} workq_threadreq_s, *workq_threadreq_t;

TAILQ_HEAD(threadreq_head, workq_threadreq_s);

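/*
 * Illustrative sketch (not part of the original header): per the state
 * documentation above, the tr_entry/tr_thread union is interpreted based on
 * tr_state -- tr_entry is only meaningful up through QUEUED, and tr_thread
 * only once the request reaches BINDING or BOUND.  A hypothetical accessor
 * respecting that contract:
 */
#if 0 /* example only -- not compiled */
static inline thread_t
example_tr_bound_thread(const struct workq_threadreq_s *req)
{
	if (req->tr_state == WORKQ_TR_STATE_BINDING ||
	    req->tr_state == WORKQ_TR_STATE_BOUND) {
		return req->tr_thread;
	}
	return THREAD_NULL;
}
#endif
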
#if defined(__LP64__)
typedef unsigned __int128 wq_thactive_t;
#else
typedef uint64_t wq_thactive_t;
#endif

__options_decl(workq_state_flags_t, uint32_t, {
	WQ_EXITING                  = 0x0001,
	WQ_PROC_SUSPENDED           = 0x0002,
	WQ_DEATH_CALL_SCHEDULED     = 0x0004,

	WQ_DELAYED_CALL_SCHEDULED   = 0x0010,
	WQ_DELAYED_CALL_PENDED      = 0x0020,
	WQ_IMMEDIATE_CALL_SCHEDULED = 0x0040,
	WQ_IMMEDIATE_CALL_PENDED    = 0x0080,
});

TAILQ_HEAD(workq_uthread_head, uthread);

struct workqueue {
	thread_call_t   wq_delayed_call;
	thread_call_t   wq_immediate_call;
	thread_call_t   wq_death_call;

	union {
		struct turnstile *wq_turnstile;
		struct mpsc_queue_chain wq_destroy_link;
	};

	lck_spin_t      wq_lock;

	uint64_t        wq_thread_call_last_run;
	struct os_refcnt wq_refcnt;
	workq_state_flags_t _Atomic wq_flags;
	uint32_t        wq_fulfilled;
	uint32_t        wq_creations;
	uint32_t        wq_timer_interval;
	uint32_t        wq_event_manager_priority;
	uint32_t        wq_reqcount;  /* number of elements on the wq_*_reqlists */
	uint16_t        wq_thdying_count;
	uint16_t        wq_threads_scheduled;
	uint16_t        wq_constrained_threads_scheduled;
	uint16_t        wq_nthreads;
	uint16_t        wq_thidlecount;
	uint16_t        wq_thscheduled_count[WORKQ_NUM_BUCKETS]; // incl. manager

	_Atomic wq_thactive_t wq_thactive;
	_Atomic uint64_t wq_lastblocked_ts[WORKQ_NUM_QOS_BUCKETS];

	struct proc    *wq_proc;
	struct uthread *wq_creator;
	turnstile_inheritor_t wq_inheritor;
	thread_t        wq_turnstile_updater; // thread doing a turnstile_update_inheritor
	struct workq_uthread_head wq_thrunlist;
	struct workq_uthread_head wq_thnewlist;
	struct workq_uthread_head wq_thidlelist;

	struct priority_queue wq_overcommit_queue;
	struct priority_queue wq_constrained_queue;
	struct priority_queue wq_special_queue;
	workq_threadreq_t wq_event_manager_threadreq;
};

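/*
 * Illustrative sketch (not part of the original header): wq_flags is declared
 * _Atomic, so simple state checks can be done with an atomic load rather than
 * under wq_lock.  A hypothetical "is this workqueue going away?" check,
 * assuming the os_atomic_load() wrapper from <machine/atomic.h>:
 */
#if 0 /* example only -- not compiled */
static inline bool
example_wq_exiting(struct workqueue *wq)
{
	return (os_atomic_load(&wq->wq_flags, relaxed) & WQ_EXITING) != 0;
}
#endif
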
#define WORKQUEUE_MAXTHREADS         512
#define WQ_STALLED_WINDOW_USECS      200
#define WQ_REDUCE_POOL_WINDOW_USECS  5000000
#define WQ_MAX_TIMER_INTERVAL_USECS  50000

#pragma mark definitions

struct workq_threadreq_s;
uint32_t _get_pwq_state_kdp(proc_t p);

void workq_exit(struct proc *p);
void workq_mark_exiting(struct proc *p);

bool workq_is_exiting(struct proc *p);

void workq_thread_set_max_qos(struct proc *p, struct workq_threadreq_s *kqr);

void workq_thread_terminate(struct proc *p, struct uthread *uth);

__options_decl(workq_kern_threadreq_flags_t, uint32_t, {
	WORKQ_THREADREQ_NONE                = 0x00,
	WORKQ_THREADREQ_SET_AST_ON_FAILURE  = 0x01,
	WORKQ_THREADREQ_ATTEMPT_REBIND      = 0x02,
	WORKQ_THREADREQ_CAN_CREATE_THREADS  = 0x04,
	WORKQ_THREADREQ_MAKE_OVERCOMMIT     = 0x08,
});

// called with the kq req lock held
bool workq_kern_threadreq_initiate(struct proc *p, struct workq_threadreq_s *kqr,
    struct turnstile *ts, thread_qos_t qos, workq_kern_threadreq_flags_t flags);

// called with the kq req lock held
void workq_kern_threadreq_modify(struct proc *p, struct workq_threadreq_s *kqr,
    thread_qos_t qos, workq_kern_threadreq_flags_t flags);

// called with the kq req lock held
void workq_kern_threadreq_update_inheritor(struct proc *p, struct workq_threadreq_s *kqr,
    thread_t owner, struct turnstile *ts, turnstile_update_flags_t flags);

void workq_kern_threadreq_lock(struct proc *p);
void workq_kern_threadreq_unlock(struct proc *p);

void workq_kern_threadreq_redrive(struct proc *p, workq_kern_threadreq_flags_t flags);

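/*
 * Illustrative sketch (not part of the original header): the three calls
 * above must be made with the kq req lock held, which callers can take via
 * workq_kern_threadreq_lock()/workq_kern_threadreq_unlock().  A rough calling
 * pattern is sketched below; `kqr`, `ts` and `qos` are placeholders for state
 * owned by the caller, and the function name is hypothetical.
 */
#if 0 /* example only -- not compiled */
static void
example_request_servicer(struct proc *p, struct workq_threadreq_s *kqr,
    struct turnstile *ts, thread_qos_t qos)
{
	workq_kern_threadreq_lock(p);
	(void)workq_kern_threadreq_initiate(p, kqr, ts, qos,
	    WORKQ_THREADREQ_NONE);
	workq_kern_threadreq_unlock(p);
}
#endif
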
// This enum matches _pthread_set_flags in libpthread's qos_private.h
enum workq_set_self_flags {
	WORKQ_SET_SELF_QOS_FLAG           = 0x01,
	WORKQ_SET_SELF_VOUCHER_FLAG       = 0x02,
	WORKQ_SET_SELF_FIXEDPRIORITY_FLAG = 0x04,
	WORKQ_SET_SELF_TIMESHARE_FLAG     = 0x08,
	WORKQ_SET_SELF_WQ_KEVENT_UNBIND   = 0x10,
	WORKQ_SET_SELF_ALTERNATE_AMX      = 0x20,
};

void workq_proc_suspended(struct proc *p);
void workq_proc_resumed(struct proc *p);

#endif // BSD_KERNEL_PRIVATE

void workq_init(void);

#endif // KERNEL

#endif // _WORKQUEUE_INTERNAL_H_