]>
Commit | Line | Data |
---|---|---|
f1a1da6c A |
1 | /* |
2 | * Copyright (c) 2014 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | #ifndef _WORKQUEUE_INTERNAL_H_ | |
30 | #define _WORKQUEUE_INTERNAL_H_ | |
31 | ||
32 | /* These definitions are shared between the kext and userspace inside the pthread project. Consolidating | |
33 | * duplicate definitions that used to exist in both projects, when separate. | |
34 | */ | |
35 | ||
/* workq_kernreturn commands (the "options" argument of the syscall) */
#define WQOPS_THREAD_RETURN              0x04   /* parks the thread back into the kernel */
#define WQOPS_QUEUE_NEWSPISUPP           0x10   /* this is to check for newer SPI support */
#define WQOPS_QUEUE_REQTHREADS           0x20   /* request number of threads of a prio */
#define WQOPS_QUEUE_REQTHREADS2          0x30   /* request a number of threads in a given priority bucket */
#define WQOPS_THREAD_KEVENT_RETURN       0x40   /* parks the thread after delivering the passed kevent array */
#define WQOPS_SET_EVENT_MANAGER_PRIORITY 0x80   /* max()s the provided priority into the priority of the event manager */
#define WQOPS_THREAD_WORKLOOP_RETURN     0x100  /* parks the thread after delivering the passed kevent array */
#define WQOPS_SHOULD_NARROW              0x200  /* checks whether we should narrow our concurrency */
f1a1da6c | 45 | |
2546420a A |
/* flag values for upcall flags field, only 8 bits per struct threadlist */
#define WQ_FLAG_THREAD_PRIOMASK         0x0000ffff  /* priority occupies the low 16 bits */
#define WQ_FLAG_THREAD_PRIOSHIFT        16          /* flag bits start at bit 16 (see values below) */
#define WQ_FLAG_THREAD_OVERCOMMIT       0x00010000  /* thread is with overcommit prio */
#define WQ_FLAG_THREAD_REUSE            0x00020000  /* thread is being reused */
#define WQ_FLAG_THREAD_NEWSPI           0x00040000  /* the call is with new SPIs */
#define WQ_FLAG_THREAD_KEVENT           0x00080000  /* thread is response to kevent req */
#define WQ_FLAG_THREAD_EVENT_MANAGER    0x00100000  /* event manager thread */
#define WQ_FLAG_THREAD_TSD_BASE_SET     0x00200000  /* tsd base has already been set */
#define WQ_FLAG_THREAD_WORKLOOP         0x00400000  /* workloop thread */

/* QoS used for thread cleanup — NOTE(review): name-derived; QOS_CLASS_DEFAULT
 * is declared in a QoS header not included by this file. */
#define WQ_THREAD_CLEANUP_QOS QOS_CLASS_DEFAULT

/* sizing of the kevent list/data passed across the workq boundary —
 * must match userspace (see the WORKQ_KEVENT_EVENT_BUFFER_LEN note) */
#define WQ_KEVENT_LIST_LEN      16  // WORKQ_KEVENT_EVENT_BUFFER_LEN
#define WQ_KEVENT_DATA_SIZE     (32 * 1024)
61 | ||
f1a1da6c A |
62 | /* These definitions are only available to the kext, to avoid bleeding constants and types across the boundary to |
63 | * the userspace library. | |
64 | */ | |
65 | #ifdef KERNEL | |
66 | ||
/* These defines come from kern/thread.h but are XNU_KERNEL_PRIVATE so do not get
 * exported to kernel extensions.  Values must stay in sync with kern/thread.h.
 */
#define SCHED_CALL_BLOCK   0x1  /* scheduler callout: thread is blocking */
#define SCHED_CALL_UNBLOCK 0x2  /* scheduler callout: thread is unblocking */
72 | ||
// kwe_state — values start at 1 so that 0 can mean "no state set".
// NOTE(review): glosses below are name-derived; confirm against the kext's
// psynch wait-queue-element usage.
enum {
	KWE_THREAD_INWAIT = 1,    /* thread is currently waiting */
	KWE_THREAD_PREPOST,       /* wakeup pre-posted before a waiter arrived */
	KWE_THREAD_BROADCAST,     /* broadcast wakeup of all waiters */
};
79 | ||
/* old workq priority scheme */

#define WORKQUEUE_HIGH_PRIOQUEUE    0   /* high priority queue */
#define WORKQUEUE_DEFAULT_PRIOQUEUE 1   /* default priority queue */
#define WORKQUEUE_LOW_PRIOQUEUE     2   /* low priority queue */
#define WORKQUEUE_BG_PRIOQUEUE      3   /* background priority queue */

/* Total number of priority buckets, including the event manager bucket. */
#define WORKQUEUE_NUM_BUCKETS 7

// Sometimes something gets passed a bucket number and we need a way to express
// that it's actually the event manager.  The last bucket
// (index WORKQUEUE_NUM_BUCKETS - 1) is reserved for that.
#define WORKQUEUE_EVENT_MANAGER_BUCKET (WORKQUEUE_NUM_BUCKETS-1)
f1a1da6c A |
92 | |
/* wq_max_constrained_threads = max(64, N_CPU * WORKQUEUE_CONSTRAINED_FACTOR)
 * This used to be WORKQUEUE_NUM_BUCKETS + 1 when NUM_BUCKETS was 4, yielding
 * N_CPU * 5. When NUM_BUCKETS changed, we decided that the limit should
 * not change. So the factor is now always 5.
 */
#define WORKQUEUE_CONSTRAINED_FACTOR 5

/* NOTE(review): same value as WQ_FLAG_THREAD_OVERCOMMIT (0x00010000) — confirm
 * whether that aliasing is intentional before changing either constant. */
#define WORKQUEUE_OVERCOMMIT 0x10000
101 | ||
2546420a A |
/*
 * A thread which is scheduled may read its own th_priority field without
 * taking the workqueue lock. Other fields should be assumed to require the
 * lock.
 */
struct threadlist {
	TAILQ_ENTRY(threadlist) th_entry;   /* linkage on wq_thrunlist / wq_thidlelist / wq_thidlemgrlist */
	thread_t th_thread;                 /* kernel thread backing this workqueue thread */
	struct workqueue *th_workq;         /* owning workqueue */
	mach_vm_offset_t th_stackaddr;      /* base of this thread's stack allocation */
	mach_port_name_t th_thport;         /* port name for th_thread */
	uint16_t th_flags;                  /* TH_LIST_* flags (defined below) */
	uint8_t th_upcall_flags;            /* WQ_FLAG_THREAD_* for the next upcall; only 8 bits of
	                                     * storage — presumably stored shifted, TODO confirm */
	uint8_t th_priority;                /* priority bucket; self-readable without the lock (see above) */
};
f1a1da6c | 117 | |
a0619f9c A |
/* th_flags values (struct threadlist) */
#define TH_LIST_INITED              0x0001  /* Set at thread creation. */
#define TH_LIST_RUNNING             0x0002  /* On thrunlist, not parked. */
#define TH_LIST_KEVENT              0x0004  /* Thread requested by kevent */
#define TH_LIST_NEW                 0x0008  /* First return to userspace */
#define TH_LIST_BUSY                0x0010  /* Removed from idle list but not ready yet. */
#define TH_LIST_KEVENT_BOUND        0x0020  /* Thread bound to kqueues */
#define TH_LIST_CONSTRAINED         0x0040  /* Non-overcommit thread. */
#define TH_LIST_EVENT_MGR_SCHED_PRI 0x0080  /* Non-QoS Event Manager */
#define TH_LIST_UNBINDING           0x0100  /* Thread is unbinding during park */
#define TH_LIST_REMOVING_VOUCHER    0x0200  /* Thread is removing its voucher */
#define TH_LIST_PACING              0x0400  /* Thread is participating in pacing */
129 | ||
/*
 * A pending request for a workqueue thread at a given priority.
 * Queued on the per-bucket wq_reqlist / wq_overcommit_reqlist below.
 */
struct threadreq {
	TAILQ_ENTRY(threadreq) tr_entry;  /* linkage on a threadreq_head list */
	uint16_t tr_flags;                /* TR_FLAG_* (defined below) */
	uint8_t tr_state;                 /* TR_STATE_* lifecycle (defined below) */
	uint8_t tr_priority;              /* priority bucket of the request */
};
TAILQ_HEAD(threadreq_head, threadreq);
137 | ||
/* tr_state values: threadreq lifecycle */
#define TR_STATE_NEW      0  /* Not yet enqueued */
#define TR_STATE_WAITING  1  /* Waiting to be serviced - on reqlist */
#define TR_STATE_COMPLETE 2  /* Request handled - for caller to free */
#define TR_STATE_DEAD     3  /* terminal — NOTE(review): undocumented upstream; confirm semantics */

/* tr_flags values — NOTE(review): glosses derived from flag names; confirm in kext */
#define TR_FLAG_KEVENT     0x01  /* request is tied to a kevent */
#define TR_FLAG_OVERCOMMIT 0x02  /* request is for an overcommit thread */
#define TR_FLAG_ONSTACK    0x04  /* threadreq storage lives on a stack, not heap */
#define TR_FLAG_WORKLOOP   0x08  /* request originates from a workloop */
#define TR_FLAG_NO_PACING  0x10  /* request is exempt from pacing */
148 | ||
/*
 * Storage type for the wq_thactive atomic in struct workqueue: 128 bits on
 * LP64 (compiler-provided __int128), 64 bits otherwise.  Presumably packs
 * per-bucket active-thread counts into one atomically-updatable word —
 * TODO(review): confirm the packing layout in the kext.
 */
#if defined(__LP64__)
typedef unsigned __int128 wq_thactive_t;
#else
typedef uint64_t wq_thactive_t;
#endif
f1a1da6c A |
154 | |
/*
 * Kernel-side workqueue state.  wq_proc/wq_map/wq_task identify the owning
 * process; unless a field says otherwise, assume wq_lock protects it.
 */
struct workqueue {
	proc_t wq_proc;                     /* owning process */
	vm_map_t wq_map;                    /* its VM map (thread stacks live here) */
	task_t wq_task;                     /* its task */

	lck_spin_t wq_lock;                 /* spinlock guarding this structure */

	thread_call_t wq_atimer_delayed_call;    /* delayed "add thread" timer */
	thread_call_t wq_atimer_immediate_call;  /* immediate "add thread" timer */

	uint32_t _Atomic wq_flags;          /* WQ_* bits below; accessed atomically */
	uint32_t wq_timer_interval;         /* current atimer interval (see WQ_*_USECS below) */
	uint32_t wq_threads_scheduled;
	uint32_t wq_constrained_threads_scheduled;  /* scheduled threads that count against the constrained limit */
	uint32_t wq_nthreads;               /* total threads owned by this workqueue */
	uint32_t wq_thidlecount;            /* threads currently on the idle lists */

	uint32_t wq_event_manager_priority; /* set via WQOPS_SET_EVENT_MANAGER_PRIORITY */
	uint8_t wq_lflags;                  /* WQL_* bits below; protected by wqueue lock */
	uint8_t wq_paced;                   /* protected by wqueue lock */
	uint16_t __wq_unused;               /* explicit padding */

	TAILQ_HEAD(, threadlist) wq_thrunlist;    /* running (non-parked) threads */
	TAILQ_HEAD(, threadlist) wq_thidlelist;   /* parked threads */
	TAILQ_HEAD(, threadlist) wq_thidlemgrlist; /* parked event manager threads */

	uint32_t wq_reqcount;  /* number of elements on the following lists */
	struct threadreq_head wq_overcommit_reqlist[WORKQUEUE_EVENT_MANAGER_BUCKET];
	struct threadreq_head wq_reqlist[WORKQUEUE_EVENT_MANAGER_BUCKET];
	struct threadreq wq_event_manager_threadreq;  /* singleton request for the event manager */

	struct threadreq *wq_cached_threadreq;  /* one-entry allocation cache for threadreqs */

	uint16_t wq_thscheduled_count[WORKQUEUE_NUM_BUCKETS];  /* scheduled count per bucket */
	_Atomic wq_thactive_t wq_thactive;                     /* packed active state; lock-free (see wq_thactive_t) */
	_Atomic uint64_t wq_lastblocked_ts[WORKQUEUE_NUM_BUCKETS];  /* per-bucket last-blocked timestamps */
};
/* wq_flags bits (atomic) */
#define WQ_EXITING                  0x01  /* workqueue is being torn down */
#define WQ_ATIMER_DELAYED_RUNNING   0x02  /* wq_atimer_delayed_call is outstanding */
#define WQ_ATIMER_IMMEDIATE_RUNNING 0x04  /* wq_atimer_immediate_call is outstanding */

/* wq_lflags bits (protected by the wqueue lock) */
#define WQL_ATIMER_BUSY    0x01  /* atimer handler is running */
#define WQL_ATIMER_WAITING 0x02  /* a waiter wants notification when the atimer finishes */
f1a1da6c A |
197 | |
/* Hard cap on the number of threads a single process's workqueue may own. */
#define WORKQUEUE_MAXTHREADS 512

/* Timer/pacing windows, in microseconds. */
#define WQ_STALLED_WINDOW_USECS     200
#define WQ_REDUCE_POOL_WINDOW_USECS 5000000
#define WQ_MAX_TIMER_INTERVAL_USECS 50000

/*
 * Poison value stored where a threadlist pointer would be while the
 * workqueue is exiting.  The expansion is now fully parenthesized: the
 * original `(void *)~0ul` could bind unexpectedly when the macro is used
 * inside a larger expression (CERT PRE02-C).
 */
#define WQ_THREADLIST_EXITING_POISON ((void *)~0ul)
f1a1da6c A |
205 | #endif // KERNEL |
206 | ||
207 | #endif // _WORKQUEUE_INTERNAL_H_ |