/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _SYS_PTHREAD_INTERNAL_H_
#define _SYS_PTHREAD_INTERNAL_H_

#ifdef KERNEL
#include <kern/thread_call.h>
#include <sys/pthread_shims.h>
#include <sys/queue.h>
#include <kern/kcdata.h>
#endif

#include "kern/synch_internal.h"
#include "kern/workqueue_internal.h"
#include "kern/kern_trace.h"
#include "pthread/qos.h"
#include "private/qos_private.h"

/* pthread userspace SPI feature checking. These constants are returned from bsdthread_register
 * as a bitmask to inform userspace of the supported feature set. Old releases of OS X return
 * either zero or -1 from this call, which lets us return a positive number for the feature bits.
 */
#define PTHREAD_FEATURE_DISPATCHFUNC	0x01		/* same as WQOPS_QUEUE_NEWSPISUPP, checks for dispatch function support */
#define PTHREAD_FEATURE_FINEPRIO	0x02		/* are fine-grained priorities available */
#define PTHREAD_FEATURE_BSDTHREADCTL	0x04		/* is the bsdthread_ctl syscall available */
#define PTHREAD_FEATURE_SETSELF		0x08		/* is the BSDTHREAD_CTL_SET_SELF command of bsdthread_ctl available */
#define PTHREAD_FEATURE_QOS_MAINTENANCE	0x10		/* is QOS_CLASS_MAINTENANCE available */
#define PTHREAD_FEATURE_RESERVED	0x20		/* burnt, shipped in OS X 10.11 & iOS 9 with partial kevent delivery support */
#define PTHREAD_FEATURE_KEVENT		0x40		/* supports direct kevent delivery */
#define PTHREAD_FEATURE_QOS_DEFAULT	0x40000000	/* the kernel supports QOS_CLASS_DEFAULT */
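
/*
 * Illustrative sketch (not compiled): one way userspace might interpret the
 * bsdthread_register return value, assuming as described above that any
 * positive return carries the feature bitmask while older kernels return
 * zero or -1. The helper name and the 'rv' parameter are hypothetical.
 */
#if 0
static int
example_supports_set_self(int rv)
{
	if (rv <= 0) {
		return 0;	/* old kernel: no feature bits reported */
	}
	return (rv & PTHREAD_FEATURE_SETSELF) != 0;
}
#endif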

/* pthread bsdthread_ctl syscall commands */
#define BSDTHREAD_CTL_SET_QOS	0x10	/* bsdthread_ctl(BSDTHREAD_CTL_SET_QOS, thread_port, tsd_entry_addr, 0) */
#define BSDTHREAD_CTL_GET_QOS	0x20	/* bsdthread_ctl(BSDTHREAD_CTL_GET_QOS, thread_port, 0, 0) */
#define BSDTHREAD_CTL_QOS_OVERRIDE_START	0x40	/* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, thread_port, priority, 0) */
#define BSDTHREAD_CTL_QOS_OVERRIDE_END	0x80	/* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, thread_port, 0, 0) */
#define BSDTHREAD_CTL_SET_SELF	0x100	/* bsdthread_ctl(BSDTHREAD_CTL_SET_SELF, priority, voucher, flags) */
#define BSDTHREAD_CTL_QOS_OVERRIDE_RESET	0x200	/* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_RESET, 0, 0, 0) */
#define BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH	0x400	/* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH, thread_port, priority, 0) */
#define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD	0x401	/* bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD, thread_port, priority, resource) */
#define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET	0x402	/* bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET, 0|1 (?reset_all), resource, 0) */
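
/*
 * Illustrative sketch (not compiled): pairing a QoS override start with its
 * matching end, following the call shapes documented in the comments above.
 * The userspace trap wrapper declaration and the helper names are assumptions,
 * not something this header provides.
 */
#if 0
extern int __bsdthread_ctl(uintptr_t cmd, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);

static int
example_qos_override_begin(mach_port_name_t thread_port, pthread_priority_t priority)
{
	return __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, thread_port, priority, 0);
}

static int
example_qos_override_end(mach_port_name_t thread_port)
{
	return __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, thread_port, 0, 0);
}
#endif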

/* qos_class_t is mapped onto one of these bits in the bitfield; this mapping lives here because
 * libdispatch requires the QoS class mask of the pthread_priority_t to be a bitfield.
 */
#define __PTHREAD_PRIORITY_CBIT_USER_INTERACTIVE	0x20
#define __PTHREAD_PRIORITY_CBIT_USER_INITIATED		0x10
#define __PTHREAD_PRIORITY_CBIT_DEFAULT			0x8
#define __PTHREAD_PRIORITY_CBIT_UTILITY			0x4
#define __PTHREAD_PRIORITY_CBIT_BACKGROUND		0x2
#define __PTHREAD_PRIORITY_CBIT_MAINTENANCE		0x1
#define __PTHREAD_PRIORITY_CBIT_UNSPECIFIED		0x0

/* Added support for QOS_CLASS_MAINTENANCE */
static inline pthread_priority_t
_pthread_priority_make_newest(qos_class_t qc, int rel, unsigned long flags)
{
	pthread_priority_t cls;
	switch (qc) {
		case QOS_CLASS_USER_INTERACTIVE: cls = __PTHREAD_PRIORITY_CBIT_USER_INTERACTIVE; break;
		case QOS_CLASS_USER_INITIATED: cls = __PTHREAD_PRIORITY_CBIT_USER_INITIATED; break;
		case QOS_CLASS_DEFAULT: cls = __PTHREAD_PRIORITY_CBIT_DEFAULT; break;
		case QOS_CLASS_UTILITY: cls = __PTHREAD_PRIORITY_CBIT_UTILITY; break;
		case QOS_CLASS_BACKGROUND: cls = __PTHREAD_PRIORITY_CBIT_BACKGROUND; break;
		case QOS_CLASS_MAINTENANCE: cls = __PTHREAD_PRIORITY_CBIT_MAINTENANCE; break;
		case QOS_CLASS_UNSPECIFIED:
		default:
			cls = __PTHREAD_PRIORITY_CBIT_UNSPECIFIED;
			rel = 1; // results in priority bits == 0 <rdar://problem/16184900>
			break;
	}

	pthread_priority_t p =
		(flags & _PTHREAD_PRIORITY_FLAGS_MASK) |
		((cls << _PTHREAD_PRIORITY_QOS_CLASS_SHIFT) & _PTHREAD_PRIORITY_QOS_CLASS_MASK) |
		(((uint8_t)rel - 1) & _PTHREAD_PRIORITY_PRIORITY_MASK);

	return p;
}

/* Added support for QOS_CLASS_LEGACY and QOS_CLASS_INHERIT */
static inline pthread_priority_t
_pthread_priority_make_version2(qos_class_t qc, int rel, unsigned long flags)
{
	pthread_priority_t cls;
	switch (qc) {
		case QOS_CLASS_USER_INTERACTIVE: cls = __PTHREAD_PRIORITY_CBIT_USER_INTERACTIVE; break;
		case QOS_CLASS_USER_INITIATED: cls = __PTHREAD_PRIORITY_CBIT_USER_INITIATED; break;
		case QOS_CLASS_DEFAULT: cls = __PTHREAD_PRIORITY_CBIT_DEFAULT; break;
		case QOS_CLASS_UTILITY: cls = __PTHREAD_PRIORITY_CBIT_UTILITY; break;
		case QOS_CLASS_BACKGROUND: cls = __PTHREAD_PRIORITY_CBIT_BACKGROUND; break;
		case QOS_CLASS_UNSPECIFIED:
		default:
			cls = __PTHREAD_PRIORITY_CBIT_UNSPECIFIED;
			rel = 1; // results in priority bits == 0 <rdar://problem/16184900>
			break;
	}

	/*
	 * __PTHREAD_PRIORITY_CBIT_MAINTENANCE was defined as the 0th bit by shifting all the
	 * existing bits to the left by one. So for backward compatibility with kernels that do
	 * not support QOS_CLASS_MAINTENANCE, we have to compensate by shifting the cls bits to
	 * the right by one.
	 */
	cls >>= 1;

	pthread_priority_t p =
		(flags & _PTHREAD_PRIORITY_FLAGS_MASK) |
		((cls << _PTHREAD_PRIORITY_QOS_CLASS_SHIFT) & _PTHREAD_PRIORITY_QOS_CLASS_MASK) |
		(((uint8_t)rel - 1) & _PTHREAD_PRIORITY_PRIORITY_MASK);

	return p;
}

/* QOS_CLASS_MAINTENANCE is supported */
static inline qos_class_t
_pthread_priority_get_qos_newest(pthread_priority_t priority)
{
	qos_class_t qc;
	switch ((priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >> _PTHREAD_PRIORITY_QOS_CLASS_SHIFT) {
		case __PTHREAD_PRIORITY_CBIT_USER_INTERACTIVE: qc = QOS_CLASS_USER_INTERACTIVE; break;
		case __PTHREAD_PRIORITY_CBIT_USER_INITIATED: qc = QOS_CLASS_USER_INITIATED; break;
		case __PTHREAD_PRIORITY_CBIT_DEFAULT: qc = QOS_CLASS_DEFAULT; break;
		case __PTHREAD_PRIORITY_CBIT_UTILITY: qc = QOS_CLASS_UTILITY; break;
		case __PTHREAD_PRIORITY_CBIT_BACKGROUND: qc = QOS_CLASS_BACKGROUND; break;
		case __PTHREAD_PRIORITY_CBIT_MAINTENANCE: qc = QOS_CLASS_MAINTENANCE; break;
		case __PTHREAD_PRIORITY_CBIT_UNSPECIFIED:
		default: qc = QOS_CLASS_UNSPECIFIED; break;
	}
	return qc;
}
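
/*
 * Illustrative sketch (not compiled): encoding a QoS class with the "newest"
 * encoder and decoding it again returns the same class. The relative priority
 * and flags values here are arbitrary examples; the helper name is hypothetical.
 */
#if 0
static void
example_roundtrip_newest(void)
{
	pthread_priority_t p = _pthread_priority_make_newest(QOS_CLASS_UTILITY, 0, 0);
	qos_class_t qc = _pthread_priority_get_qos_newest(p);	// QOS_CLASS_UTILITY
	// Encoding QOS_CLASS_UNSPECIFIED instead would zero both the class and the
	// priority bits, per the rdar://problem/16184900 note above.
	(void)qc;
}
#endif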

/* QOS_CLASS_MAINTENANCE is not supported */
static inline qos_class_t
_pthread_priority_get_qos_version2(pthread_priority_t priority)
{
	qos_class_t qc;
	pthread_priority_t cls;

	cls = (priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >> _PTHREAD_PRIORITY_QOS_CLASS_SHIFT;

	/*
	 * __PTHREAD_PRIORITY_CBIT_MAINTENANCE was defined as the 0th bit by shifting all the
	 * existing bits to the left by one. So for backward compatibility with kernels that do
	 * not support QOS_CLASS_MAINTENANCE, pthread_priority_make() shifted the cls bits to the
	 * right by one. Therefore we have to shift them back when decoding the priority bits.
	 */
	cls <<= 1;

	switch (cls) {
		case __PTHREAD_PRIORITY_CBIT_USER_INTERACTIVE: qc = QOS_CLASS_USER_INTERACTIVE; break;
		case __PTHREAD_PRIORITY_CBIT_USER_INITIATED: qc = QOS_CLASS_USER_INITIATED; break;
		case __PTHREAD_PRIORITY_CBIT_DEFAULT: qc = QOS_CLASS_DEFAULT; break;
		case __PTHREAD_PRIORITY_CBIT_UTILITY: qc = QOS_CLASS_UTILITY; break;
		case __PTHREAD_PRIORITY_CBIT_BACKGROUND: qc = QOS_CLASS_BACKGROUND; break;
		case __PTHREAD_PRIORITY_CBIT_UNSPECIFIED:
		default: qc = QOS_CLASS_UNSPECIFIED; break;
	}
	return qc;
}
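
/*
 * Illustrative sketch (not compiled): the version2 encoder/decoder pair shifts
 * the class bits down and back up for kernels that predate QOS_CLASS_MAINTENANCE,
 * so every class other than MAINTENANCE survives a round trip. The helper name
 * is hypothetical.
 */
#if 0
static void
example_roundtrip_version2(void)
{
	pthread_priority_t p = _pthread_priority_make_version2(QOS_CLASS_BACKGROUND, 0, 0);
	qos_class_t qc = _pthread_priority_get_qos_version2(p);	// QOS_CLASS_BACKGROUND
	// QOS_CLASS_MAINTENANCE has no version2 representation; passing it to
	// _pthread_priority_make_version2 falls through to the UNSPECIFIED case.
	(void)qc;
}
#endif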

#define _pthread_priority_get_relpri(priority) \
	((int8_t)((priority & _PTHREAD_PRIORITY_PRIORITY_MASK) >> _PTHREAD_PRIORITY_PRIORITY_SHIFT) + 1)

#define _pthread_priority_get_flags(priority) \
	(priority & _PTHREAD_PRIORITY_FLAGS_MASK)

#define _pthread_priority_split_newest(priority, qos, relpri) \
	({ qos = _pthread_priority_get_qos_newest(priority); \
	   relpri = (qos == QOS_CLASS_UNSPECIFIED) ? 0 : \
			_pthread_priority_get_relpri(priority); \
	})

#define _pthread_priority_split_version2(priority, qos, relpri) \
	({ qos = _pthread_priority_get_qos_version2(priority); \
	   relpri = (qos == QOS_CLASS_UNSPECIFIED) ? 0 : \
			_pthread_priority_get_relpri(priority); \
	})
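
/*
 * Illustrative sketch (not compiled): the split macros write through the
 * qos/relpri lvalues they are handed, zeroing relpri when the decoded class is
 * QOS_CLASS_UNSPECIFIED. The helper name is hypothetical.
 */
#if 0
static void
example_split(pthread_priority_t priority)
{
	qos_class_t qos;
	int relpri;
	_pthread_priority_split_newest(priority, qos, relpri);
	(void)qos;
	(void)relpri;
}
#endif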

/* <rdar://problem/15969976> Required for backward compatibility on older kernels. */
#define _pthread_priority_make_version1(qos, relpri, flags) \
	(((flags >> 15) & 0xffff0000) | \
	((qos << 8) & 0x0000ff00) | \
	(((uint8_t)relpri - 1) & 0x000000ff))
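
/*
 * Illustrative sketch (not compiled): in the version1 layout the raw
 * qos_class_t value occupies bits 8-15, the biased relative priority
 * ((uint8_t)relpri - 1) occupies bits 0-7, and the flag bits are folded into
 * the upper half-word. For example, a relative priority of 0 always encodes
 * the low byte as 0xff. The wrapper name is hypothetical.
 */
#if 0
static pthread_priority_t
example_make_version1(qos_class_t qos, int relpri)
{
	return _pthread_priority_make_version1(qos, relpri, 0);
}
#endif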

/* userspace <-> kernel registration struct, for passing data to/from the kext during main thread init. */
struct _pthread_registration_data {
	/*
	 * version == sizeof(struct _pthread_registration_data)
	 *
	 * The structure can only grow, so we use its size as the version.
	 * Userspace initializes this to the size of its structure and the kext
	 * will copy out the version that was actually consumed.
	 *
	 * n.b. you must make sure the size of this structure isn't LP64-dependent
	 */
	uint64_t version;

	uint64_t dispatch_queue_offset; /* copy-in */
	uint64_t /* pthread_priority_t */ main_qos; /* copy-out */
	uint32_t tsd_offset; /* copy-in */
} __attribute__ ((packed));
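
/*
 * Illustrative sketch (not compiled): assumed userspace preparation of the
 * registration record. The caller sets 'version' to the size of the structure
 * it was built against and fills in the copy-in fields; after registration the
 * kext has written 'main_qos' and copied back the number of bytes it actually
 * consumed. The helper name is hypothetical.
 */
#if 0
static void
example_prepare_registration(struct _pthread_registration_data *data,
		uint64_t dispatch_queue_offset, uint32_t tsd_offset)
{
	data->version = sizeof(struct _pthread_registration_data);
	data->dispatch_queue_offset = dispatch_queue_offset;	/* copy-in */
	data->tsd_offset = tsd_offset;				/* copy-in */
	data->main_qos = 0;					/* copy-out, filled by the kext */
}
#endif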

#ifdef KERNEL

/* The set of features, from the feature bits above, that we support. */
#define PTHREAD_FEATURE_SUPPORTED	( \
	PTHREAD_FEATURE_DISPATCHFUNC | \
	PTHREAD_FEATURE_FINEPRIO | \
	PTHREAD_FEATURE_BSDTHREADCTL | \
	PTHREAD_FEATURE_SETSELF | \
	PTHREAD_FEATURE_QOS_MAINTENANCE | \
	PTHREAD_FEATURE_QOS_DEFAULT | \
	PTHREAD_FEATURE_KEVENT )

extern pthread_callbacks_t pthread_kern;

struct ksyn_waitq_element {
	TAILQ_ENTRY(ksyn_waitq_element) kwe_list;	/* link to other list members */
	void *		kwe_kwqqueue;			/* queue blocked on */
	uint32_t	kwe_state;			/* state */
	uint32_t	kwe_lockseq;			/* the sequence of the entry */
	uint32_t	kwe_count;			/* upper bound on number of matches still pending */
	uint32_t	kwe_psynchretval;		/* thread retval */
	void		*kwe_uth;			/* uthread */
	uint64_t	kwe_tid;			/* tid of waiter */
};
typedef struct ksyn_waitq_element * ksyn_waitq_element_t;

pthread_priority_t thread_qos_get_pthread_priority(int qos) __attribute__((const));
int thread_qos_get_class_index(int qos) __attribute__((const));
int pthread_priority_get_thread_qos(pthread_priority_t priority) __attribute__((const));
int pthread_priority_get_class_index(pthread_priority_t priority) __attribute__((const));
pthread_priority_t class_index_get_pthread_priority(int index) __attribute__((const));
int class_index_get_thread_qos(int index) __attribute__((const));
int qos_class_get_class_index(int qos) __attribute__((const));

#define PTH_DEFAULT_STACKSIZE 512*1024
#define MAX_PTHREAD_SIZE 64*1024

/* exported from the kernel but not present in any headers. */
extern thread_t port_name_to_thread(mach_port_name_t port_name);

/* function declarations for pthread_kext.c */
void pthread_init(void);
void psynch_zoneinit(void);
void _pth_proc_hashinit(proc_t p);
void _pth_proc_hashdelete(proc_t p);
void pth_global_hashinit(void);
void psynch_wq_cleanup(void*, void*);

void _pthread_init(void);
int _fill_procworkqueue(proc_t p, struct proc_workqueueinfo * pwqinfo);
uint32_t _get_pwq_state_kdp(proc_t p);
void _workqueue_exit(struct proc *p);
void _workqueue_mark_exiting(struct proc *p);
void _workqueue_thread_yielded(void);
sched_call_t _workqueue_get_sched_callback(void);

int _bsdthread_create(struct proc *p, user_addr_t user_func, user_addr_t user_funcarg, user_addr_t user_stack, user_addr_t user_pthread, uint32_t flags, user_addr_t *retval);
int _bsdthread_register(struct proc *p, user_addr_t threadstart, user_addr_t wqthread, int pthsize, user_addr_t dummy_value, user_addr_t targetconc_ptr, uint64_t dispatchqueue_offset, int32_t *retval);
int _bsdthread_terminate(struct proc *p, user_addr_t stackaddr, size_t size, uint32_t kthport, uint32_t sem, int32_t *retval);
int _bsdthread_ctl_set_qos(struct proc *p, user_addr_t cmd, mach_port_name_t kport, user_addr_t tsd_priority_addr, user_addr_t arg3, int *retval);
int _bsdthread_ctl_set_self(struct proc *p, user_addr_t cmd, pthread_priority_t priority, mach_port_name_t voucher, _pthread_set_flags_t flags, int *retval);
int _bsdthread_ctl_qos_override_start(struct proc *p, user_addr_t cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t resource, int *retval);
int _bsdthread_ctl_qos_override_end(struct proc *p, user_addr_t cmd, mach_port_name_t kport, user_addr_t resource, user_addr_t arg3, int *retval);
int _bsdthread_ctl_qos_override_dispatch(struct proc __unused *p, user_addr_t __unused cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t arg3, int __unused *retval);
int _bsdthread_ctl_qos_override_reset(struct proc __unused *p, user_addr_t __unused cmd, user_addr_t arg1, user_addr_t arg2, user_addr_t arg3, int __unused *retval);
int _bsdthread_ctl_qos_dispatch_asynchronous_override_add(struct proc __unused *p, user_addr_t __unused cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t resource, int __unused *retval);
int _bsdthread_ctl_qos_dispatch_asynchronous_override_reset(struct proc __unused *p, user_addr_t __unused cmd, int reset_all, user_addr_t resource, user_addr_t arg3, int __unused *retval);
int _bsdthread_ctl(struct proc *p, user_addr_t cmd, user_addr_t arg1, user_addr_t arg2, user_addr_t arg3, int *retval);
int _thread_selfid(__unused struct proc *p, uint64_t *retval);
int _workq_kernreturn(struct proc *p, int options, user_addr_t item, int arg2, int arg3, int32_t *retval);
int _workq_open(struct proc *p, int32_t *retval);

int _psynch_mutexwait(proc_t p, user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint64_t tid, uint32_t flags, uint32_t * retval);
int _psynch_mutexdrop(proc_t p, user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint64_t tid, uint32_t flags, uint32_t * retval);
int _psynch_cvbroad(proc_t p, user_addr_t cv, uint64_t cvlsgen, uint64_t cvudgen, uint32_t flags, user_addr_t mutex, uint64_t mugen, uint64_t tid, uint32_t *retval);
int _psynch_cvsignal(proc_t p, user_addr_t cv, uint64_t cvlsgen, uint32_t cvugen, int thread_port, user_addr_t mutex, uint64_t mugen, uint64_t tid, uint32_t flags, uint32_t * retval);
int _psynch_cvwait(proc_t p, user_addr_t cv, uint64_t cvlsgen, uint32_t cvugen, user_addr_t mutex, uint64_t mugen, uint32_t flags, int64_t sec, uint32_t nsec, uint32_t * retval);
int _psynch_cvclrprepost(proc_t p, user_addr_t cv, uint32_t cvgen, uint32_t cvugen, uint32_t cvsgen, uint32_t prepocnt, uint32_t preposeq, uint32_t flags, int *retval);
int _psynch_rw_longrdlock(proc_t p, user_addr_t rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t * retval);
int _psynch_rw_rdlock(proc_t p, user_addr_t rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval);
int _psynch_rw_unlock(proc_t p, user_addr_t rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval);
int _psynch_rw_wrlock(proc_t p, user_addr_t rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval);
int _psynch_rw_yieldwrlock(proc_t p, user_addr_t rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval);

void _pthread_find_owner(thread_t thread, struct stackshot_thread_waitinfo *waitinfo);
void * _pthread_get_thread_kwq(thread_t thread);

extern lck_grp_attr_t *pthread_lck_grp_attr;
extern lck_grp_t *pthread_lck_grp;
extern lck_attr_t *pthread_lck_attr;
extern lck_mtx_t *pthread_list_mlock;
extern thread_call_t psynch_thcall;

struct uthread* current_uthread(void);

// Call for the kernel's kevent system to request threads. A list of QoS/event
// counts should be provided, sorted by flags and then QoS class. If the
// identity of the thread to handle the request is known, it will be returned.
// If a new thread must be created, NULL will be returned.
thread_t _workq_reqthreads(struct proc *p, int requests_count,
		workq_reqthreads_req_t requests);

// Resolve a pthread_priority_t to a QoS/relative pri
integer_t _thread_qos_from_pthread_priority(unsigned long pri, unsigned long *flags);
// Clear out extraneous flags/pri info for putting in voucher
pthread_priority_t _pthread_priority_canonicalize(pthread_priority_t pri, boolean_t for_propagation);

#endif // KERNEL

#endif /* _SYS_PTHREAD_INTERNAL_H_ */