/*
 * Copyright (c) 2014 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _WORKQUEUE_INTERNAL_H_
#define _WORKQUEUE_INTERNAL_H_

/* These definitions are shared between the kext and userspace inside the pthread
 * project, consolidating duplicate definitions that used to exist in both projects
 * when they were separate.
 */

/* workq_kernreturn commands */
#define WQOPS_THREAD_RETURN              0x04 /* parks the thread back into the kernel */
#define WQOPS_QUEUE_NEWSPISUPP           0x10 /* checks for newer SPI support */
#define WQOPS_QUEUE_REQTHREADS           0x20 /* request a number of threads at a given priority */
#define WQOPS_QUEUE_REQTHREADS2          0x30 /* request a number of threads in a given priority bucket */
#define WQOPS_THREAD_KEVENT_RETURN       0x40 /* parks the thread after delivering the passed kevent array */
#define WQOPS_SET_EVENT_MANAGER_PRIORITY 0x80 /* max() the provided priority into the priority of the event manager */

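/*
 * Illustrative sketch, not part of the original header: the WQOPS_* codes
 * above are the command values passed to the workq_kernreturn trap. A small
 * caller-side helper (hypothetical name) distinguishing the two commands that
 * park the calling thread might look like this:
 */
static inline int
_wqops_parks_thread(int options)
{
	return options == WQOPS_THREAD_RETURN ||
	    options == WQOPS_THREAD_KEVENT_RETURN;
}
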
/* flag values for upcall flags field, only 8 bits per struct threadlist */
#define WQ_FLAG_THREAD_PRIOMASK      0x0000ffff
#define WQ_FLAG_THREAD_PRIOSHIFT     16
#define WQ_FLAG_THREAD_OVERCOMMIT    0x00010000 /* thread has an overcommit priority */
#define WQ_FLAG_THREAD_REUSE         0x00020000 /* thread is being reused */
#define WQ_FLAG_THREAD_NEWSPI        0x00040000 /* the call uses the new SPIs */
#define WQ_FLAG_THREAD_KEVENT        0x00080000 /* thread is a response to a kevent request */
#define WQ_FLAG_THREAD_EVENT_MANAGER 0x00100000 /* event manager thread */
#define WQ_FLAG_THREAD_TSD_BASE_SET  0x00200000 /* tsd base has already been set */

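/*
 * Illustrative sketch, not part of the original header (assumes the
 * fixed-width integer types used elsewhere in this header are in scope):
 * a thread-start routine receiving the upcall flags word could decode it
 * with the masks above. The helper names are hypothetical. The flag bits sit
 * above WQ_FLAG_THREAD_PRIOSHIFT, which is presumably why 8 bits suffice for
 * the per-threadlist copy mentioned in the comment above.
 */
static inline uint32_t
_wq_upcall_priority(uint32_t upcall_flags)
{
	/* low 16 bits carry the priority, per WQ_FLAG_THREAD_PRIOMASK */
	return upcall_flags & WQ_FLAG_THREAD_PRIOMASK;
}

static inline int
_wq_upcall_is_overcommit(uint32_t upcall_flags)
{
	return (upcall_flags & WQ_FLAG_THREAD_OVERCOMMIT) != 0;
}
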
#define WQ_THREAD_CLEANUP_QOS QOS_CLASS_DEFAULT

/* These definitions are only available to the kext, to avoid bleeding constants and
 * types across the boundary to the userspace library.
 */
#ifdef KERNEL

/* These defines come from kern/thread.h but are XNU_KERNEL_PRIVATE so do not get
 * exported to kernel extensions.
 */
#define SCHED_CALL_BLOCK   0x1
#define SCHED_CALL_UNBLOCK 0x2

// kwe_state
enum {
	KWE_THREAD_INWAIT = 1,
	KWE_THREAD_PREPOST,
	KWE_THREAD_BROADCAST,
};

/* old workq priority scheme */

#define WORKQUEUE_HIGH_PRIOQUEUE    0 /* high priority queue */
#define WORKQUEUE_DEFAULT_PRIOQUEUE 1 /* default priority queue */
#define WORKQUEUE_LOW_PRIOQUEUE     2 /* low priority queue */
#define WORKQUEUE_BG_PRIOQUEUE      3 /* background priority queue */

#define WORKQUEUE_NUM_BUCKETS 7

// Sometimes something gets passed a bucket number and we need a way to express
// that it's actually the event manager. Use the (n+1)th bucket for that.
#define WORKQUEUE_EVENT_MANAGER_BUCKET (WORKQUEUE_NUM_BUCKETS-1)

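/*
 * Illustrative sketch, not part of the original header: the per-bucket arrays
 * later in this header are sized WORKQUEUE_NUM_BUCKETS, so code handed a
 * bucket index can test for the event manager pseudo-bucket directly
 * (hypothetical helper name).
 */
static inline int
_wq_bucket_is_event_manager(unsigned int bucket)
{
	return bucket == WORKQUEUE_EVENT_MANAGER_BUCKET;
}
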
/* wq_max_constrained_threads = max(64, N_CPU * WORKQUEUE_CONSTRAINED_FACTOR)
 * This used to be WORKQUEUE_NUM_BUCKETS + 1 when NUM_BUCKETS was 4, yielding
 * N_CPU * 5. When NUM_BUCKETS changed, we decided that the limit should
 * not change. So the factor is now always 5.
 */
#define WORKQUEUE_CONSTRAINED_FACTOR 5

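/*
 * Illustrative sketch, not part of the original header: the limit described
 * in the comment above, expressed as code. The helper name is hypothetical;
 * the kext's actual computation lives in the workqueue implementation, not
 * in this header.
 */
static inline uint32_t
_wq_max_constrained_threads(uint32_t ncpus)
{
	uint32_t limit = ncpus * WORKQUEUE_CONSTRAINED_FACTOR;
	return (limit > 64) ? limit : 64; /* max(64, N_CPU * factor) */
}
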
#define WORKQUEUE_OVERCOMMIT 0x10000

/*
 * A thread which is scheduled may read its own th_priority field without
 * taking the workqueue lock. Other fields should be assumed to require the
 * lock.
 */
struct threadlist {
	TAILQ_ENTRY(threadlist) th_entry;
	thread_t th_thread;
	struct workqueue *th_workq;
	mach_vm_offset_t th_stackaddr;
	mach_port_name_t th_thport;
	uint16_t th_flags;
	uint8_t th_upcall_flags;
	uint8_t th_priority;
};

#define TH_LIST_INITED              0x01 /* Set at thread creation. */
#define TH_LIST_RUNNING             0x02 /* On thrunlist, not parked. */
#define TH_LIST_KEVENT              0x04 /* Thread requested by kevent */
#define TH_LIST_NEW                 0x08 /* First return to userspace */
#define TH_LIST_BUSY                0x10 /* Removed from idle list but not ready yet. */
#define TH_LIST_KEVENT_BOUND        0x20 /* Thread bound to kqueues */
#define TH_LIST_CONSTRAINED         0x40 /* Non-overcommit thread. */
#define TH_LIST_EVENT_MGR_SCHED_PRI 0x80 /* Non-QoS Event Manager */

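/*
 * Illustrative sketch, not part of the original header: th_flags carries the
 * TH_LIST_* bits above, so thread state checks are simple mask tests
 * (hypothetical helper name).
 */
static inline int
_th_is_running_and_kevent_bound(struct threadlist *tl)
{
	uint16_t want = TH_LIST_RUNNING | TH_LIST_KEVENT_BOUND;
	return (tl->th_flags & want) == want;
}
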
struct workqueue {
	proc_t wq_proc;
	vm_map_t wq_map;
	task_t wq_task;

	_Atomic uint32_t wq_flags; // updated atomically
	uint32_t wq_lflags; // protected by wqueue lock

	lck_spin_t wq_lock;
	boolean_t wq_interrupt_state;

	thread_call_t wq_atimer_delayed_call;
	thread_call_t wq_atimer_immediate_call;

	uint64_t wq_thread_yielded_timestamp;
	uint32_t wq_thread_yielded_count;
	uint32_t wq_timer_interval;
	uint32_t wq_max_concurrency;
	uint32_t wq_threads_scheduled;
	uint32_t wq_constrained_threads_scheduled;
	uint32_t wq_nthreads;
	uint32_t wq_thidlecount;

	TAILQ_HEAD(, threadlist) wq_thrunlist;
	TAILQ_HEAD(, threadlist) wq_thidlelist;
	TAILQ_HEAD(, threadlist) wq_thidlemgrlist;

	/* Counters for how many requests we have outstanding. The invariants here:
	 * - reqcount == SUM(requests) + (event manager ? 1 : 0)
	 * - SUM(ocrequests) + SUM(kevent_requests) + SUM(kevent_ocrequests) <= SUM(requests)
	 * - the number of constrained requests is the difference between the two
	 *   quantities above
	 * i.e. a kevent+overcommit request will increment reqcount, requests and
	 * kevent_ocrequests only. (An illustrative consistency check follows this
	 * struct definition.)
	 */
	uint32_t wq_reqcount;
	uint16_t wq_requests[WORKQUEUE_NUM_BUCKETS];
	uint16_t wq_ocrequests[WORKQUEUE_NUM_BUCKETS];
	uint16_t wq_kevent_requests[WORKQUEUE_NUM_BUCKETS];
	uint16_t wq_kevent_ocrequests[WORKQUEUE_NUM_BUCKETS];

	uint16_t wq_reqconc[WORKQUEUE_NUM_BUCKETS]; /* requested concurrency for each priority level */
	uint16_t wq_thscheduled_count[WORKQUEUE_NUM_BUCKETS];
	uint32_t wq_thactive_count[WORKQUEUE_NUM_BUCKETS] __attribute__((aligned(4))); /* must be uint32_t since we OSAddAtomic on these */
	uint64_t wq_lastblocked_ts[WORKQUEUE_NUM_BUCKETS] __attribute__((aligned(8))); /* XXX: why per bucket? */

	uint32_t wq_event_manager_priority;
};
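
/*
 * Illustrative sketch, not part of the original header: a debug-style check
 * of the request-counter invariants documented above. The helper is
 * hypothetical; whether an event manager request is outstanding is passed in
 * by the caller, since it is not represented by a dedicated field here.
 */
static inline int
_wq_requests_consistent(struct workqueue *wq, int event_manager_requested)
{
	uint32_t requests = 0, ocrequests = 0, kevents = 0, ockevents = 0;
	for (int i = 0; i < WORKQUEUE_NUM_BUCKETS; i++) {
		requests   += wq->wq_requests[i];
		ocrequests += wq->wq_ocrequests[i];
		kevents    += wq->wq_kevent_requests[i];
		ockevents  += wq->wq_kevent_ocrequests[i];
	}
	if (wq->wq_reqcount != requests + (event_manager_requested ? 1 : 0)) {
		return 0;
	}
	return (ocrequests + kevents + ockevents) <= requests;
}
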
#define WQ_LIST_INITED              0x01
#define WQ_EXITING                  0x02
#define WQ_ATIMER_DELAYED_RUNNING   0x04
#define WQ_ATIMER_IMMEDIATE_RUNNING 0x08

#define WQ_SETFLAG(wq, flag)   __c11_atomic_fetch_or(&wq->wq_flags, flag, __ATOMIC_SEQ_CST)
#define WQ_UNSETFLAG(wq, flag) __c11_atomic_fetch_and(&wq->wq_flags, ~flag, __ATOMIC_SEQ_CST)
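
/*
 * Illustrative sketch, not part of the original header: WQ_SETFLAG and
 * WQ_UNSETFLAG wrap the C11 fetch-or/fetch-and builtins, so they return the
 * previous value of wq_flags. A hypothetical caller can therefore set a flag
 * and learn whether it was already set in one step:
 */
static inline int
_wq_mark_exiting(struct workqueue *wq)
{
	/* nonzero if WQ_EXITING was already set before this call */
	return (WQ_SETFLAG(wq, WQ_EXITING) & WQ_EXITING) != 0;
}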

#define WQL_ATIMER_BUSY    0x01
#define WQL_ATIMER_WAITING 0x02

#define WORKQUEUE_MAXTHREADS        512
#define WQ_YIELDED_THRESHOLD        2000
#define WQ_YIELDED_WINDOW_USECS     30000
#define WQ_STALLED_WINDOW_USECS     200
#define WQ_REDUCE_POOL_WINDOW_USECS 5000000
#define WQ_MAX_TIMER_INTERVAL_USECS 50000

#endif // KERNEL

#endif // _WORKQUEUE_INTERNAL_H_