/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>
#include <kern/timer_call.h>

#include <libkern/OSAtomic.h>

#include <sys/kdebug.h>
#if CONFIG_DTRACE
#include <mach/sdt.h>
#endif

static zone_t thread_call_zone;
static struct wait_queue daemon_wqueue;

struct thread_call_group {
    queue_head_t pending_queue;
    uint32_t pending_count;

    queue_head_t delayed_queue;
    uint32_t delayed_count;

    timer_call_data_t delayed_timer;
    timer_call_data_t dealloc_timer;

    struct wait_queue idle_wqueue;
    uint32_t idle_count, active_count;

    integer_t pri;
    uint32_t target_thread_count;
    uint64_t idle_timestamp;

    uint32_t flags;
    sched_call_t sched_call;
};

typedef struct thread_call_group *thread_call_group_t;

#define TCG_PARALLEL        0x01
#define TCG_DEALLOC_ACTIVE  0x02

#define THREAD_CALL_GROUP_COUNT     4
#define THREAD_CALL_THREAD_MIN      4
#define INTERNAL_CALL_COUNT         768
#define THREAD_CALL_DEALLOC_INTERVAL_NS (5 * 1000 * 1000) /* 5 ms */
#define THREAD_CALL_ADD_RATIO       4
#define THREAD_CALL_MACH_FACTOR_CAP 3

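/*
 * THREAD_CALL_ADD_RATIO and THREAD_CALL_MACH_FACTOR_CAP tune
 * thread_call_group_should_add_thread(): a parallel group grows past
 * its target only while pending work exceeds ADD_RATIO entries per
 * active thread and the scheduler's mach factor is below the cap.
 */
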
static struct thread_call_group thread_call_groups[THREAD_CALL_GROUP_COUNT];
static boolean_t thread_call_daemon_awake;
static thread_call_data_t internal_call_storage[INTERNAL_CALL_COUNT];
static queue_head_t thread_call_internal_queue;
static uint64_t thread_call_dealloc_interval_abs;

static __inline__ thread_call_t _internal_call_allocate(void);
static __inline__ void _internal_call_release(thread_call_t call);
static __inline__ boolean_t _pending_call_enqueue(thread_call_t call, thread_call_group_t group);
static __inline__ boolean_t _delayed_call_enqueue(thread_call_t call, thread_call_group_t group, uint64_t deadline);
static __inline__ boolean_t _call_dequeue(thread_call_t call, thread_call_group_t group);
static __inline__ void thread_call_wake(thread_call_group_t group);
static __inline__ void _set_delayed_call_timer(thread_call_t call, thread_call_group_t group);
static boolean_t _remove_from_pending_queue(thread_call_func_t func, thread_call_param_t param0, boolean_t remove_all);
static boolean_t _remove_from_delayed_queue(thread_call_func_t func, thread_call_param_t param0, boolean_t remove_all);
static void thread_call_daemon(void *arg);
static void thread_call_thread(thread_call_group_t group, wait_result_t wres);
extern void thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1);
static void thread_call_dealloc_timer(timer_call_param_t p0, timer_call_param_t p1);
static void thread_call_group_setup(thread_call_group_t group, thread_call_priority_t pri, uint32_t target_thread_count, boolean_t parallel);
static void sched_call_thread(int type, thread_t thread);
static void thread_call_start_deallocate_timer(thread_call_group_t group);
static void thread_call_wait_locked(thread_call_t call);

#define qe(x)   ((queue_entry_t)(x))
#define TC(x)   ((thread_call_t)(x))

lck_grp_t thread_call_queues_lck_grp;
lck_grp_t thread_call_lck_grp;
lck_attr_t thread_call_lck_attr;
lck_grp_attr_t thread_call_lck_grp_attr;

#if defined(__i386__) || defined(__x86_64__)
lck_mtx_t thread_call_lock_data;
#else
lck_spin_t thread_call_lock_data;
#endif

#define thread_call_lock_spin() \
    lck_mtx_lock_spin_always(&thread_call_lock_data)

#define thread_call_unlock() \
    lck_mtx_unlock_always(&thread_call_lock_data)

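/*
 * All thread call state above is protected by thread_call_lock_data,
 * always taken with interrupts disabled (see disable_ints_and_lock()).
 * On x86 the storage is a mutex acquired in spin mode; other
 * architectures declare it as a spinlock.
 */
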
static inline spl_t
disable_ints_and_lock(void)
{
    spl_t s;

    s = splsched();
    thread_call_lock_spin();

    return s;
}

static inline void
enable_ints_and_unlock(void)
{
    thread_call_unlock();
    (void)spllo();
}

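/*
 * group_isparallel:
 *
 * TRUE if the group may run callouts on several
 * threads concurrently (TCG_PARALLEL set at setup).
 */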
static inline boolean_t
group_isparallel(thread_call_group_t group)
{
    return ((group->flags & TCG_PARALLEL) != 0);
}

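/*
 * thread_call_group_should_add_thread:
 *
 * Thread-pool growth policy, evaluated with the lock held.  A serial
 * group adds a thread only when work is pending and nothing is active;
 * a parallel group also grows toward its target count, and beyond it
 * when the backlog exceeds THREAD_CALL_ADD_RATIO entries per thread
 * and overall system load (the mach factor) is low.
 */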
static boolean_t
thread_call_group_should_add_thread(thread_call_group_t group)
{
    uint32_t thread_count;

    if (!group_isparallel(group)) {
        if (group->pending_count > 0 && group->active_count == 0) {
            return TRUE;
        }

        return FALSE;
    }

    if (group->pending_count > 0) {
        if (group->idle_count > 0) {
            panic("Pending work, but threads are idle?");
        }

        thread_count = group->active_count;

        /*
         * Add a thread if either there are no threads,
         * the group has fewer than its target number of
         * threads, or the amount of work is large relative
         * to the number of threads. In the last case, pay attention
         * to the total load on the system, and back off if
         * it's high.
         */
        if ((thread_count == 0) ||
            (thread_count < group->target_thread_count) ||
            ((group->pending_count > THREAD_CALL_ADD_RATIO * thread_count) &&
             (sched_mach_factor < THREAD_CALL_MACH_FACTOR_CAP))) {
            return TRUE;
        }
    }

    return FALSE;
}

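/*
 * Map a thread call priority onto the scheduler priority
 * at which the group's worker threads run.
 */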
static inline integer_t
thread_call_priority_to_sched_pri(thread_call_priority_t pri)
{
    switch (pri) {
    case THREAD_CALL_PRIORITY_HIGH:
        return BASEPRI_PREEMPT;
    case THREAD_CALL_PRIORITY_KERNEL:
        return BASEPRI_KERNEL;
    case THREAD_CALL_PRIORITY_USER:
        return BASEPRI_DEFAULT;
    case THREAD_CALL_PRIORITY_LOW:
        return DEPRESSPRI;
    default:
        panic("Invalid priority.");
    }

    return 0;
}

/* Lock held */
static inline thread_call_group_t
thread_call_get_group(
    thread_call_t call)
{
    thread_call_priority_t pri = call->tc_pri;

    assert(pri == THREAD_CALL_PRIORITY_LOW ||
           pri == THREAD_CALL_PRIORITY_USER ||
           pri == THREAD_CALL_PRIORITY_KERNEL ||
           pri == THREAD_CALL_PRIORITY_HIGH);

    return &thread_call_groups[pri];
}

static void
thread_call_group_setup(
    thread_call_group_t group,
    thread_call_priority_t pri,
    uint32_t target_thread_count,
    boolean_t parallel)
{
    queue_init(&group->pending_queue);
    queue_init(&group->delayed_queue);

    timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);
    timer_call_setup(&group->dealloc_timer, thread_call_dealloc_timer, group);

    wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);

    group->target_thread_count = target_thread_count;
    group->pri = thread_call_priority_to_sched_pri(pri);

    group->sched_call = sched_call_thread;
    if (parallel) {
        group->flags |= TCG_PARALLEL;
        group->sched_call = NULL;
    }
}

/*
 * Simple wrapper for creating threads bound to
 * thread call groups.
 */
static kern_return_t
thread_call_thread_create(
    thread_call_group_t group)
{
    thread_t thread;
    kern_return_t result;

    result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, group->pri, &thread);
    if (result != KERN_SUCCESS) {
        return result;
    }

    if (group->pri < BASEPRI_PREEMPT) {
        /*
         * New style doesn't get to run to completion in
         * kernel if there are higher priority threads
         * available.
         */
        thread_set_eager_preempt(thread);
    }

    thread_deallocate(thread);
    return KERN_SUCCESS;
}

/*
 * thread_call_initialize:
 *
 * Initialize this module, called
 * early during system initialization.
 */
void
thread_call_initialize(void)
{
    thread_call_t call;
    kern_return_t result;
    thread_t thread;
    int i;

    i = sizeof (thread_call_data_t);
    thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
    zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
    zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

    lck_attr_setdefault(&thread_call_lck_attr);
    lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
    lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
    lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
    lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
    lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif

    nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
    wait_queue_init(&daemon_wqueue, SYNC_POLICY_FIFO);

    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE);
    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE);
    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE);
    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE);

    disable_ints_and_lock();

    queue_init(&thread_call_internal_queue);
    for (
            call = internal_call_storage;
            call < &internal_call_storage[INTERNAL_CALL_COUNT];
            call++) {

        enqueue_tail(&thread_call_internal_queue, qe(call));
    }

    thread_call_daemon_awake = TRUE;

    enable_ints_and_unlock();

    result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread);
    if (result != KERN_SUCCESS)
        panic("thread_call_initialize");

    thread_deallocate(thread);
}

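/*
 * thread_call_setup:
 *
 * Initialize a callout entry whose storage the caller owns.
 * Such entries lack the THREAD_CALL_ALLOC flag, so
 * thread_call_cancel_wait() and thread_call_free() may not
 * be used on them.
 */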
void
thread_call_setup(
    thread_call_t call,
    thread_call_func_t func,
    thread_call_param_t param0)
{
    bzero(call, sizeof(*call));
    call_entry_setup((call_entry_t)call, func, param0);
    call->tc_pri = THREAD_CALL_PRIORITY_HIGH; /* Default priority */
}

/*
 * _internal_call_allocate:
 *
 * Allocate an internal callout entry.
 *
 * Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(void)
{
    thread_call_t call;

    if (queue_empty(&thread_call_internal_queue))
        panic("_internal_call_allocate");

    call = TC(dequeue_head(&thread_call_internal_queue));

    return (call);
}

/*
 * _internal_call_release:
 *
 * Release an internal callout entry which
 * is no longer pending (or delayed).
 *
 * Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
    thread_call_t call)
{
    if (call >= internal_call_storage &&
        call < &internal_call_storage[INTERNAL_CALL_COUNT])
        enqueue_head(&thread_call_internal_queue, qe(call));
}

/*
 * _pending_call_enqueue:
 *
 * Place an entry at the end of the
 * pending queue, to be executed soon.
 *
 * Returns TRUE if the entry was already
 * on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(
    thread_call_t call,
    thread_call_group_t group)
{
    queue_head_t *old_queue;

    old_queue = call_entry_enqueue_tail(CE(call), &group->pending_queue);

    if (old_queue == NULL) {
        call->tc_submit_count++;
    }

    group->pending_count++;

    thread_call_wake(group);

    return (old_queue != NULL);
}

/*
 * _delayed_call_enqueue:
 *
 * Place an entry on the delayed queue,
 * after existing entries with an earlier
 * (or identical) deadline.
 *
 * Returns TRUE if the entry was already
 * on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
    thread_call_t call,
    thread_call_group_t group,
    uint64_t deadline)
{
    queue_head_t *old_queue;

    old_queue = call_entry_enqueue_deadline(CE(call), &group->delayed_queue, deadline);

    if (old_queue == &group->pending_queue)
        group->pending_count--;
    else if (old_queue == NULL)
        call->tc_submit_count++;

    return (old_queue != NULL);
}

/*
 * _call_dequeue:
 *
 * Remove an entry from a queue.
 *
 * Returns TRUE if the entry was on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
    thread_call_t call,
    thread_call_group_t group)
{
    queue_head_t *old_queue;

    old_queue = call_entry_dequeue(CE(call));

    if (old_queue != NULL) {
        call->tc_finish_count++;
        if (old_queue == &group->pending_queue)
            group->pending_count--;
    }

    return (old_queue != NULL);
}

/*
 * _set_delayed_call_timer:
 *
 * Reset the timer so that it
 * next expires when the entry is due.
 *
 * Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
    thread_call_t call,
    thread_call_group_t group)
{
    timer_call_enter(&group->delayed_timer, call->tc_call.deadline, 0);
}

/*
 * _remove_from_pending_queue:
 *
 * Remove the first (or all) matching
 * entries from the pending queue.
 *
 * Returns TRUE if any matching entries
 * were found.
 *
 * Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
    thread_call_func_t func,
    thread_call_param_t param0,
    boolean_t remove_all)
{
    boolean_t call_removed = FALSE;
    thread_call_t call;
    thread_call_group_t group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];

    call = TC(queue_first(&group->pending_queue));

    while (!queue_end(&group->pending_queue, qe(call))) {
        if (call->tc_call.func == func &&
            call->tc_call.param0 == param0) {
            thread_call_t next = TC(queue_next(qe(call)));

            _call_dequeue(call, group);

            _internal_call_release(call);

            call_removed = TRUE;
            if (!remove_all)
                break;

            call = next;
        }
        else
            call = TC(queue_next(qe(call)));
    }

    return (call_removed);
}

/*
 * _remove_from_delayed_queue:
 *
 * Remove the first (or all) matching
 * entries from the delayed queue.
 *
 * Returns TRUE if any matching entries
 * were found.
 *
 * Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
    thread_call_func_t func,
    thread_call_param_t param0,
    boolean_t remove_all)
{
    boolean_t call_removed = FALSE;
    thread_call_t call;
    thread_call_group_t group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];

    call = TC(queue_first(&group->delayed_queue));

    while (!queue_end(&group->delayed_queue, qe(call))) {
        if (call->tc_call.func == func &&
            call->tc_call.param0 == param0) {
            thread_call_t next = TC(queue_next(qe(call)));

            _call_dequeue(call, group);

            _internal_call_release(call);

            call_removed = TRUE;
            if (!remove_all)
                break;

            call = next;
        }
        else
            call = TC(queue_next(qe(call)));
    }

    return (call_removed);
}

#ifndef __LP64__

/*
 * thread_call_func:
 *
 * Enqueue a function callout.
 *
 * Guarantees { function, argument }
 * uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
    thread_call_func_t func,
    thread_call_param_t param,
    boolean_t unique_call)
{
    thread_call_t call;
    thread_call_group_t group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];
    spl_t s;

    s = splsched();
    thread_call_lock_spin();

    call = TC(queue_first(&group->pending_queue));

    while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
        if (call->tc_call.func == func && call->tc_call.param0 == param) {
            break;
        }

        call = TC(queue_next(qe(call)));
    }

    if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
        call = _internal_call_allocate();
        call->tc_call.func = func;
        call->tc_call.param0 = param;
        call->tc_call.param1 = NULL;

        _pending_call_enqueue(call, group);
    }

    thread_call_unlock();
    splx(s);
}

#endif /* __LP64__ */

/*
 * thread_call_func_delayed:
 *
 * Enqueue a function callout to
 * occur at the stated time.
 */
void
thread_call_func_delayed(
    thread_call_func_t func,
    thread_call_param_t param,
    uint64_t deadline)
{
    thread_call_t call;
    thread_call_group_t group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];
    spl_t s;

    s = splsched();
    thread_call_lock_spin();

    call = _internal_call_allocate();
    call->tc_call.func = func;
    call->tc_call.param0 = param;
    call->tc_call.param1 = 0;

    _delayed_call_enqueue(call, group, deadline);

    if (queue_first(&group->delayed_queue) == qe(call))
        _set_delayed_call_timer(call, group);

    thread_call_unlock();
    splx(s);
}

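/*
 * Usage sketch for the function-based interface (illustrative only;
 * my_func and my_arg are hypothetical names, not part of this file):
 *
 *    uint64_t deadline;
 *
 *    clock_interval_to_deadline(10, NSEC_PER_MSEC, &deadline);
 *    thread_call_func_delayed(my_func, my_arg, deadline);
 *    ...
 *    thread_call_func_cancel(my_func, my_arg, FALSE);
 *
 * These entries come from the fixed internal_call_storage pool, so the
 * caller cannot wait on or free them.
 */
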
/*
 * thread_call_func_cancel:
 *
 * Dequeue a function callout.
 *
 * Removes one (or all) { function, argument }
 * instance(s) from either (or both)
 * the pending and the delayed queue,
 * in that order.
 *
 * Returns TRUE if any calls were cancelled.
 */
boolean_t
thread_call_func_cancel(
    thread_call_func_t func,
    thread_call_param_t param,
    boolean_t cancel_all)
{
    boolean_t result;
    spl_t s;

    s = splsched();
    thread_call_lock_spin();

    if (cancel_all)
        /* Bitwise OR: both queues must be scanned unconditionally. */
        result = _remove_from_pending_queue(func, param, cancel_all) |
            _remove_from_delayed_queue(func, param, cancel_all);
    else
        /* Short-circuit: stop after the first queue yields a match. */
        result = _remove_from_pending_queue(func, param, cancel_all) ||
            _remove_from_delayed_queue(func, param, cancel_all);

    thread_call_unlock();
    splx(s);

    return (result);
}

/*
 * Allocate a thread call with a given priority.  Importances
 * other than THREAD_CALL_PRIORITY_HIGH will be run in threads
 * with eager preemption enabled (i.e. may be aggressively preempted
 * by higher-priority threads which are not in the normal "urgent" bands).
 */
thread_call_t
thread_call_allocate_with_priority(
    thread_call_func_t func,
    thread_call_param_t param0,
    thread_call_priority_t pri)
{
    thread_call_t call;

    if (pri > THREAD_CALL_PRIORITY_LOW) {
        panic("Invalid pri: %d\n", pri);
    }

    call = thread_call_allocate(func, param0);
    call->tc_pri = pri;

    return call;
}

/*
 * thread_call_allocate:
 *
 * Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
    thread_call_func_t func,
    thread_call_param_t param0)
{
    thread_call_t call = zalloc(thread_call_zone);

    thread_call_setup(call, func, param0);
    call->tc_refs = 1;
    call->tc_flags = THREAD_CALL_ALLOC;

    return (call);
}

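/*
 * Usage sketch for dynamically-allocated thread calls (illustrative
 * only; my_func and my_ctx are hypothetical names, not part of this
 * file):
 *
 *    thread_call_t call = thread_call_allocate(my_func, my_ctx);
 *    uint64_t deadline;
 *
 *    clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *    thread_call_enter_delayed(call, deadline);
 *    ...
 *    thread_call_cancel_wait(call);  (requires THREAD_CALL_ALLOC storage)
 *    thread_call_free(call);
 */
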
/*
 * thread_call_free:
 *
 * Release a callout.  If the callout is currently
 * executing, it will be freed when all invocations
 * finish.
 */
boolean_t
thread_call_free(
    thread_call_t call)
{
    spl_t s;
    int32_t refs;

    s = splsched();
    thread_call_lock_spin();

    if (call->tc_call.queue != NULL) {
        thread_call_unlock();
        splx(s);

        return (FALSE);
    }

    refs = --call->tc_refs;
    if (refs < 0) {
        panic("Refcount negative: %d\n", refs);
    }

    thread_call_unlock();
    splx(s);

    if (refs == 0) {
        zfree(thread_call_zone, call);
    }

    return (TRUE);
}

/*
 * thread_call_enter:
 *
 * Enqueue a callout entry to occur "soon".
 *
 * Returns TRUE if the call was
 * already on a queue.
 */
boolean_t
thread_call_enter(
    thread_call_t call)
{
    boolean_t result = TRUE;
    thread_call_group_t group;
    spl_t s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    if (call->tc_call.queue != &group->pending_queue) {
        result = _pending_call_enqueue(call, group);
    }

    call->tc_call.param1 = 0;

    thread_call_unlock();
    splx(s);

    return (result);
}

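/*
 * thread_call_enter1:
 *
 * As thread_call_enter(), but also sets the second
 * parameter passed to the callout function.
 */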
boolean_t
thread_call_enter1(
    thread_call_t call,
    thread_call_param_t param1)
{
    boolean_t result = TRUE;
    thread_call_group_t group;
    spl_t s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    if (call->tc_call.queue != &group->pending_queue) {
        result = _pending_call_enqueue(call, group);
    }

    call->tc_call.param1 = param1;

    thread_call_unlock();
    splx(s);

    return (result);
}

/*
 * thread_call_enter_delayed:
 *
 * Enqueue a callout entry to occur
 * at the stated time.
 *
 * Returns TRUE if the call was
 * already on a queue.
 */
boolean_t
thread_call_enter_delayed(
    thread_call_t call,
    uint64_t deadline)
{
    boolean_t result = TRUE;
    thread_call_group_t group;
    spl_t s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    result = _delayed_call_enqueue(call, group, deadline);

    if (queue_first(&group->delayed_queue) == qe(call))
        _set_delayed_call_timer(call, group);

    call->tc_call.param1 = 0;

    thread_call_unlock();
    splx(s);

    return (result);
}

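/*
 * thread_call_enter1_delayed:
 *
 * As thread_call_enter_delayed(), but also sets the second parameter
 * and records the time-to-deadline for the DTrace callout probe.
 */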
boolean_t
thread_call_enter1_delayed(
    thread_call_t call,
    thread_call_param_t param1,
    uint64_t deadline)
{
    boolean_t result = TRUE;
    thread_call_group_t group;
    spl_t s;
    uint64_t abstime;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();
    abstime = mach_absolute_time();

    result = _delayed_call_enqueue(call, group, deadline);

    if (queue_first(&group->delayed_queue) == qe(call))
        _set_delayed_call_timer(call, group);

    call->tc_call.param1 = param1;

    call->ttd = (deadline > abstime) ? (deadline - abstime) : 0;
#if CONFIG_DTRACE
    DTRACE_TMR4(thread_callout__create, thread_call_func_t, call->tc_call.func, 0, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
#endif
    thread_call_unlock();
    splx(s);

    return (result);
}

/*
 * thread_call_cancel:
 *
 * Dequeue a callout entry.
 *
 * Returns TRUE if the call was
 * on a queue.
 */
boolean_t
thread_call_cancel(
    thread_call_t call)
{
    boolean_t result;
    thread_call_group_t group;
    spl_t s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    result = _call_dequeue(call, group);

    thread_call_unlock();
    splx(s);
#if CONFIG_DTRACE
    DTRACE_TMR4(thread_callout__cancel, thread_call_func_t, call->tc_call.func, 0, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
#endif

    return (result);
}

/*
 * Cancel a thread call.  If it cannot be cancelled (i.e.
 * is already in flight), waits for the most recent invocation
 * to finish.  Note that if clients re-submit this thread call,
 * it may still be pending or in flight when thread_call_cancel_wait
 * returns, but all requests to execute this work item prior
 * to the call to thread_call_cancel_wait will have finished.
 */
boolean_t
thread_call_cancel_wait(
    thread_call_t call)
{
    boolean_t result;
    thread_call_group_t group;

    if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) {
        panic("%s: Can't wait on thread call whose storage I don't own.", __FUNCTION__);
    }

    group = thread_call_get_group(call);

    (void) splsched();
    thread_call_lock_spin();

    result = _call_dequeue(call, group);
    if (result == FALSE) {
        thread_call_wait_locked(call);
    }

    thread_call_unlock();
    (void) spllo();

    return result;
}

#ifndef __LP64__

/*
 * thread_call_is_delayed:
 *
 * Returns TRUE if the call is
 * currently on a delayed queue.
 *
 * Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
    thread_call_t call,
    uint64_t *deadline)
{
    boolean_t result = FALSE;
    thread_call_group_t group;
    spl_t s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    if (call->tc_call.queue == &group->delayed_queue) {
        if (deadline != NULL)
            *deadline = call->tc_call.deadline;
        result = TRUE;
    }

    thread_call_unlock();
    splx(s);

    return (result);
}

#endif /* __LP64__ */

/*
 * thread_call_wake:
 *
 * Wake a call thread to service
 * pending call entries.  May wake
 * the daemon thread in order to
 * create additional call threads.
 *
 * Called with thread_call_lock held.
 *
 * For high-priority group, only does wakeup/creation if there are no threads
 * running.
 */
static __inline__ void
thread_call_wake(
    thread_call_group_t group)
{
    /*
     * New behavior: use threads if you've got 'em.
     * Traditional behavior: wake only if no threads running.
     */
    if (group_isparallel(group) || group->active_count == 0) {
        if (wait_queue_wakeup_one(&group->idle_wqueue, NO_EVENT, THREAD_AWAKENED, -1) == KERN_SUCCESS) {
            group->idle_count--; group->active_count++;

            if (group->idle_count == 0) {
                timer_call_cancel(&group->dealloc_timer);
                /* The timer is cancelled, so clear the dealloc-pending flag. */
                group->flags &= ~TCG_DEALLOC_ACTIVE;
            }
        } else {
            if (!thread_call_daemon_awake && thread_call_group_should_add_thread(group)) {
                thread_call_daemon_awake = TRUE;
                wait_queue_wakeup_one(&daemon_wqueue, NO_EVENT, THREAD_AWAKENED, -1);
            }
        }
    }
}

1062 | ||
9bccf70c | 1063 | /* |
2d21ac55 | 1064 | * sched_call_thread: |
9bccf70c | 1065 | * |
316670eb A |
1066 | * Call out invoked by the scheduler. Used only for high-priority |
1067 | * thread call group. | |
9bccf70c | 1068 | */ |
2d21ac55 A |
1069 | static void |
1070 | sched_call_thread( | |
316670eb A |
1071 | int type, |
1072 | __unused thread_t thread) | |
9bccf70c | 1073 | { |
316670eb A |
1074 | thread_call_group_t group; |
1075 | ||
1076 | group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH]; /* XXX */ | |
c910b4d9 | 1077 | |
6d2010ae | 1078 | thread_call_lock_spin(); |
9bccf70c | 1079 | |
2d21ac55 | 1080 | switch (type) { |
9bccf70c | 1081 | |
316670eb A |
1082 | case SCHED_CALL_BLOCK: |
1083 | --group->active_count; | |
1084 | if (group->pending_count > 0) | |
1085 | thread_call_wake(group); | |
1086 | break; | |
9bccf70c | 1087 | |
316670eb A |
1088 | case SCHED_CALL_UNBLOCK: |
1089 | group->active_count++; | |
1090 | break; | |
2d21ac55 | 1091 | } |
9bccf70c | 1092 | |
6d2010ae | 1093 | thread_call_unlock(); |
9bccf70c | 1094 | } |
1c79356b | 1095 | |
/*
 * Interrupts disabled, lock held; returns the same way.
 * Only called on thread calls whose storage we own.  Wakes up
 * anyone who might be waiting on this work item and frees it
 * if the client has so requested.
 */
static void
thread_call_finish(thread_call_t call)
{
    boolean_t dowake = FALSE;

    call->tc_finish_count++;
    call->tc_refs--;

    if ((call->tc_flags & THREAD_CALL_WAIT) != 0) {
        dowake = TRUE;
        call->tc_flags &= ~THREAD_CALL_WAIT;

        /*
         * Dropping lock here because the sched call for the
         * high-pri group can take the big lock from under
         * a thread lock.
         */
        thread_call_unlock();
        thread_wakeup((event_t)call);
        thread_call_lock_spin();
    }

    if (call->tc_refs == 0) {
        if (dowake) {
            panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func);
        }

        enable_ints_and_unlock();

        zfree(thread_call_zone, call);

        (void)disable_ints_and_lock();
    }
}

/*
 * thread_call_thread:
 *
 * Worker thread body: drains the group's pending queue, invoking each
 * callout with the lock dropped and interrupts enabled, then parks on
 * the group's idle wait queue (or terminates, for a serial group that
 * already has enough idle threads).
 */
static void
thread_call_thread(
    thread_call_group_t group,
    wait_result_t wres)
{
    thread_t self = current_thread();
    boolean_t canwait;

    if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0)
        (void)thread_set_tag_internal(self, THREAD_TAG_CALLOUT);

    /*
     * A wakeup with THREAD_INTERRUPTED indicates that
     * we should terminate.
     */
    if (wres == THREAD_INTERRUPTED) {
        thread_terminate(self);

        /* NOTREACHED */
        panic("thread_terminate() returned?");
    }

    (void)disable_ints_and_lock();

    thread_sched_call(self, group->sched_call);

    while (group->pending_count > 0) {
        thread_call_t call;
        thread_call_func_t func;
        thread_call_param_t param0, param1;

        call = TC(dequeue_head(&group->pending_queue));
        group->pending_count--;

        func = call->tc_call.func;
        param0 = call->tc_call.param0;
        param1 = call->tc_call.param1;

        call->tc_call.queue = NULL;

        _internal_call_release(call);

        /*
         * Can only do wakeups for thread calls whose storage
         * we control.
         */
        if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
            canwait = TRUE;
            call->tc_refs++; /* Delay free until we're done */
        } else
            canwait = FALSE;

        enable_ints_and_unlock();

        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
            VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

        (*func)(param0, param1);

        if (get_preemption_level() != 0) {
            int pl = get_preemption_level();
            panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
                  pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
        }

        (void)thread_funnel_set(self->funnel_lock, FALSE); /* XXX */

        (void)disable_ints_and_lock();

        if (canwait) {
            /* Frees if so desired */
            thread_call_finish(call);
        }
    }

    thread_sched_call(self, NULL);
    group->active_count--;

    if (group_isparallel(group)) {
        /*
         * For new style of thread group, thread always blocks.
         * If we have more than the target number of threads,
         * and this is the first to block, and it isn't active
         * already, set a timer for deallocating a thread if we
         * continue to have a surplus.
         */
        group->idle_count++;

        if (group->idle_count == 1) {
            group->idle_timestamp = mach_absolute_time();
        }

        if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) &&
            ((group->active_count + group->idle_count) > group->target_thread_count)) {
            group->flags |= TCG_DEALLOC_ACTIVE;
            thread_call_start_deallocate_timer(group);
        }

        /* Wait for more work (or termination) */
        wres = wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTIBLE, 0);
        if (wres != THREAD_WAITING) {
            panic("kcall worker unable to assert wait?");
        }

        enable_ints_and_unlock();

        thread_block_parameter((thread_continue_t)thread_call_thread, group);
    } else {
        if (group->idle_count < group->target_thread_count) {
            group->idle_count++;

            wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0); /* Interrupted means to exit */

            enable_ints_and_unlock();

            thread_block_parameter((thread_continue_t)thread_call_thread, group);
            /* NOTREACHED */
        }
    }

    enable_ints_and_unlock();

    thread_terminate(self);
    /* NOTREACHED */
}

/*
 * thread_call_daemon: walk list of groups, allocating
 * threads if appropriate (as determined by
 * thread_call_group_should_add_thread()).
 */
static void
thread_call_daemon_continue(__unused void *arg)
{
    int i;
    kern_return_t kr;
    thread_call_group_t group;

    (void)disable_ints_and_lock();

    /* Starting at zero happens to be high-priority first. */
    for (i = 0; i < THREAD_CALL_GROUP_COUNT; i++) {
        group = &thread_call_groups[i];
        while (thread_call_group_should_add_thread(group)) {
            group->active_count++;

            enable_ints_and_unlock();

            kr = thread_call_thread_create(group);
            if (kr != KERN_SUCCESS) {
                /*
                 * On failure, just pause for a moment and give up.
                 * We can try again later.
                 */
                delay(10000); /* 10 ms */
                (void)disable_ints_and_lock();
                goto out;
            }

            (void)disable_ints_and_lock();
        }
    }

out:
    thread_call_daemon_awake = FALSE;
    wait_queue_assert_wait(&daemon_wqueue, NO_EVENT, THREAD_UNINT, 0);

    enable_ints_and_unlock();

    thread_block_parameter((thread_continue_t)thread_call_daemon_continue, NULL);
    /* NOTREACHED */
}

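/*
 * thread_call_daemon:
 *
 * Daemon bootstrap: marks itself VM-privileged (presumably so worker
 * creation can make progress under memory pressure) and then enters
 * thread_call_daemon_continue() for good.
 */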
static void
thread_call_daemon(
    __unused void *arg)
{
    thread_t self = current_thread();

    self->options |= TH_OPT_VMPRIV;
    vm_page_free_reserve(2); /* XXX */

    thread_call_daemon_continue(NULL);
    /* NOTREACHED */
}

/*
 * Schedule timer to deallocate a worker thread if we have a surplus
 * of threads (in excess of the group's target) and at least one thread
 * is idle the whole time.
 */
static void
thread_call_start_deallocate_timer(
    thread_call_group_t group)
{
    uint64_t deadline;
    boolean_t onqueue;

    assert(group->idle_count > 0);

    group->flags |= TCG_DEALLOC_ACTIVE;
    deadline = group->idle_timestamp + thread_call_dealloc_interval_abs;
    onqueue = timer_call_enter(&group->dealloc_timer, deadline, 0);

    if (onqueue) {
        panic("Deallocate timer already active?");
    }
}

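/*
 * thread_call_delayed_timer:
 *
 * Timer callback for a group's delayed_timer.  Moves every expired
 * entry from the delayed queue to the pending queue, then re-arms the
 * timer for the next deadline, if any.
 */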
void
thread_call_delayed_timer(
    timer_call_param_t p0,
    __unused timer_call_param_t p1)
{
    thread_call_t call;
    thread_call_group_t group = p0;
    uint64_t timestamp;

    thread_call_lock_spin();

    timestamp = mach_absolute_time();

    call = TC(queue_first(&group->delayed_queue));

    while (!queue_end(&group->delayed_queue, qe(call))) {
        if (call->tc_call.deadline <= timestamp) {
            _pending_call_enqueue(call, group);
        }
        else
            break;

        call = TC(queue_first(&group->delayed_queue));
    }

    if (!queue_end(&group->delayed_queue, qe(call)))
        _set_delayed_call_timer(call, group);

    thread_call_unlock();
}

/*
 * Timer callback to tell a thread to terminate if
 * we have an excess of threads and at least one has been
 * idle for a long time.
 */
static void
thread_call_dealloc_timer(
    timer_call_param_t p0,
    __unused timer_call_param_t p1)
{
    thread_call_group_t group = (thread_call_group_t)p0;
    uint64_t now;
    kern_return_t res;
    boolean_t terminated = FALSE;

    thread_call_lock_spin();

    now = mach_absolute_time();
    if (group->idle_count > 0) {
        if (now > group->idle_timestamp + thread_call_dealloc_interval_abs) {
            terminated = TRUE;
            group->idle_count--;
            res = wait_queue_wakeup_one(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTED, -1);
            if (res != KERN_SUCCESS) {
                panic("Unable to wake up idle thread for termination?");
            }
        }
    }

    /*
     * If we still have an excess of threads, schedule another
     * invocation of this function.
     */
    if (group->idle_count > 0 && (group->idle_count + group->active_count > group->target_thread_count)) {
        /*
         * If we killed someone just now, push out the
         * next deadline.
         */
        if (terminated) {
            group->idle_timestamp = now;
        }

        thread_call_start_deallocate_timer(group);
    } else {
        group->flags &= ~TCG_DEALLOC_ACTIVE;
    }

    thread_call_unlock();
}

/*
 * Wait for all requested invocations of a thread call prior to now
 * to finish.  Can only be invoked on thread calls whose storage we manage.
 * Just waits for the finish count to catch up to the submit count we find
 * at the beginning of our wait.
 */
static void
thread_call_wait_locked(thread_call_t call)
{
    uint64_t submit_count;
    wait_result_t res;

    assert(call->tc_flags & THREAD_CALL_ALLOC);

    submit_count = call->tc_submit_count;

    while (call->tc_finish_count < submit_count) {
        call->tc_flags |= THREAD_CALL_WAIT;

        res = assert_wait(call, THREAD_UNINT);
        if (res != THREAD_WAITING) {
            panic("Unable to assert wait?");
        }

        thread_call_unlock();
        (void) spllo();

        res = thread_block(NULL);
        if (res != THREAD_AWAKENED) {
            panic("Awoken with %d?", res);
        }

        (void) splsched();
        thread_call_lock_spin();
    }
}

/*
 * Determine whether a thread call is either on a queue or
 * currently being executed.
 */
boolean_t
thread_call_isactive(thread_call_t call)
{
    boolean_t active;

    disable_ints_and_lock();
    active = (call->tc_submit_count > call->tc_finish_count);
    enable_ints_and_unlock();

    return active;
}