/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>

#include <kern/timer_call.h>

#include <sys/kdebug.h>

static zone_t			thread_call_zone;

struct thread_call_group {
	queue_head_t		pending_queue;
	uint32_t		pending_count;

	queue_head_t		delayed_queue;

	timer_call_data_t	delayed_timer;

	struct wait_queue	idle_wqueue;
	struct wait_queue	daemon_wqueue;
	uint32_t		idle_count, active_count;
};

typedef struct thread_call_group	*thread_call_group_t;

static struct thread_call_group	thread_call_group0;

static boolean_t		thread_call_daemon_awake;

#define thread_call_thread_min	4

#define internal_call_count	768

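/*
 * Statically allocated callout entries, backing the func-based
 * interfaces (thread_call_func*), which do not take a caller-supplied
 * thread_call structure.
 */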
static thread_call_data_t	internal_call_storage[internal_call_count];
static queue_head_t		thread_call_internal_queue;

static __inline__ thread_call_t	_internal_call_allocate(void);

static __inline__ void		_internal_call_release(
					thread_call_t		call);

static __inline__ boolean_t	_pending_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group),
				_delayed_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group,
					uint64_t		deadline),
				_call_dequeue(
					thread_call_t		call,
					thread_call_group_t	group);

static __inline__ void		thread_call_wake(
					thread_call_group_t	group);

static __inline__ void		_set_delayed_call_timer(
					thread_call_t		call,
					thread_call_group_t	group);

static boolean_t		_remove_from_pending_queue(
					thread_call_func_t	func,
					thread_call_param_t	param0,
					boolean_t		remove_all),
				_remove_from_delayed_queue(
					thread_call_func_t	func,
					thread_call_param_t	param0,
					boolean_t		remove_all);

static void			thread_call_daemon(
					thread_call_group_t	group),
				thread_call_thread(
					thread_call_group_t	group);

extern void			thread_call_delayed_timer(
					timer_call_param_t	p0,
					timer_call_param_t	p1);

#define qe(x)		((queue_entry_t)(x))
#define TC(x)		((thread_call_t)(x))


lck_grp_t		thread_call_queues_lck_grp;
lck_grp_t		thread_call_lck_grp;
lck_attr_t		thread_call_lck_attr;
lck_grp_attr_t		thread_call_lck_grp_attr;

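/*
 * The thread call lock: on x86 this is declared as a mutex taken and
 * held in spin mode; other architectures declare a spinlock.
 */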
#if defined(__i386__) || defined(__x86_64__)
lck_mtx_t		thread_call_lock_data;
#else
lck_spin_t		thread_call_lock_data;
#endif

#define thread_call_lock_spin()			\
	lck_mtx_lock_spin_always(&thread_call_lock_data)

#define thread_call_unlock()			\
	lck_mtx_unlock_always(&thread_call_lock_data)


/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	kern_return_t		result;
	thread_t		thread;
	int			i;
	spl_t			s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif
	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	s = splsched();
	thread_call_lock_spin();

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
	wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[internal_call_count];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	thread_call_unlock();
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}

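/*
 *	thread_call_setup:
 *
 *	Initialize a caller-provided callout entry
 *	with the given function and argument.
 */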
void
thread_call_setup(
	thread_call_t		call,
	thread_call_func_t	func,
	thread_call_param_t	param0)
{
	call_entry_setup(call, func, param0);
}

/*
 *	_internal_call_allocate:
 *
 *	Allocate an internal callout entry.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(void)
{
	thread_call_t		call;

	if (queue_empty(&thread_call_internal_queue))
		panic("_internal_call_allocate");

	call = TC(dequeue_head(&thread_call_internal_queue));

	return (call);
}

/*
 *	_internal_call_release:
 *
 *	Release an internal callout entry which
 *	is no longer pending (or delayed).
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
	thread_call_t		call)
{
	if (call >= internal_call_storage &&
			call < &internal_call_storage[internal_call_count])
		enqueue_head(&thread_call_internal_queue, qe(call));
}

/*
 *	_pending_call_enqueue:
 *
 *	Place an entry at the end of the
 *	pending queue, to be executed soon.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_head_t		*old_queue;

	old_queue = call_entry_enqueue_tail(call, &group->pending_queue);

	group->pending_count++;

	return (old_queue != NULL);
}

/*
 *	_delayed_call_enqueue:
 *
 *	Place an entry on the delayed queue,
 *	after existing entries with an earlier
 *	(or identical) deadline.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group,
	uint64_t		deadline)
{
	queue_head_t		*old_queue;

	old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}

/*
 *	_call_dequeue:
 *
 *	Remove an entry from a queue.
 *
 *	Returns TRUE if the entry was on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_head_t		*old_queue;

	old_queue = call_entry_dequeue(call);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}

/*
 *	_set_delayed_call_timer:
 *
 *	Reset the timer so that it
 *	next expires when the entry is due.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call,
	thread_call_group_t	group)
{
	timer_call_enter(&group->delayed_timer, call->deadline, 0);
}

/*
 *	_remove_from_pending_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the pending queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->pending_queue));

	while (!queue_end(&group->pending_queue, qe(call))) {
		if (call->func == func &&
				call->param0 == param0) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}

/*
 *	_remove_from_delayed_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the delayed queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->func == func &&
				call->param0 == param0) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}

#ifndef	__LP64__

/*
 *	thread_call_func:
 *
 *	Enqueue a function callout.
 *
 *	Guarantees { function, argument }
 *	uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		unique_call)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	call = TC(queue_first(&group->pending_queue));

	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (call->func == func &&
				call->param0 == param) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func = func;
		call->param0 = param;
		call->param1 = NULL;

		_pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	thread_call_unlock();
	splx(s);
}

#endif	/* __LP64__ */

/*
 *	thread_call_func_delayed:
 *
 *	Enqueue a function callout to
 *	occur at the stated time.
 */
void
thread_call_func_delayed(
	thread_call_func_t	func,
	thread_call_param_t	param,
	uint64_t		deadline)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	call = _internal_call_allocate();
	call->func = func;
	call->param0 = param;
	call->param1 = 0;

	_delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	thread_call_unlock();
	splx(s);
}

/*
 *	thread_call_func_cancel:
 *
 *	Dequeue a function callout.
 *
 *	Removes one (or all) { function, argument }
 *	instance(s) from either (or both)
 *	the pending and the delayed queue,
 *	in that order.
 *
 *	Returns TRUE if any calls were cancelled.
 */
boolean_t
thread_call_func_cancel(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		cancel_all)
{
	boolean_t		result;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	if (cancel_all)
		result = _remove_from_pending_queue(func, param, cancel_all) |
				_remove_from_delayed_queue(func, param, cancel_all);
	else
		result = _remove_from_pending_queue(func, param, cancel_all) ||
				_remove_from_delayed_queue(func, param, cancel_all);

	thread_call_unlock();
	splx(s);

	return (result);
}

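/*
 * Illustrative sketch, not part of the original source: a client of the
 * func-based interface arms a callout one second out and later cancels
 * it.  my_timeout and my_ctx are hypothetical names supplied by the
 * client; clock_interval_to_deadline() converts the interval to an
 * absolute-time deadline.
 *
 *	uint64_t	deadline;
 *
 *	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
 *	thread_call_func_delayed(my_timeout, my_ctx, deadline);
 *	...
 *	thread_call_func_cancel(my_timeout, my_ctx, FALSE);
 */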
/*
 *	thread_call_allocate:
 *
 *	Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
	thread_call_func_t	func,
	thread_call_param_t	param0)
{
	thread_call_t		call = zalloc(thread_call_zone);

	call_entry_setup(call, func, param0);

	return (call);
}

/*
 *	thread_call_free:
 *
 *	Free a callout entry.
 */
boolean_t
thread_call_free(
	thread_call_t		call)
{
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	if (call->queue != NULL) {
		thread_call_unlock();
		splx(s);

		return (FALSE);
	}

	thread_call_unlock();
	splx(s);

	zfree(thread_call_zone, call);

	return (TRUE);
}

/*
 *	thread_call_enter:
 *
 *	Enqueue a callout entry to occur "soon".
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter(
	thread_call_t		call)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = 0;

	thread_call_unlock();
	splx(s);

	return (result);
}

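/*
 *	thread_call_enter1:
 *
 *	As thread_call_enter, but also sets
 *	the entry's second parameter.
 */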
boolean_t
thread_call_enter1(
	thread_call_t		call,
	thread_call_param_t	param1)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = param1;

	thread_call_unlock();
	splx(s);

	return (result);
}

/*
 *	thread_call_enter_delayed:
 *
 *	Enqueue a callout entry to occur
 *	at the stated time.
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter_delayed(
	thread_call_t		call,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = 0;

	thread_call_unlock();
	splx(s);

	return (result);
}

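/*
 *	thread_call_enter1_delayed:
 *
 *	As thread_call_enter_delayed, but also
 *	sets the entry's second parameter.
 */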
boolean_t
thread_call_enter1_delayed(
	thread_call_t		call,
	thread_call_param_t	param1,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = param1;

	thread_call_unlock();
	splx(s);

	return (result);
}

/*
 *	thread_call_cancel:
 *
 *	Dequeue a callout entry.
 *
 *	Returns TRUE if the call was
 *	on a queue.
 */
boolean_t
thread_call_cancel(
	thread_call_t		call)
{
	boolean_t		result;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	result = _call_dequeue(call, group);

	thread_call_unlock();
	splx(s);

	return (result);
}

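/*
 * Illustrative sketch, not part of the original source: the typical
 * lifetime of a preallocated callout.  my_func and my_ctx are
 * hypothetical names supplied by the client; the callout fires roughly
 * 100 ms out unless cancelled first, and the entry must be off all
 * queues before thread_call_free() will release it.
 *
 *	thread_call_t	call = thread_call_allocate(my_func, my_ctx);
 *	uint64_t	deadline;
 *
 *	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *	thread_call_enter_delayed(call, deadline);
 *	...
 *	if (!thread_call_cancel(call))
 *		...	(callout already fired or is executing)
 *	(void) thread_call_free(call);
 */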
#ifndef	__LP64__

/*
 *	thread_call_is_delayed:
 *
 *	Returns TRUE if the call is
 *	currently on a delayed queue.
 *
 *	Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
	thread_call_t		call,
	uint64_t		*deadline)
{
	boolean_t		result = FALSE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	if (call->queue == &group->delayed_queue) {
		if (deadline != NULL)
			*deadline = call->deadline;
		result = TRUE;
	}

	thread_call_unlock();
	splx(s);

	return (result);
}

#endif	/* __LP64__ */

/*
 *	thread_call_wake:
 *
 *	Wake a call thread to service
 *	pending call entries.  May wake
 *	the daemon thread in order to
 *	create additional call threads.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
thread_call_wake(
	thread_call_group_t	group)
{
	if (group->idle_count > 0 && wait_queue_wakeup_one(&group->idle_wqueue, NO_EVENT, THREAD_AWAKENED, -1) == KERN_SUCCESS) {
		group->idle_count--; group->active_count++;
	}
	else
	if (!thread_call_daemon_awake) {
		thread_call_daemon_awake = TRUE;
		wait_queue_wakeup_one(&group->daemon_wqueue, NO_EVENT, THREAD_AWAKENED, -1);
	}
}

/*
 *	sched_call_thread:
 *
 *	Call out invoked by the scheduler.
 */
static void
sched_call_thread(
	int			type,
	__unused thread_t	thread)
{
	thread_call_group_t	group = &thread_call_group0;

	thread_call_lock_spin();

	switch (type) {

	case SCHED_CALL_BLOCK:
		if (--group->active_count == 0 && group->pending_count > 0)
			thread_call_wake(group);
		break;

	case SCHED_CALL_UNBLOCK:
		group->active_count++;
		break;
	}

	thread_call_unlock();
}

/*
 *	thread_call_thread:
 *
 *	A call thread: drains the pending queue,
 *	invoking each callout unlocked and at low spl,
 *	then either parks on the idle wait queue or
 *	terminates if enough idle threads remain.
 */
static void
thread_call_thread(
	thread_call_group_t	group)
{
	thread_t		self = current_thread();

	(void) splsched();
	thread_call_lock_spin();

	thread_sched_call(self, sched_call_thread);

	while (group->pending_count > 0) {
		thread_call_t		call;
		thread_call_func_t	func;
		thread_call_param_t	param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->func;
		param0 = call->param0;
		param1 = call->param1;

		call->queue = NULL;

		_internal_call_release(call);

		thread_call_unlock();
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				func, param0, param1, 0, 0);

		(*func)(param0, param1);

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
					pl, func, param0, param1);
		}

		(void)thread_funnel_set(self->funnel_lock, FALSE);	/* XXX */

		(void) splsched();
		thread_call_lock_spin();
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group->idle_count < thread_call_thread_min) {
		group->idle_count++;

		wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0);

		thread_call_unlock();
		(void) spllo();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	}

	thread_call_unlock();
	(void) spllo();

	thread_terminate(self);
	/* NOTREACHED */
}

/*
 *	thread_call_daemon:
 *
 *	While there is pending work but no active
 *	call thread, spawn a new call thread; then
 *	sleep until woken again by thread_call_wake.
 */
static void
thread_call_daemon_continue(
	thread_call_group_t	group)
{
	kern_return_t		result;
	thread_t		thread;

	(void) splsched();
	thread_call_lock_spin();

	while (group->active_count == 0 && group->pending_count > 0) {
		group->active_count++;

		thread_call_unlock();
		(void) spllo();

		result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
		if (result != KERN_SUCCESS)
			panic("thread_call_daemon");

		thread_deallocate(thread);

		(void) splsched();
		thread_call_lock_spin();
	}

	thread_call_daemon_awake = FALSE;
	wait_queue_assert_wait(&group->daemon_wqueue, NO_EVENT, THREAD_UNINT, 0);

	thread_call_unlock();
	(void) spllo();

	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
	/* NOTREACHED */
}

static void
thread_call_daemon(
	thread_call_group_t	group)
{
	thread_t		self = current_thread();

	self->options |= TH_OPT_VMPRIV;
	vm_page_free_reserve(2);	/* XXX */

	thread_call_daemon_continue(group);
	/* NOTREACHED */
}

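/*
 *	thread_call_delayed_timer:
 *
 *	Timer expiration handler: moves entries
 *	whose deadline has passed from the delayed
 *	queue onto the pending queue, re-arms the
 *	timer for the next deadline, and wakes a
 *	call thread if new work arrived.
 */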
void
thread_call_delayed_timer(
	timer_call_param_t		p0,
	__unused timer_call_param_t	p1)
{
	thread_call_t		call;
	thread_call_group_t	group = p0;
	boolean_t		new_pending = FALSE;
	uint64_t		timestamp;

	thread_call_lock_spin();

	timestamp = mach_absolute_time();

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->deadline <= timestamp) {
			_pending_call_enqueue(call, group);
			new_pending = TRUE;
		}
		else
			break;

		call = TC(queue_first(&group->delayed_queue));
	}

	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);

	if (new_pending && group->active_count == 0)
		thread_call_wake(group);

	thread_call_unlock();
}