/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach/mach_types.h>

#include <kern/sched.h>
#include <kern/sched_prim.h>

static boolean_t
	sched_traditional_use_pset_runqueue = FALSE;

static void
sched_traditional_init(void);

static thread_t
sched_traditional_steal_thread(processor_set_t pset);

static thread_t
sched_traditional_steal_processor_thread(processor_t processor);

static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context);

static void
sched_traditional_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_traditional_processor_enqueue(processor_t processor, thread_t thread, integer_t options);

static boolean_t
sched_traditional_processor_queue_remove(processor_t processor, thread_t thread);

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor);

static ast_t
sched_traditional_processor_csw_check(processor_t processor);

static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_traditional_processor_runq_count(processor_t processor);

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor);

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor);

static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor);

static int
sched_traditional_processor_bound_count(processor_t processor);

extern void
sched_traditional_quantum_expire(thread_t thread);

static void
sched_traditional_processor_init(processor_t processor);

static void
sched_traditional_pset_init(processor_set_t pset);

static void
sched_traditional_with_pset_runqueue_init(void);

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task);

static thread_t
sched_traditional_choose_thread(processor_t processor, int priority, ast_t reason);

/* Choose a thread from a processor's priority-based runq */
static thread_t sched_traditional_choose_thread_from_runq(processor_t processor, run_queue_t runq, int priority);

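/*
 * Dispatch table wiring the traditional timeshare policy into the
 * scheduler's indirection layer, using one run queue per processor.
 */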
const struct sched_dispatch_table sched_traditional_dispatch = {
	.sched_name = "traditional",
	.init = sched_traditional_init,
	.timebase_init = sched_timeshare_timebase_init,
	.processor_init = sched_traditional_processor_init,
	.pset_init = sched_traditional_pset_init,
	.maintenance_continuation = sched_timeshare_maintenance_continue,
	.choose_thread = sched_traditional_choose_thread,
	.steal_thread_enabled = TRUE,
	.steal_thread = sched_traditional_steal_thread,
	.compute_timeshare_priority = sched_compute_timeshare_priority,
	.choose_processor = choose_processor,
	.processor_enqueue = sched_traditional_processor_enqueue,
	.processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
	.processor_queue_remove = sched_traditional_processor_queue_remove,
	.processor_queue_empty = sched_traditional_processor_queue_empty,
	.priority_is_urgent = priority_is_urgent,
	.processor_csw_check = sched_traditional_processor_csw_check,
	.processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
	.initial_quantum_size = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
	.can_update_priority = can_update_priority,
	.update_priority = update_priority,
	.lightweight_update_priority = lightweight_update_priority,
	.quantum_expire = sched_default_quantum_expire,
	.processor_runq_count = sched_traditional_processor_runq_count,
	.processor_runq_stats_count_sum = sched_traditional_processor_runq_stats_count_sum,
	.processor_bound_count = sched_traditional_processor_bound_count,
	.thread_update_scan = sched_traditional_thread_update_scan,
	.direct_dispatch_to_idle_processors = TRUE,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
};
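/*
 * Variant of the traditional policy that shares a single run queue
 * among all processors in a pset; selected by its init routine
 * setting sched_traditional_use_pset_runqueue.  Note the differing
 * queue-empty, runq-stats, and direct-dispatch entries below.
 */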
const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
	.sched_name = "traditional_with_pset_runqueue",
	.init = sched_traditional_with_pset_runqueue_init,
	.timebase_init = sched_timeshare_timebase_init,
	.processor_init = sched_traditional_processor_init,
	.pset_init = sched_traditional_pset_init,
	.maintenance_continuation = sched_timeshare_maintenance_continue,
	.choose_thread = sched_traditional_choose_thread,
	.steal_thread_enabled = TRUE,
	.steal_thread = sched_traditional_steal_thread,
	.compute_timeshare_priority = sched_compute_timeshare_priority,
	.choose_processor = choose_processor,
	.processor_enqueue = sched_traditional_processor_enqueue,
	.processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
	.processor_queue_remove = sched_traditional_processor_queue_remove,
	.processor_queue_empty = sched_traditional_with_pset_runqueue_processor_queue_empty,
	.priority_is_urgent = priority_is_urgent,
	.processor_csw_check = sched_traditional_processor_csw_check,
	.processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
	.initial_quantum_size = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
	.can_update_priority = can_update_priority,
	.update_priority = update_priority,
	.lightweight_update_priority = lightweight_update_priority,
	.quantum_expire = sched_default_quantum_expire,
	.processor_runq_count = sched_traditional_processor_runq_count,
	.processor_runq_stats_count_sum = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
	.processor_bound_count = sched_traditional_processor_bound_count,
	.thread_update_scan = sched_traditional_thread_update_scan,
	.direct_dispatch_to_idle_processors = FALSE,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
};

static void
sched_traditional_init(void)
{
	sched_timeshare_init();
}

static void
sched_traditional_with_pset_runqueue_init(void)
{
	sched_timeshare_init();
	sched_traditional_use_pset_runqueue = TRUE;
}

static void
sched_traditional_processor_init(processor_t processor)
{
	if (!sched_traditional_use_pset_runqueue) {
		run_queue_init(&processor->runq);
	}
	processor->runq_bound_count = 0;
}

static void
sched_traditional_pset_init(processor_set_t pset)
{
	if (sched_traditional_use_pset_runqueue) {
		run_queue_init(&pset->pset_runq);
	}
	pset->pset_runq_bound_count = 0;
}

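/*
 * All run-queue accesses below go through this helper, so the
 * per-processor and shared per-pset variants differ only in which
 * queue it returns.
 */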
__attribute__((always_inline))
static inline run_queue_t runq_for_processor(processor_t processor)
{
	if (sched_traditional_use_pset_runqueue)
		return &processor->processor_set->pset_runq;
	else
		return &processor->runq;
}

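/*
 * Bound-thread accounting: track how many enqueued threads are bound
 * to a specific processor, both per processor and (when the shared
 * queue is in use) per pset.  The queue-empty check for the shared
 * queue uses these counts to ignore threads this processor may not run.
 */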
__attribute__((always_inline))
static inline void runq_consider_incr_bound_count(processor_t processor,
						  thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL)
		return;

	assert(thread->bound_processor == processor);

	if (sched_traditional_use_pset_runqueue)
		processor->processor_set->pset_runq_bound_count++;

	processor->runq_bound_count++;
}

__attribute__((always_inline))
static inline void runq_consider_decr_bound_count(processor_t processor,
						  thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL)
		return;

	assert(thread->bound_processor == processor);

	if (sched_traditional_use_pset_runqueue)
		processor->processor_set->pset_runq_bound_count--;

	processor->runq_bound_count--;
}

static thread_t
sched_traditional_choose_thread(
				processor_t     processor,
				int             priority,
				__unused ast_t  reason)
{
	thread_t thread;

	thread = sched_traditional_choose_thread_from_runq(processor, runq_for_processor(processor), priority);
	if (thread != THREAD_NULL) {
		runq_consider_decr_bound_count(processor, thread);
	}

	return thread;
}

/*
 * sched_traditional_choose_thread_from_runq:
 *
 * Locate a thread to execute from the processor run queue
 * and return it.  Only choose a thread with greater or equal
 * priority.
 *
 * Associated pset must be locked.  Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_choose_thread_from_runq(
					  processor_t     processor,
					  run_queue_t     rq,
					  int             priority)
{
	queue_t queue = rq->queues + rq->highq;
	int pri = rq->highq;
	int count = rq->count;
	thread_t thread;

	while (count > 0 && pri >= priority) {
		thread = (thread_t)(uintptr_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			if (thread->bound_processor == PROCESSOR_NULL ||
			    thread->bound_processor == processor) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
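				/*
				 * If this priority level is now empty, clear
				 * its bitmap bit (stored as MAXPRI - pri) and
				 * recompute the highest occupied level.  The
				 * IDLEPRI bit is never cleared, so ffsbit()
				 * always finds a set bit.
				 */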
				if (queue_empty(queue)) {
					if (pri != IDLEPRI)
						clrbit(MAXPRI - pri, rq->bitmap);
					rq->highq = MAXPRI - ffsbit(rq->bitmap);
				}

				return (thread);
			}
			count--;

			thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
		}

		queue--; pri--;
	}

	return (THREAD_NULL);
}

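/*
 * Threads in the kernel task start out at fixed priority; all other
 * threads are subject to timeshare priority decay.
 */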
static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task)
		return TH_MODE_FIXED;
	else
		return TH_MODE_TIMESHARE;
}

/*
 * sched_traditional_processor_enqueue:
 *
 * Enqueue thread on a processor run queue.  Thread must be locked,
 * and not already be on a run queue.
 *
 * Returns TRUE if a preemption is indicated based on the state
 * of the run queue.
 *
 * The run queue must be locked (see thread_run_queue_remove()
 * for more info).
 */
static boolean_t
sched_traditional_processor_enqueue(processor_t   processor,
				    thread_t      thread,
				    integer_t     options)
{
	run_queue_t rq = runq_for_processor(processor);
	boolean_t result;

	result = run_queue_enqueue(rq, thread, options);
	thread->runq = processor;
	runq_consider_incr_bound_count(processor, thread);

	return (result);
}

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor)
{
	return runq_for_processor(processor)->count == 0;
}

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	int count = runq_for_processor(processor)->count;

	/*
	 * The pset runq contains the count of all runnable threads
	 * for all processors in the pset. However, for threads that
	 * are bound to another processor, the current "processor"
	 * is not eligible to execute the thread. So we only
	 * include bound threads that are bound to the current
	 * "processor". This allows the processor to idle when the
	 * count of eligible threads drops to 0, even if there's
	 * a runnable thread bound to a different processor in the
	 * shared runq.
	 */

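	/*
	 * Example: 3 threads in the shared runq (1 unbound, 1 bound to
	 * another processor, 1 bound here) gives 3 - 2 + 1 = 2 threads
	 * this processor is eligible to run.
	 */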
	count -= pset->pset_runq_bound_count;
	count += processor->runq_bound_count;

	return count == 0;
}

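/*
 * Check whether the running thread should be preempted: during its
 * first quantum only a strictly higher-priority runnable thread
 * triggers preemption; afterwards an equal-priority thread does too,
 * so threads at the same level round-robin.  Urgent runnable threads
 * escalate the AST to AST_URGENT.
 */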
static ast_t
sched_traditional_processor_csw_check(processor_t processor)
{
	run_queue_t runq;
	boolean_t has_higher;

	assert(processor->active_thread != NULL);

	runq = runq_for_processor(processor);

	if (processor->first_timeslice) {
		has_higher = (runq->highq > processor->current_pri);
	} else {
		has_higher = (runq->highq >= processor->current_pri);
	}

	if (has_higher) {
		if (runq->urgency > 0)
			return (AST_PREEMPT | AST_URGENT);

		return AST_PREEMPT;
	}

	return AST_NONE;
}

static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor,
					       int priority,
					       boolean_t gte)
{
	if (gte)
		return runq_for_processor(processor)->highq >= priority;
	else
		return runq_for_processor(processor)->highq > priority;
}

static int
sched_traditional_processor_runq_count(processor_t processor)
{
	return runq_for_processor(processor)->count;
}

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor)
{
	return runq_for_processor(processor)->runq_stats.count_sum;
}

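/*
 * With a shared pset runq every processor would report the same
 * stats, so attribute them only to the lowest-numbered CPU in the
 * pset and report zero for the rest to avoid double counting.
 */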
static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
{
	if (processor->cpu_id == processor->processor_set->cpu_set_low)
		return runq_for_processor(processor)->runq_stats.count_sum;
	else
		return 0ULL;
}

static int
sched_traditional_processor_bound_count(processor_t processor)
{
	return processor->runq_bound_count;
}

/*
 * sched_traditional_processor_queue_shutdown:
 *
 * Shutdown a processor run queue by
 * re-dispatching non-bound threads.
 *
 * Associated pset must be locked, and is
 * returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	run_queue_t rq = runq_for_processor(processor);
	queue_t queue = rq->queues + rq->highq;
	int pri = rq->highq;
	int count = rq->count;
	thread_t next, thread;
	queue_head_t tqueue;

	queue_init(&tqueue);

	while (count > 0) {
		thread = (thread_t)(uintptr_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			next = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);

			if (thread->bound_processor == PROCESSOR_NULL) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					if (pri != IDLEPRI)
						clrbit(MAXPRI - pri, rq->bitmap);
					rq->highq = MAXPRI - ffsbit(rq->bitmap);
				}

				enqueue_tail(&tqueue, (queue_entry_t)thread);
			}
			count--;

			thread = next;
		}

		queue--; pri--;
	}

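	/*
	 * With the pset unlocked, hand each collected thread back to
	 * thread_setrun() so the scheduler can place it on a processor
	 * that is still running.
	 */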
	pset_unlock(pset);

	while ((thread = (thread_t)(uintptr_t)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}

#if 0
static void
run_queue_check(
		run_queue_t     rq,
		thread_t        thread)
{
	queue_t q;
	queue_entry_t qe;

	if (rq != thread->runq)
		panic("run_queue_check: thread runq");

	if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
		panic("run_queue_check: thread sched_pri");

	q = &rq->queues[thread->sched_pri];
	qe = queue_first(q);
	while (!queue_end(q, qe)) {
		if (qe == (queue_entry_t)thread)
			return;

		qe = queue_next(qe);
	}

	panic("run_queue_check: end");
}
#endif /* 0 */

/*
 * Locks the runqueue itself.
 *
 * Thread must be locked.
 */
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor,
					 thread_t thread)
{
	processor_set_t pset;
	run_queue_t rq;

	pset = processor->processor_set;
	pset_lock(pset);

	rq = runq_for_processor(processor);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		runq_consider_decr_bound_count(processor, thread);
		run_queue_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return (processor != PROCESSOR_NULL);
}

/*
 * sched_traditional_steal_processor_thread:
 *
 * Locate a thread to steal from the processor and
 * return it.
 *
 * Associated pset must be locked.  Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_steal_processor_thread(processor_t processor)
{
	run_queue_t rq = runq_for_processor(processor);
	queue_t queue = rq->queues + rq->highq;
	int pri = rq->highq;
	int count = rq->count;
	thread_t thread;

	while (count > 0) {
		thread = (thread_t)(uintptr_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			if (thread->bound_processor == PROCESSOR_NULL) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					if (pri != IDLEPRI)
						clrbit(MAXPRI - pri, rq->bitmap);
					rq->highq = MAXPRI - ffsbit(rq->bitmap);
				}

				return (thread);
			}
			count--;

			thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
		}

		queue--; pri--;
	}

	return (THREAD_NULL);
}

/*
 * Locate and steal a thread, beginning
 * at the pset.
 *
 * The pset must be locked, and is returned
 * unlocked.
 *
 * Returns the stolen thread, or THREAD_NULL on
 * failure.
 */
static thread_t
sched_traditional_steal_thread(processor_set_t pset)
{
	processor_set_t nset, cset = pset;
	processor_t processor;
	thread_t thread;

	do {
		processor = (processor_t)(uintptr_t)queue_first(&cset->active_queue);
		while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
			if (runq_for_processor(processor)->count > 0) {
				thread = sched_traditional_steal_processor_thread(processor);
				if (thread != THREAD_NULL) {
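					/*
					 * Rotate the victim processor to the
					 * tail of the active queue so repeated
					 * steals spread across processors.
					 */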
					remqueue((queue_entry_t)processor);
					enqueue_tail(&cset->active_queue, (queue_entry_t)processor);

					pset_unlock(cset);

					return (thread);
				}
			}

			processor = (processor_t)(uintptr_t)queue_next((queue_entry_t)processor);
		}

		nset = next_pset(cset);

		if (nset != pset) {
			pset_unlock(cset);

			cset = nset;
			pset_lock(cset);
		}
	} while (nset != pset);

	pset_unlock(cset);

	return (THREAD_NULL);
}

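/*
 * Periodic scan for threads whose timeshare priority needs
 * recomputation: candidates are collected under the pset lock, then
 * fixed up with the lock dropped; if the collection buffer fills,
 * the scan restarts.
 */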
static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
{
	boolean_t restart_needed = FALSE;
	processor_t processor = processor_list;
	processor_set_t pset;
	thread_t thread;
	spl_t s;

	do {
		do {
			/*
			 * TODO: in sched_traditional_use_pset_runqueue case,
			 * avoid scanning the same runq multiple times
			 */
			pset = processor->processor_set;

			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(runq_for_processor(processor), scan_context);

			pset_unlock(pset);
			splx(s);

			if (restart_needed)
				break;

			thread = processor->idle_thread;
			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
				if (thread_update_add_thread(thread) == FALSE) {
					restart_needed = TRUE;
					break;
				}
			}
		} while ((processor = processor->processor_list) != NULL);

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();
	} while (restart_needed);
}