/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/debug.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/sched_amp_common.h>

#include <sys/kdebug.h>

#if __AMP__

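/*
 * sched_amp: scheduler policy for asymmetric multiprocessing (AMP)
 * systems with distinct efficiency (E) and performance (P) core
 * clusters, each managed as its own processor set (ecore_set and
 * pcore_set below).
 */
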
static thread_t
sched_amp_steal_thread(processor_set_t pset);

static void
sched_amp_thread_update_scan(sched_update_scan_context_t scan_context);

static boolean_t
sched_amp_processor_enqueue(processor_t processor, thread_t thread,
    sched_options_t options);

static boolean_t
sched_amp_processor_queue_remove(processor_t processor, thread_t thread);

static ast_t
sched_amp_processor_csw_check(processor_t processor);

static boolean_t
sched_amp_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_amp_runq_count(processor_t processor);

static boolean_t
sched_amp_processor_queue_empty(processor_t processor);

static uint64_t
sched_amp_runq_stats_count_sum(processor_t processor);

static int
sched_amp_processor_bound_count(processor_t processor);

static void
sched_amp_pset_init(processor_set_t pset);

static void
sched_amp_processor_init(processor_t processor);

static thread_t
sched_amp_choose_thread(processor_t processor, int priority, ast_t reason);

static void
sched_amp_processor_queue_shutdown(processor_t processor);

static sched_mode_t
sched_amp_initial_thread_sched_mode(task_t parent_task);

static processor_t
sched_amp_choose_processor(processor_set_t pset, processor_t processor, thread_t thread);

static bool
sched_amp_thread_avoid_processor(processor_t processor, thread_t thread);

static bool
sched_amp_thread_should_yield(processor_t processor, thread_t thread);

static void
sched_amp_thread_group_recommendation_change(struct thread_group *tg, cluster_type_t new_recommendation);

const struct sched_dispatch_table sched_amp_dispatch = {
	.sched_name = "amp",
	.init = sched_amp_init,
	.timebase_init = sched_timeshare_timebase_init,
	.processor_init = sched_amp_processor_init,
	.pset_init = sched_amp_pset_init,
	.maintenance_continuation = sched_timeshare_maintenance_continue,
	.choose_thread = sched_amp_choose_thread,
	.steal_thread_enabled = sched_amp_steal_thread_enabled,
	.steal_thread = sched_amp_steal_thread,
	.compute_timeshare_priority = sched_compute_timeshare_priority,
	.choose_node = sched_amp_choose_node,
	.choose_processor = sched_amp_choose_processor,
	.processor_enqueue = sched_amp_processor_enqueue,
	.processor_queue_shutdown = sched_amp_processor_queue_shutdown,
	.processor_queue_remove = sched_amp_processor_queue_remove,
	.processor_queue_empty = sched_amp_processor_queue_empty,
	.priority_is_urgent = priority_is_urgent,
	.processor_csw_check = sched_amp_processor_csw_check,
	.processor_queue_has_priority = sched_amp_processor_queue_has_priority,
	.initial_quantum_size = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode = sched_amp_initial_thread_sched_mode,
	.can_update_priority = can_update_priority,
	.update_priority = update_priority,
	.lightweight_update_priority = lightweight_update_priority,
	.quantum_expire = sched_default_quantum_expire,
	.processor_runq_count = sched_amp_runq_count,
	.processor_runq_stats_count_sum = sched_amp_runq_stats_count_sum,
	.processor_bound_count = sched_amp_processor_bound_count,
	.thread_update_scan = sched_amp_thread_update_scan,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
	.avoid_processor_enabled = TRUE,
	.thread_avoid_processor = sched_amp_thread_avoid_processor,
	.processor_balance = sched_amp_balance,

	.rt_runq = sched_amp_rt_runq,
	.rt_init = sched_amp_rt_init,
	.rt_queue_shutdown = sched_amp_rt_queue_shutdown,
	.rt_runq_scan = sched_amp_rt_runq_scan,
	.rt_runq_count_sum = sched_amp_rt_runq_count_sum,

	.qos_max_parallelism = sched_amp_qos_max_parallelism,
	.check_spill = sched_amp_check_spill,
	.ipi_policy = sched_amp_ipi_policy,
	.thread_should_yield = sched_amp_thread_should_yield,
	.run_count_incr = sched_run_incr,
	.run_count_decr = sched_run_decr,
	.update_thread_bucket = sched_update_thread_bucket,
	.pset_made_schedulable = sched_pset_made_schedulable,
	.thread_group_recommendation_change = sched_amp_thread_group_recommendation_change,
};

extern processor_set_t ecore_set;
extern processor_set_t pcore_set;

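/*
 * Each processor services two run queues: the shared per-pset run queue
 * for unbound threads, and its own run queue for threads bound to that
 * specific processor.
 */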
__attribute__((always_inline))
static inline run_queue_t
amp_main_runq(processor_t processor)
{
	return &processor->processor_set->pset_runq;
}

__attribute__((always_inline))
static inline run_queue_t
amp_bound_runq(processor_t processor)
{
	return &processor->runq;
}

__attribute__((always_inline))
static inline run_queue_t
amp_runq_for_thread(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL) {
		return amp_main_runq(processor);
	} else {
		assert(thread->bound_processor == processor);
		return amp_bound_runq(processor);
	}
}

static sched_mode_t
sched_amp_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task) {
		return TH_MODE_FIXED;
	} else {
		return TH_MODE_TIMESHARE;
	}
}

static void
sched_amp_processor_init(processor_t processor)
{
	run_queue_init(&processor->runq);
}

static void
sched_amp_pset_init(processor_set_t pset)
{
	run_queue_init(&pset->pset_runq);
}

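/*
 * Pick the highest-priority thread from this processor's bound or pset
 * run queue, or return THREAD_NULL when a pending spill to the P-cores
 * offers a higher priority than anything runnable locally.
 */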
static thread_t
sched_amp_choose_thread(
	processor_t processor,
	int priority,
	__unused ast_t reason)
{
	processor_set_t pset = processor->processor_set;
	bool spill_pending = false;
	int spill_pri = -1;

	if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) {
		spill_pending = true;
		spill_pri = pcore_set->pset_runq.highq;
	}

	run_queue_t main_runq = amp_main_runq(processor);
	run_queue_t bound_runq = amp_bound_runq(processor);
	run_queue_t chosen_runq;

	if ((bound_runq->highq < priority) &&
	    (main_runq->highq < priority) &&
	    (spill_pri < priority)) {
		return THREAD_NULL;
	}

	if ((spill_pri > bound_runq->highq) &&
	    (spill_pri > main_runq->highq)) {
		/*
		 * There is a higher priority thread on the P-core runq,
		 * so returning THREAD_NULL here will cause thread_select()
		 * to call sched_amp_steal_thread() to try to get it.
		 */
		return THREAD_NULL;
	}

	if (bound_runq->highq >= main_runq->highq) {
		chosen_runq = bound_runq;
	} else {
		chosen_runq = main_runq;
	}

	return run_queue_dequeue(chosen_runq, SCHED_HEADQ);
}

static boolean_t
sched_amp_processor_enqueue(
	processor_t processor,
	thread_t thread,
	sched_options_t options)
{
	run_queue_t rq = amp_runq_for_thread(processor, thread);
	boolean_t result;

	result = run_queue_enqueue(rq, thread, options);
	thread->runq = processor;

	return result;
}

static boolean_t
sched_amp_processor_queue_empty(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	bool spill_pending = bit_test(pset->pending_spill_cpu_mask, processor->cpu_id);

	return (amp_main_runq(processor)->count == 0) &&
	       (amp_bound_runq(processor)->count == 0) &&
	       !spill_pending;
}

static bool
sched_amp_thread_should_yield(processor_t processor, thread_t thread)
{
	if (!sched_amp_processor_queue_empty(processor) || (rt_runq_count(processor->processor_set) > 0)) {
		return true;
	}

	if ((processor->processor_set->pset_cluster_type == PSET_AMP_E) && (recommended_pset_type(thread) == PSET_AMP_P)) {
		return pcore_set->pset_runq.count > 0;
	}

	return false;
}

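/*
 * Decide whether the running thread should be preempted, factoring in a
 * pending spill from the P-core run queue on E-core processors.
 */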
static ast_t
sched_amp_processor_csw_check(processor_t processor)
{
	boolean_t has_higher;
	int pri;

	run_queue_t main_runq = amp_main_runq(processor);
	run_queue_t bound_runq = amp_bound_runq(processor);

	assert(processor->active_thread != NULL);

	processor_set_t pset = processor->processor_set;
	bool spill_pending = false;
	int spill_pri = -1;
	int spill_urgency = 0;

	if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) {
		spill_pending = true;
		spill_pri = pcore_set->pset_runq.highq;
		spill_urgency = pcore_set->pset_runq.urgency;
	}

	pri = MAX(main_runq->highq, bound_runq->highq);
	if (spill_pending) {
		pri = MAX(pri, spill_pri);
	}

	if (processor->first_timeslice) {
		has_higher = (pri > processor->current_pri);
	} else {
		has_higher = (pri >= processor->current_pri);
	}

	if (has_higher) {
		if (main_runq->urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		if (bound_runq->urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		if (spill_urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		return AST_PREEMPT;
	}

	return AST_NONE;
}

static boolean_t
sched_amp_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte)
{
	bool spill_pending = false;
	int spill_pri = -1;
	processor_set_t pset = processor->processor_set;

	if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) {
		spill_pending = true;
		spill_pri = pcore_set->pset_runq.highq;
	}

	run_queue_t main_runq = amp_main_runq(processor);
	run_queue_t bound_runq = amp_bound_runq(processor);

	int qpri = MAX(main_runq->highq, bound_runq->highq);
	if (spill_pending) {
		qpri = MAX(qpri, spill_pri);
	}

	if (gte) {
		return qpri >= priority;
	} else {
		return qpri > priority;
	}
}

static int
sched_amp_runq_count(processor_t processor)
{
	return amp_main_runq(processor)->count + amp_bound_runq(processor)->count;
}

static uint64_t
sched_amp_runq_stats_count_sum(processor_t processor)
{
	uint64_t bound_sum = amp_bound_runq(processor)->runq_stats.count_sum;

	if (processor->cpu_id == processor->processor_set->cpu_set_low) {
		return bound_sum + amp_main_runq(processor)->runq_stats.count_sum;
	} else {
		return bound_sum;
	}
}

static int
sched_amp_processor_bound_count(processor_t processor)
{
	return amp_bound_runq(processor)->count;
}

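/*
 * Drain the pset run queue when the last active, recommended processor
 * in the pset goes down, letting thread_setrun() re-home each thread.
 */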
static void
sched_amp_processor_queue_shutdown(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	run_queue_t rq = amp_main_runq(processor);
	thread_t thread;
	queue_head_t tqueue;

	/* We only need to migrate threads if this is the last active or last recommended processor in the pset */
	if ((pset->online_processor_count > 0) && pset_is_recommended(pset)) {
		pset_unlock(pset);
		return;
	}

	queue_init(&tqueue);

	while (rq->count > 0) {
		thread = run_queue_dequeue(rq, SCHED_HEADQ);
		enqueue_tail(&tqueue, &thread->runq_links);
	}

	pset_unlock(pset);

	qe_foreach_element_safe(thread, &tqueue, runq_links) {
		remqueue(&thread->runq_links);

		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}

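/*
 * Remove a thread from whichever run queue it occupies, if it is still
 * queued there; returns TRUE if the thread was found and removed.
 */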
static boolean_t
sched_amp_processor_queue_remove(
	processor_t processor,
	thread_t thread)
{
	run_queue_t rq;
	processor_set_t pset = processor->processor_set;

	pset_lock(pset);

	rq = amp_runq_for_thread(processor, thread);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		run_queue_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return processor != PROCESSOR_NULL;
}

/*
 * sched_amp_steal_thread()
 *
 * Try to steal a runnable thread from the P-core run queue on behalf of
 * an E-core processor.  Called with the pset lock held; consumes any
 * pending spill for this CPU, and steals only while the P-cluster's load
 * average stays above the steal threshold, no recommended P-core is
 * idle, and the P run queue holds more threads than there are
 * dispatching cores.
 */
thread_t
sched_amp_steal_thread(processor_set_t pset)
{
	thread_t thread = THREAD_NULL;
	processor_set_t nset = pset;

	assert(pset->pset_cluster_type != PSET_AMP_P);

	processor_t processor = current_processor();
	assert(pset == processor->processor_set);

	bool spill_pending = bit_test(pset->pending_spill_cpu_mask, processor->cpu_id);
	bit_clear(pset->pending_spill_cpu_mask, processor->cpu_id);

	nset = pcore_set;

	assert(nset != pset);

	if (sched_get_pset_load_average(nset, 0) >= sched_amp_steal_threshold(nset, spill_pending)) {
		pset_unlock(pset);

		pset = nset;

		pset_lock(pset);

		/* Allow steal if load average still OK, no idle cores, and more threads on runq than active cores DISPATCHING */
		if ((sched_get_pset_load_average(pset, 0) >= sched_amp_steal_threshold(pset, spill_pending)) &&
		    (pset->pset_runq.count > bit_count(pset->cpu_state_map[PROCESSOR_DISPATCHING])) &&
		    (bit_count(pset->recommended_bitmask & pset->cpu_state_map[PROCESSOR_IDLE]) == 0)) {
			thread = run_queue_dequeue(&pset->pset_runq, SCHED_HEADQ);
			KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_AMP_STEAL) | DBG_FUNC_NONE, spill_pending, 0, 0, 0);
			sched_update_pset_load_average(pset, 0);
		}
	}

	pset_unlock(pset);
	return thread;
}
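/*
 * Periodic scan that refreshes the priorities of runnable threads: walk
 * each processor's bound run queue and idle thread, then every pset run
 * queue on every node.
 */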
static void
sched_amp_thread_update_scan(sched_update_scan_context_t scan_context)
{
	boolean_t restart_needed = FALSE;
	processor_t processor = processor_list;
	processor_set_t pset;
	thread_t thread;
	spl_t s;

	/*
	 * We update the threads associated with each processor (bound and idle threads)
	 * and then update the threads in each pset runqueue.
	 */

	do {
		do {
			pset = processor->processor_set;

			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(amp_bound_runq(processor), scan_context);

			pset_unlock(pset);
			splx(s);

			if (restart_needed) {
				break;
			}

			thread = processor->idle_thread;
			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
				if (thread_update_add_thread(thread) == FALSE) {
					restart_needed = TRUE;
					break;
				}
			}
		} while ((processor = processor->processor_list) != NULL);

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();
	} while (restart_needed);

	pset_node_t node = &pset_node0;
	pset = node->psets;

	do {
		do {
			restart_needed = FALSE;
			while (pset != NULL) {
				s = splsched();
				pset_lock(pset);

				restart_needed = runq_scan(&pset->pset_runq, scan_context);

				pset_unlock(pset);
				splx(s);

				if (restart_needed) {
					break;
				}

				pset = pset->pset_list;
			}

			if (restart_needed) {
				break;
			}
		} while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();
	} while (restart_needed);
}

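/*
 * Returns true when the thread should run on the P-cluster, given
 * cluster availability, cluster recommendations, and the thread's own
 * recommended pset type.
 */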
static bool
pcores_recommended(thread_t thread)
{
	if (pcore_set->online_processor_count == 0) {
		/* No pcores available */
		return false;
	}

	if (!pset_is_recommended(ecore_set)) {
		/* No E cores recommended, must use P cores */
		return true;
	}

	if (recommended_pset_type(thread) == PSET_AMP_E) {
		return false;
	}

	return pset_is_recommended(pcore_set);
}

/* Return true if this thread should not continue running on this processor */
static bool
sched_amp_thread_avoid_processor(processor_t processor, thread_t thread)
{
	if (processor->processor_set->pset_cluster_type == PSET_AMP_E) {
		if (pcores_recommended(thread)) {
			return true;
		}
	} else if (processor->processor_set->pset_cluster_type == PSET_AMP_P) {
		if (!pcores_recommended(thread)) {
			return true;
		}
	}

	return false;
}

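/*
 * Steer the thread to the E- or P-cluster per pcores_recommended(),
 * then defer to the generic choose_processor() within that pset.
 */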
static processor_t
sched_amp_choose_processor(processor_set_t pset, processor_t processor, thread_t thread)
{
	/* Bound threads don't call this function */
	assert(thread->bound_processor == PROCESSOR_NULL);

	processor_set_t nset = pset;
	bool choose_pcores;

again:
	choose_pcores = pcores_recommended(thread);

	if (choose_pcores && (pset->pset_cluster_type != PSET_AMP_P)) {
		nset = pcore_set;
		assert(nset != NULL);
	} else if (!choose_pcores && (pset->pset_cluster_type != PSET_AMP_E)) {
		nset = ecore_set;
		assert(nset != NULL);
	}

	if (nset != pset) {
		pset_unlock(pset);
		pset_lock(nset);
	}

	/* Now that the chosen pset is definitely locked, make sure nothing important has changed */
	if (!pset_is_recommended(nset)) {
		pset = nset;
		goto again;
	}

	return choose_processor(nset, processor, thread);
}

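/*
 * Apply a new cluster recommendation to a thread group; if the group is
 * newly recommended for the P-cores, kick its threads off the E-cores.
 */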
void
sched_amp_thread_group_recommendation_change(struct thread_group *tg, cluster_type_t new_recommendation)
{
	thread_group_update_recommendation(tg, new_recommendation);

	if (new_recommendation != CLUSTER_TYPE_P) {
		return;
	}

	sched_amp_bounce_thread_group_from_ecores(ecore_set, tg);
}

#if DEVELOPMENT || DEBUG

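/*
 * sysctl back-ends for testing: query or set the current thread's
 * cluster binding and the current task's cluster placement hint.
 */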
extern char sysctl_get_bound_cluster_type(void);
char
sysctl_get_bound_cluster_type(void)
{
	thread_t self = current_thread();

	if (self->sched_flags & TH_SFLAG_ECORE_ONLY) {
		return 'E';
	} else if (self->sched_flags & TH_SFLAG_PCORE_ONLY) {
		return 'P';
	}

	return '0';
}

extern void sysctl_thread_bind_cluster_type(char cluster_type);
void
sysctl_thread_bind_cluster_type(char cluster_type)
{
	thread_bind_cluster_type(current_thread(), cluster_type, false);
}

extern char sysctl_get_task_cluster_type(void);
char
sysctl_get_task_cluster_type(void)
{
	thread_t thread = current_thread();
	task_t task = thread->task;

	if (task->pset_hint == ecore_set) {
		return 'E';
	} else if (task->pset_hint == pcore_set) {
		return 'P';
	}

	return '0';
}

extern void sysctl_task_set_cluster_type(char cluster_type);
void
sysctl_task_set_cluster_type(char cluster_type)
{
	thread_t thread = current_thread();
	task_t task = thread->task;

	switch (cluster_type) {
	case 'e':
	case 'E':
		task->pset_hint = ecore_set;
		break;
	case 'p':
	case 'P':
		task->pset_hint = pcore_set;
		break;
	default:
		break;
	}

	thread_block(THREAD_CONTINUE_NULL);
}
#endif /* DEVELOPMENT || DEBUG */

#endif /* __AMP__ */