/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

static void
sched_proto_init(void);

static void
sched_proto_timebase_init(void);

static void
sched_proto_processor_init(processor_t processor);

static void
sched_proto_pset_init(processor_set_t pset);

static void
sched_proto_maintenance_continuation(void);

static thread_t
sched_proto_choose_thread(processor_t processor,
    int priority,
    ast_t reason);

static thread_t
sched_proto_steal_thread(processor_set_t pset);

static int
sched_proto_compute_priority(thread_t thread);

static processor_t
sched_proto_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread);

static boolean_t
sched_proto_processor_enqueue(processor_t processor,
    thread_t thread,
    integer_t options);

static void
sched_proto_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_proto_processor_queue_remove(processor_t processor,
    thread_t thread);

static boolean_t
sched_proto_processor_queue_empty(processor_t processor);

static boolean_t
sched_proto_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte);

static boolean_t
sched_proto_priority_is_urgent(int priority);

static ast_t
sched_proto_processor_csw_check(processor_t processor);

static uint32_t
sched_proto_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_proto_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_proto_can_update_priority(thread_t thread);

static void
sched_proto_update_priority(thread_t thread);

static void
sched_proto_lightweight_update_priority(thread_t thread);

static void
sched_proto_quantum_expire(thread_t thread);

static int
sched_proto_processor_runq_count(processor_t processor);

static uint64_t
sched_proto_processor_runq_stats_count_sum(processor_t processor);

static int
sched_proto_processor_bound_count(processor_t processor);

static void
sched_proto_thread_update_scan(sched_update_scan_context_t scan_context);

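/*
 * Dispatch table for the prototype scheduler; the scheduler framework calls
 * these entry points through the SCHED() indirection.
 */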
const struct sched_dispatch_table sched_proto_dispatch = {
	.sched_name = "proto",
	.init = sched_proto_init,
	.timebase_init = sched_proto_timebase_init,
	.processor_init = sched_proto_processor_init,
	.pset_init = sched_proto_pset_init,
	.maintenance_continuation = sched_proto_maintenance_continuation,
	.choose_thread = sched_proto_choose_thread,
	.steal_thread_enabled = FALSE,
	.steal_thread = sched_proto_steal_thread,
	.compute_timeshare_priority = sched_proto_compute_priority,
	.choose_processor = sched_proto_choose_processor,
	.processor_enqueue = sched_proto_processor_enqueue,
	.processor_queue_shutdown = sched_proto_processor_queue_shutdown,
	.processor_queue_remove = sched_proto_processor_queue_remove,
	.processor_queue_empty = sched_proto_processor_queue_empty,
	.priority_is_urgent = sched_proto_priority_is_urgent,
	.processor_csw_check = sched_proto_processor_csw_check,
	.processor_queue_has_priority = sched_proto_processor_queue_has_priority,
	.initial_quantum_size = sched_proto_initial_quantum_size,
	.initial_thread_sched_mode = sched_proto_initial_thread_sched_mode,
	.can_update_priority = sched_proto_can_update_priority,
	.update_priority = sched_proto_update_priority,
	.lightweight_update_priority = sched_proto_lightweight_update_priority,
	.quantum_expire = sched_proto_quantum_expire,
	.processor_runq_count = sched_proto_processor_runq_count,
	.processor_runq_stats_count_sum = sched_proto_processor_runq_stats_count_sum,
	.processor_bound_count = sched_proto_processor_bound_count,
	.thread_update_scan = sched_proto_thread_update_scan,
	.direct_dispatch_to_idle_processors = TRUE,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
	.avoid_processor_enabled = FALSE,
	.thread_avoid_processor = NULL,
	.processor_balance = sched_SMT_balance,

	.rt_runq = sched_rtglobal_runq,
	.rt_init = sched_rtglobal_init,
	.rt_queue_shutdown = sched_rtglobal_queue_shutdown,
	.rt_runq_scan = sched_rtglobal_runq_scan,
	.rt_runq_count_sum = sched_rtglobal_runq_count_sum,

	.qos_max_parallelism = sched_qos_max_parallelism,
	.check_spill = sched_check_spill,
	.ipi_policy = sched_ipi_policy,
	.thread_should_yield = sched_thread_should_yield,
};

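/*
 * A single run queue is shared by all processors and protected by
 * global_runq_lock; there is no per-processor run queue state.
 */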
static struct run_queue *global_runq;
static struct run_queue global_runq_storage;

#define GLOBAL_RUNQ	((processor_t)-2)
decl_simple_lock_data(static, global_runq_lock);

extern int max_unsafe_quanta;

static uint32_t proto_quantum_us;
static uint32_t proto_quantum;

static uint32_t runqueue_generation;

static processor_t proto_processor;

static uint64_t sched_proto_tick_deadline;
static uint32_t sched_proto_tick;

static void
sched_proto_init(void)
{
	proto_quantum_us = 10 * 1000;

	printf("standard proto timeslicing quantum is %d us\n", proto_quantum_us);

	simple_lock_init(&global_runq_lock, 0);
	global_runq = &global_runq_storage;
	run_queue_init(global_runq);
	runqueue_generation = 0;

	proto_processor = master_processor;
}

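/*
 * Convert the timeslicing quantum to absolute-time units and derive the
 * depression, timeshare computation and unsafe-computation intervals from it.
 */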
static void
sched_proto_timebase_init(void)
{
	uint64_t abstime;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
	    proto_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	proto_quantum = (uint32_t)abstime;

	thread_depress_time = 1 * proto_quantum;
	default_timeshare_computation = proto_quantum / 2;
	default_timeshare_constraint = proto_quantum;

	max_unsafe_computation = max_unsafe_quanta * proto_quantum;
	sched_safe_duration = 2 * max_unsafe_quanta * proto_quantum;
}

static void
sched_proto_processor_init(processor_t processor __unused)
{
	/* No per-processor state */
}

static void
sched_proto_pset_init(processor_set_t pset __unused)
{
}

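/*
 * Periodic maintenance, driven off sched_one_second_interval: advance the
 * scheduler tick, rotate the designated proto_processor every 8 ticks, and
 * recompute the load averages.
 */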
static void
sched_proto_maintenance_continuation(void)
{
	uint64_t abstime = mach_absolute_time();

	sched_proto_tick++;

	/* Every 8 seconds, switch to another processor */
	if ((sched_proto_tick & 0x7) == 0) {
		processor_t new_processor;

		new_processor = proto_processor->processor_list;
		if (new_processor == PROCESSOR_NULL)
			proto_processor = master_processor;
		else
			proto_processor = new_processor;
	}

	/*
	 * Compute various averages.
	 */
	compute_averages(1);

	if (sched_proto_tick_deadline == 0)
		sched_proto_tick_deadline = abstime;

	clock_deadline_for_periodic_event(sched_one_second_interval, abstime,
	    &sched_proto_tick_deadline);

	assert_wait_deadline((event_t)sched_proto_maintenance_continuation, THREAD_UNINT, sched_proto_tick_deadline);
	thread_block((thread_continue_t)sched_proto_maintenance_continuation);
	/*NOTREACHED*/
}

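/*
 * Pick the highest-priority runnable thread from the global run queue that
 * can run on this processor, subject to the generation scheme described below.
 */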
static thread_t
sched_proto_choose_thread(processor_t processor,
    int priority,
    ast_t reason __unused)
{
	run_queue_t rq = global_runq;
	queue_t queue;
	int pri, count;
	thread_t thread;

	simple_lock(&global_runq_lock);

	queue = rq->queues + rq->highq;
	pri = rq->highq;
	count = rq->count;

	/*
	 * Since we don't depress priorities, a high priority thread
	 * may get selected over and over again. Put a runqueue
	 * generation number in the thread structure so that we
	 * can ensure that we've cycled through all runnable tasks
	 * before coming back to a high priority thread. This isn't
	 * perfect, especially if the number of runnable threads always
	 * stays high, but is a workable approximation.
	 */

	while (count > 0 && pri >= priority) {
		thread = (thread_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			if ((thread->bound_processor == PROCESSOR_NULL ||
			    thread->bound_processor == processor) &&
			    runqueue_generation != thread->runqueue_generation) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				thread->runqueue_generation = runqueue_generation;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				rq->count--;
				if (queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				simple_unlock(&global_runq_lock);
				return (thread);
			}
			count--;

			thread = (thread_t)queue_next((queue_entry_t)thread);
		}

		queue--; pri--;
	}

	runqueue_generation++;

	simple_unlock(&global_runq_lock);
	return (THREAD_NULL);
}

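/* Thread stealing is disabled for this scheduler; just drop the pset lock. */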
static thread_t
sched_proto_steal_thread(processor_set_t pset)
{
	pset_unlock(pset);

	return (THREAD_NULL);
}

static int
sched_proto_compute_priority(thread_t thread)
{
	return thread->base_pri;
}

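/* Every thread is directed to whichever processor is currently proto_processor. */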
static processor_t
sched_proto_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread __unused)
{
	processor = proto_processor;

	/*
	 * Check that the correct processor set is
	 * returned locked.
	 */
	if (pset != processor->processor_set) {
		pset_unlock(pset);

		pset = processor->processor_set;
		pset_lock(pset);
	}

	return (processor);
}

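/* Enqueue onto the shared global run queue; the processor argument is ignored. */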
static boolean_t
sched_proto_processor_enqueue(
	processor_t processor __unused,
	thread_t thread,
	integer_t options)
{
	run_queue_t rq = global_runq;
	boolean_t result;

	simple_lock(&global_runq_lock);
	result = run_queue_enqueue(rq, thread, options);
	thread->runq = GLOBAL_RUNQ;
	simple_unlock(&global_runq_lock);

	return (result);
}

static void
sched_proto_processor_queue_shutdown(
	processor_t processor)
{
	/* With a global runqueue, just stop choosing this processor */
	(void)processor;
}

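/*
 * Remove a thread from the global run queue if it is still queued there;
 * returns TRUE when the thread was found and removed.
 */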
static boolean_t
sched_proto_processor_queue_remove(
	processor_t processor,
	thread_t thread)
{
	void *rqlock;
	run_queue_t rq;

	rqlock = &global_runq_lock;
	rq = global_runq;

	simple_lock(rqlock);
	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		remqueue((queue_entry_t)thread);
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		rq->count--;
		if (SCHED(priority_is_urgent)(thread->sched_pri)) {
			rq->urgency--; assert(rq->urgency >= 0);
		}

		if (queue_empty(rq->queues + thread->sched_pri)) {
			/* update run queue status */
			bitmap_clear(rq->bitmap, thread->sched_pri);
			rq->highq = bitmap_first(rq->bitmap, NRQS);
		}

		thread->runq = PROCESSOR_NULL;
	}
	else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	simple_unlock(rqlock);

	return (processor != PROCESSOR_NULL);
}

static boolean_t
sched_proto_processor_queue_empty(processor_t processor __unused)
{
	boolean_t result;

	result = (global_runq->count == 0);

	return result;
}

static boolean_t
sched_proto_processor_queue_has_priority(processor_t processor __unused,
    int priority,
    boolean_t gte)
{
	boolean_t result;

	simple_lock(&global_runq_lock);

	if (gte)
		result = global_runq->highq >= priority;
	else
		result = global_runq->highq > priority;

	simple_unlock(&global_runq_lock);

	return result;
}

/* Implement sched_preempt_pri in code */
static boolean_t
sched_proto_priority_is_urgent(int priority)
{
	if (priority <= BASEPRI_FOREGROUND)
		return FALSE;

	if (priority < MINPRI_KERNEL)
		return TRUE;

	if (priority >= BASEPRI_PREEMPT)
		return TRUE;

	return FALSE;
}

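/*
 * Ask for a preemption AST whenever the global run queue is non-empty
 * (urgently if it holds urgent threads), or when this processor is no longer
 * the designated proto_processor.
 */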
static ast_t
sched_proto_processor_csw_check(processor_t processor)
{
	run_queue_t runq;
	int count, urgency;

	runq = global_runq;
	count = runq->count;
	urgency = runq->urgency;

	if (count > 0) {
		if (urgency > 0)
			return (AST_PREEMPT | AST_URGENT);

		return AST_PREEMPT;
	}

	if (proto_processor != processor)
		return AST_PREEMPT;

	return AST_NONE;
}

static uint32_t
sched_proto_initial_quantum_size(thread_t thread __unused)
{
	return proto_quantum;
}

static sched_mode_t
sched_proto_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task)
		return TH_MODE_FIXED;
	else
		return TH_MODE_TIMESHARE;
}

static boolean_t
sched_proto_can_update_priority(thread_t thread __unused)
{
	return FALSE;
}

static void
sched_proto_update_priority(thread_t thread __unused)
{
}

static void
sched_proto_lightweight_update_priority(thread_t thread __unused)
{
}

static void
sched_proto_quantum_expire(thread_t thread __unused)
{
}

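/*
 * Run-queue counts and statistics for the shared queue are reported against
 * the master processor only.
 */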
static int
sched_proto_processor_runq_count(processor_t processor)
{
	if (master_processor == processor) {
		return global_runq->count;
	} else {
		return 0;
	}
}

static uint64_t
sched_proto_processor_runq_stats_count_sum(processor_t processor)
{
	if (master_processor == processor) {
		return global_runq->runq_stats.count_sum;
	} else {
		return 0ULL;
	}
}

static int
sched_proto_processor_bound_count(__unused processor_t processor)
{
	return 0;
}

static void
sched_proto_thread_update_scan(__unused sched_update_scan_context_t scan_context)
{
}