/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

static void
sched_proto_init(void);

static void
sched_proto_timebase_init(void);

static void
sched_proto_processor_init(processor_t processor);

static void
sched_proto_pset_init(processor_set_t pset);

static void
sched_proto_maintenance_continuation(void);

static thread_t
sched_proto_choose_thread(processor_t processor,
    int priority,
    ast_t reason);

static thread_t
sched_proto_steal_thread(processor_set_t pset);

static void
sched_proto_compute_priority(thread_t thread,
    boolean_t override_depress);

static processor_t
sched_proto_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread);

static boolean_t
sched_proto_processor_enqueue(
    processor_t processor,
    thread_t thread,
    integer_t options);

static void
sched_proto_processor_queue_shutdown(
    processor_t processor);

static boolean_t
sched_proto_processor_queue_remove(
    processor_t processor,
    thread_t thread);

static boolean_t
sched_proto_processor_queue_empty(processor_t processor);

static boolean_t
sched_proto_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte);

static boolean_t
sched_proto_priority_is_urgent(int priority);

static ast_t
sched_proto_processor_csw_check(processor_t processor);

static uint32_t
sched_proto_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_proto_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_proto_can_update_priority(thread_t thread);

static void
sched_proto_update_priority(thread_t thread);

static void
sched_proto_lightweight_update_priority(thread_t thread);

static void
sched_proto_quantum_expire(thread_t thread);

static boolean_t
sched_proto_should_current_thread_rechoose_processor(processor_t processor);

static int
sched_proto_processor_runq_count(processor_t processor);

static uint64_t
sched_proto_processor_runq_stats_count_sum(processor_t processor);

static int
sched_proto_processor_bound_count(processor_t processor);

static void
sched_proto_thread_update_scan(void);

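/*
 * Dispatch table wiring the prototype scheduler into the sched_prim
 * framework.  Note that the fair-share operations are borrowed from
 * the traditional scheduler rather than reimplemented here.
 */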
const struct sched_dispatch_table sched_proto_dispatch = {
    .init = sched_proto_init,
    .timebase_init = sched_proto_timebase_init,
    .processor_init = sched_proto_processor_init,
    .pset_init = sched_proto_pset_init,
    .maintenance_continuation = sched_proto_maintenance_continuation,
    .choose_thread = sched_proto_choose_thread,
    .steal_thread = sched_proto_steal_thread,
    .compute_priority = sched_proto_compute_priority,
    .choose_processor = sched_proto_choose_processor,
    .processor_enqueue = sched_proto_processor_enqueue,
    .processor_queue_shutdown = sched_proto_processor_queue_shutdown,
    .processor_queue_remove = sched_proto_processor_queue_remove,
    .processor_queue_empty = sched_proto_processor_queue_empty,
    .priority_is_urgent = sched_proto_priority_is_urgent,
    .processor_csw_check = sched_proto_processor_csw_check,
    .processor_queue_has_priority = sched_proto_processor_queue_has_priority,
    .initial_quantum_size = sched_proto_initial_quantum_size,
    .initial_thread_sched_mode = sched_proto_initial_thread_sched_mode,
    .can_update_priority = sched_proto_can_update_priority,
    .update_priority = sched_proto_update_priority,
    .lightweight_update_priority = sched_proto_lightweight_update_priority,
    .quantum_expire = sched_proto_quantum_expire,
    .should_current_thread_rechoose_processor = sched_proto_should_current_thread_rechoose_processor,
    .processor_runq_count = sched_proto_processor_runq_count,
    .processor_runq_stats_count_sum = sched_proto_processor_runq_stats_count_sum,
    .fairshare_init = sched_traditional_fairshare_init,
    .fairshare_runq_count = sched_traditional_fairshare_runq_count,
    .fairshare_runq_stats_count_sum = sched_traditional_fairshare_runq_stats_count_sum,
    .fairshare_enqueue = sched_traditional_fairshare_enqueue,
    .fairshare_dequeue = sched_traditional_fairshare_dequeue,
    .fairshare_queue_remove = sched_traditional_fairshare_queue_remove,
    .processor_bound_count = sched_proto_processor_bound_count,
    .thread_update_scan = sched_proto_thread_update_scan,
    .direct_dispatch_to_idle_processors = TRUE,
};

static struct run_queue *global_runq;
static struct run_queue global_runq_storage;

#define GLOBAL_RUNQ ((processor_t)-2)
decl_simple_lock_data(static,global_runq_lock);

extern int max_unsafe_quanta;

static uint32_t proto_quantum_us;
static uint32_t proto_quantum;

static uint32_t runqueue_generation;

static processor_t proto_processor;

static uint64_t sched_proto_tick_deadline;
static uint32_t sched_proto_tick;

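/*
 * The prototype scheduler keeps a single run queue shared by all
 * processors, protected by global_runq_lock.  While a thread sits on
 * this queue, thread->runq holds the GLOBAL_RUNQ sentinel rather than
 * a real processor pointer.  proto_processor designates the one
 * processor currently chosen to run threads; the maintenance
 * continuation rotates that designation periodically.
 */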
static void
sched_proto_init(void)
{
    proto_quantum_us = 10*1000;

    printf("standard proto timeslicing quantum is %d us\n", proto_quantum_us);

    simple_lock_init(&global_runq_lock, 0);
    global_runq = &global_runq_storage;
    run_queue_init(global_runq);
    runqueue_generation = 0;

    proto_processor = master_processor;
}

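/*
 * Convert the quantum from microseconds to absolute-time units once
 * the timebase is known.  For illustration only: on a hypothetical
 * timebase where one abstime unit equals 1 ns, the 10,000 us quantum
 * would become 10,000,000 abstime units, which fits in 32 bits as
 * asserted below.
 */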
static void
sched_proto_timebase_init(void)
{
    uint64_t abstime;

    /* standard timeslicing quantum */
    clock_interval_to_absolutetime_interval(
        proto_quantum_us, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    proto_quantum = (uint32_t)abstime;

    thread_depress_time = 1 * proto_quantum;
    default_timeshare_computation = proto_quantum / 2;
    default_timeshare_constraint = proto_quantum;

    max_unsafe_computation = max_unsafe_quanta * proto_quantum;
    sched_safe_duration = 2 * max_unsafe_quanta * proto_quantum;
}

static void
sched_proto_processor_init(processor_t processor __unused)
{
    /* No per-processor state */
}

static void
sched_proto_pset_init(processor_set_t pset __unused)
{
}

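/*
 * Once-per-second maintenance: advance the tick, rotate the designated
 * proto_processor through the processor list on every eighth tick,
 * update the load averages, and re-arm as a continuation for the next
 * tick.
 */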
static void
sched_proto_maintenance_continuation(void)
{
    uint64_t abstime = mach_absolute_time();

    sched_proto_tick++;

    /* Every 8 seconds, switch to another processor */
    if ((sched_proto_tick & 0x7) == 0) {
        processor_t new_processor;

        new_processor = proto_processor->processor_list;
        if (new_processor == PROCESSOR_NULL)
            proto_processor = master_processor;
        else
            proto_processor = new_processor;
    }

    /*
     * Compute various averages.
     */
    compute_averages(1);

    if (sched_proto_tick_deadline == 0)
        sched_proto_tick_deadline = abstime;

    clock_deadline_for_periodic_event(sched_one_second_interval, abstime,
        &sched_proto_tick_deadline);

    assert_wait_deadline((event_t)sched_proto_maintenance_continuation, THREAD_UNINT, sched_proto_tick_deadline);
    thread_block((thread_continue_t)sched_proto_maintenance_continuation);
    /*NOTREACHED*/
}

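/*
 * Select the highest-priority runnable thread from the global run
 * queue that is allowed to run on this processor, at or above the
 * given priority floor.
 */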
static thread_t
sched_proto_choose_thread(processor_t processor,
    int priority,
    ast_t reason __unused)
{
    run_queue_t rq = global_runq;
    queue_t queue;
    int pri, count;
    thread_t thread;

    simple_lock(&global_runq_lock);

    queue = rq->queues + rq->highq;
    pri = rq->highq;
    count = rq->count;

    /*
     * Since we don't depress priorities, a high priority thread
     * may get selected over and over again.  Put a runqueue
     * generation number in the thread structure so that we
     * can ensure that we've cycled through all runnable tasks
     * before coming back to a high priority thread.  This isn't
     * perfect, especially if the number of runnable threads always
     * stays high, but it is a workable approximation.
     */

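    /*
     * Illustration: with threads A (priority 80) and B (priority 40)
     * both runnable, A is chosen first and stamped with the current
     * generation.  On the next pass A is skipped (same generation)
     * and B is chosen.  Only once every selectable thread carries the
     * current generation does the generation advance, making A
     * eligible again.
     */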
    while (count > 0 && pri >= priority) {
        thread = (thread_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            if ((thread->bound_processor == PROCESSOR_NULL ||
                 thread->bound_processor == processor) &&
                runqueue_generation != thread->runqueue_generation) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                thread->runqueue_generation = runqueue_generation;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                rq->count--;
                if (queue_empty(queue)) {
                    if (pri != IDLEPRI)
                        clrbit(MAXPRI - pri, rq->bitmap);
                    rq->highq = MAXPRI - ffsbit(rq->bitmap);
                }

                simple_unlock(&global_runq_lock);
                return (thread);
            }
            count--;

            thread = (thread_t)queue_next((queue_entry_t)thread);
        }

        queue--; pri--;
    }

    runqueue_generation++;

    simple_unlock(&global_runq_lock);
    return (THREAD_NULL);
}

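/*
 * Work stealing is meaningless with a single global run queue, so this
 * simply drops the pset lock held by the caller and reports that there
 * is nothing to steal.
 */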
static thread_t
sched_proto_steal_thread(processor_set_t pset)
{
    pset_unlock(pset);

    return (THREAD_NULL);
}

static void
sched_proto_compute_priority(thread_t thread,
    boolean_t override_depress __unused)
{
    set_sched_pri(thread, thread->priority);
}

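/*
 * All threads are funneled to the single designated proto_processor,
 * regardless of where they last ran.  The caller's pset lock is
 * exchanged for the lock of the pset that owns proto_processor.
 */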
static processor_t
sched_proto_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread __unused)
{
    processor = proto_processor;

    /*
     * Check that the correct processor set is
     * returned locked.
     */
    if (pset != processor->processor_set) {
        pset_unlock(pset);

        pset = processor->processor_set;
        pset_lock(pset);
    }

    return (processor);
}

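/*
 * Enqueue onto the global run queue under the global lock, then tag
 * the thread with the GLOBAL_RUNQ sentinel so later queue operations
 * can recognize where it lives.
 */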
static boolean_t
sched_proto_processor_enqueue(
    processor_t processor __unused,
    thread_t thread,
    integer_t options)
{
    run_queue_t rq = global_runq;
    boolean_t result;

    simple_lock(&global_runq_lock);
    result = run_queue_enqueue(rq, thread, options);
    thread->runq = GLOBAL_RUNQ;
    simple_unlock(&global_runq_lock);

    return (result);
}

static void
sched_proto_processor_queue_shutdown(
    processor_t processor)
{
    /* With a global runqueue, just stop choosing this processor */
    (void)processor;
}

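/*
 * Remove the thread from the global run queue if it is still there.
 * Returns TRUE if the thread was dequeued, FALSE if it had already
 * left the queue (for example, by being selected to run) before the
 * lock was taken.
 */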
static boolean_t
sched_proto_processor_queue_remove(
    processor_t processor,
    thread_t thread)
{
    void *rqlock;
    run_queue_t rq;

    rqlock = &global_runq_lock;
    rq = global_runq;

    simple_lock(rqlock);
    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        remqueue((queue_entry_t)thread);
        SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
        rq->count--;
        if (SCHED(priority_is_urgent)(thread->sched_pri)) {
            rq->urgency--; assert(rq->urgency >= 0);
        }

        if (queue_empty(rq->queues + thread->sched_pri)) {
            /* update run queue status */
            if (thread->sched_pri != IDLEPRI)
                clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
            rq->highq = MAXPRI - ffsbit(rq->bitmap);
        }

        thread->runq = PROCESSOR_NULL;
    }
    else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    simple_unlock(rqlock);

    return (processor != PROCESSOR_NULL);
}

467
468static boolean_t
469sched_proto_processor_queue_empty(processor_t processor __unused)
470{
471 boolean_t result;
472
473 result = (global_runq->count == 0);
474
475 return result;
476}
477
478static boolean_t
479sched_proto_processor_queue_has_priority(processor_t processor __unused,
480 int priority,
481 boolean_t gte)
482{
483 boolean_t result;
484
485 simple_lock(&global_runq_lock);
486
487 if (gte)
488 result = global_runq->highq >= priority;
489 else
490 result = global_runq->highq >= priority;
491
492 simple_unlock(&global_runq_lock);
493
494 return result;
495}

/* Implement sched_preempt_pri in code */
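/*
 * A priority is "urgent" (preempts immediately) when it lies above the
 * foreground band but below the kernel band, or at or above
 * BASEPRI_PREEMPT; ordinary kernel-band priorities in between are not
 * treated as urgent.
 */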
static boolean_t
sched_proto_priority_is_urgent(int priority)
{
    if (priority <= BASEPRI_FOREGROUND)
        return FALSE;

    if (priority < MINPRI_KERNEL)
        return TRUE;

    if (priority >= BASEPRI_PREEMPT)
        return TRUE;

    return FALSE;
}

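/*
 * Context-switch check: if any thread is waiting on the global run
 * queue, request preemption of the running thread, urgently when the
 * queue holds urgent threads.  The counts are read without taking the
 * lock, so the answer is only advisory.
 */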
static ast_t
sched_proto_processor_csw_check(processor_t processor __unused)
{
    run_queue_t runq;
    int count, urgency;

    runq = global_runq;
    count = runq->count;
    urgency = runq->urgency;

    if (count > 0) {
        if (urgency > 0)
            return (AST_PREEMPT | AST_URGENT);

        return AST_PREEMPT;
    }

    return AST_NONE;
}

static uint32_t
sched_proto_initial_quantum_size(thread_t thread __unused)
{
    return proto_quantum;
}

static sched_mode_t
sched_proto_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task)
        return TH_MODE_FIXED;
    else
        return TH_MODE_TIMESHARE;
}

static boolean_t
sched_proto_can_update_priority(thread_t thread __unused)
{
    return FALSE;
}

static void
sched_proto_update_priority(thread_t thread __unused)
{
}

static void
sched_proto_lightweight_update_priority(thread_t thread __unused)
{
}

static void
sched_proto_quantum_expire(thread_t thread __unused)
{
}

571
572static boolean_t
573sched_proto_should_current_thread_rechoose_processor(processor_t processor)
574{
575 return (proto_processor != processor);
576}
577
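/*
 * Run-queue statistics are reported against the master processor only,
 * which keeps the single shared queue from being counted once per CPU
 * when totals are summed across processors.
 */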
static int
sched_proto_processor_runq_count(processor_t processor)
{
    if (master_processor == processor) {
        return global_runq->count;
    } else {
        return 0;
    }
}

static uint64_t
sched_proto_processor_runq_stats_count_sum(processor_t processor)
{
    if (master_processor == processor) {
        return global_runq->runq_stats.count_sum;
    } else {
        return 0ULL;
    }
}

static int
sched_proto_processor_bound_count(__unused processor_t processor)
{
    return 0;
}

static void
sched_proto_thread_update_scan(void)
{
}