]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/sched_proto.c
xnu-3248.50.21.tar.gz
[apple/xnu.git] / osfmk / kern / sched_proto.c
1 /*
2 * Copyright (c) 2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/machine.h>
31 #include <mach/policy.h>
32 #include <mach/sync_policy.h>
33 #include <mach/thread_act.h>
34
35 #include <machine/machine_routines.h>
36 #include <machine/sched_param.h>
37 #include <machine/machine_cpu.h>
38
39 #include <kern/kern_types.h>
40 #include <kern/clock.h>
41 #include <kern/counters.h>
42 #include <kern/cpu_number.h>
43 #include <kern/cpu_data.h>
44 #include <kern/debug.h>
45 #include <kern/macro_help.h>
46 #include <kern/machine.h>
47 #include <kern/misc_protos.h>
48 #include <kern/processor.h>
49 #include <kern/queue.h>
50 #include <kern/sched.h>
51 #include <kern/sched_prim.h>
52 #include <kern/syscall_subr.h>
53 #include <kern/task.h>
54 #include <kern/thread.h>
55
56 #include <vm/pmap.h>
57 #include <vm/vm_kern.h>
58 #include <vm/vm_map.h>
59
60 #include <mach/sdt.h>
61
62 #include <sys/kdebug.h>
63
/*
 * Forward declarations for the prototype scheduler's implementation of
 * the sched_dispatch_table interface; the table itself
 * (sched_proto_dispatch, below) binds these to the scheduler framework.
 * All state is kept in a single machine-wide run queue rather than
 * per-processor queues.
 */

static void
sched_proto_init(void);

static void
sched_proto_timebase_init(void);

static void
sched_proto_processor_init(processor_t processor);

static void
sched_proto_pset_init(processor_set_t pset);

static void
sched_proto_maintenance_continuation(void);

static thread_t
sched_proto_choose_thread(processor_t		processor,
							 int				priority,
							 ast_t				reason);

static thread_t
sched_proto_steal_thread(processor_set_t		pset);

static int
sched_proto_compute_priority(thread_t thread);

static processor_t
sched_proto_choose_processor(	processor_set_t		pset,
								processor_t			processor,
								thread_t			thread);


static boolean_t
sched_proto_processor_enqueue(
								 processor_t			processor,
								 thread_t			thread,
								 integer_t			options);

static void
sched_proto_processor_queue_shutdown(
									 processor_t			processor);

static boolean_t
sched_proto_processor_queue_remove(
									processor_t			processor,
									thread_t		thread);

static boolean_t
sched_proto_processor_queue_empty(processor_t		processor);

static boolean_t
sched_proto_processor_queue_has_priority(processor_t		processor,
										 int				priority,
										 boolean_t		gte);

static boolean_t
sched_proto_priority_is_urgent(int priority);

static ast_t
sched_proto_processor_csw_check(processor_t processor);

static uint32_t
sched_proto_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_proto_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_proto_can_update_priority(thread_t	thread);

static void
sched_proto_update_priority(thread_t	thread);

static void
sched_proto_lightweight_update_priority(thread_t	thread);

static void
sched_proto_quantum_expire(thread_t	thread);

static int
sched_proto_processor_runq_count(processor_t	processor);

static uint64_t
sched_proto_processor_runq_stats_count_sum(processor_t   processor);

static int
sched_proto_processor_bound_count(processor_t   processor);

static void
sched_proto_thread_update_scan(sched_update_scan_context_t scan_context);
155
/*
 * Dispatch table binding the prototype scheduler into the scheduler
 * framework.  Notable policy choices: no work stealing (a single global
 * run queue makes it meaningless), direct dispatch to idle processors,
 * and no scheduler groups.
 */
const struct sched_dispatch_table sched_proto_dispatch = {
	.sched_name                                     = "proto",
	.init                                           = sched_proto_init,
	.timebase_init                                  = sched_proto_timebase_init,
	.processor_init                                 = sched_proto_processor_init,
	.pset_init                                      = sched_proto_pset_init,
	.maintenance_continuation                       = sched_proto_maintenance_continuation,
	.choose_thread                                  = sched_proto_choose_thread,
	/* Stealing is pointless with one global queue. */
	.steal_thread_enabled                           = FALSE,
	.steal_thread                                   = sched_proto_steal_thread,
	.compute_timeshare_priority                     = sched_proto_compute_priority,
	.choose_processor                               = sched_proto_choose_processor,
	.processor_enqueue                              = sched_proto_processor_enqueue,
	.processor_queue_shutdown                       = sched_proto_processor_queue_shutdown,
	.processor_queue_remove                         = sched_proto_processor_queue_remove,
	.processor_queue_empty                          = sched_proto_processor_queue_empty,
	.priority_is_urgent                             = sched_proto_priority_is_urgent,
	.processor_csw_check                            = sched_proto_processor_csw_check,
	.processor_queue_has_priority                   = sched_proto_processor_queue_has_priority,
	.initial_quantum_size                           = sched_proto_initial_quantum_size,
	.initial_thread_sched_mode                      = sched_proto_initial_thread_sched_mode,
	.can_update_priority                            = sched_proto_can_update_priority,
	.update_priority                                = sched_proto_update_priority,
	.lightweight_update_priority                    = sched_proto_lightweight_update_priority,
	.quantum_expire                                 = sched_proto_quantum_expire,
	.processor_runq_count                           = sched_proto_processor_runq_count,
	.processor_runq_stats_count_sum                 = sched_proto_processor_runq_stats_count_sum,
	.processor_bound_count                          = sched_proto_processor_bound_count,
	.thread_update_scan                             = sched_proto_thread_update_scan,
	.direct_dispatch_to_idle_processors             = TRUE,
	.multiple_psets_enabled                         = TRUE,
	.sched_groups_enabled                           = FALSE,
};
189
/* Single machine-wide run queue holding all runnable threads. */
static struct run_queue	*global_runq;
static struct run_queue	global_runq_storage;

/* Sentinel stored in thread->runq meaning "on the global run queue". */
#define GLOBAL_RUNQ		((processor_t)-2)
decl_simple_lock_data(static,global_runq_lock);

extern int	max_unsafe_quanta;

/* Timeslice length: in microseconds, and converted to abstime units. */
static uint32_t proto_quantum_us;
static uint32_t proto_quantum;

/*
 * Incremented each time the dequeue scan in sched_proto_choose_thread
 * exhausts the queue; used to round-robin among runnable threads so a
 * high-priority thread is not re-selected before others get a turn.
 */
static uint32_t runqueue_generation;

/*
 * The one processor currently permitted to run threads; rotated every
 * 8 scheduler ticks by the maintenance continuation.
 */
static processor_t	proto_processor;

/* Periodic (one-second) maintenance tick bookkeeping. */
static uint64_t			sched_proto_tick_deadline;
static uint32_t			sched_proto_tick;
207
208 static void
209 sched_proto_init(void)
210 {
211 proto_quantum_us = 10*1000;
212
213 printf("standard proto timeslicing quantum is %d us\n", proto_quantum_us);
214
215 simple_lock_init(&global_runq_lock, 0);
216 global_runq = &global_runq_storage;
217 run_queue_init(global_runq);
218 runqueue_generation = 0;
219
220 proto_processor = master_processor;
221 }
222
/*
 * Called once the timebase is known: convert the microsecond quantum
 * into absolute-time units and derive the timeshare and unsafe-quanta
 * parameters from it.
 */
static void
sched_proto_timebase_init(void)
{
	uint64_t	abstime;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
							proto_quantum_us, NSEC_PER_USEC, &abstime);
	/* Must fit in 32 bits and be non-zero to be a usable quantum. */
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	proto_quantum = (uint32_t)abstime;

	/* Depressed threads sit out one full quantum. */
	thread_depress_time = 1 * proto_quantum;
	default_timeshare_computation = proto_quantum / 2;
	default_timeshare_constraint = proto_quantum;

	/* Bounds on how long RT/unsafe threads may run uninterrupted. */
	max_unsafe_computation = max_unsafe_quanta * proto_quantum;
	sched_safe_duration = 2 * max_unsafe_quanta * proto_quantum;

}
242
/* Per-processor setup hook: nothing to do, all state is global. */
static void
sched_proto_processor_init(processor_t processor __unused)
{
	/* No per-processor state */
}
248
/* Per-pset setup hook: nothing to do, all state is global. */
static void
sched_proto_pset_init(processor_set_t pset __unused)
{
}
253
/*
 * Periodic (once-per-second) maintenance continuation.  Rotates the
 * designated proto_processor every 8 ticks, updates load averages, and
 * re-arms itself via assert_wait_deadline/thread_block.  Never returns.
 */
static void
sched_proto_maintenance_continuation(void)
{
	uint64_t			abstime = mach_absolute_time();

	sched_proto_tick++;

	/* Every 8 seconds, switch to another processor */
	if ((sched_proto_tick & 0x7) == 0) {
		processor_t new_processor;

		/* Walk the processor list, wrapping to master at the end. */
		new_processor = proto_processor->processor_list;
		if (new_processor == PROCESSOR_NULL)
			proto_processor = master_processor;
		else
			proto_processor = new_processor;
	}


	/*
	 *  Compute various averages.
	 */
	compute_averages(1);

	/* First invocation: anchor the periodic deadline at "now". */
	if (sched_proto_tick_deadline == 0)
		sched_proto_tick_deadline = abstime;

	clock_deadline_for_periodic_event(sched_one_second_interval, abstime,
						&sched_proto_tick_deadline);

	assert_wait_deadline((event_t)sched_proto_maintenance_continuation, THREAD_UNINT, sched_proto_tick_deadline);
	thread_block((thread_continue_t)sched_proto_maintenance_continuation);
	/*NOTREACHED*/
}
288
/*
 * Select and dequeue the highest-priority runnable thread (of at least
 * 'priority') from the global run queue that can run on 'processor'.
 * Returns THREAD_NULL when no eligible thread exists.  Takes and
 * releases the global run queue lock internally.
 */
static thread_t
sched_proto_choose_thread(processor_t		processor,
						  int				priority,
						  ast_t				reason __unused)
{
	run_queue_t		rq = global_runq;
	queue_t			queue;
	int				pri, count;
	thread_t		thread;


	simple_lock(&global_runq_lock);

	/* Start the scan at the highest occupied priority level. */
	queue = rq->queues + rq->highq;
	pri = rq->highq;
	count = rq->count;

	/*
	 * Since we don't depress priorities, a high priority thread
	 * may get selected over and over again. Put a runqueue
	 * generation number in the thread structure so that we
	 * can ensure that we've cycled through all runnable tasks
	 * before coming back to a high priority thread. This isn't
	 * perfect, especially if the number of runnable threads always
	 * stays high, but is a workable approximation
	 */

	while (count > 0 && pri >= priority) {
		thread = (thread_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			/*
			 * Eligible if unbound (or bound to this processor) and
			 * not already chosen in the current generation.
			 */
			if ((thread->bound_processor == PROCESSOR_NULL ||
				 thread->bound_processor == processor) &&
				runqueue_generation != thread->runqueue_generation) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				thread->runqueue_generation = runqueue_generation;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				rq->count--;
				/* Maintain the occupancy bitmap and highq hint. */
				if (queue_empty(queue)) {
					if (pri != IDLEPRI)
						clrbit(MAXPRI - pri, rq->bitmap);
					rq->highq = MAXPRI - ffsbit(rq->bitmap);
				}

				simple_unlock(&global_runq_lock);
				return (thread);
			}
			count--;

			thread = (thread_t)queue_next((queue_entry_t)thread);
		}

		/* Fall through to the next-lower priority level. */
		queue--; pri--;
	}

	/*
	 * Scan exhausted: advance the generation so every runnable thread
	 * becomes eligible again on the next pass.
	 */
	runqueue_generation++;

	simple_unlock(&global_runq_lock);
	return (THREAD_NULL);
}
350
/*
 * Work stealing is meaningless with a single global run queue; always
 * fail.  Called with the pset locked; the interface requires us to
 * drop that lock before returning.
 */
static thread_t
sched_proto_steal_thread(processor_set_t pset)
{
	pset_unlock(pset);

	return (THREAD_NULL);

}
359
/* No priority decay or aging: a thread always runs at its base priority. */
static int
sched_proto_compute_priority(thread_t thread)
{
	return thread->base_pri;
}
365
/*
 * Always choose the currently-designated proto_processor, regardless
 * of the thread or the suggested processor.  Entered with 'pset'
 * locked; must return with the chosen processor's pset locked, so
 * swap locks if the designated processor lives in a different pset.
 */
static processor_t
sched_proto_choose_processor(	processor_set_t		pset,
							 processor_t			processor,
							 thread_t			thread __unused)
{
	processor = proto_processor;

	/*
	 *  Check that the correct processor set is
	 *  returned locked.
	 */
	if (pset != processor->processor_set) {
		pset_unlock(pset);

		pset = processor->processor_set;
		pset_lock(pset);
	}

	return (processor);
}
386
/*
 * Enqueue 'thread' on the global run queue (the target processor is
 * ignored).  Returns the preemption hint from run_queue_enqueue.
 * thread->runq is tagged with the GLOBAL_RUNQ sentinel while the
 * queue lock is still held, so removal can detect the race.
 */
static boolean_t
sched_proto_processor_enqueue(
							 processor_t			processor __unused,
							 thread_t			thread,
							 integer_t			options)
{
	run_queue_t		rq = global_runq;
	boolean_t		result;

	simple_lock(&global_runq_lock);
	result = run_queue_enqueue(rq, thread, options);
	thread->runq = GLOBAL_RUNQ;
	simple_unlock(&global_runq_lock);

	return (result);
}
403
404 static void
405 sched_proto_processor_queue_shutdown(
406 processor_t processor)
407 {
408 /* With a global runqueue, just stop choosing this processor */
409 (void)processor;
410 }
411
/*
 * Attempt to remove 'thread' from the run queue.  Returns TRUE if the
 * thread was still enqueued and we removed it; FALSE if it was already
 * dequeued (e.g. chosen to run) before we acquired the queue lock.
 * 'processor' carries the caller's snapshot of thread->runq
 * (GLOBAL_RUNQ here) and doubles as the success flag below.
 */
static boolean_t
sched_proto_processor_queue_remove(
								processor_t			processor,
								thread_t		thread)
{
	void *			rqlock;
	run_queue_t		rq;

	rqlock = &global_runq_lock;
	rq = global_runq;

	simple_lock(rqlock);
	if (processor == thread->runq) {
		/*
		 *	Thread is on a run queue and we have a lock on
		 *	that run queue.
		 */
		remqueue((queue_entry_t)thread);
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		rq->count--;
		if (SCHED(priority_is_urgent)(thread->sched_pri)) {
			rq->urgency--; assert(rq->urgency >= 0);
		}

		if (queue_empty(rq->queues + thread->sched_pri)) {
			/* update run queue status */
			if (thread->sched_pri != IDLEPRI)
				clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
			rq->highq = MAXPRI - ffsbit(rq->bitmap);
		}

		thread->runq = PROCESSOR_NULL;
	}
	else {
		/*
		 *	The thread left the run queue before we could
		 * 	lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	simple_unlock(rqlock);

	return (processor != PROCESSOR_NULL);
}
458
459 static boolean_t
460 sched_proto_processor_queue_empty(processor_t processor __unused)
461 {
462 boolean_t result;
463
464 result = (global_runq->count == 0);
465
466 return result;
467 }
468
469 static boolean_t
470 sched_proto_processor_queue_has_priority(processor_t processor __unused,
471 int priority,
472 boolean_t gte)
473 {
474 boolean_t result;
475
476 simple_lock(&global_runq_lock);
477
478 if (gte)
479 result = global_runq->highq >= priority;
480 else
481 result = global_runq->highq >= priority;
482
483 simple_unlock(&global_runq_lock);
484
485 return result;
486 }
487
488 /* Implement sched_preempt_pri in code */
489 static boolean_t
490 sched_proto_priority_is_urgent(int priority)
491 {
492 if (priority <= BASEPRI_FOREGROUND)
493 return FALSE;
494
495 if (priority < MINPRI_KERNEL)
496 return TRUE;
497
498 if (priority >= BASEPRI_PREEMPT)
499 return TRUE;
500
501 return FALSE;
502 }
503
504 static ast_t
505 sched_proto_processor_csw_check(processor_t processor)
506 {
507 run_queue_t runq;
508 int count, urgency;
509
510 runq = global_runq;
511 count = runq->count;
512 urgency = runq->urgency;
513
514 if (count > 0) {
515 if (urgency > 0)
516 return (AST_PREEMPT | AST_URGENT);
517
518 return AST_PREEMPT;
519 }
520
521 if (proto_processor != processor)
522 return AST_PREEMPT;
523
524 return AST_NONE;
525 }
526
/* Every thread gets the same fixed quantum (abstime units). */
static uint32_t
sched_proto_initial_quantum_size(thread_t thread __unused)
{
	return proto_quantum;
}
532
533 static sched_mode_t
534 sched_proto_initial_thread_sched_mode(task_t parent_task)
535 {
536 if (parent_task == kernel_task)
537 return TH_MODE_FIXED;
538 else
539 return TH_MODE_TIMESHARE;
540 }
541
/* Priorities are never recomputed under this policy. */
static boolean_t
sched_proto_can_update_priority(thread_t	thread __unused)
{
	return FALSE;
}
547
/* No-op: can_update_priority always returns FALSE, so never called usefully. */
static void
sched_proto_update_priority(thread_t	thread __unused)
{

}
553
/* No-op: no CPU-usage-based priority adjustment in this policy. */
static void
sched_proto_lightweight_update_priority(thread_t	thread __unused)
{

}
559
/* No-op: nothing extra to do when a thread's quantum runs out. */
static void
sched_proto_quantum_expire(thread_t	thread __unused)
{

}
565
566 static int
567 sched_proto_processor_runq_count(processor_t processor)
568 {
569 if (master_processor == processor) {
570 return global_runq->count;
571 } else {
572 return 0;
573 }
574 }
575
576 static uint64_t
577 sched_proto_processor_runq_stats_count_sum(processor_t processor)
578 {
579 if (master_processor == processor) {
580 return global_runq->runq_stats.count_sum;
581 } else {
582 return 0ULL;
583 }
584 }
585
/* Bound-thread accounting is not tracked by this policy. */
static int
sched_proto_processor_bound_count(__unused processor_t   processor)
{
	return 0;
}
591
/* No-op: no periodic per-thread rescan needed (priorities never decay). */
static void
sched_proto_thread_update_scan(__unused sched_update_scan_context_t scan_context)
{

}
597
598
599