1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 /*
60 * processor.c: processor and processor_set manipulation routines.
61 */
62
63 #include <mach/boolean.h>
64 #include <mach/policy.h>
65 #include <mach/processor.h>
66 #include <mach/processor_info.h>
67 #include <mach/vm_param.h>
68 #include <kern/cpu_number.h>
69 #include <kern/host.h>
70 #include <kern/machine.h>
71 #include <kern/misc_protos.h>
72 #include <kern/processor.h>
73 #include <kern/sched.h>
74 #include <kern/task.h>
75 #include <kern/thread.h>
76 #include <kern/ipc_host.h>
77 #include <kern/ipc_tt.h>
78 #include <ipc/ipc_port.h>
79 #include <kern/kalloc.h>
80
81 #include <security/mac_mach_internal.h>
82
83 /*
84 * Exported interface
85 */
86 #include <mach/mach_host_server.h>
87 #include <mach/processor_set_server.h>
88
89 struct processor_set pset0;
90 struct pset_node pset_node0;
91 decl_simple_lock_data(static,pset_node_lock)
92
93 queue_head_t tasks;
94 queue_head_t terminated_tasks; /* To be used ONLY for stackshot. */
95 int tasks_count;
96 int terminated_tasks_count;
97 queue_head_t threads;
98 int threads_count;
99 decl_lck_mtx_data(,tasks_threads_lock)
100
101 processor_t processor_list;
102 unsigned int processor_count;
103 static processor_t processor_list_tail;
104 decl_simple_lock_data(,processor_list_lock)
105
106 uint32_t processor_avail_count;
107
108 processor_t master_processor;
109 int master_cpu = 0;
110 boolean_t sched_stats_active = FALSE;
111
112 void
113 processor_bootstrap(void)
114 {
115 pset_init(&pset0, &pset_node0);
116 pset_node0.psets = &pset0;
117
118 simple_lock_init(&pset_node_lock, 0);
119
120 queue_init(&tasks);
121 queue_init(&terminated_tasks);
122 queue_init(&threads);
123
124 simple_lock_init(&processor_list_lock, 0);
125
126 master_processor = cpu_to_processor(master_cpu);
127
128 processor_init(master_processor, master_cpu, &pset0);
129 }
130
131 /*
132 * Initialize the given processor for the cpu
133 * indicated by cpu_id, and assign to the
134 * specified processor set.
135 */
136 void
137 processor_init(
138 processor_t processor,
139 int cpu_id,
140 processor_set_t pset)
141 {
142 spl_t s;
143
144 if (processor != master_processor) {
145 /* Scheduler state deferred until sched_init() */
146 SCHED(processor_init)(processor);
147 }
148
149 processor->state = PROCESSOR_OFF_LINE;
150 processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
151 processor->processor_set = pset;
152 processor->current_pri = MINPRI;
153 processor->current_thmode = TH_MODE_NONE;
154 processor->cpu_id = cpu_id;
155 timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
156 processor->quantum_end = UINT64_MAX;
157 processor->deadline = UINT64_MAX;
158 processor->first_timeslice = FALSE;
159 processor->processor_primary = processor; /* no SMT relationship known at this point */
160 processor->processor_secondary = NULL;
161 processor->is_SMT = FALSE;
162 processor->is_recommended = TRUE;
163 processor->processor_self = IP_NULL;
164 processor_data_init(processor);
165 processor->processor_list = NULL;
166
167 s = splsched();
168 pset_lock(pset);
169 if (pset->cpu_set_count++ == 0)
170 pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
171 else {
172 pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
173 pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
174 }
175 pset_unlock(pset);
176 splx(s);
177
178 simple_lock(&processor_list_lock);
179 if (processor_list == NULL)
180 processor_list = processor;
181 else
182 processor_list_tail->processor_list = processor;
183 processor_list_tail = processor;
184 processor_count++;
185 simple_unlock(&processor_list_lock);
186 }
187
188 void
189 processor_set_primary(
190 processor_t processor,
191 processor_t primary)
192 {
193 assert(processor->processor_primary == primary || processor->processor_primary == processor);
194 /* Re-adjust primary pointer for this (possibly) secondary processor */
195 processor->processor_primary = primary;
196
197 assert(primary->processor_secondary == NULL || primary->processor_secondary == processor);
198 if (primary != processor) {
199 /* Link primary to secondary; this assumes a 2-way SMT model.
200 * We'll need to move to a queue if any future architecture
201 * requires otherwise.
202 */
203 assert(processor->processor_secondary == NULL);
204 primary->processor_secondary = processor;
205 /* Mark both processors as SMT siblings */
206 primary->is_SMT = TRUE;
207 processor->is_SMT = TRUE;
208 }
209 }
210
211 processor_set_t
212 processor_pset(
213 processor_t processor)
214 {
215 return (processor->processor_set);
216 }
217
218 pset_node_t
219 pset_node_root(void)
220 {
221 return &pset_node0;
222 }
223
224 processor_set_t
225 pset_create(
226 pset_node_t node)
227 {
228 /* some schedulers do not support multiple psets */
229 if (SCHED(multiple_psets_enabled) == FALSE)
230 return processor_pset(master_processor);
231
232 processor_set_t *prev, pset = kalloc(sizeof (*pset));
233
234 if (pset != PROCESSOR_SET_NULL) {
235 pset_init(pset, node);
236
237 simple_lock(&pset_node_lock);
238
239 prev = &node->psets;
240 while (*prev != PROCESSOR_SET_NULL)
241 prev = &(*prev)->pset_list;
242
243 *prev = pset;
244
245 simple_unlock(&pset_node_lock);
246 }
247
248 return (pset);
249 }
250
251 /*
252 * Initialize the given processor_set structure.
253 */
254 void
255 pset_init(
256 processor_set_t pset,
257 pset_node_t node)
258 {
259 if (pset != &pset0) {
260 /* Scheduler state deferred until sched_init() */
261 SCHED(pset_init)(pset);
262 }
263
264 queue_init(&pset->active_queue);
265 queue_init(&pset->idle_queue);
266 queue_init(&pset->idle_secondary_queue);
267 pset->online_processor_count = 0;
268 pset->cpu_set_low = pset->cpu_set_hi = 0;
269 pset->cpu_set_count = 0;
270 pset->pending_AST_cpu_mask = 0;
271 #if defined(CONFIG_SCHED_DEFERRED_AST)
272 pset->pending_deferred_AST_cpu_mask = 0;
273 #endif
274 pset_lock_init(pset);
275 pset->pset_self = IP_NULL;
276 pset->pset_name_self = IP_NULL;
277 pset->pset_list = PROCESSOR_SET_NULL;
278 pset->node = node;
279 }
280
281 kern_return_t
282 processor_info_count(
283 processor_flavor_t flavor,
284 mach_msg_type_number_t *count)
285 {
286 switch (flavor) {
287
288 case PROCESSOR_BASIC_INFO:
289 *count = PROCESSOR_BASIC_INFO_COUNT;
290 break;
291
292 case PROCESSOR_CPU_LOAD_INFO:
293 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
294 break;
295
296 default:
297 return (cpu_info_count(flavor, count));
298 }
299
300 return (KERN_SUCCESS);
301 }
302
303
304 kern_return_t
305 processor_info(
306 register processor_t processor,
307 processor_flavor_t flavor,
308 host_t *host,
309 processor_info_t info,
310 mach_msg_type_number_t *count)
311 {
312 register int cpu_id, state;
313 kern_return_t result;
314
315 if (processor == PROCESSOR_NULL)
316 return (KERN_INVALID_ARGUMENT);
317
318 cpu_id = processor->cpu_id;
319
320 switch (flavor) {
321
322 case PROCESSOR_BASIC_INFO:
323 {
324 register processor_basic_info_t basic_info;
325
326 if (*count < PROCESSOR_BASIC_INFO_COUNT)
327 return (KERN_FAILURE);
328
329 basic_info = (processor_basic_info_t) info;
330 basic_info->cpu_type = slot_type(cpu_id);
331 basic_info->cpu_subtype = slot_subtype(cpu_id);
332 state = processor->state;
333 if (state == PROCESSOR_OFF_LINE)
334 basic_info->running = FALSE;
335 else
336 basic_info->running = TRUE;
337 basic_info->slot_num = cpu_id;
338 if (processor == master_processor)
339 basic_info->is_master = TRUE;
340 else
341 basic_info->is_master = FALSE;
342
343 *count = PROCESSOR_BASIC_INFO_COUNT;
344 *host = &realhost;
345
346 return (KERN_SUCCESS);
347 }
348
349 case PROCESSOR_CPU_LOAD_INFO:
350 {
351 processor_cpu_load_info_t cpu_load_info;
352 timer_t idle_state;
353 uint64_t idle_time_snapshot1, idle_time_snapshot2;
354 uint64_t idle_time_tstamp1, idle_time_tstamp2;
355
356 /*
357 * We capture the accumulated idle time twice over
358 * the course of this function, as well as the timestamps
359 * when each was last updated. Since these are
360 * all done using non-atomic racy mechanisms, the
361 * most we can infer is whether values are stable.
362 * timer_grab() is the only function that can be
363 * used reliably on another processor's per-processor
364 * data.
365 */
366
367 if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
368 return (KERN_FAILURE);
369
370 cpu_load_info = (processor_cpu_load_info_t) info;
371 if (precise_user_kernel_time) {
372 cpu_load_info->cpu_ticks[CPU_STATE_USER] =
373 (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
374 cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
375 (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
376 } else {
377 uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
378 timer_grab(&PROCESSOR_DATA(processor, system_state));
379
380 cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
381 cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
382 }
383
384 idle_state = &PROCESSOR_DATA(processor, idle_state);
385 idle_time_snapshot1 = timer_grab(idle_state);
386 idle_time_tstamp1 = idle_state->tstamp;
387
388 /*
389 * Idle processors are not continually updating their
390 * per-processor idle timer, so it may be extremely
391 * out of date, resulting in an over-representation
392 * of non-idle time between two measurement
393 * intervals by e.g. top(1). If we are non-idle, or
394 * have evidence that the timer is being updated
395 * concurrently, we consider its value up-to-date.
396 */
397 if (PROCESSOR_DATA(processor, current_state) != idle_state) {
398 cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
399 (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
400 } else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
401 (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))){
402 /* Idle timer is being updated concurrently, second stamp is good enough */
403 cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
404 (uint32_t)(idle_time_snapshot2 / hz_tick_interval);
405 } else {
406 /*
407 * Idle timer may be very stale. Fortunately we have established
408 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
409 */
410 idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;
411
412 cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
413 (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
414 }
415
416 cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;
417
418 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
419 *host = &realhost;
420
421 return (KERN_SUCCESS);
422 }
423
424 default:
425 result = cpu_info(flavor, cpu_id, info, count);
426 if (result == KERN_SUCCESS)
427 *host = &realhost;
428
429 return (result);
430 }
431 }
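
/*
 * Example (user space, illustrative sketch only): the per-CPU tick counts
 * computed above are usually obtained through the host_processor_info()
 * MIG routine with the same PROCESSOR_CPU_LOAD_INFO flavor; error handling
 * is omitted, and the out-of-line reply buffer is assumed to be released
 * with vm_deallocate() as usual for MIG array replies:
 *
 *	natural_t			cpu_count;
 *	processor_info_array_t		info;
 *	mach_msg_type_number_t		info_count;
 *
 *	host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO,
 *			    &cpu_count, &info, &info_count);
 *
 *	processor_cpu_load_info_t load = (processor_cpu_load_info_t)info;
 *	uint32_t idle_ticks = 0;
 *	for (natural_t i = 0; i < cpu_count; i++)
 *		idle_ticks += load[i].cpu_ticks[CPU_STATE_IDLE];
 *
 *	vm_deallocate(mach_task_self(), (vm_address_t)info,
 *		      info_count * sizeof(integer_t));
 */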
432
433 kern_return_t
434 processor_start(
435 processor_t processor)
436 {
437 processor_set_t pset;
438 thread_t thread;
439 kern_return_t result;
440 spl_t s;
441
442 if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
443 return (KERN_INVALID_ARGUMENT);
444
445 if (processor == master_processor) {
446 processor_t prev;
447
448 prev = thread_bind(processor);
449 thread_block(THREAD_CONTINUE_NULL);
450
451 result = cpu_start(processor->cpu_id);
452
453 thread_bind(prev);
454
455 return (result);
456 }
457
458 s = splsched();
459 pset = processor->processor_set;
460 pset_lock(pset);
461 if (processor->state != PROCESSOR_OFF_LINE) {
462 pset_unlock(pset);
463 splx(s);
464
465 return (KERN_FAILURE);
466 }
467
468 processor->state = PROCESSOR_START;
469 pset_unlock(pset);
470 splx(s);
471
472 /*
473 * Create the idle processor thread.
474 */
475 if (processor->idle_thread == THREAD_NULL) {
476 result = idle_thread_create(processor);
477 if (result != KERN_SUCCESS) {
478 s = splsched();
479 pset_lock(pset);
480 processor->state = PROCESSOR_OFF_LINE;
481 pset_unlock(pset);
482 splx(s);
483
484 return (result);
485 }
486 }
487
488 /*
489 * If there is no active thread, the processor
490 * has never been started. Create a dedicated
491 * start up thread.
492 */
493 if ( processor->active_thread == THREAD_NULL &&
494 processor->next_thread == THREAD_NULL ) {
495 result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
496 if (result != KERN_SUCCESS) {
497 s = splsched();
498 pset_lock(pset);
499 processor->state = PROCESSOR_OFF_LINE;
500 pset_unlock(pset);
501 splx(s);
502
503 return (result);
504 }
505
506 s = splsched();
507 thread_lock(thread);
508 thread->bound_processor = processor;
509 processor->next_thread = thread;
510 thread->state = TH_RUN;
511 thread->last_made_runnable_time = mach_absolute_time();
512 thread_unlock(thread);
513 splx(s);
514
515 thread_deallocate(thread);
516 }
517
518 if (processor->processor_self == IP_NULL)
519 ipc_processor_init(processor);
520
521 result = cpu_start(processor->cpu_id);
522 if (result != KERN_SUCCESS) {
523 s = splsched();
524 pset_lock(pset);
525 processor->state = PROCESSOR_OFF_LINE;
526 pset_unlock(pset);
527 splx(s);
528
529 return (result);
530 }
531
532 ipc_processor_enable(processor);
533
534 return (KERN_SUCCESS);
535 }
536
537 kern_return_t
538 processor_exit(
539 processor_t processor)
540 {
541 if (processor == PROCESSOR_NULL)
542 return(KERN_INVALID_ARGUMENT);
543
544 return(processor_shutdown(processor));
545 }
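
/*
 * Example (user space, illustrative sketch only): processor_start() and
 * processor_exit() are exported as MIG routines on each processor port.
 * A privileged caller would typically enumerate the ports first and then
 * take the second CPU offline and bring it back; this assumes host_priv
 * already holds the host privileged port:
 *
 *	processor_array_t		procs;
 *	mach_msg_type_number_t		proc_count;
 *
 *	host_processors(host_priv, &procs, &proc_count);
 *	if (proc_count > 1) {
 *		processor_exit(procs[1]);
 *		processor_start(procs[1]);
 *	}
 */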
546
547 kern_return_t
548 processor_control(
549 processor_t processor,
550 processor_info_t info,
551 mach_msg_type_number_t count)
552 {
553 if (processor == PROCESSOR_NULL)
554 return(KERN_INVALID_ARGUMENT);
555
556 return(cpu_control(processor->cpu_id, info, count));
557 }
558
559 kern_return_t
560 processor_set_create(
561 __unused host_t host,
562 __unused processor_set_t *new_set,
563 __unused processor_set_t *new_name)
564 {
565 return(KERN_FAILURE);
566 }
567
568 kern_return_t
569 processor_set_destroy(
570 __unused processor_set_t pset)
571 {
572 return(KERN_FAILURE);
573 }
574
575 kern_return_t
576 processor_get_assignment(
577 processor_t processor,
578 processor_set_t *pset)
579 {
580 int state;
581
582 if (processor == PROCESSOR_NULL)
583 return(KERN_INVALID_ARGUMENT);
584
585 state = processor->state;
586 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
587 return(KERN_FAILURE);
588
589 *pset = &pset0;
590
591 return(KERN_SUCCESS);
592 }
593
594 kern_return_t
595 processor_set_info(
596 processor_set_t pset,
597 int flavor,
598 host_t *host,
599 processor_set_info_t info,
600 mach_msg_type_number_t *count)
601 {
602 if (pset == PROCESSOR_SET_NULL)
603 return(KERN_INVALID_ARGUMENT);
604
605 if (flavor == PROCESSOR_SET_BASIC_INFO) {
606 register processor_set_basic_info_t basic_info;
607
608 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
609 return(KERN_FAILURE);
610
611 basic_info = (processor_set_basic_info_t) info;
612 basic_info->processor_count = processor_avail_count;
613 basic_info->default_policy = POLICY_TIMESHARE;
614
615 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
616 *host = &realhost;
617 return(KERN_SUCCESS);
618 }
619 else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
620 register policy_timeshare_base_t ts_base;
621
622 if (*count < POLICY_TIMESHARE_BASE_COUNT)
623 return(KERN_FAILURE);
624
625 ts_base = (policy_timeshare_base_t) info;
626 ts_base->base_priority = BASEPRI_DEFAULT;
627
628 *count = POLICY_TIMESHARE_BASE_COUNT;
629 *host = &realhost;
630 return(KERN_SUCCESS);
631 }
632 else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
633 register policy_fifo_base_t fifo_base;
634
635 if (*count < POLICY_FIFO_BASE_COUNT)
636 return(KERN_FAILURE);
637
638 fifo_base = (policy_fifo_base_t) info;
639 fifo_base->base_priority = BASEPRI_DEFAULT;
640
641 *count = POLICY_FIFO_BASE_COUNT;
642 *host = &realhost;
643 return(KERN_SUCCESS);
644 }
645 else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
646 register policy_rr_base_t rr_base;
647
648 if (*count < POLICY_RR_BASE_COUNT)
649 return(KERN_FAILURE);
650
651 rr_base = (policy_rr_base_t) info;
652 rr_base->base_priority = BASEPRI_DEFAULT;
653 rr_base->quantum = 1;
654
655 *count = POLICY_RR_BASE_COUNT;
656 *host = &realhost;
657 return(KERN_SUCCESS);
658 }
659 else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
660 register policy_timeshare_limit_t ts_limit;
661
662 if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
663 return(KERN_FAILURE);
664
665 ts_limit = (policy_timeshare_limit_t) info;
666 ts_limit->max_priority = MAXPRI_KERNEL;
667
668 *count = POLICY_TIMESHARE_LIMIT_COUNT;
669 *host = &realhost;
670 return(KERN_SUCCESS);
671 }
672 else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
673 register policy_fifo_limit_t fifo_limit;
674
675 if (*count < POLICY_FIFO_LIMIT_COUNT)
676 return(KERN_FAILURE);
677
678 fifo_limit = (policy_fifo_limit_t) info;
679 fifo_limit->max_priority = MAXPRI_KERNEL;
680
681 *count = POLICY_FIFO_LIMIT_COUNT;
682 *host = &realhost;
683 return(KERN_SUCCESS);
684 }
685 else if (flavor == PROCESSOR_SET_RR_LIMITS) {
686 register policy_rr_limit_t rr_limit;
687
688 if (*count < POLICY_RR_LIMIT_COUNT)
689 return(KERN_FAILURE);
690
691 rr_limit = (policy_rr_limit_t) info;
692 rr_limit->max_priority = MAXPRI_KERNEL;
693
694 *count = POLICY_RR_LIMIT_COUNT;
695 *host = &realhost;
696 return(KERN_SUCCESS);
697 }
698 else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
699 register int *enabled;
700
701 if (*count < (sizeof(*enabled)/sizeof(int)))
702 return(KERN_FAILURE);
703
704 enabled = (int *) info;
705 *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
706
707 *count = sizeof(*enabled)/sizeof(int);
708 *host = &realhost;
709 return(KERN_SUCCESS);
710 }
711
712
713 *host = HOST_NULL;
714 return(KERN_INVALID_ARGUMENT);
715 }
716
717 /*
718 * processor_set_statistics
719 *
720 * Returns scheduling statistics for a processor set.
721 */
722 kern_return_t
723 processor_set_statistics(
724 processor_set_t pset,
725 int flavor,
726 processor_set_info_t info,
727 mach_msg_type_number_t *count)
728 {
729 if (pset == PROCESSOR_SET_NULL || pset != &pset0)
730 return (KERN_INVALID_PROCESSOR_SET);
731
732 if (flavor == PROCESSOR_SET_LOAD_INFO) {
733 register processor_set_load_info_t load_info;
734
735 if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
736 return(KERN_FAILURE);
737
738 load_info = (processor_set_load_info_t) info;
739
740 load_info->mach_factor = sched_mach_factor;
741 load_info->load_average = sched_load_average;
742
743 load_info->task_count = tasks_count;
744 load_info->thread_count = threads_count;
745
746 *count = PROCESSOR_SET_LOAD_INFO_COUNT;
747 return(KERN_SUCCESS);
748 }
749
750 return(KERN_INVALID_ARGUMENT);
751 }
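
/*
 * Example (user space, illustrative sketch only): PROCESSOR_SET_LOAD_INFO
 * is reachable through the processor set name port, so no privilege is
 * needed; a minimal sketch using the default set:
 *
 *	processor_set_name_t		default_set;
 *	struct processor_set_load_info	load;
 *	mach_msg_type_number_t		count = PROCESSOR_SET_LOAD_INFO_COUNT;
 *
 *	processor_set_default(mach_host_self(), &default_set);
 *	processor_set_statistics(default_set, PROCESSOR_SET_LOAD_INFO,
 *				 (processor_set_info_t)&load, &count);
 *
 * On return, load.task_count, load.thread_count, load.load_average and
 * load.mach_factor mirror the fields filled in above.
 */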
752
753 /*
754 * processor_set_max_priority:
755 *
756 * Specify max priority permitted on processor set. This affects
757 * newly created and assigned threads. Optionally change existing
758 * ones.
759 */
760 kern_return_t
761 processor_set_max_priority(
762 __unused processor_set_t pset,
763 __unused int max_priority,
764 __unused boolean_t change_threads)
765 {
766 return (KERN_INVALID_ARGUMENT);
767 }
768
769 /*
770 * processor_set_policy_enable:
771 *
772 * Allow indicated policy on processor set.
773 */
774
775 kern_return_t
776 processor_set_policy_enable(
777 __unused processor_set_t pset,
778 __unused int policy)
779 {
780 return (KERN_INVALID_ARGUMENT);
781 }
782
783 /*
784 * processor_set_policy_disable:
785 *
786 * Forbid indicated policy on processor set. Time sharing cannot
787 * be forbidden.
788 */
789 kern_return_t
790 processor_set_policy_disable(
791 __unused processor_set_t pset,
792 __unused int policy,
793 __unused boolean_t change_threads)
794 {
795 return (KERN_INVALID_ARGUMENT);
796 }
797
798 /*
799 * processor_set_things:
800 *
801 * Common internals for processor_set_{threads,tasks}
802 */
803 kern_return_t
804 processor_set_things(
805 processor_set_t pset,
806 void **thing_list,
807 mach_msg_type_number_t *count,
808 int type)
809 {
810 unsigned int i, j, used;
811 task_t task;
812 thread_t thread;
813
814 task_t *task_list;
815 unsigned int actual_tasks;
816 vm_size_t task_size, task_size_needed;
817
818 thread_t *thread_list;
819 unsigned int actual_threads;
820 vm_size_t thread_size, thread_size_needed;
821
822 void *addr, *newaddr;
823 vm_size_t size, size_needed;
824
825 if (pset == PROCESSOR_SET_NULL || pset != &pset0)
826 return (KERN_INVALID_ARGUMENT);
827
828 task_size = 0;
829 task_size_needed = 0;
830 task_list = NULL;
831 actual_tasks = 0;
832
833 thread_size = 0;
834 thread_size_needed = 0;
835 thread_list = NULL;
836 actual_threads = 0;
837
838 for (;;) {
839 lck_mtx_lock(&tasks_threads_lock);
840
841 /* do we have the memory we need? */
842 if (type == PSET_THING_THREAD)
843 thread_size_needed = threads_count * sizeof(void *);
844 #if !CONFIG_MACF
845 else
846 #endif
847 task_size_needed = tasks_count * sizeof(void *);
848
849 if (task_size_needed <= task_size &&
850 thread_size_needed <= thread_size)
851 break;
852
853 /* unlock and allocate more memory */
854 lck_mtx_unlock(&tasks_threads_lock);
855
856 /* grow task array */
857 if (task_size_needed > task_size) {
858 if (task_size != 0)
859 kfree(task_list, task_size);
860
861 assert(task_size_needed > 0);
862 task_size = task_size_needed;
863
864 task_list = (task_t *)kalloc(task_size);
865 if (task_list == NULL) {
866 if (thread_size != 0)
867 kfree(thread_list, thread_size);
868 return (KERN_RESOURCE_SHORTAGE);
869 }
870 }
871
872 /* grow thread array */
873 if (thread_size_needed > thread_size) {
874 if (thread_size != 0)
875 kfree(thread_list, thread_size);
876
877 assert(thread_size_needed > 0);
878 thread_size = thread_size_needed;
879
880 thread_list = (thread_t *)kalloc(thread_size);
881 if (thread_list == 0) {
882 if (task_size != 0)
883 kfree(task_list, task_size);
884 return (KERN_RESOURCE_SHORTAGE);
885 }
886 }
887 }
888
889 /* OK, have memory and the list locked */
890
891 /* If we need it, get the thread list */
892 if (type == PSET_THING_THREAD) {
893 for (thread = (thread_t)queue_first(&threads);
894 !queue_end(&threads, (queue_entry_t)thread);
895 thread = (thread_t)queue_next(&thread->threads)) {
896 #if defined(SECURE_KERNEL)
897 if (thread->task != kernel_task) {
898 #endif
899 thread_reference_internal(thread);
900 thread_list[actual_threads++] = thread;
901 #if defined(SECURE_KERNEL)
902 }
903 #endif
904 }
905 }
906 #if !CONFIG_MACF
907 else {
908 #endif
909 /* get a list of the tasks */
910 for (task = (task_t)queue_first(&tasks);
911 !queue_end(&tasks, (queue_entry_t)task);
912 task = (task_t)queue_next(&task->tasks)) {
913 #if defined(SECURE_KERNEL)
914 if (task != kernel_task) {
915 #endif
916 task_reference_internal(task);
917 task_list[actual_tasks++] = task;
918 #if defined(SECURE_KERNEL)
919 }
920 #endif
921 }
922 #if !CONFIG_MACF
923 }
924 #endif
925
926 lck_mtx_unlock(&tasks_threads_lock);
927
928 #if CONFIG_MACF
929 /* for each task, make sure we are allowed to examine it */
930 for (i = used = 0; i < actual_tasks; i++) {
931 if (mac_task_check_expose_task(task_list[i])) {
932 task_deallocate(task_list[i]);
933 continue;
934 }
935 task_list[used++] = task_list[i];
936 }
937 actual_tasks = used;
938 task_size_needed = actual_tasks * sizeof(void *);
939
940 if (type == PSET_THING_THREAD) {
941
942 /* for each thread (if any), make sure its task is in the allowed list */
943 for (i = used = 0; i < actual_threads; i++) {
944 boolean_t found_task = FALSE;
945
946 task = thread_list[i]->task;
947 for (j = 0; j < actual_tasks; j++) {
948 if (task_list[j] == task) {
949 found_task = TRUE;
950 break;
951 }
952 }
953 if (found_task)
954 thread_list[used++] = thread_list[i];
955 else
956 thread_deallocate(thread_list[i]);
957 }
958 actual_threads = used;
959 thread_size_needed = actual_threads * sizeof(void *);
960
961 /* done with the task list */
962 for (i = 0; i < actual_tasks; i++)
963 task_deallocate(task_list[i]);
964 kfree(task_list, task_size);
965 task_size = 0;
966 actual_tasks = 0;
967 task_list = NULL;
968 }
969 #endif
970
971 if (type == PSET_THING_THREAD) {
972 if (actual_threads == 0) {
973 /* no threads available to return */
974 assert(task_size == 0);
975 if (thread_size != 0)
976 kfree(thread_list, thread_size);
977 *thing_list = NULL;
978 *count = 0;
979 return KERN_SUCCESS;
980 }
981 size_needed = actual_threads * sizeof(void *);
982 size = thread_size;
983 addr = thread_list;
984 } else {
985 if (actual_tasks == 0) {
986 /* no tasks available to return */
987 assert(thread_size == 0);
988 if (task_size != 0)
989 kfree(task_list, task_size);
990 *thing_list = NULL;
991 *count = 0;
992 return KERN_SUCCESS;
993 }
994 size_needed = actual_tasks * sizeof(void *);
995 size = task_size;
996 addr = task_list;
997 }
998
999 /* if we allocated too much, must copy */
1000 if (size_needed < size) {
1001 newaddr = kalloc(size_needed);
1002 if (newaddr == 0) {
1003 for (i = 0; i < actual_tasks; i++) {
1004 if (type == PSET_THING_THREAD)
1005 thread_deallocate(thread_list[i]);
1006 else
1007 task_deallocate(task_list[i]);
1008 }
1009 if (size)
1010 kfree(addr, size);
1011 return (KERN_RESOURCE_SHORTAGE);
1012 }
1013
1014 bcopy((void *) addr, (void *) newaddr, size_needed);
1015 kfree(addr, size);
1016
1017 addr = newaddr;
1018 size = size_needed;
1019 }
1020
1021 *thing_list = (void **)addr;
1022 *count = (unsigned int)size / sizeof(void *);
1023
1024 return (KERN_SUCCESS);
1025 }
1026
1027
1028 /*
1029 * processor_set_tasks:
1030 *
1031 * List all tasks in the processor set.
1032 */
1033 kern_return_t
1034 processor_set_tasks(
1035 processor_set_t pset,
1036 task_array_t *task_list,
1037 mach_msg_type_number_t *count)
1038 {
1039 kern_return_t ret;
1040 mach_msg_type_number_t i;
1041
1042 ret = processor_set_things(pset, (void **)task_list, count, PSET_THING_TASK);
1043 if (ret != KERN_SUCCESS)
1044 return ret;
1045
1046 /* do the conversion that MIG should handle */
1047 for (i = 0; i < *count; i++)
1048 (*task_list)[i] = (task_t)convert_task_to_port((*task_list)[i]);
1049 return KERN_SUCCESS;
1050 }
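
/*
 * Example (user space, illustrative sketch only): listing tasks requires
 * the processor set control port rather than the name port; this assumes
 * host_priv already holds the host privileged port:
 *
 *	processor_set_name_t		pset_name;
 *	processor_set_t			pset_priv;
 *	task_array_t			task_list;
 *	mach_msg_type_number_t		task_count;
 *
 *	processor_set_default(mach_host_self(), &pset_name);
 *	host_processor_set_priv(host_priv, pset_name, &pset_priv);
 *	processor_set_tasks(pset_priv, &task_list, &task_count);
 *
 * Each entry of task_list is a send right to a task port; the caller is
 * responsible for deallocating the rights and the out-of-line array.
 */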
1051
1052 /*
1053 * processor_set_threads:
1054 *
1055 * List all threads in the processor set.
1056 */
1057 #if defined(SECURE_KERNEL)
1058 kern_return_t
1059 processor_set_threads(
1060 __unused processor_set_t pset,
1061 __unused thread_array_t *thread_list,
1062 __unused mach_msg_type_number_t *count)
1063 {
1064 return KERN_FAILURE;
1065 }
1066 #else
1067 kern_return_t
1068 processor_set_threads(
1069 processor_set_t pset,
1070 thread_array_t *thread_list,
1071 mach_msg_type_number_t *count)
1072 {
1073 kern_return_t ret;
1074 mach_msg_type_number_t i;
1075
1076 ret = processor_set_things(pset, (void **)thread_list, count, PSET_THING_THREAD);
1077 if (ret != KERN_SUCCESS)
1078 return ret;
1079
1080 /* do the conversion that MIG should handle */
1081 for (i = 0; i < *count; i++)
1082 (*thread_list)[i] = (thread_t)convert_thread_to_port((*thread_list)[i]);
1083 return KERN_SUCCESS;
1084 }
1085 #endif
1086
1087 /*
1088 * processor_set_policy_control
1089 *
1090 * Controls the scheduling attributes governing the processor set.
1091 * Allows control of enabled policies, and per-policy base and limit
1092 * priorities.
1093 */
1094 kern_return_t
1095 processor_set_policy_control(
1096 __unused processor_set_t pset,
1097 __unused int flavor,
1098 __unused processor_set_info_t policy_info,
1099 __unused mach_msg_type_number_t count,
1100 __unused boolean_t change)
1101 {
1102 return (KERN_INVALID_ARGUMENT);
1103 }
1104
1105 #undef pset_deallocate
1106 void pset_deallocate(processor_set_t pset);
1107 void
1108 pset_deallocate(
1109 __unused processor_set_t pset)
1110 {
1111 return;
1112 }
1113
1114 #undef pset_reference
1115 void pset_reference(processor_set_t pset);
1116 void
1117 pset_reference(
1118 __unused processor_set_t pset)
1119 {
1120 return;
1121 }