osfmk/kern/processor.c (apple/xnu, xnu-3789.60.24)
/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * processor.c: processor and processor_set manipulation routines.
 */

#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

#include <security/mac_mach_internal.h>

/*
 * Exported interface
 */
#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>

struct processor_set    pset0;
struct pset_node        pset_node0;
decl_simple_lock_data(static,pset_node_lock)

queue_head_t            tasks;
queue_head_t            terminated_tasks;   /* To be used ONLY for stackshot. */
queue_head_t            corpse_tasks;
int                     tasks_count;
int                     terminated_tasks_count;
queue_head_t            threads;
int                     threads_count;
decl_lck_mtx_data(,tasks_threads_lock)
decl_lck_mtx_data(,tasks_corpse_lock)

processor_t             processor_list;
unsigned int            processor_count;
static processor_t      processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

uint32_t                processor_avail_count;

processor_t             master_processor;
int                     master_cpu = 0;
boolean_t               sched_stats_active = FALSE;

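/*
 *	processor_bootstrap:
 *
 *	Initialize the boot processor set and node, the global
 *	task/thread queues and their locks, and the master processor.
 */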
void
processor_bootstrap(void)
{
    pset_init(&pset0, &pset_node0);
    pset_node0.psets = &pset0;

    simple_lock_init(&pset_node_lock, 0);

    queue_init(&tasks);
    queue_init(&terminated_tasks);
    queue_init(&threads);
    queue_init(&corpse_tasks);

    simple_lock_init(&processor_list_lock, 0);

    master_processor = cpu_to_processor(master_cpu);

    processor_init(master_processor, master_cpu, &pset0);
}

/*
 *	Initialize the given processor for the cpu
 *	indicated by cpu_id, and assign to the
 *	specified processor set.
 */
void
processor_init(
    processor_t         processor,
    int                 cpu_id,
    processor_set_t     pset)
{
    spl_t   s;

    if (processor != master_processor) {
        /* Scheduler state deferred until sched_init() */
        SCHED(processor_init)(processor);
    }

    processor->state = PROCESSOR_OFF_LINE;
    processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
    processor->processor_set = pset;
    processor->current_pri = MINPRI;
    processor->current_thmode = TH_MODE_NONE;
    processor->current_sfi_class = SFI_CLASS_KERNEL;
    processor->starting_pri = MINPRI;
    processor->cpu_id = cpu_id;
    timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
    processor->quantum_end = UINT64_MAX;
    processor->deadline = UINT64_MAX;
    processor->first_timeslice = FALSE;
    processor->processor_primary = processor; /* no SMT relationship known at this point */
    processor->processor_secondary = NULL;
    processor->is_SMT = FALSE;
    processor->is_recommended = (pset->recommended_bitmask & (1ULL << cpu_id)) ? TRUE : FALSE;
    processor->processor_self = IP_NULL;
    processor_data_init(processor);
    processor->processor_list = NULL;

    s = splsched();
    pset_lock(pset);
    if (pset->cpu_set_count++ == 0)
        pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
    else {
        pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
        pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
    }
    pset_unlock(pset);
    splx(s);

    simple_lock(&processor_list_lock);
    if (processor_list == NULL)
        processor_list = processor;
    else
        processor_list_tail->processor_list = processor;
    processor_list_tail = processor;
    processor_count++;
    simple_unlock(&processor_list_lock);
}

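/*
 *	processor_set_primary:
 *
 *	Record the primary/secondary (SMT) relationship between the
 *	given processor and its primary.
 */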
void
processor_set_primary(
    processor_t     processor,
    processor_t     primary)
{
    assert(processor->processor_primary == primary || processor->processor_primary == processor);
    /* Re-adjust primary pointer for this (possibly) secondary processor */
    processor->processor_primary = primary;

    assert(primary->processor_secondary == NULL || primary->processor_secondary == processor);
    if (primary != processor) {
        /* Link primary to secondary, assumes a 2-way SMT model
         * We'll need to move to a queue if any future architecture
         * requires otherwise.
         */
        assert(processor->processor_secondary == NULL);
        primary->processor_secondary = processor;
        /* Mark both processors as SMT siblings */
        primary->is_SMT = TRUE;
        processor->is_SMT = TRUE;
    }
}

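/*
 *	processor_pset:
 *
 *	Return the processor set to which the processor is currently assigned.
 */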
processor_set_t
processor_pset(
    processor_t     processor)
{
    return (processor->processor_set);
}

pset_node_t
pset_node_root(void)
{
    return &pset_node0;
}

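/*
 *	pset_create:
 *
 *	Allocate and initialize a new processor set and link it into
 *	the given node's pset list.  Falls back to the master pset when
 *	the scheduler does not support multiple psets.
 */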
processor_set_t
pset_create(
    pset_node_t     node)
{
    /* some schedulers do not support multiple psets */
    if (SCHED(multiple_psets_enabled) == FALSE)
        return processor_pset(master_processor);

    processor_set_t *prev, pset = kalloc(sizeof (*pset));

    if (pset != PROCESSOR_SET_NULL) {
        pset_init(pset, node);

        simple_lock(&pset_node_lock);

        prev = &node->psets;
        while (*prev != PROCESSOR_SET_NULL)
            prev = &(*prev)->pset_list;

        *prev = pset;

        simple_unlock(&pset_node_lock);
    }

    return (pset);
}

/*
 *	Initialize the given processor_set structure.
 */
void
pset_init(
    processor_set_t     pset,
    pset_node_t         node)
{
    if (pset != &pset0) {
        /* Scheduler state deferred until sched_init() */
        SCHED(pset_init)(pset);
    }

    queue_init(&pset->active_queue);
    queue_init(&pset->idle_queue);
    queue_init(&pset->idle_secondary_queue);
    pset->online_processor_count = 0;
    pset->cpu_set_low = pset->cpu_set_hi = 0;
    pset->cpu_set_count = 0;
    pset->recommended_bitmask = ~0ULL;
    pset->pending_AST_cpu_mask = 0;
#if defined(CONFIG_SCHED_DEFERRED_AST)
    pset->pending_deferred_AST_cpu_mask = 0;
#endif
    pset_lock_init(pset);
    pset->pset_self = IP_NULL;
    pset->pset_name_self = IP_NULL;
    pset->pset_list = PROCESSOR_SET_NULL;
    pset->node = node;
}

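/*
 *	processor_info_count:
 *
 *	Return the count of info array elements that processor_info()
 *	fills in for the given flavor.
 */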
kern_return_t
processor_info_count(
    processor_flavor_t      flavor,
    mach_msg_type_number_t  *count)
{
    switch (flavor) {

    case PROCESSOR_BASIC_INFO:
        *count = PROCESSOR_BASIC_INFO_COUNT;
        break;

    case PROCESSOR_CPU_LOAD_INFO:
        *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
        break;

    default:
        return (cpu_info_count(flavor, count));
    }

    return (KERN_SUCCESS);
}

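/*
 *	processor_info:
 *
 *	Return information about the given processor for the requested
 *	flavor: basic info, CPU load info, or a machine-dependent flavor
 *	handled by cpu_info().
 */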
kern_return_t
processor_info(
    processor_t             processor,
    processor_flavor_t      flavor,
    host_t                  *host,
    processor_info_t        info,
    mach_msg_type_number_t  *count)
{
    int             cpu_id, state;
    kern_return_t   result;

    if (processor == PROCESSOR_NULL)
        return (KERN_INVALID_ARGUMENT);

    cpu_id = processor->cpu_id;

    switch (flavor) {

    case PROCESSOR_BASIC_INFO:
    {
        processor_basic_info_t  basic_info;

        if (*count < PROCESSOR_BASIC_INFO_COUNT)
            return (KERN_FAILURE);

        basic_info = (processor_basic_info_t) info;
        basic_info->cpu_type = slot_type(cpu_id);
        basic_info->cpu_subtype = slot_subtype(cpu_id);
        state = processor->state;
        if (state == PROCESSOR_OFF_LINE)
            basic_info->running = FALSE;
        else
            basic_info->running = TRUE;
        basic_info->slot_num = cpu_id;
        if (processor == master_processor)
            basic_info->is_master = TRUE;
        else
            basic_info->is_master = FALSE;

        *count = PROCESSOR_BASIC_INFO_COUNT;
        *host = &realhost;

        return (KERN_SUCCESS);
    }

    case PROCESSOR_CPU_LOAD_INFO:
    {
        processor_cpu_load_info_t   cpu_load_info;
        timer_t     idle_state;
        uint64_t    idle_time_snapshot1, idle_time_snapshot2;
        uint64_t    idle_time_tstamp1, idle_time_tstamp2;

        /*
         * We capture the accumulated idle time twice over
         * the course of this function, as well as the timestamps
         * when each was last updated. Since these are
         * all done using non-atomic racy mechanisms, the
         * most we can infer is whether values are stable.
         * timer_grab() is the only function that can be
         * used reliably on another processor's per-processor
         * data.
         */

        if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
            return (KERN_FAILURE);

        cpu_load_info = (processor_cpu_load_info_t) info;
        if (precise_user_kernel_time) {
            cpu_load_info->cpu_ticks[CPU_STATE_USER] =
                (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
            cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
                (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
        } else {
            uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
                timer_grab(&PROCESSOR_DATA(processor, system_state));

            cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
            cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
        }

        idle_state = &PROCESSOR_DATA(processor, idle_state);
        idle_time_snapshot1 = timer_grab(idle_state);
        idle_time_tstamp1 = idle_state->tstamp;

        /*
         * Idle processors are not continually updating their
         * per-processor idle timer, so it may be extremely
         * out of date, resulting in an over-representation
         * of non-idle time between two measurement
         * intervals by e.g. top(1). If we are non-idle, or
         * have evidence that the timer is being updated
         * concurrently, we consider its value up-to-date.
         */
        if (PROCESSOR_DATA(processor, current_state) != idle_state) {
            cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
                (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
        } else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
                   (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))){
            /* Idle timer is being updated concurrently, second stamp is good enough */
            cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
                (uint32_t)(idle_time_snapshot2 / hz_tick_interval);
        } else {
            /*
             * Idle timer may be very stale. Fortunately we have established
             * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
             */
            idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

            cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
                (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
        }

        cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

        *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
        *host = &realhost;

        return (KERN_SUCCESS);
    }

    default:
        result = cpu_info(flavor, cpu_id, info, count);
        if (result == KERN_SUCCESS)
            *host = &realhost;

        return (result);
    }
}

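/*
 *	processor_start:
 *
 *	Bring a processor online: create its idle thread and, on first
 *	start, a dedicated startup thread, then call cpu_start().
 */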
kern_return_t
processor_start(
    processor_t     processor)
{
    processor_set_t pset;
    thread_t        thread;
    kern_return_t   result;
    spl_t           s;

    if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (processor == master_processor) {
        processor_t     prev;

        prev = thread_bind(processor);
        thread_block(THREAD_CONTINUE_NULL);

        result = cpu_start(processor->cpu_id);

        thread_bind(prev);

        return (result);
    }

    s = splsched();
    pset = processor->processor_set;
    pset_lock(pset);
    if (processor->state != PROCESSOR_OFF_LINE) {
        pset_unlock(pset);
        splx(s);

        return (KERN_FAILURE);
    }

    processor->state = PROCESSOR_START;
    pset_unlock(pset);
    splx(s);

    /*
     *	Create the idle processor thread.
     */
    if (processor->idle_thread == THREAD_NULL) {
        result = idle_thread_create(processor);
        if (result != KERN_SUCCESS) {
            s = splsched();
            pset_lock(pset);
            processor->state = PROCESSOR_OFF_LINE;
            pset_unlock(pset);
            splx(s);

            return (result);
        }
    }

    /*
     *	If there is no active thread, the processor
     *	has never been started.  Create a dedicated
     *	start up thread.
     */
    if (processor->active_thread == THREAD_NULL &&
        processor->next_thread == THREAD_NULL) {
        result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
        if (result != KERN_SUCCESS) {
            s = splsched();
            pset_lock(pset);
            processor->state = PROCESSOR_OFF_LINE;
            pset_unlock(pset);
            splx(s);

            return (result);
        }

        s = splsched();
        thread_lock(thread);
        thread->bound_processor = processor;
        processor->next_thread = thread;
        thread->state = TH_RUN;
        thread->last_made_runnable_time = mach_absolute_time();
        thread_unlock(thread);
        splx(s);

        thread_deallocate(thread);
    }

    if (processor->processor_self == IP_NULL)
        ipc_processor_init(processor);

    result = cpu_start(processor->cpu_id);
    if (result != KERN_SUCCESS) {
        s = splsched();
        pset_lock(pset);
        processor->state = PROCESSOR_OFF_LINE;
        pset_unlock(pset);
        splx(s);

        return (result);
    }

    ipc_processor_enable(processor);

    return (KERN_SUCCESS);
}

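/*
 *	processor_exit:
 *
 *	Take the given processor offline via processor_shutdown().
 */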
kern_return_t
processor_exit(
    processor_t     processor)
{
    if (processor == PROCESSOR_NULL)
        return(KERN_INVALID_ARGUMENT);

    return(processor_shutdown(processor));
}

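/*
 *	processor_control:
 *
 *	Pass a machine-dependent control command through to cpu_control().
 */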
kern_return_t
processor_control(
    processor_t             processor,
    processor_info_t        info,
    mach_msg_type_number_t  count)
{
    if (processor == PROCESSOR_NULL)
        return(KERN_INVALID_ARGUMENT);

    return(cpu_control(processor->cpu_id, info, count));
}

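/*
 *	processor_set_create / processor_set_destroy:
 *
 *	Creation and destruction of processor sets is not supported;
 *	these interfaces always fail.
 */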
kern_return_t
processor_set_create(
    __unused host_t             host,
    __unused processor_set_t    *new_set,
    __unused processor_set_t    *new_name)
{
    return(KERN_FAILURE);
}

kern_return_t
processor_set_destroy(
    __unused processor_set_t    pset)
{
    return(KERN_FAILURE);
}

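/*
 *	processor_get_assignment:
 *
 *	Return the processor set the processor is assigned to (always
 *	pset0), failing if the processor is shut down or offline.
 */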
kern_return_t
processor_get_assignment(
    processor_t         processor,
    processor_set_t     *pset)
{
    int state;

    if (processor == PROCESSOR_NULL)
        return(KERN_INVALID_ARGUMENT);

    state = processor->state;
    if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
        return(KERN_FAILURE);

    *pset = &pset0;

    return(KERN_SUCCESS);
}

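/*
 *	processor_set_info:
 *
 *	Return information about the processor set for the requested
 *	flavor: basic info, default policy bases, policy limits, or the
 *	set of enabled policies.
 */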
kern_return_t
processor_set_info(
    processor_set_t         pset,
    int                     flavor,
    host_t                  *host,
    processor_set_info_t    info,
    mach_msg_type_number_t  *count)
{
    if (pset == PROCESSOR_SET_NULL)
        return(KERN_INVALID_ARGUMENT);

    if (flavor == PROCESSOR_SET_BASIC_INFO) {
        processor_set_basic_info_t  basic_info;

        if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
            return(KERN_FAILURE);

        basic_info = (processor_set_basic_info_t) info;
        basic_info->processor_count = processor_avail_count;
        basic_info->default_policy = POLICY_TIMESHARE;

        *count = PROCESSOR_SET_BASIC_INFO_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
        policy_timeshare_base_t ts_base;

        if (*count < POLICY_TIMESHARE_BASE_COUNT)
            return(KERN_FAILURE);

        ts_base = (policy_timeshare_base_t) info;
        ts_base->base_priority = BASEPRI_DEFAULT;

        *count = POLICY_TIMESHARE_BASE_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
        policy_fifo_base_t      fifo_base;

        if (*count < POLICY_FIFO_BASE_COUNT)
            return(KERN_FAILURE);

        fifo_base = (policy_fifo_base_t) info;
        fifo_base->base_priority = BASEPRI_DEFAULT;

        *count = POLICY_FIFO_BASE_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
        policy_rr_base_t        rr_base;

        if (*count < POLICY_RR_BASE_COUNT)
            return(KERN_FAILURE);

        rr_base = (policy_rr_base_t) info;
        rr_base->base_priority = BASEPRI_DEFAULT;
        rr_base->quantum = 1;

        *count = POLICY_RR_BASE_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
        policy_timeshare_limit_t    ts_limit;

        if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
            return(KERN_FAILURE);

        ts_limit = (policy_timeshare_limit_t) info;
        ts_limit->max_priority = MAXPRI_KERNEL;

        *count = POLICY_TIMESHARE_LIMIT_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
        policy_fifo_limit_t     fifo_limit;

        if (*count < POLICY_FIFO_LIMIT_COUNT)
            return(KERN_FAILURE);

        fifo_limit = (policy_fifo_limit_t) info;
        fifo_limit->max_priority = MAXPRI_KERNEL;

        *count = POLICY_FIFO_LIMIT_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_RR_LIMITS) {
        policy_rr_limit_t       rr_limit;

        if (*count < POLICY_RR_LIMIT_COUNT)
            return(KERN_FAILURE);

        rr_limit = (policy_rr_limit_t) info;
        rr_limit->max_priority = MAXPRI_KERNEL;

        *count = POLICY_RR_LIMIT_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
        int                     *enabled;

        if (*count < (sizeof(*enabled)/sizeof(int)))
            return(KERN_FAILURE);

        enabled = (int *) info;
        *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

        *count = sizeof(*enabled)/sizeof(int);
        *host = &realhost;
        return(KERN_SUCCESS);
    }

    *host = HOST_NULL;
    return(KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
    processor_set_t         pset,
    int                     flavor,
    processor_set_info_t    info,
    mach_msg_type_number_t  *count)
{
    if (pset == PROCESSOR_SET_NULL || pset != &pset0)
        return (KERN_INVALID_PROCESSOR_SET);

    if (flavor == PROCESSOR_SET_LOAD_INFO) {
        processor_set_load_info_t load_info;

        if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
            return(KERN_FAILURE);

        load_info = (processor_set_load_info_t) info;

        load_info->mach_factor = sched_mach_factor;
        load_info->load_average = sched_load_average;

        load_info->task_count = tasks_count;
        load_info->thread_count = threads_count;

        *count = PROCESSOR_SET_LOAD_INFO_COUNT;
        return(KERN_SUCCESS);
    }

    return(KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_max_priority(
    __unused processor_set_t    pset,
    __unused int                max_priority,
    __unused boolean_t          change_threads)
{
    return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_policy_enable:
 *
 *	Allow indicated policy on processor set.
 */
kern_return_t
processor_set_policy_enable(
    __unused processor_set_t    pset,
    __unused int                policy)
{
    return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 */
kern_return_t
processor_set_policy_disable(
    __unused processor_set_t    pset,
    __unused int                policy,
    __unused boolean_t          change_threads)
{
    return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
    processor_set_t         pset,
    void                    **thing_list,
    mach_msg_type_number_t  *count,
    int                     type)
{
    unsigned int i;
    task_t task;
    thread_t thread;

    task_t *task_list;
    unsigned int actual_tasks;
    vm_size_t task_size, task_size_needed;

    thread_t *thread_list;
    unsigned int actual_threads;
    vm_size_t thread_size, thread_size_needed;

    void *addr, *newaddr;
    vm_size_t size, size_needed;

    if (pset == PROCESSOR_SET_NULL || pset != &pset0)
        return (KERN_INVALID_ARGUMENT);

    task_size = 0;
    task_size_needed = 0;
    task_list = NULL;
    actual_tasks = 0;

    thread_size = 0;
    thread_size_needed = 0;
    thread_list = NULL;
    actual_threads = 0;

    for (;;) {
        lck_mtx_lock(&tasks_threads_lock);

        /* do we have the memory we need? */
        if (type == PSET_THING_THREAD)
            thread_size_needed = threads_count * sizeof(void *);
#if !CONFIG_MACF
        else
#endif
            task_size_needed = tasks_count * sizeof(void *);

        if (task_size_needed <= task_size &&
            thread_size_needed <= thread_size)
            break;

        /* unlock and allocate more memory */
        lck_mtx_unlock(&tasks_threads_lock);

        /* grow task array */
        if (task_size_needed > task_size) {
            if (task_size != 0)
                kfree(task_list, task_size);

            assert(task_size_needed > 0);
            task_size = task_size_needed;

            task_list = (task_t *)kalloc(task_size);
            if (task_list == NULL) {
                if (thread_size != 0)
                    kfree(thread_list, thread_size);
                return (KERN_RESOURCE_SHORTAGE);
            }
        }

        /* grow thread array */
        if (thread_size_needed > thread_size) {
            if (thread_size != 0)
                kfree(thread_list, thread_size);

            assert(thread_size_needed > 0);
            thread_size = thread_size_needed;

            thread_list = (thread_t *)kalloc(thread_size);
            if (thread_list == 0) {
                if (task_size != 0)
                    kfree(task_list, task_size);
                return (KERN_RESOURCE_SHORTAGE);
            }
        }
    }

    /* OK, have memory and the list locked */

    /* If we need it, get the thread list */
    if (type == PSET_THING_THREAD) {
        for (thread = (thread_t)queue_first(&threads);
             !queue_end(&threads, (queue_entry_t)thread);
             thread = (thread_t)queue_next(&thread->threads)) {
#if defined(SECURE_KERNEL)
            if (thread->task != kernel_task) {
#endif
                thread_reference_internal(thread);
                thread_list[actual_threads++] = thread;
#if defined(SECURE_KERNEL)
            }
#endif
        }
    }
#if !CONFIG_MACF
    else {
#endif
        /* get a list of the tasks */
        for (task = (task_t)queue_first(&tasks);
             !queue_end(&tasks, (queue_entry_t)task);
             task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
            if (task != kernel_task) {
#endif
                task_reference_internal(task);
                task_list[actual_tasks++] = task;
#if defined(SECURE_KERNEL)
            }
#endif
        }
#if !CONFIG_MACF
    }
#endif

    lck_mtx_unlock(&tasks_threads_lock);

#if CONFIG_MACF
    unsigned int j, used;

    /* for each task, make sure we are allowed to examine it */
    for (i = used = 0; i < actual_tasks; i++) {
        if (mac_task_check_expose_task(task_list[i])) {
            task_deallocate(task_list[i]);
            continue;
        }
        task_list[used++] = task_list[i];
    }
    actual_tasks = used;
    task_size_needed = actual_tasks * sizeof(void *);

    if (type == PSET_THING_THREAD) {

        /* for each thread (if any), make sure its task is in the allowed list */
        for (i = used = 0; i < actual_threads; i++) {
            boolean_t found_task = FALSE;

            task = thread_list[i]->task;
            for (j = 0; j < actual_tasks; j++) {
                if (task_list[j] == task) {
                    found_task = TRUE;
                    break;
                }
            }
            if (found_task)
                thread_list[used++] = thread_list[i];
            else
                thread_deallocate(thread_list[i]);
        }
        actual_threads = used;
        thread_size_needed = actual_threads * sizeof(void *);

        /* done with the task list */
        for (i = 0; i < actual_tasks; i++)
            task_deallocate(task_list[i]);
        kfree(task_list, task_size);
        task_size = 0;
        actual_tasks = 0;
        task_list = NULL;
    }
#endif

    if (type == PSET_THING_THREAD) {
        if (actual_threads == 0) {
            /* no threads available to return */
            assert(task_size == 0);
            if (thread_size != 0)
                kfree(thread_list, thread_size);
            *thing_list = NULL;
            *count = 0;
            return KERN_SUCCESS;
        }
        size_needed = actual_threads * sizeof(void *);
        size = thread_size;
        addr = thread_list;
    } else {
        if (actual_tasks == 0) {
            /* no tasks available to return */
            assert(thread_size == 0);
            if (task_size != 0)
                kfree(task_list, task_size);
            *thing_list = NULL;
            *count = 0;
            return KERN_SUCCESS;
        }
        size_needed = actual_tasks * sizeof(void *);
        size = task_size;
        addr = task_list;
    }

    /* if we allocated too much, must copy */
    if (size_needed < size) {
        newaddr = kalloc(size_needed);
        if (newaddr == 0) {
            for (i = 0; i < actual_tasks; i++) {
                if (type == PSET_THING_THREAD)
                    thread_deallocate(thread_list[i]);
                else
                    task_deallocate(task_list[i]);
            }
            if (size)
                kfree(addr, size);
            return (KERN_RESOURCE_SHORTAGE);
        }

        bcopy((void *) addr, (void *) newaddr, size_needed);
        kfree(addr, size);

        addr = newaddr;
        size = size_needed;
    }

    *thing_list = (void **)addr;
    *count = (unsigned int)size / sizeof(void *);

    return (KERN_SUCCESS);
}

/*
 *	processor_set_tasks:
 *
 *	List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
    processor_set_t         pset,
    task_array_t            *task_list,
    mach_msg_type_number_t  *count)
{
    kern_return_t ret;
    mach_msg_type_number_t i;

    ret = processor_set_things(pset, (void **)task_list, count, PSET_THING_TASK);
    if (ret != KERN_SUCCESS)
        return ret;

    /* do the conversion that Mig should handle */
    for (i = 0; i < *count; i++)
        (*task_list)[i] = (task_t)convert_task_to_port((*task_list)[i]);
    return KERN_SUCCESS;
}

/*
 *	processor_set_threads:
 *
 *	List all threads in the processor set.
 */
#if defined(SECURE_KERNEL)
kern_return_t
processor_set_threads(
    __unused processor_set_t        pset,
    __unused thread_array_t         *thread_list,
    __unused mach_msg_type_number_t *count)
{
    return KERN_FAILURE;
}
#else
kern_return_t
processor_set_threads(
    processor_set_t         pset,
    thread_array_t          *thread_list,
    mach_msg_type_number_t  *count)
{
    kern_return_t ret;
    mach_msg_type_number_t i;

    ret = processor_set_things(pset, (void **)thread_list, count, PSET_THING_THREAD);
    if (ret != KERN_SUCCESS)
        return ret;

    /* do the conversion that Mig should handle */
    for (i = 0; i < *count; i++)
        (*thread_list)[i] = (thread_t)convert_thread_to_port((*thread_list)[i]);
    return KERN_SUCCESS;
}
#endif

/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 */
kern_return_t
processor_set_policy_control(
    __unused processor_set_t        pset,
    __unused int                    flavor,
    __unused processor_set_info_t   policy_info,
    __unused mach_msg_type_number_t count,
    __unused boolean_t              change)
{
    return (KERN_INVALID_ARGUMENT);
}

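/*
 *	pset_deallocate / pset_reference are normally macros; undefine
 *	them here so out-of-line, no-op function definitions can be
 *	provided for external callers.
 */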
#undef pset_deallocate
void pset_deallocate(processor_set_t pset);
void
pset_deallocate(
    __unused processor_set_t   pset)
{
    return;
}

#undef pset_reference
void pset_reference(processor_set_t pset);
void
pset_reference(
    __unused processor_set_t   pset)
{
    return;
}