/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * processor.c: processor and processor_set manipulation routines.
 */

#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

/*
 * Exported interface
 */
#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>

struct processor_set   pset0;
struct pset_node       pset_node0;
decl_simple_lock_data(static,pset_node_lock)

queue_head_t           tasks;
queue_head_t           terminated_tasks;  /* To be used ONLY for stackshot. */
int                    tasks_count;
int                    terminated_tasks_count;
queue_head_t           threads;
int                    threads_count;
decl_lck_mtx_data(,tasks_threads_lock)

processor_t            processor_list;
unsigned int           processor_count;
static processor_t     processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

uint32_t               processor_avail_count;

processor_t            master_processor;
int                    master_cpu = 0;
boolean_t              sched_stats_active = FALSE;

/* Forwards */
kern_return_t processor_set_things(
        processor_set_t         pset,
        mach_port_t             **thing_list,
        mach_msg_type_number_t  *count,
        int                     type);

void
processor_bootstrap(void)
{
        pset_init(&pset0, &pset_node0);
        pset_node0.psets = &pset0;

        simple_lock_init(&pset_node_lock, 0);

        queue_init(&tasks);
        queue_init(&terminated_tasks);
        queue_init(&threads);

        simple_lock_init(&processor_list_lock, 0);

        master_processor = cpu_to_processor(master_cpu);

        processor_init(master_processor, master_cpu, &pset0);
}

/*
 * Initialize the given processor for the cpu
 * indicated by cpu_id, and assign to the
 * specified processor set.
 */
void
processor_init(
        processor_t     processor,
        int             cpu_id,
        processor_set_t pset)
{
        spl_t   s;

        if (processor != master_processor) {
                /* Scheduler state deferred until sched_init() */
                SCHED(processor_init)(processor);
        }

        processor->state = PROCESSOR_OFF_LINE;
        processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
        processor->processor_set = pset;
        processor->current_pri = MINPRI;
        processor->current_thmode = TH_MODE_NONE;
        processor->cpu_id = cpu_id;
        timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
        processor->deadline = UINT64_MAX;
        processor->timeslice = 0;
        processor->processor_meta = PROCESSOR_META_NULL;
        processor->processor_self = IP_NULL;
        processor_data_init(processor);
        processor->processor_list = NULL;

        s = splsched();
        pset_lock(pset);
        if (pset->cpu_set_count++ == 0)
                pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
        else {
                pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
                pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
        }
        pset_unlock(pset);
        splx(s);

        simple_lock(&processor_list_lock);
        if (processor_list == NULL)
                processor_list = processor;
        else
                processor_list_tail->processor_list = processor;
        processor_list_tail = processor;
        processor_count++;
        simple_unlock(&processor_list_lock);
}
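
/*
 * Illustrative sketch (not part of the build): platform startup code is
 * expected to call processor_init() once per discovered CPU, after
 * processor_bootstrap() has set up pset0 and the master processor. The
 * platform_cpu_count()/platform_cpu_slot() helpers below are hypothetical
 * stand-ins for the machine-dependent layer.
 */
#if 0
static void
example_register_secondary_cpus(void)
{
        int             cpu_id;
        processor_t     processor;

        for (cpu_id = 1; cpu_id < platform_cpu_count(); cpu_id++) {
                processor = platform_cpu_slot(cpu_id);

                /* Adds the CPU to pset0 and to the global processor_list. */
                processor_init(processor, cpu_id, &pset0);
        }
}
#endif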

void
processor_meta_init(
        processor_t     processor,
        processor_t     primary)
{
        processor_meta_t pmeta = primary->processor_meta;

        if (pmeta == PROCESSOR_META_NULL) {
                pmeta = kalloc(sizeof (*pmeta));

                queue_init(&pmeta->idle_queue);

                pmeta->primary = primary;
        }

        processor->processor_meta = pmeta;
}

processor_set_t
processor_pset(
        processor_t     processor)
{
        return (processor->processor_set);
}

pset_node_t
pset_node_root(void)
{
        return &pset_node0;
}

processor_set_t
pset_create(
        pset_node_t     node)
{
        processor_set_t *prev, pset = kalloc(sizeof (*pset));

        if (pset != PROCESSOR_SET_NULL) {
                pset_init(pset, node);

                simple_lock(&pset_node_lock);

                prev = &node->psets;
                while (*prev != PROCESSOR_SET_NULL)
                        prev = &(*prev)->pset_list;

                *prev = pset;

                simple_unlock(&pset_node_lock);
        }

        return (pset);
}
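
/*
 * Aside (not part of the build): pset_create() appends to the node's
 * singly-linked pset list with the pointer-to-pointer idiom, which
 * avoids special-casing an empty list. A minimal standalone sketch:
 */
#if 0
struct elem {
        struct elem     *next;
};

static void
example_append(struct elem **head, struct elem *e)
{
        struct elem     **prev = head;

        /* Walk to the terminating NULL link, whether or not the list is empty. */
        while (*prev != NULL)
                prev = &(*prev)->next;

        *prev = e;
        e->next = NULL;
}
#endif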

/*
 * Initialize the given processor_set structure.
 */
void
pset_init(
        processor_set_t pset,
        pset_node_t     node)
{
        if (pset != &pset0) {
                /* Scheduler state deferred until sched_init() */
                SCHED(pset_init)(pset);
        }

        queue_init(&pset->active_queue);
        queue_init(&pset->idle_queue);
        pset->online_processor_count = 0;
        pset_pri_init_hint(pset, PROCESSOR_NULL);
        pset_count_init_hint(pset, PROCESSOR_NULL);
        pset->cpu_set_low = pset->cpu_set_hi = 0;
        pset->cpu_set_count = 0;
        pset->pending_AST_cpu_mask = 0;
        pset_lock_init(pset);
        pset->pset_self = IP_NULL;
        pset->pset_name_self = IP_NULL;
        pset->pset_list = PROCESSOR_SET_NULL;
        pset->node = node;
}

kern_return_t
processor_info_count(
        processor_flavor_t      flavor,
        mach_msg_type_number_t  *count)
{
        switch (flavor) {

        case PROCESSOR_BASIC_INFO:
                *count = PROCESSOR_BASIC_INFO_COUNT;
                break;

        case PROCESSOR_CPU_LOAD_INFO:
                *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
                break;

        default:
                return (cpu_info_count(flavor, count));
        }

        return (KERN_SUCCESS);
}

kern_return_t
processor_info(
        register processor_t    processor,
        processor_flavor_t      flavor,
        host_t                  *host,
        processor_info_t        info,
        mach_msg_type_number_t  *count)
{
        register int    cpu_id, state;
        kern_return_t   result;

        if (processor == PROCESSOR_NULL)
                return (KERN_INVALID_ARGUMENT);

        cpu_id = processor->cpu_id;

        switch (flavor) {

        case PROCESSOR_BASIC_INFO:
        {
                register processor_basic_info_t basic_info;

                if (*count < PROCESSOR_BASIC_INFO_COUNT)
                        return (KERN_FAILURE);

                basic_info = (processor_basic_info_t) info;
                basic_info->cpu_type = slot_type(cpu_id);
                basic_info->cpu_subtype = slot_subtype(cpu_id);
                state = processor->state;
                if (state == PROCESSOR_OFF_LINE)
                        basic_info->running = FALSE;
                else
                        basic_info->running = TRUE;
                basic_info->slot_num = cpu_id;
                if (processor == master_processor)
                        basic_info->is_master = TRUE;
                else
                        basic_info->is_master = FALSE;

                *count = PROCESSOR_BASIC_INFO_COUNT;
                *host = &realhost;

                return (KERN_SUCCESS);
        }

        case PROCESSOR_CPU_LOAD_INFO:
        {
                processor_cpu_load_info_t       cpu_load_info;
                timer_t         idle_state;
                uint64_t        idle_time_snapshot1, idle_time_snapshot2;
                uint64_t        idle_time_tstamp1, idle_time_tstamp2;

                /*
                 * We capture the accumulated idle time twice over
                 * the course of this function, as well as the timestamps
                 * when each was last updated. Since these are
                 * all done using non-atomic racy mechanisms, the
                 * most we can infer is whether the values are stable.
                 * timer_grab() is the only function that can be
                 * used reliably on another processor's per-processor
                 * data.
                 */

                if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
                        return (KERN_FAILURE);

                cpu_load_info = (processor_cpu_load_info_t) info;
                if (precise_user_kernel_time) {
                        cpu_load_info->cpu_ticks[CPU_STATE_USER] =
                                (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
                        cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
                                (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
                } else {
                        uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
                                timer_grab(&PROCESSOR_DATA(processor, system_state));

                        cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
                        cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
                }

                idle_state = &PROCESSOR_DATA(processor, idle_state);
                idle_time_snapshot1 = timer_grab(idle_state);
                idle_time_tstamp1 = idle_state->tstamp;

                /*
                 * Idle processors are not continually updating their
                 * per-processor idle timer, so it may be extremely
                 * out of date, resulting in an over-representation
                 * of non-idle time between two measurement
                 * intervals by e.g. top(1). If we are non-idle, or
                 * have evidence that the timer is being updated
                 * concurrently, we consider its value up-to-date.
                 */
                if (PROCESSOR_DATA(processor, current_state) != idle_state) {
                        cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
                                (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
                } else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
                           (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
                        /* Idle timer is being updated concurrently; the second stamp is good enough. */
                        cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
                                (uint32_t)(idle_time_snapshot2 / hz_tick_interval);
                } else {
                        /*
                         * The idle timer may be very stale. Fortunately we have
                         * established that idle_time_snapshot1 and idle_time_tstamp1
                         * are unchanging, so we can extrapolate from the timestamp.
                         */
                        idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

                        cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
                                (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
                }

                cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

                *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
                *host = &realhost;

                return (KERN_SUCCESS);
        }

        default:
                result = cpu_info(flavor, cpu_id, info, count);
                if (result == KERN_SUCCESS)
                        *host = &realhost;

                return (result);
        }
}

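/*
 * Illustrative sketch (not part of the build): from user space, the
 * PROCESSOR_CPU_LOAD_INFO data computed above is most easily reached
 * through host_processor_info(). This is a plain user-level program,
 * shown here only to document the interface.
 */
#if 0
#include <mach/mach.h>
#include <stdio.h>

int
main(void)
{
        natural_t                       cpu_count;
        processor_info_array_t          info;
        mach_msg_type_number_t          info_count;
        processor_cpu_load_info_t       load;
        natural_t                       i;
        kern_return_t                   kr;

        kr = host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO,
                                 &cpu_count, &info, &info_count);
        if (kr != KERN_SUCCESS)
                return (1);

        load = (processor_cpu_load_info_t) info;
        for (i = 0; i < cpu_count; i++)
                printf("cpu%u: user %u system %u idle %u\n", i,
                       load[i].cpu_ticks[CPU_STATE_USER],
                       load[i].cpu_ticks[CPU_STATE_SYSTEM],
                       load[i].cpu_ticks[CPU_STATE_IDLE]);

        /* The out-of-line array must be deallocated by the caller. */
        vm_deallocate(mach_task_self(), (vm_address_t) info,
                      info_count * sizeof (integer_t));
        return (0);
}
#endif
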
kern_return_t
processor_start(
        processor_t     processor)
{
        processor_set_t pset;
        thread_t        thread;
        kern_return_t   result;
        spl_t           s;

        if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
                return (KERN_INVALID_ARGUMENT);

        if (processor == master_processor) {
                processor_t     prev;

                prev = thread_bind(processor);
                thread_block(THREAD_CONTINUE_NULL);

                result = cpu_start(processor->cpu_id);

                thread_bind(prev);

                return (result);
        }

        s = splsched();
        pset = processor->processor_set;
        pset_lock(pset);
        if (processor->state != PROCESSOR_OFF_LINE) {
                pset_unlock(pset);
                splx(s);

                return (KERN_FAILURE);
        }

        processor->state = PROCESSOR_START;
        pset_unlock(pset);
        splx(s);

        /*
         * Create the idle processor thread.
         */
        if (processor->idle_thread == THREAD_NULL) {
                result = idle_thread_create(processor);
                if (result != KERN_SUCCESS) {
                        s = splsched();
                        pset_lock(pset);
                        processor->state = PROCESSOR_OFF_LINE;
                        pset_unlock(pset);
                        splx(s);

                        return (result);
                }
        }

        /*
         * If there is no active thread, the processor
         * has never been started. Create a dedicated
         * startup thread.
         */
        if (processor->active_thread == THREAD_NULL &&
            processor->next_thread == THREAD_NULL) {
                result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
                if (result != KERN_SUCCESS) {
                        s = splsched();
                        pset_lock(pset);
                        processor->state = PROCESSOR_OFF_LINE;
                        pset_unlock(pset);
                        splx(s);

                        return (result);
                }

                s = splsched();
                thread_lock(thread);
                thread->bound_processor = processor;
                processor->next_thread = thread;
                thread->state = TH_RUN;
                thread_unlock(thread);
                splx(s);

                thread_deallocate(thread);
        }

        if (processor->processor_self == IP_NULL)
                ipc_processor_init(processor);

        result = cpu_start(processor->cpu_id);
        if (result != KERN_SUCCESS) {
                s = splsched();
                pset_lock(pset);
                processor->state = PROCESSOR_OFF_LINE;
                pset_unlock(pset);
                splx(s);

                return (result);
        }

        ipc_processor_enable(processor);

        return (KERN_SUCCESS);
}
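
/*
 * Illustrative sketch (not part of the build): processor_start() and
 * processor_exit() are also exported through MIG, so a privileged
 * user-space tool can take a CPU offline and bring it back using the
 * processor ports returned by host_processors(). Assumes the caller
 * can obtain the privileged host port (i.e. runs as root).
 */
#if 0
#include <mach/mach.h>

kern_return_t
example_cycle_cpu(host_priv_t host_priv, unsigned int which)
{
        processor_array_t       procs;
        mach_msg_type_number_t  count;
        kern_return_t           kr;

        kr = host_processors(host_priv, &procs, &count);
        if (kr != KERN_SUCCESS || which >= count)
                return (KERN_FAILURE);

        /* Take the CPU offline, then start it again. */
        kr = processor_exit(procs[which]);
        if (kr == KERN_SUCCESS)
                kr = processor_start(procs[which]);

        return (kr);
}
#endif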

kern_return_t
processor_exit(
        processor_t     processor)
{
        if (processor == PROCESSOR_NULL)
                return (KERN_INVALID_ARGUMENT);

        return (processor_shutdown(processor));
}

kern_return_t
processor_control(
        processor_t             processor,
        processor_info_t        info,
        mach_msg_type_number_t  count)
{
        if (processor == PROCESSOR_NULL)
                return (KERN_INVALID_ARGUMENT);

        return (cpu_control(processor->cpu_id, info, count));
}

kern_return_t
processor_set_create(
        __unused host_t                 host,
        __unused processor_set_t        *new_set,
        __unused processor_set_t        *new_name)
{
        return (KERN_FAILURE);
}

kern_return_t
processor_set_destroy(
        __unused processor_set_t        pset)
{
        return (KERN_FAILURE);
}

kern_return_t
processor_get_assignment(
        processor_t     processor,
        processor_set_t *pset)
{
        int state;

        if (processor == PROCESSOR_NULL)
                return (KERN_INVALID_ARGUMENT);

        state = processor->state;
        if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
                return (KERN_FAILURE);

        *pset = &pset0;

        return (KERN_SUCCESS);
}

kern_return_t
processor_set_info(
        processor_set_t         pset,
        int                     flavor,
        host_t                  *host,
        processor_set_info_t    info,
        mach_msg_type_number_t  *count)
{
        if (pset == PROCESSOR_SET_NULL)
                return (KERN_INVALID_ARGUMENT);

        if (flavor == PROCESSOR_SET_BASIC_INFO) {
                register processor_set_basic_info_t     basic_info;

                if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
                        return (KERN_FAILURE);

                basic_info = (processor_set_basic_info_t) info;
                basic_info->processor_count = processor_avail_count;
                basic_info->default_policy = POLICY_TIMESHARE;

                *count = PROCESSOR_SET_BASIC_INFO_COUNT;
                *host = &realhost;
                return (KERN_SUCCESS);
        }
        else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
                register policy_timeshare_base_t        ts_base;

                if (*count < POLICY_TIMESHARE_BASE_COUNT)
                        return (KERN_FAILURE);

                ts_base = (policy_timeshare_base_t) info;
                ts_base->base_priority = BASEPRI_DEFAULT;

                *count = POLICY_TIMESHARE_BASE_COUNT;
                *host = &realhost;
                return (KERN_SUCCESS);
        }
        else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
                register policy_fifo_base_t     fifo_base;

                if (*count < POLICY_FIFO_BASE_COUNT)
                        return (KERN_FAILURE);

                fifo_base = (policy_fifo_base_t) info;
                fifo_base->base_priority = BASEPRI_DEFAULT;

                *count = POLICY_FIFO_BASE_COUNT;
                *host = &realhost;
                return (KERN_SUCCESS);
        }
        else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
                register policy_rr_base_t       rr_base;

                if (*count < POLICY_RR_BASE_COUNT)
                        return (KERN_FAILURE);

                rr_base = (policy_rr_base_t) info;
                rr_base->base_priority = BASEPRI_DEFAULT;
                rr_base->quantum = 1;

                *count = POLICY_RR_BASE_COUNT;
                *host = &realhost;
                return (KERN_SUCCESS);
        }
        else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
                register policy_timeshare_limit_t       ts_limit;

                if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
                        return (KERN_FAILURE);

                ts_limit = (policy_timeshare_limit_t) info;
                ts_limit->max_priority = MAXPRI_KERNEL;

                *count = POLICY_TIMESHARE_LIMIT_COUNT;
                *host = &realhost;
                return (KERN_SUCCESS);
        }
        else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
                register policy_fifo_limit_t    fifo_limit;

                if (*count < POLICY_FIFO_LIMIT_COUNT)
                        return (KERN_FAILURE);

                fifo_limit = (policy_fifo_limit_t) info;
                fifo_limit->max_priority = MAXPRI_KERNEL;

                *count = POLICY_FIFO_LIMIT_COUNT;
                *host = &realhost;
                return (KERN_SUCCESS);
        }
        else if (flavor == PROCESSOR_SET_RR_LIMITS) {
                register policy_rr_limit_t      rr_limit;

                if (*count < POLICY_RR_LIMIT_COUNT)
                        return (KERN_FAILURE);

                rr_limit = (policy_rr_limit_t) info;
                rr_limit->max_priority = MAXPRI_KERNEL;

                *count = POLICY_RR_LIMIT_COUNT;
                *host = &realhost;
                return (KERN_SUCCESS);
        }
        else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
                register int    *enabled;

                if (*count < (sizeof(*enabled)/sizeof(int)))
                        return (KERN_FAILURE);

                enabled = (int *) info;
                *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

                *count = sizeof(*enabled)/sizeof(int);
                *host = &realhost;
                return (KERN_SUCCESS);
        }

        *host = HOST_NULL;
        return (KERN_INVALID_ARGUMENT);
}

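/*
 * Illustrative sketch (not part of the build): PROCESSOR_SET_BASIC_INFO
 * can be queried from user space against the default processor set's
 * name port. Plain user-level code, shown only to document the interface.
 */
#if 0
#include <mach/mach.h>

kern_return_t
example_pset_basic_info(void)
{
        processor_set_name_t            pset;
        processor_set_basic_info_data_t basic;
        mach_msg_type_number_t          count = PROCESSOR_SET_BASIC_INFO_COUNT;
        host_t                          host;
        kern_return_t                   kr;

        kr = processor_set_default(mach_host_self(), &pset);
        if (kr != KERN_SUCCESS)
                return (kr);

        return (processor_set_info(pset, PROCESSOR_SET_BASIC_INFO, &host,
                                   (processor_set_info_t) &basic, &count));
}
#endif
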
/*
 * processor_set_statistics
 *
 * Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
        processor_set_t         pset,
        int                     flavor,
        processor_set_info_t    info,
        mach_msg_type_number_t  *count)
{
        if (pset == PROCESSOR_SET_NULL || pset != &pset0)
                return (KERN_INVALID_PROCESSOR_SET);

        if (flavor == PROCESSOR_SET_LOAD_INFO) {
                register processor_set_load_info_t      load_info;

                if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
                        return (KERN_FAILURE);

                load_info = (processor_set_load_info_t) info;

                load_info->mach_factor = sched_mach_factor;
                load_info->load_average = sched_load_average;

                load_info->task_count = tasks_count;
                load_info->thread_count = threads_count;

                *count = PROCESSOR_SET_LOAD_INFO_COUNT;
                return (KERN_SUCCESS);
        }

        return (KERN_INVALID_ARGUMENT);
}
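
/*
 * Illustrative sketch (not part of the build): the load-average and
 * Mach-factor figures above are readable from unprivileged user space
 * via the MIG routine of the same name, again using the default set's
 * name port.
 */
#if 0
#include <mach/mach.h>

kern_return_t
example_pset_load_info(processor_set_load_info_data_t *out)
{
        processor_set_name_t    pset;
        mach_msg_type_number_t  count = PROCESSOR_SET_LOAD_INFO_COUNT;
        kern_return_t           kr;

        kr = processor_set_default(mach_host_self(), &pset);
        if (kr != KERN_SUCCESS)
                return (kr);

        return (processor_set_statistics(pset, PROCESSOR_SET_LOAD_INFO,
                                         (processor_set_info_t) out, &count));
}
#endif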

/*
 * processor_set_max_priority:
 *
 * Specify max priority permitted on processor set. This affects
 * newly created and assigned threads. Optionally change existing
 * ones.
 */
kern_return_t
processor_set_max_priority(
        __unused processor_set_t        pset,
        __unused int                    max_priority,
        __unused boolean_t              change_threads)
{
        return (KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_policy_enable:
 *
 * Allow indicated policy on processor set.
 */

kern_return_t
processor_set_policy_enable(
        __unused processor_set_t        pset,
        __unused int                    policy)
{
        return (KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_policy_disable:
 *
 * Forbid indicated policy on processor set. Time sharing cannot
 * be forbidden.
 */
kern_return_t
processor_set_policy_disable(
        __unused processor_set_t        pset,
        __unused int                    policy,
        __unused boolean_t              change_threads)
{
        return (KERN_INVALID_ARGUMENT);
}

#define THING_TASK      0
#define THING_THREAD    1

/*
 * processor_set_things:
 *
 * Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
        processor_set_t         pset,
        mach_port_t             **thing_list,
        mach_msg_type_number_t  *count,
        int                     type)
{
        unsigned int actual;    /* this many things */
        unsigned int maxthings;
        unsigned int i;

        vm_size_t size, size_needed;
        void  *addr;

        if (pset == PROCESSOR_SET_NULL || pset != &pset0)
                return (KERN_INVALID_ARGUMENT);

        size = 0;
        addr = NULL;

        /*
         * Since kalloc() may block, the buffer is sized and allocated
         * outside the lock, then rechecked under the lock in case the
         * task/thread count grew in the meantime.
         */
        for (;;) {
                lck_mtx_lock(&tasks_threads_lock);

                if (type == THING_TASK)
                        maxthings = tasks_count;
                else
                        maxthings = threads_count;

                /* do we have the memory we need? */

                size_needed = maxthings * sizeof (mach_port_t);
                if (size_needed <= size)
                        break;

                /* unlock and allocate more memory */
                lck_mtx_unlock(&tasks_threads_lock);

                if (size != 0)
                        kfree(addr, size);

                assert(size_needed > 0);
                size = size_needed;

                addr = kalloc(size);
                if (addr == 0)
                        return (KERN_RESOURCE_SHORTAGE);
        }

        /* OK, have memory and the list locked */

        actual = 0;
        switch (type) {

        case THING_TASK: {
                task_t task, *task_list = (task_t *)addr;

                for (task = (task_t)queue_first(&tasks);
                     !queue_end(&tasks, (queue_entry_t)task);
                     task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
                        if (task != kernel_task) {
#endif
                                task_reference_internal(task);
                                task_list[actual++] = task;
#if defined(SECURE_KERNEL)
                        }
#endif
                }

                break;
        }

        case THING_THREAD: {
                thread_t thread, *thread_list = (thread_t *)addr;

                for (thread = (thread_t)queue_first(&threads);
                     !queue_end(&threads, (queue_entry_t)thread);
                     thread = (thread_t)queue_next(&thread->threads)) {
                        thread_reference_internal(thread);
                        thread_list[actual++] = thread;
                }

                break;
        }

        }

        lck_mtx_unlock(&tasks_threads_lock);

        if (actual < maxthings)
                size_needed = actual * sizeof (mach_port_t);

        if (actual == 0) {
                /* no things, so return null pointer and deallocate memory */
                *thing_list = NULL;
                *count = 0;

                if (size != 0)
                        kfree(addr, size);
        }
        else {
                /* if we allocated too much, must copy */

                if (size_needed < size) {
                        void *newaddr;

                        newaddr = kalloc(size_needed);
                        if (newaddr == 0) {
                                switch (type) {

                                case THING_TASK: {
                                        task_t *task_list = (task_t *)addr;

                                        for (i = 0; i < actual; i++)
                                                task_deallocate(task_list[i]);
                                        break;
                                }

                                case THING_THREAD: {
                                        thread_t *thread_list = (thread_t *)addr;

                                        for (i = 0; i < actual; i++)
                                                thread_deallocate(thread_list[i]);
                                        break;
                                }

                                }

                                kfree(addr, size);
                                return (KERN_RESOURCE_SHORTAGE);
                        }

                        bcopy((void *) addr, (void *) newaddr, size_needed);
                        kfree(addr, size);
                        addr = newaddr;
                }

                *thing_list = (mach_port_t *)addr;
                *count = actual;

                /* do the conversion that MIG should handle */

                switch (type) {

                case THING_TASK: {
                        task_t *task_list = (task_t *)addr;

                        for (i = 0; i < actual; i++)
                                (*thing_list)[i] = convert_task_to_port(task_list[i]);
                        break;
                }

                case THING_THREAD: {
                        thread_t *thread_list = (thread_t *)addr;

                        for (i = 0; i < actual; i++)
                                (*thing_list)[i] = convert_thread_to_port(thread_list[i]);
                        break;
                }

                }
        }

        return (KERN_SUCCESS);
}

/*
 * processor_set_tasks:
 *
 * List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
        processor_set_t         pset,
        task_array_t            *task_list,
        mach_msg_type_number_t  *count)
{
        return (processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
}
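
/*
 * Illustrative sketch (not part of the build): enumerating every task on
 * the system via processor_set_tasks() requires the privileged control
 * port for the default set, obtained through host_processor_set_priv()
 * (root only). Plain user-level code, shown only to document the interface.
 */
#if 0
#include <mach/mach.h>

kern_return_t
example_list_tasks(host_priv_t host_priv)
{
        processor_set_name_t    pset_name;
        processor_set_t         pset;
        task_array_t            task_list;
        mach_msg_type_number_t  task_count, i;
        kern_return_t           kr;

        kr = processor_set_default(mach_host_self(), &pset_name);
        if (kr != KERN_SUCCESS)
                return (kr);

        kr = host_processor_set_priv(host_priv, pset_name, &pset);
        if (kr != KERN_SUCCESS)
                return (kr);

        kr = processor_set_tasks(pset, &task_list, &task_count);
        if (kr != KERN_SUCCESS)
                return (kr);

        /* ... use the task ports, then release them and the array. */
        for (i = 0; i < task_count; i++)
                mach_port_deallocate(mach_task_self(), task_list[i]);
        vm_deallocate(mach_task_self(), (vm_address_t) task_list,
                      task_count * sizeof (task_t));

        return (KERN_SUCCESS);
}
#endif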

/*
 * processor_set_threads:
 *
 * List all threads in the processor set.
 */
#if defined(SECURE_KERNEL)
kern_return_t
processor_set_threads(
        __unused processor_set_t                pset,
        __unused thread_array_t                 *thread_list,
        __unused mach_msg_type_number_t         *count)
{
        return KERN_FAILURE;
}
#else
kern_return_t
processor_set_threads(
        processor_set_t         pset,
        thread_array_t          *thread_list,
        mach_msg_type_number_t  *count)
{
        return (processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}
#endif

/*
 * processor_set_policy_control
 *
 * Controls the scheduling attributes governing the processor set.
 * Allows control of enabled policies, and per-policy base and limit
 * priorities.
 */
kern_return_t
processor_set_policy_control(
        __unused processor_set_t        pset,
        __unused int                    flavor,
        __unused processor_set_info_t   policy_info,
        __unused mach_msg_type_number_t count,
        __unused boolean_t              change)
{
        return (KERN_INVALID_ARGUMENT);
}

#undef pset_deallocate
void pset_deallocate(processor_set_t pset);
void
pset_deallocate(
__unused processor_set_t        pset)
{
        return;
}

#undef pset_reference
void pset_reference(processor_set_t pset);
void
pset_reference(
__unused processor_set_t        pset)
{
        return;
}