/* osfmk/kern/processor.c — apple/xnu (blob ead95b882b2fb689b4c5363e881b7ff540d99181) */
1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 /*
60 * processor.c: processor and processor_set manipulation routines.
61 */
62
63 #include <mach/boolean.h>
64 #include <mach/policy.h>
65 #include <mach/processor.h>
66 #include <mach/processor_info.h>
67 #include <mach/vm_param.h>
68 #include <kern/cpu_number.h>
69 #include <kern/host.h>
70 #include <kern/machine.h>
71 #include <kern/misc_protos.h>
72 #include <kern/processor.h>
73 #include <kern/sched.h>
74 #include <kern/task.h>
75 #include <kern/thread.h>
76 #include <kern/ipc_host.h>
77 #include <kern/ipc_tt.h>
78 #include <ipc/ipc_port.h>
79 #include <kern/kalloc.h>
80
81 /*
82 * Exported interface
83 */
84 #include <mach/mach_host_server.h>
85 #include <mach/processor_set_server.h>
86
87 struct processor_set pset0;
88 struct pset_node pset_node0;
89 decl_simple_lock_data(static,pset_node_lock)
90
91 queue_head_t tasks;
92 queue_head_t terminated_tasks; /* To be used ONLY for stackshot. */
93 int tasks_count;
94 queue_head_t threads;
95 int threads_count;
96 decl_lck_mtx_data(,tasks_threads_lock)
97
98 processor_t processor_list;
99 unsigned int processor_count;
100 static processor_t processor_list_tail;
101 decl_simple_lock_data(,processor_list_lock)
102
103 uint32_t processor_avail_count;
104
105 processor_t master_processor;
106 int master_cpu = 0;
107 boolean_t sched_stats_active = FALSE;
108
109 /* Forwards */
110 kern_return_t processor_set_things(
111 processor_set_t pset,
112 mach_port_t **thing_list,
113 mach_msg_type_number_t *count,
114 int type);
115
/*
 * processor_bootstrap:
 *
 * Early boot initialization of the processor layer: sets up the
 * default processor set (pset0) on the root pset node, the global
 * task/thread bookkeeping queues, and the master processor.
 * Must run before any other processor/pset routine is used.
 */
void
processor_bootstrap(void)
{
	/* pset0 becomes the sole pset hanging off the root node. */
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	/* Global lists walked by processor_set_things() and stackshot. */
	queue_init(&tasks);
	queue_init(&terminated_tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	/* The boot CPU's per-cpu processor structure. */
	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}
134
135 /*
136 * Initialize the given processor for the cpu
137 * indicated by cpu_id, and assign to the
138 * specified processor set.
139 */
void
processor_init(
	processor_t		processor,
	int			cpu_id,
	processor_set_t		pset)
{
	spl_t		s;

	if (processor != master_processor) {
		/* Scheduler state deferred until sched_init() */
		SCHED(processor_init)(processor);
	}

	/* Start off-line; processor_start() transitions the state later. */
	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor->current_pri = MINPRI;
	processor->current_thmode = TH_MODE_NONE;
	processor->cpu_id = cpu_id;
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	/* No realtime deadline pending. */
	processor->deadline = UINT64_MAX;
	processor->timeslice = 0;
	processor->processor_meta = PROCESSOR_META_NULL;
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;

	/*
	 * Fold this cpu_id into the pset's cpu id range.
	 * pset lock is taken at splsched since it is also
	 * acquired from interrupt/scheduler context.
	 */
	s = splsched();
	pset_lock(pset);
	if (pset->cpu_set_count++ == 0)
		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
	else {
		pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
		pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
	}
	pset_unlock(pset);
	splx(s);

	/* Append to the singly-linked global processor list. */
	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	else
		processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	processor_count++;
	simple_unlock(&processor_list_lock);
}
187
188 void
189 processor_meta_init(
190 processor_t processor,
191 processor_t primary)
192 {
193 processor_meta_t pmeta = primary->processor_meta;
194
195 if (pmeta == PROCESSOR_META_NULL) {
196 pmeta = kalloc(sizeof (*pmeta));
197
198 queue_init(&pmeta->idle_queue);
199
200 pmeta->primary = primary;
201 }
202
203 processor->processor_meta = pmeta;
204 }
205
206 processor_set_t
207 processor_pset(
208 processor_t processor)
209 {
210 return (processor->processor_set);
211 }
212
213 pset_node_t
214 pset_node_root(void)
215 {
216 return &pset_node0;
217 }
218
219 processor_set_t
220 pset_create(
221 pset_node_t node)
222 {
223 processor_set_t *prev, pset = kalloc(sizeof (*pset));
224
225 if (pset != PROCESSOR_SET_NULL) {
226 pset_init(pset, node);
227
228 simple_lock(&pset_node_lock);
229
230 prev = &node->psets;
231 while (*prev != PROCESSOR_SET_NULL)
232 prev = &(*prev)->pset_list;
233
234 *prev = pset;
235
236 simple_unlock(&pset_node_lock);
237 }
238
239 return (pset);
240 }
241
242 /*
243 * Initialize the given processor_set structure.
244 */
void
pset_init(
	processor_set_t	pset,
	pset_node_t	node)
{
	if (pset != &pset0) {
		/* Scheduler state deferred until sched_init() */
		SCHED(pset_init)(pset);
	}

	/* Run queues of processors in this set. */
	queue_init(&pset->active_queue);
	queue_init(&pset->idle_queue);
	pset->online_processor_count = 0;
	/* No processors yet, so no selection hints. */
	pset_pri_init_hint(pset, PROCESSOR_NULL);
	pset_count_init_hint(pset, PROCESSOR_NULL);
	/* cpu id range is established as processors are added. */
	pset->cpu_set_low = pset->cpu_set_hi = 0;
	pset->cpu_set_count = 0;
	pset_lock_init(pset);
	/* IPC ports are created lazily (see ipc_pset_init paths). */
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
	pset->node = node;
}
268
269 kern_return_t
270 processor_info_count(
271 processor_flavor_t flavor,
272 mach_msg_type_number_t *count)
273 {
274 switch (flavor) {
275
276 case PROCESSOR_BASIC_INFO:
277 *count = PROCESSOR_BASIC_INFO_COUNT;
278 break;
279
280 case PROCESSOR_CPU_LOAD_INFO:
281 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
282 break;
283
284 default:
285 return (cpu_info_count(flavor, count));
286 }
287
288 return (KERN_SUCCESS);
289 }
290
291
/*
 * processor_info:
 *
 * Return information about a processor for the given flavor.
 * On success *count is set to the number of elements filled in
 * and *host to &realhost.  Unknown flavors are delegated to the
 * machine-dependent cpu_info().
 */
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t		flavor,
	host_t					*host,
	processor_info_t		info,
	mach_msg_type_number_t	*count)
{
	register int	cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t		basic_info;

		/* Caller's buffer must hold at least the full struct. */
		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		/* "running" means anything other than fully off-line. */
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor) 
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

	    return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		processor_cpu_load_info_t	cpu_load_info;
		timer_data_t	idle_temp;
		timer_t		idle_state;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		if (precise_user_kernel_time) {
			cpu_load_info->cpu_ticks[CPU_STATE_USER] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		} else {
			/*
			 * Without precise accounting, user/system time is not
			 * separable; report the combined total as USER.
			 */
			uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
				timer_grab(&PROCESSOR_DATA(processor, system_state));

			cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		}

		/* Snapshot the idle timer so we can read it consistently. */
		idle_state = &PROCESSOR_DATA(processor, idle_state);
		idle_temp = *idle_state;

		if (PROCESSOR_DATA(processor, current_state) != idle_state ||
		    timer_grab(&idle_temp) != timer_grab(idle_state)) {
			/* Processor is not idle, or the timer advanced while we
			 * copied it; the live value is safe to read directly. */
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, idle_state)) / hz_tick_interval);
		} else {
			/* Processor is currently idle; credit the in-progress
			 * idle period up to "now" using the stable snapshot. */
			timer_advance(&idle_temp, mach_absolute_time() - idle_temp.tstamp);

			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
				(uint32_t)(timer_grab(&idle_temp) / hz_tick_interval);
		}

		/* NICE time is not tracked by the kernel. */
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

	    *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
	    *host = &realhost;

	    return (KERN_SUCCESS);
	}

	default:
	    result = cpu_info(flavor, cpu_id, info, count);
	    if (result == KERN_SUCCESS)
			*host = &realhost;		   

	    return (result);
	}
}
390
/*
 * processor_start:
 *
 * Bring a processor on-line.  For the master processor this simply
 * binds to it and calls cpu_start().  For other processors the
 * sequence is: mark PROCESSOR_START, create the idle thread (first
 * start only), create a dedicated start-up thread (first start only),
 * create the IPC port if needed, then cpu_start().  Any failure rolls
 * the state back to PROCESSOR_OFF_LINE.
 */
kern_return_t
processor_start(
	processor_t			processor)
{
	processor_set_t		pset;
	thread_t			thread;   
	kern_return_t		result;
	spl_t				s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		processor_t		prev;

		/* Run on the master itself while (re)starting it. */
		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_id);

		thread_bind(prev);

		return (result);
	}

	/* Claim the processor: only one starter may move it out of OFF_LINE. */
	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	pset_unlock(pset);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			/* Roll back so a later start attempt can claim it. */
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}

		/* Bind the start-up thread so it is the first thing dispatched. */
		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		/* Our creation reference; the processor now holds the thread. */
		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->cpu_id);
	if (result != KERN_SUCCESS) {
		s = splsched();
		pset_lock(pset);
		processor->state = PROCESSOR_OFF_LINE;
		pset_unlock(pset);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
493
494 kern_return_t
495 processor_exit(
496 processor_t processor)
497 {
498 if (processor == PROCESSOR_NULL)
499 return(KERN_INVALID_ARGUMENT);
500
501 return(processor_shutdown(processor));
502 }
503
504 kern_return_t
505 processor_control(
506 processor_t processor,
507 processor_info_t info,
508 mach_msg_type_number_t count)
509 {
510 if (processor == PROCESSOR_NULL)
511 return(KERN_INVALID_ARGUMENT);
512
513 return(cpu_control(processor->cpu_id, info, count));
514 }
515
516 kern_return_t
517 processor_set_create(
518 __unused host_t host,
519 __unused processor_set_t *new_set,
520 __unused processor_set_t *new_name)
521 {
522 return(KERN_FAILURE);
523 }
524
525 kern_return_t
526 processor_set_destroy(
527 __unused processor_set_t pset)
528 {
529 return(KERN_FAILURE);
530 }
531
532 kern_return_t
533 processor_get_assignment(
534 processor_t processor,
535 processor_set_t *pset)
536 {
537 int state;
538
539 if (processor == PROCESSOR_NULL)
540 return(KERN_INVALID_ARGUMENT);
541
542 state = processor->state;
543 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
544 return(KERN_FAILURE);
545
546 *pset = &pset0;
547
548 return(KERN_SUCCESS);
549 }
550
551 kern_return_t
552 processor_set_info(
553 processor_set_t pset,
554 int flavor,
555 host_t *host,
556 processor_set_info_t info,
557 mach_msg_type_number_t *count)
558 {
559 if (pset == PROCESSOR_SET_NULL)
560 return(KERN_INVALID_ARGUMENT);
561
562 if (flavor == PROCESSOR_SET_BASIC_INFO) {
563 register processor_set_basic_info_t basic_info;
564
565 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
566 return(KERN_FAILURE);
567
568 basic_info = (processor_set_basic_info_t) info;
569 basic_info->processor_count = processor_avail_count;
570 basic_info->default_policy = POLICY_TIMESHARE;
571
572 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
573 *host = &realhost;
574 return(KERN_SUCCESS);
575 }
576 else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
577 register policy_timeshare_base_t ts_base;
578
579 if (*count < POLICY_TIMESHARE_BASE_COUNT)
580 return(KERN_FAILURE);
581
582 ts_base = (policy_timeshare_base_t) info;
583 ts_base->base_priority = BASEPRI_DEFAULT;
584
585 *count = POLICY_TIMESHARE_BASE_COUNT;
586 *host = &realhost;
587 return(KERN_SUCCESS);
588 }
589 else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
590 register policy_fifo_base_t fifo_base;
591
592 if (*count < POLICY_FIFO_BASE_COUNT)
593 return(KERN_FAILURE);
594
595 fifo_base = (policy_fifo_base_t) info;
596 fifo_base->base_priority = BASEPRI_DEFAULT;
597
598 *count = POLICY_FIFO_BASE_COUNT;
599 *host = &realhost;
600 return(KERN_SUCCESS);
601 }
602 else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
603 register policy_rr_base_t rr_base;
604
605 if (*count < POLICY_RR_BASE_COUNT)
606 return(KERN_FAILURE);
607
608 rr_base = (policy_rr_base_t) info;
609 rr_base->base_priority = BASEPRI_DEFAULT;
610 rr_base->quantum = 1;
611
612 *count = POLICY_RR_BASE_COUNT;
613 *host = &realhost;
614 return(KERN_SUCCESS);
615 }
616 else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
617 register policy_timeshare_limit_t ts_limit;
618
619 if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
620 return(KERN_FAILURE);
621
622 ts_limit = (policy_timeshare_limit_t) info;
623 ts_limit->max_priority = MAXPRI_KERNEL;
624
625 *count = POLICY_TIMESHARE_LIMIT_COUNT;
626 *host = &realhost;
627 return(KERN_SUCCESS);
628 }
629 else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
630 register policy_fifo_limit_t fifo_limit;
631
632 if (*count < POLICY_FIFO_LIMIT_COUNT)
633 return(KERN_FAILURE);
634
635 fifo_limit = (policy_fifo_limit_t) info;
636 fifo_limit->max_priority = MAXPRI_KERNEL;
637
638 *count = POLICY_FIFO_LIMIT_COUNT;
639 *host = &realhost;
640 return(KERN_SUCCESS);
641 }
642 else if (flavor == PROCESSOR_SET_RR_LIMITS) {
643 register policy_rr_limit_t rr_limit;
644
645 if (*count < POLICY_RR_LIMIT_COUNT)
646 return(KERN_FAILURE);
647
648 rr_limit = (policy_rr_limit_t) info;
649 rr_limit->max_priority = MAXPRI_KERNEL;
650
651 *count = POLICY_RR_LIMIT_COUNT;
652 *host = &realhost;
653 return(KERN_SUCCESS);
654 }
655 else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
656 register int *enabled;
657
658 if (*count < (sizeof(*enabled)/sizeof(int)))
659 return(KERN_FAILURE);
660
661 enabled = (int *) info;
662 *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
663
664 *count = sizeof(*enabled)/sizeof(int);
665 *host = &realhost;
666 return(KERN_SUCCESS);
667 }
668
669
670 *host = HOST_NULL;
671 return(KERN_INVALID_ARGUMENT);
672 }
673
674 /*
675 * processor_set_statistics
676 *
677 * Returns scheduling statistics for a processor set.
678 */
679 kern_return_t
680 processor_set_statistics(
681 processor_set_t pset,
682 int flavor,
683 processor_set_info_t info,
684 mach_msg_type_number_t *count)
685 {
686 if (pset == PROCESSOR_SET_NULL || pset != &pset0)
687 return (KERN_INVALID_PROCESSOR_SET);
688
689 if (flavor == PROCESSOR_SET_LOAD_INFO) {
690 register processor_set_load_info_t load_info;
691
692 if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
693 return(KERN_FAILURE);
694
695 load_info = (processor_set_load_info_t) info;
696
697 load_info->mach_factor = sched_mach_factor;
698 load_info->load_average = sched_load_average;
699
700 load_info->task_count = tasks_count;
701 load_info->thread_count = threads_count;
702
703 *count = PROCESSOR_SET_LOAD_INFO_COUNT;
704 return(KERN_SUCCESS);
705 }
706
707 return(KERN_INVALID_ARGUMENT);
708 }
709
710 /*
711 * processor_set_max_priority:
712 *
713 * Specify max priority permitted on processor set. This affects
714 * newly created and assigned threads. Optionally change existing
715 * ones.
716 */
717 kern_return_t
718 processor_set_max_priority(
719 __unused processor_set_t pset,
720 __unused int max_priority,
721 __unused boolean_t change_threads)
722 {
723 return (KERN_INVALID_ARGUMENT);
724 }
725
726 /*
727 * processor_set_policy_enable:
728 *
729 * Allow indicated policy on processor set.
730 */
731
732 kern_return_t
733 processor_set_policy_enable(
734 __unused processor_set_t pset,
735 __unused int policy)
736 {
737 return (KERN_INVALID_ARGUMENT);
738 }
739
740 /*
741 * processor_set_policy_disable:
742 *
743 * Forbid indicated policy on processor set. Time sharing cannot
744 * be forbidden.
745 */
746 kern_return_t
747 processor_set_policy_disable(
748 __unused processor_set_t pset,
749 __unused int policy,
750 __unused boolean_t change_threads)
751 {
752 return (KERN_INVALID_ARGUMENT);
753 }
754
755 #define THING_TASK 0
756 #define THING_THREAD 1
757
758 /*
759 * processor_set_things:
760 *
761 * Common internals for processor_set_{threads,tasks}
762 */
/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}.
 *	Snapshots the global task or thread list (per 'type') into a
 *	freshly kalloc'd array, taking a reference on each element, then
 *	converts each to a send right.  On success the caller owns the
 *	returned array (and the ports in it); on failure nothing is
 *	returned and all temporary references are dropped.
 */
kern_return_t
processor_set_things(
	processor_set_t			pset,
	mach_port_t				**thing_list,
	mach_msg_type_number_t	*count,
	int						type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0;
	addr = NULL;

	/*
	 * Allocate-and-retry: the list can grow while we are unlocked
	 * allocating, so loop until the buffer is large enough while
	 * holding the lock.
	 */
	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock and allocate more memory */
		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the list locked */

	/* Take a reference on each element so it survives after unlock. */
	actual = 0;
	switch (type) {

	case THING_TASK: {
		task_t		task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
						!queue_end(&tasks, (queue_entry_t)task);
								task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			/* Secure kernels never expose the kernel task. */
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}

		break;
	}

	case THING_THREAD: {
		thread_t	thread, *thread_list = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&threads);
						!queue_end(&threads, (queue_entry_t)thread);
								thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}

	}
		
	lck_mtx_unlock(&tasks_threads_lock);

	/* actual can be below maxthings (SECURE_KERNEL filtering). */
	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* Drop the references we took before failing. */
				switch (type) {

				case THING_TASK: {
					task_t		*task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD: {
					thread_t	*thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}

				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		/*
		 * In-place conversion: each convert_*_to_port consumes the
		 * reference taken above and produces a send right.
		 */
		switch (type) {

		case THING_TASK: {
			task_t		*task_list = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(task_list[i]);
			break;
		}

		case THING_THREAD: {
			thread_t	*thread_list = (thread_t *)addr;

			for (i = 0; i < actual; i++)
			  	(*thing_list)[i] = convert_thread_to_port(thread_list[i]);
			break;
		}

		}
	}

	return (KERN_SUCCESS);
}
928
929
930 /*
931 * processor_set_tasks:
932 *
933 * List all tasks in the processor set.
934 */
935 kern_return_t
936 processor_set_tasks(
937 processor_set_t pset,
938 task_array_t *task_list,
939 mach_msg_type_number_t *count)
940 {
941 return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
942 }
943
944 /*
945 * processor_set_threads:
946 *
947 * List all threads in the processor set.
948 */
#if defined(SECURE_KERNEL)
/* Secure kernels never enumerate threads. */
kern_return_t
processor_set_threads(
	__unused processor_set_t		pset,
	__unused thread_array_t		*thread_list,
	__unused mach_msg_type_number_t	*count)
{
    return KERN_FAILURE;
}
#elif defined(CONFIG_EMBEDDED)
/* Embedded configs report the call as unsupported rather than failed. */
kern_return_t
processor_set_threads(
	__unused processor_set_t		pset,
	__unused thread_array_t		*thread_list,
	__unused mach_msg_type_number_t	*count)
{
    return KERN_NOT_SUPPORTED;
}
#else
/* Desktop: return all threads in the pset, as send rights. */
kern_return_t
processor_set_threads(
	processor_set_t		pset,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}
#endif
977
978 /*
979 * processor_set_policy_control
980 *
981 * Controls the scheduling attributes governing the processor set.
982 * Allows control of enabled policies, and per-policy base and limit
983 * priorities.
984 */
985 kern_return_t
986 processor_set_policy_control(
987 __unused processor_set_t pset,
988 __unused int flavor,
989 __unused processor_set_info_t policy_info,
990 __unused mach_msg_type_number_t count,
991 __unused boolean_t change)
992 {
993 return (KERN_INVALID_ARGUMENT);
994 }
995
996 #undef pset_deallocate
997 void pset_deallocate(processor_set_t pset);
998 void
999 pset_deallocate(
1000 __unused processor_set_t pset)
1001 {
1002 return;
1003 }
1004
1005 #undef pset_reference
1006 void pset_reference(processor_set_t pset);
1007 void
1008 pset_reference(
1009 __unused processor_set_t pset)
1010 {
1011 return;
1012 }