]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/processor.c
xnu-1504.3.12.tar.gz
[apple/xnu.git] / osfmk / kern / processor.c
1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 /*
60 * processor.c: processor and processor_set manipulation routines.
61 */
62
63 #include <mach/boolean.h>
64 #include <mach/policy.h>
65 #include <mach/processor.h>
66 #include <mach/processor_info.h>
67 #include <mach/vm_param.h>
68 #include <kern/cpu_number.h>
69 #include <kern/host.h>
70 #include <kern/machine.h>
71 #include <kern/misc_protos.h>
72 #include <kern/processor.h>
73 #include <kern/sched.h>
74 #include <kern/task.h>
75 #include <kern/thread.h>
76 #include <kern/ipc_host.h>
77 #include <kern/ipc_tt.h>
78 #include <ipc/ipc_port.h>
79 #include <kern/kalloc.h>
80
81 /*
82 * Exported interface
83 */
84 #include <mach/mach_host_server.h>
85 #include <mach/processor_set_server.h>
86
/* the default (boot) processor set and the root pset node */
struct processor_set	pset0;
struct pset_node	pset_node0;
/* guards the pset lists hanging off pset nodes (see pset_create()) */
decl_simple_lock_data(static,pset_node_lock)

/* global lists of all tasks and all threads, guarded by tasks_threads_lock */
queue_head_t		tasks;
int			tasks_count;
queue_head_t		threads;
int			threads_count;
decl_lck_mtx_data(,tasks_threads_lock)

/* singly-linked list of every processor, guarded by processor_list_lock */
processor_t		processor_list;
unsigned int		processor_count;
static processor_t	processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

/* number of processors currently available for dispatching */
uint32_t		processor_avail_count;

/* the boot processor; cpu 0 */
processor_t		master_processor;
int 			master_cpu = 0;

/* Forwards */
kern_return_t	processor_set_things(
		processor_set_t		pset,
		mach_port_t		**thing_list,
		mach_msg_type_number_t	*count,
		int			type);
113
/*
 *	processor_bootstrap:
 *
 *	One-time startup initialization: set up the default
 *	processor set (pset0), the global task/thread lists and
 *	their locks, and initialize the master processor.
 */
void
processor_bootstrap(void)
{
	/* pset0 is the root (and initially only) pset, on pset_node0 */
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	/* global lists of all tasks and threads */
	queue_init(&tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	/* the boot cpu becomes the master processor */
	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}
131
/*
 *	Initialize the given processor for the cpu
 *	indicated by cpu_id, and assign to the
 *	specified processor set.
 *
 *	Sets the processor to PROCESSOR_OFF_LINE with no threads
 *	(the idle thread is created later by processor_start()),
 *	updates the pset's cached cpu_id range, and appends the
 *	processor to the global processor list.
 */
void
processor_init(
	processor_t		processor,
	int			cpu_id,
	processor_set_t		pset)
{
	run_queue_init(&processor->runq);

	processor->state = PROCESSOR_OFF_LINE;
	/* no threads yet; idle/startup threads come from processor_start() */
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor->current_pri = MINPRI;
	processor->cpu_id = cpu_id;
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	/* no realtime deadline pending */
	processor->deadline = UINT64_MAX;
	processor->timeslice = 0;
	processor->processor_meta = PROCESSOR_META_NULL;
	/* IPC port allocated lazily by processor_start() */
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;

	/* maintain the pset's [cpu_set_low, cpu_set_hi] cpu_id range */
	pset_lock(pset);
	if (pset->cpu_set_count++ == 0)
		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
	else {
		pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
		pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
	}
	pset_unlock(pset);

	/* append to the tail of the global processor list */
	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	else
		processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	processor_count++;
	simple_unlock(&processor_list_lock);
}
176
177 void
178 processor_meta_init(
179 processor_t processor,
180 processor_t primary)
181 {
182 processor_meta_t pmeta = primary->processor_meta;
183
184 if (pmeta == PROCESSOR_META_NULL) {
185 pmeta = kalloc(sizeof (*pmeta));
186
187 queue_init(&pmeta->idle_queue);
188
189 pmeta->primary = primary;
190 }
191
192 processor->processor_meta = pmeta;
193 }
194
195 processor_set_t
196 processor_pset(
197 processor_t processor)
198 {
199 return (processor->processor_set);
200 }
201
202 pset_node_t
203 pset_node_root(void)
204 {
205 return &pset_node0;
206 }
207
/*
 *	pset_create:
 *
 *	Allocate and initialize a new processor set and append
 *	it to the given node's pset list.  Returns
 *	PROCESSOR_SET_NULL if the allocation fails.
 */
processor_set_t
pset_create(
	pset_node_t		node)
{
	processor_set_t		*prev, pset = kalloc(sizeof (*pset));

	if (pset != PROCESSOR_SET_NULL) {
		pset_init(pset, node);

		simple_lock(&pset_node_lock);

		/* walk to the tail of the node's pset list and link on */
		prev = &node->psets;
		while (*prev != PROCESSOR_SET_NULL)
			prev = &(*prev)->pset_list;

		*prev = pset;

		simple_unlock(&pset_node_lock);
	}

	return (pset);
}
230
231 /*
232 * Initialize the given processor_set structure.
233 */
234 void
235 pset_init(
236 processor_set_t pset,
237 pset_node_t node)
238 {
239 queue_init(&pset->active_queue);
240 queue_init(&pset->idle_queue);
241 pset->processor_count = 0;
242 pset->low_pri = pset->low_count = PROCESSOR_NULL;
243 pset->cpu_set_low = pset->cpu_set_hi = 0;
244 pset->cpu_set_count = 0;
245 pset_lock_init(pset);
246 pset->pset_self = IP_NULL;
247 pset->pset_name_self = IP_NULL;
248 pset->pset_list = PROCESSOR_SET_NULL;
249 pset->node = node;
250 }
251
252 kern_return_t
253 processor_info_count(
254 processor_flavor_t flavor,
255 mach_msg_type_number_t *count)
256 {
257 switch (flavor) {
258
259 case PROCESSOR_BASIC_INFO:
260 *count = PROCESSOR_BASIC_INFO_COUNT;
261 break;
262
263 case PROCESSOR_CPU_LOAD_INFO:
264 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
265 break;
266
267 default:
268 return (cpu_info_count(flavor, count));
269 }
270
271 return (KERN_SUCCESS);
272 }
273
274
/*
 *	processor_info:
 *
 *	Return information about a processor.  The generic
 *	flavors (BASIC_INFO, CPU_LOAD_INFO) are handled here;
 *	any other flavor is forwarded to the machine-dependent
 *	cpu_info().  On success *host is set to &realhost and
 *	*count to the number of elements filled in.
 */
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	register int	cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t		basic_info;

		/* caller's buffer must be large enough */
		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		/* "running" here means any state other than OFF_LINE */
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		register processor_cpu_load_info_t	cpu_load_info;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		/* convert the accumulated per-state timers into clock ticks */
		cpu_load_info->cpu_ticks[CPU_STATE_USER] =
			(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
			(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
			(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, idle_state)) / hz_tick_interval);
		/* NICE time is not tracked separately */
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		/* machine-dependent flavor */
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}
350
/*
 *	processor_start:
 *
 *	Bring a processor from PROCESSOR_OFF_LINE into operation:
 *	transition it to PROCESSOR_START, create its idle thread
 *	(and, on first start, a dedicated startup thread), then
 *	call the machine-dependent cpu_start().  The master
 *	processor is handled specially by binding the calling
 *	thread to it.  On any failure the processor is returned
 *	to PROCESSOR_OFF_LINE.
 */
kern_return_t
processor_start(
	processor_t		processor)
{
	processor_set_t		pset;
	thread_t		thread;
	kern_return_t		result;
	spl_t			s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		processor_t		prev;

		/* run on the master processor itself while (re)starting it */
		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_id);

		/* restore the previous binding */
		thread_bind(prev);

		return (result);
	}

	/* transition OFF_LINE -> START under the pset lock at splsched */
	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	pset_unlock(pset);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			/* back out to OFF_LINE on failure */
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			/* back out to OFF_LINE on failure */
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}

		/* bind the startup thread so it is the first thing the cpu runs */
		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		/* release this function's reference from kernel_thread_create() */
		thread_deallocate(thread);
	}

	/* allocate the processor's IPC port on first start */
	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->cpu_id);
	if (result != KERN_SUCCESS) {
		/* back out to OFF_LINE on failure */
		s = splsched();
		pset_lock(pset);
		processor->state = PROCESSOR_OFF_LINE;
		pset_unlock(pset);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
453
454 kern_return_t
455 processor_exit(
456 processor_t processor)
457 {
458 if (processor == PROCESSOR_NULL)
459 return(KERN_INVALID_ARGUMENT);
460
461 return(processor_shutdown(processor));
462 }
463
464 kern_return_t
465 processor_control(
466 processor_t processor,
467 processor_info_t info,
468 mach_msg_type_number_t count)
469 {
470 if (processor == PROCESSOR_NULL)
471 return(KERN_INVALID_ARGUMENT);
472
473 return(cpu_control(processor->cpu_id, info, count));
474 }
475
476 kern_return_t
477 processor_set_create(
478 __unused host_t host,
479 __unused processor_set_t *new_set,
480 __unused processor_set_t *new_name)
481 {
482 return(KERN_FAILURE);
483 }
484
485 kern_return_t
486 processor_set_destroy(
487 __unused processor_set_t pset)
488 {
489 return(KERN_FAILURE);
490 }
491
492 kern_return_t
493 processor_get_assignment(
494 processor_t processor,
495 processor_set_t *pset)
496 {
497 int state;
498
499 state = processor->state;
500 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
501 return(KERN_FAILURE);
502
503 *pset = &pset0;
504
505 return(KERN_SUCCESS);
506 }
507
508 kern_return_t
509 processor_set_info(
510 processor_set_t pset,
511 int flavor,
512 host_t *host,
513 processor_set_info_t info,
514 mach_msg_type_number_t *count)
515 {
516 if (pset == PROCESSOR_SET_NULL)
517 return(KERN_INVALID_ARGUMENT);
518
519 if (flavor == PROCESSOR_SET_BASIC_INFO) {
520 register processor_set_basic_info_t basic_info;
521
522 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
523 return(KERN_FAILURE);
524
525 basic_info = (processor_set_basic_info_t) info;
526 basic_info->processor_count = processor_avail_count;
527 basic_info->default_policy = POLICY_TIMESHARE;
528
529 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
530 *host = &realhost;
531 return(KERN_SUCCESS);
532 }
533 else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
534 register policy_timeshare_base_t ts_base;
535
536 if (*count < POLICY_TIMESHARE_BASE_COUNT)
537 return(KERN_FAILURE);
538
539 ts_base = (policy_timeshare_base_t) info;
540 ts_base->base_priority = BASEPRI_DEFAULT;
541
542 *count = POLICY_TIMESHARE_BASE_COUNT;
543 *host = &realhost;
544 return(KERN_SUCCESS);
545 }
546 else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
547 register policy_fifo_base_t fifo_base;
548
549 if (*count < POLICY_FIFO_BASE_COUNT)
550 return(KERN_FAILURE);
551
552 fifo_base = (policy_fifo_base_t) info;
553 fifo_base->base_priority = BASEPRI_DEFAULT;
554
555 *count = POLICY_FIFO_BASE_COUNT;
556 *host = &realhost;
557 return(KERN_SUCCESS);
558 }
559 else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
560 register policy_rr_base_t rr_base;
561
562 if (*count < POLICY_RR_BASE_COUNT)
563 return(KERN_FAILURE);
564
565 rr_base = (policy_rr_base_t) info;
566 rr_base->base_priority = BASEPRI_DEFAULT;
567 rr_base->quantum = 1;
568
569 *count = POLICY_RR_BASE_COUNT;
570 *host = &realhost;
571 return(KERN_SUCCESS);
572 }
573 else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
574 register policy_timeshare_limit_t ts_limit;
575
576 if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
577 return(KERN_FAILURE);
578
579 ts_limit = (policy_timeshare_limit_t) info;
580 ts_limit->max_priority = MAXPRI_KERNEL;
581
582 *count = POLICY_TIMESHARE_LIMIT_COUNT;
583 *host = &realhost;
584 return(KERN_SUCCESS);
585 }
586 else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
587 register policy_fifo_limit_t fifo_limit;
588
589 if (*count < POLICY_FIFO_LIMIT_COUNT)
590 return(KERN_FAILURE);
591
592 fifo_limit = (policy_fifo_limit_t) info;
593 fifo_limit->max_priority = MAXPRI_KERNEL;
594
595 *count = POLICY_FIFO_LIMIT_COUNT;
596 *host = &realhost;
597 return(KERN_SUCCESS);
598 }
599 else if (flavor == PROCESSOR_SET_RR_LIMITS) {
600 register policy_rr_limit_t rr_limit;
601
602 if (*count < POLICY_RR_LIMIT_COUNT)
603 return(KERN_FAILURE);
604
605 rr_limit = (policy_rr_limit_t) info;
606 rr_limit->max_priority = MAXPRI_KERNEL;
607
608 *count = POLICY_RR_LIMIT_COUNT;
609 *host = &realhost;
610 return(KERN_SUCCESS);
611 }
612 else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
613 register int *enabled;
614
615 if (*count < (sizeof(*enabled)/sizeof(int)))
616 return(KERN_FAILURE);
617
618 enabled = (int *) info;
619 *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
620
621 *count = sizeof(*enabled)/sizeof(int);
622 *host = &realhost;
623 return(KERN_SUCCESS);
624 }
625
626
627 *host = HOST_NULL;
628 return(KERN_INVALID_ARGUMENT);
629 }
630
631 /*
632 * processor_set_statistics
633 *
634 * Returns scheduling statistics for a processor set.
635 */
636 kern_return_t
637 processor_set_statistics(
638 processor_set_t pset,
639 int flavor,
640 processor_set_info_t info,
641 mach_msg_type_number_t *count)
642 {
643 if (pset == PROCESSOR_SET_NULL || pset != &pset0)
644 return (KERN_INVALID_PROCESSOR_SET);
645
646 if (flavor == PROCESSOR_SET_LOAD_INFO) {
647 register processor_set_load_info_t load_info;
648
649 if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
650 return(KERN_FAILURE);
651
652 load_info = (processor_set_load_info_t) info;
653
654 load_info->mach_factor = sched_mach_factor;
655 load_info->load_average = sched_load_average;
656
657 load_info->task_count = tasks_count;
658 load_info->thread_count = threads_count;
659
660 *count = PROCESSOR_SET_LOAD_INFO_COUNT;
661 return(KERN_SUCCESS);
662 }
663
664 return(KERN_INVALID_ARGUMENT);
665 }
666
667 /*
668 * processor_set_max_priority:
669 *
670 * Specify max priority permitted on processor set. This affects
671 * newly created and assigned threads. Optionally change existing
672 * ones.
673 */
674 kern_return_t
675 processor_set_max_priority(
676 __unused processor_set_t pset,
677 __unused int max_priority,
678 __unused boolean_t change_threads)
679 {
680 return (KERN_INVALID_ARGUMENT);
681 }
682
683 /*
684 * processor_set_policy_enable:
685 *
686 * Allow indicated policy on processor set.
687 */
688
689 kern_return_t
690 processor_set_policy_enable(
691 __unused processor_set_t pset,
692 __unused int policy)
693 {
694 return (KERN_INVALID_ARGUMENT);
695 }
696
697 /*
698 * processor_set_policy_disable:
699 *
700 * Forbid indicated policy on processor set. Time sharing cannot
701 * be forbidden.
702 */
703 kern_return_t
704 processor_set_policy_disable(
705 __unused processor_set_t pset,
706 __unused int policy,
707 __unused boolean_t change_threads)
708 {
709 return (KERN_INVALID_ARGUMENT);
710 }
711
#define THING_TASK	0
#define THING_THREAD	1

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 *
 *	Snapshots the global tasks or threads list (selected by
 *	type) into a kalloc'd array, takes a reference on every
 *	element, then converts the array in place into send
 *	rights for the caller.  Only the default pset (pset0)
 *	is supported.  On success the caller owns *thing_list.
 */
kern_return_t
processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0;
	addr = NULL;

	/*
	 *	Allocate-then-recheck loop: the list can grow while the
	 *	lock is dropped for allocation, so loop until the buffer
	 *	is big enough while the lock is held.
	 */
	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock and allocate more memory */
		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the list locked */

	actual = 0;
	switch (type) {

	case THING_TASK: {
		task_t		task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
				!queue_end(&tasks, (queue_entry_t)task);
					task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			/* secure kernels omit the kernel task from the list */
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}

		break;
	}

	case THING_THREAD: {
		thread_t	thread, *thread_list = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&threads);
				!queue_end(&threads, (queue_entry_t)thread);
					thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}

	}

	lck_mtx_unlock(&tasks_threads_lock);

	/* the list may have shrunk while unlocked for allocation */
	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* allocation failed: drop every reference taken above */
				switch (type) {

				case THING_TASK: {
					task_t		*task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD: {
					thread_t	*thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}

				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {

		case THING_TASK: {
			task_t		*task_list = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(task_list[i]);
			break;
		}

		case THING_THREAD: {
			thread_t	*thread_list = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(thread_list[i]);
			break;
		}

		}
	}

	return (KERN_SUCCESS);
}
885
886
887 /*
888 * processor_set_tasks:
889 *
890 * List all tasks in the processor set.
891 */
892 kern_return_t
893 processor_set_tasks(
894 processor_set_t pset,
895 task_array_t *task_list,
896 mach_msg_type_number_t *count)
897 {
898 return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
899 }
900
901 /*
902 * processor_set_threads:
903 *
904 * List all threads in the processor set.
905 */
#if defined(SECURE_KERNEL)
/* Secure kernels do not expose the thread list at all. */
kern_return_t
processor_set_threads(
	__unused processor_set_t		pset,
	__unused thread_array_t		*thread_list,
	__unused mach_msg_type_number_t	*count)
{
	return KERN_FAILURE;
}
#elif defined(CONFIG_EMBEDDED)
/* Embedded configurations report the call as unsupported. */
kern_return_t
processor_set_threads(
	__unused processor_set_t		pset,
	__unused thread_array_t		*thread_list,
	__unused mach_msg_type_number_t	*count)
{
	return KERN_NOT_SUPPORTED;
}
#else
/* Desktop kernels delegate to the common worker. */
kern_return_t
processor_set_threads(
	processor_set_t		pset,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}
#endif
934
935 /*
936 * processor_set_policy_control
937 *
938 * Controls the scheduling attributes governing the processor set.
939 * Allows control of enabled policies, and per-policy base and limit
940 * priorities.
941 */
942 kern_return_t
943 processor_set_policy_control(
944 __unused processor_set_t pset,
945 __unused int flavor,
946 __unused processor_set_info_t policy_info,
947 __unused mach_msg_type_number_t count,
948 __unused boolean_t change)
949 {
950 return (KERN_INVALID_ARGUMENT);
951 }
952
953 #undef pset_deallocate
954 void pset_deallocate(processor_set_t pset);
955 void
956 pset_deallocate(
957 __unused processor_set_t pset)
958 {
959 return;
960 }
961
962 #undef pset_reference
963 void pset_reference(processor_set_t pset);
964 void
965 pset_reference(
966 __unused processor_set_t pset)
967 {
968 return;
969 }