]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/processor.c
3410697249f5369eaedac9c4073475550f0fe061
[apple/xnu.git] / osfmk / kern / processor.c
1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 /*
60 * processor.c: processor and processor_set manipulation routines.
61 */
62
63 #include <mach/boolean.h>
64 #include <mach/policy.h>
65 #include <mach/processor.h>
66 #include <mach/processor_info.h>
67 #include <mach/vm_param.h>
68 #include <kern/cpu_number.h>
69 #include <kern/host.h>
70 #include <kern/machine.h>
71 #include <kern/misc_protos.h>
72 #include <kern/processor.h>
73 #include <kern/sched.h>
74 #include <kern/task.h>
75 #include <kern/thread.h>
76 #include <kern/ipc_host.h>
77 #include <kern/ipc_tt.h>
78 #include <ipc/ipc_port.h>
79 #include <kern/kalloc.h>
80
81 /*
82 * Exported interface
83 */
84 #include <mach/mach_host_server.h>
85 #include <mach/processor_set_server.h>
86
/* The boot processor set and the root node that contains it. */
struct processor_set pset0;
struct pset_node pset_node0;
/* Protects the list of psets hanging off each pset_node. */
decl_simple_lock_data(static,pset_node_lock)

/* Global lists of all tasks and all threads, with their counts. */
queue_head_t tasks;
int tasks_count;
queue_head_t threads;
int threads_count;
/* Guards the tasks/threads queues and counters above. */
decl_lck_mtx_data(,tasks_threads_lock)

/* Singly-linked list of all processors, in registration order. */
processor_t processor_list;
unsigned int processor_count;
static processor_t processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

/* Number of processors currently available for scheduling. */
uint32_t processor_avail_count;

/* The boot processor and its cpu number. */
processor_t master_processor;
int master_cpu = 0;

/* Forwards */
kern_return_t processor_set_things(
	processor_set_t pset,
	mach_port_t **thing_list,
	mach_msg_type_number_t *count,
	int type);
113
/*
 * processor_bootstrap:
 *
 * Early (single-threaded) initialization of the processor and
 * processor-set machinery for the boot cpu.
 */
void
processor_bootstrap(void)
{
	/* Set up the boot pset and hang it off the root node. */
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	/* Global task/thread lists start out empty. */
	queue_init(&tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	/* The boot cpu becomes the master processor, assigned to pset0. */
	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}
131
132 /*
133 * Initialize the given processor for the cpu
134 * indicated by cpu_id, and assign to the
135 * specified processor set.
136 */
void
processor_init(
	processor_t		processor,
	int			cpu_id,
	processor_set_t		pset)
{
	/* Per-processor run queue begins empty. */
	run_queue_init(&processor->runq);

	/* Processors come up off-line; processor_start() brings them up. */
	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor->current_pri = MINPRI;
	processor->cpu_id = cpu_id;
	/* Quantum timer fires thread_quantum_expire() for this processor. */
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	processor->deadline = UINT64_MAX;
	processor->timeslice = 0;
	/* No secondary (meta) grouping or IPC port until requested. */
	processor->processor_meta = PROCESSOR_META_NULL;
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;

	/* Append to the global processor list under the list lock. */
	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	else
		processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	processor_count++;
	simple_unlock(&processor_list_lock);
}
167
168 void
169 processor_meta_init(
170 processor_t processor,
171 processor_t primary)
172 {
173 processor_meta_t pmeta = primary->processor_meta;
174
175 if (pmeta == PROCESSOR_META_NULL) {
176 pmeta = kalloc(sizeof (*pmeta));
177
178 queue_init(&pmeta->idle_queue);
179
180 pmeta->primary = primary;
181 }
182
183 processor->processor_meta = pmeta;
184 }
185
186 processor_set_t
187 processor_pset(
188 processor_t processor)
189 {
190 return (processor->processor_set);
191 }
192
193 pset_node_t
194 pset_node_root(void)
195 {
196 return &pset_node0;
197 }
198
199 processor_set_t
200 pset_create(
201 pset_node_t node)
202 {
203 processor_set_t *prev, pset = kalloc(sizeof (*pset));
204
205 if (pset != PROCESSOR_SET_NULL) {
206 pset_init(pset, node);
207
208 simple_lock(&pset_node_lock);
209
210 prev = &node->psets;
211 while (*prev != PROCESSOR_SET_NULL)
212 prev = &(*prev)->pset_list;
213
214 *prev = pset;
215
216 simple_unlock(&pset_node_lock);
217 }
218
219 return (pset);
220 }
221
222 /*
223 * Initialize the given processor_set structure.
224 */
void
pset_init(
	processor_set_t		pset,
	pset_node_t		node)
{
	/* Scheduling queues begin empty. */
	queue_init(&pset->active_queue);
	queue_init(&pset->idle_queue);
	pset->processor_count = 0;
	/* No processors yet, so no lowest-priority/count hints. */
	pset->low_pri = pset->low_count = PROCESSOR_NULL;
	pset_lock_init(pset);
	/* IPC ports are not created here. */
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
	pset->node = node;
}
240
241 kern_return_t
242 processor_info_count(
243 processor_flavor_t flavor,
244 mach_msg_type_number_t *count)
245 {
246 switch (flavor) {
247
248 case PROCESSOR_BASIC_INFO:
249 *count = PROCESSOR_BASIC_INFO_COUNT;
250 break;
251
252 case PROCESSOR_CPU_LOAD_INFO:
253 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
254 break;
255
256 default:
257 return (cpu_info_count(flavor, count));
258 }
259
260 return (KERN_SUCCESS);
261 }
262
263
264 kern_return_t
265 processor_info(
266 register processor_t processor,
267 processor_flavor_t flavor,
268 host_t *host,
269 processor_info_t info,
270 mach_msg_type_number_t *count)
271 {
272 register int cpu_id, state;
273 kern_return_t result;
274
275 if (processor == PROCESSOR_NULL)
276 return (KERN_INVALID_ARGUMENT);
277
278 cpu_id = processor->cpu_id;
279
280 switch (flavor) {
281
282 case PROCESSOR_BASIC_INFO:
283 {
284 register processor_basic_info_t basic_info;
285
286 if (*count < PROCESSOR_BASIC_INFO_COUNT)
287 return (KERN_FAILURE);
288
289 basic_info = (processor_basic_info_t) info;
290 basic_info->cpu_type = slot_type(cpu_id);
291 basic_info->cpu_subtype = slot_subtype(cpu_id);
292 state = processor->state;
293 if (state == PROCESSOR_OFF_LINE)
294 basic_info->running = FALSE;
295 else
296 basic_info->running = TRUE;
297 basic_info->slot_num = cpu_id;
298 if (processor == master_processor)
299 basic_info->is_master = TRUE;
300 else
301 basic_info->is_master = FALSE;
302
303 *count = PROCESSOR_BASIC_INFO_COUNT;
304 *host = &realhost;
305
306 return (KERN_SUCCESS);
307 }
308
309 case PROCESSOR_CPU_LOAD_INFO:
310 {
311 register processor_cpu_load_info_t cpu_load_info;
312
313 if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
314 return (KERN_FAILURE);
315
316 cpu_load_info = (processor_cpu_load_info_t) info;
317 cpu_load_info->cpu_ticks[CPU_STATE_USER] =
318 (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
319 cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
320 (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
321 cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
322 (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, idle_state)) / hz_tick_interval);
323 cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;
324
325 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
326 *host = &realhost;
327
328 return (KERN_SUCCESS);
329 }
330
331 default:
332 result = cpu_info(flavor, cpu_id, info, count);
333 if (result == KERN_SUCCESS)
334 *host = &realhost;
335
336 return (result);
337 }
338 }
339
340 kern_return_t
341 processor_start(
342 processor_t processor)
343 {
344 processor_set_t pset;
345 thread_t thread;
346 kern_return_t result;
347 spl_t s;
348
349 if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
350 return (KERN_INVALID_ARGUMENT);
351
352 if (processor == master_processor) {
353 processor_t prev;
354
355 prev = thread_bind(processor);
356 thread_block(THREAD_CONTINUE_NULL);
357
358 result = cpu_start(processor->cpu_id);
359
360 thread_bind(prev);
361
362 return (result);
363 }
364
365 s = splsched();
366 pset = processor->processor_set;
367 pset_lock(pset);
368 if (processor->state != PROCESSOR_OFF_LINE) {
369 pset_unlock(pset);
370 splx(s);
371
372 return (KERN_FAILURE);
373 }
374
375 processor->state = PROCESSOR_START;
376 pset_unlock(pset);
377 splx(s);
378
379 /*
380 * Create the idle processor thread.
381 */
382 if (processor->idle_thread == THREAD_NULL) {
383 result = idle_thread_create(processor);
384 if (result != KERN_SUCCESS) {
385 s = splsched();
386 pset_lock(pset);
387 processor->state = PROCESSOR_OFF_LINE;
388 pset_unlock(pset);
389 splx(s);
390
391 return (result);
392 }
393 }
394
395 /*
396 * If there is no active thread, the processor
397 * has never been started. Create a dedicated
398 * start up thread.
399 */
400 if ( processor->active_thread == THREAD_NULL &&
401 processor->next_thread == THREAD_NULL ) {
402 result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
403 if (result != KERN_SUCCESS) {
404 s = splsched();
405 pset_lock(pset);
406 processor->state = PROCESSOR_OFF_LINE;
407 pset_unlock(pset);
408 splx(s);
409
410 return (result);
411 }
412
413 s = splsched();
414 thread_lock(thread);
415 thread->bound_processor = processor;
416 processor->next_thread = thread;
417 thread->state = TH_RUN;
418 thread_unlock(thread);
419 splx(s);
420
421 thread_deallocate(thread);
422 }
423
424 if (processor->processor_self == IP_NULL)
425 ipc_processor_init(processor);
426
427 result = cpu_start(processor->cpu_id);
428 if (result != KERN_SUCCESS) {
429 s = splsched();
430 pset_lock(pset);
431 processor->state = PROCESSOR_OFF_LINE;
432 pset_unlock(pset);
433 splx(s);
434
435 return (result);
436 }
437
438 ipc_processor_enable(processor);
439
440 return (KERN_SUCCESS);
441 }
442
443 kern_return_t
444 processor_exit(
445 processor_t processor)
446 {
447 if (processor == PROCESSOR_NULL)
448 return(KERN_INVALID_ARGUMENT);
449
450 return(processor_shutdown(processor));
451 }
452
453 kern_return_t
454 processor_control(
455 processor_t processor,
456 processor_info_t info,
457 mach_msg_type_number_t count)
458 {
459 if (processor == PROCESSOR_NULL)
460 return(KERN_INVALID_ARGUMENT);
461
462 return(cpu_control(processor->cpu_id, info, count));
463 }
464
465 kern_return_t
466 processor_set_create(
467 __unused host_t host,
468 __unused processor_set_t *new_set,
469 __unused processor_set_t *new_name)
470 {
471 return(KERN_FAILURE);
472 }
473
474 kern_return_t
475 processor_set_destroy(
476 __unused processor_set_t pset)
477 {
478 return(KERN_FAILURE);
479 }
480
481 kern_return_t
482 processor_get_assignment(
483 processor_t processor,
484 processor_set_t *pset)
485 {
486 int state;
487
488 state = processor->state;
489 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
490 return(KERN_FAILURE);
491
492 *pset = &pset0;
493
494 return(KERN_SUCCESS);
495 }
496
497 kern_return_t
498 processor_set_info(
499 processor_set_t pset,
500 int flavor,
501 host_t *host,
502 processor_set_info_t info,
503 mach_msg_type_number_t *count)
504 {
505 if (pset == PROCESSOR_SET_NULL)
506 return(KERN_INVALID_ARGUMENT);
507
508 if (flavor == PROCESSOR_SET_BASIC_INFO) {
509 register processor_set_basic_info_t basic_info;
510
511 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
512 return(KERN_FAILURE);
513
514 basic_info = (processor_set_basic_info_t) info;
515 basic_info->processor_count = processor_avail_count;
516 basic_info->default_policy = POLICY_TIMESHARE;
517
518 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
519 *host = &realhost;
520 return(KERN_SUCCESS);
521 }
522 else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
523 register policy_timeshare_base_t ts_base;
524
525 if (*count < POLICY_TIMESHARE_BASE_COUNT)
526 return(KERN_FAILURE);
527
528 ts_base = (policy_timeshare_base_t) info;
529 ts_base->base_priority = BASEPRI_DEFAULT;
530
531 *count = POLICY_TIMESHARE_BASE_COUNT;
532 *host = &realhost;
533 return(KERN_SUCCESS);
534 }
535 else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
536 register policy_fifo_base_t fifo_base;
537
538 if (*count < POLICY_FIFO_BASE_COUNT)
539 return(KERN_FAILURE);
540
541 fifo_base = (policy_fifo_base_t) info;
542 fifo_base->base_priority = BASEPRI_DEFAULT;
543
544 *count = POLICY_FIFO_BASE_COUNT;
545 *host = &realhost;
546 return(KERN_SUCCESS);
547 }
548 else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
549 register policy_rr_base_t rr_base;
550
551 if (*count < POLICY_RR_BASE_COUNT)
552 return(KERN_FAILURE);
553
554 rr_base = (policy_rr_base_t) info;
555 rr_base->base_priority = BASEPRI_DEFAULT;
556 rr_base->quantum = 1;
557
558 *count = POLICY_RR_BASE_COUNT;
559 *host = &realhost;
560 return(KERN_SUCCESS);
561 }
562 else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
563 register policy_timeshare_limit_t ts_limit;
564
565 if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
566 return(KERN_FAILURE);
567
568 ts_limit = (policy_timeshare_limit_t) info;
569 ts_limit->max_priority = MAXPRI_KERNEL;
570
571 *count = POLICY_TIMESHARE_LIMIT_COUNT;
572 *host = &realhost;
573 return(KERN_SUCCESS);
574 }
575 else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
576 register policy_fifo_limit_t fifo_limit;
577
578 if (*count < POLICY_FIFO_LIMIT_COUNT)
579 return(KERN_FAILURE);
580
581 fifo_limit = (policy_fifo_limit_t) info;
582 fifo_limit->max_priority = MAXPRI_KERNEL;
583
584 *count = POLICY_FIFO_LIMIT_COUNT;
585 *host = &realhost;
586 return(KERN_SUCCESS);
587 }
588 else if (flavor == PROCESSOR_SET_RR_LIMITS) {
589 register policy_rr_limit_t rr_limit;
590
591 if (*count < POLICY_RR_LIMIT_COUNT)
592 return(KERN_FAILURE);
593
594 rr_limit = (policy_rr_limit_t) info;
595 rr_limit->max_priority = MAXPRI_KERNEL;
596
597 *count = POLICY_RR_LIMIT_COUNT;
598 *host = &realhost;
599 return(KERN_SUCCESS);
600 }
601 else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
602 register int *enabled;
603
604 if (*count < (sizeof(*enabled)/sizeof(int)))
605 return(KERN_FAILURE);
606
607 enabled = (int *) info;
608 *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
609
610 *count = sizeof(*enabled)/sizeof(int);
611 *host = &realhost;
612 return(KERN_SUCCESS);
613 }
614
615
616 *host = HOST_NULL;
617 return(KERN_INVALID_ARGUMENT);
618 }
619
620 /*
621 * processor_set_statistics
622 *
623 * Returns scheduling statistics for a processor set.
624 */
625 kern_return_t
626 processor_set_statistics(
627 processor_set_t pset,
628 int flavor,
629 processor_set_info_t info,
630 mach_msg_type_number_t *count)
631 {
632 if (pset == PROCESSOR_SET_NULL || pset != &pset0)
633 return (KERN_INVALID_PROCESSOR_SET);
634
635 if (flavor == PROCESSOR_SET_LOAD_INFO) {
636 register processor_set_load_info_t load_info;
637
638 if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
639 return(KERN_FAILURE);
640
641 load_info = (processor_set_load_info_t) info;
642
643 load_info->mach_factor = sched_mach_factor;
644 load_info->load_average = sched_load_average;
645
646 load_info->task_count = tasks_count;
647 load_info->thread_count = threads_count;
648
649 *count = PROCESSOR_SET_LOAD_INFO_COUNT;
650 return(KERN_SUCCESS);
651 }
652
653 return(KERN_INVALID_ARGUMENT);
654 }
655
656 /*
657 * processor_set_max_priority:
658 *
659 * Specify max priority permitted on processor set. This affects
660 * newly created and assigned threads. Optionally change existing
661 * ones.
662 */
663 kern_return_t
664 processor_set_max_priority(
665 __unused processor_set_t pset,
666 __unused int max_priority,
667 __unused boolean_t change_threads)
668 {
669 return (KERN_INVALID_ARGUMENT);
670 }
671
672 /*
673 * processor_set_policy_enable:
674 *
675 * Allow indicated policy on processor set.
676 */
677
678 kern_return_t
679 processor_set_policy_enable(
680 __unused processor_set_t pset,
681 __unused int policy)
682 {
683 return (KERN_INVALID_ARGUMENT);
684 }
685
686 /*
687 * processor_set_policy_disable:
688 *
689 * Forbid indicated policy on processor set. Time sharing cannot
690 * be forbidden.
691 */
692 kern_return_t
693 processor_set_policy_disable(
694 __unused processor_set_t pset,
695 __unused int policy,
696 __unused boolean_t change_threads)
697 {
698 return (KERN_INVALID_ARGUMENT);
699 }
700
701 #define THING_TASK 0
702 #define THING_THREAD 1
703
704 /*
705 * processor_set_things:
706 *
707 * Common internals for processor_set_{threads,tasks}
708 */
kern_return_t
processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	/* Only the single system pset is supported. */
	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0;
	addr = NULL;

	/*
	 * Allocate-and-retry: the list may grow between dropping the
	 * lock to allocate and re-taking it, so loop until the buffer
	 * is large enough while the lock is still held.
	 */
	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock and allocate more memory */
		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the list locked */

	/*
	 * Gather raw task/thread pointers, taking a reference on each
	 * so they remain valid once the lock is dropped.
	 */
	actual = 0;
	switch (type) {

	case THING_TASK: {
		task_t task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
		    !queue_end(&tasks, (queue_entry_t)task);
		    task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			/* Secure kernels hide the kernel task. */
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}

		break;
	}

	case THING_THREAD: {
		thread_t thread, *thread_list = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&threads);
		    !queue_end(&threads, (queue_entry_t)thread);
		    thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}

	}

	lck_mtx_unlock(&tasks_threads_lock);

	/* The count can only have shrunk relative to the buffer. */
	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* Drop the references taken above before failing. */
				switch (type) {

				case THING_TASK: {
					task_t *task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD: {
					thread_t *thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}

				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		/*
		 * Convert in place; the convert_*_to_port routines
		 * presumably consume the reference taken above — verify
		 * against their definitions if modifying this path.
		 */
		switch (type) {

		case THING_TASK: {
			task_t *task_list = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(task_list[i]);
			break;
		}

		case THING_THREAD: {
			thread_t *thread_list = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(thread_list[i]);
			break;
		}

		}
	}

	return (KERN_SUCCESS);
}
874
875
876 /*
877 * processor_set_tasks:
878 *
879 * List all tasks in the processor set.
880 */
881 kern_return_t
882 processor_set_tasks(
883 processor_set_t pset,
884 task_array_t *task_list,
885 mach_msg_type_number_t *count)
886 {
887 return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
888 }
889
890 /*
891 * processor_set_threads:
892 *
893 * List all threads in the processor set.
894 */
#if defined(SECURE_KERNEL)
/* Secure kernels never expose the thread list. */
kern_return_t
processor_set_threads(
	__unused processor_set_t		pset,
	__unused thread_array_t		*thread_list,
	__unused mach_msg_type_number_t	*count)
{
	return KERN_FAILURE;
}
#elif defined(CONFIG_EMBEDDED)
/* Embedded configurations report the call as unsupported. */
kern_return_t
processor_set_threads(
	__unused processor_set_t		pset,
	__unused thread_array_t		*thread_list,
	__unused mach_msg_type_number_t	*count)
{
	return KERN_NOT_SUPPORTED;
}
#else
/* Full kernels delegate to the common task/thread enumerator. */
kern_return_t
processor_set_threads(
	processor_set_t		pset,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}
#endif
923
924 /*
925 * processor_set_policy_control
926 *
927 * Controls the scheduling attributes governing the processor set.
928 * Allows control of enabled policies, and per-policy base and limit
929 * priorities.
930 */
931 kern_return_t
932 processor_set_policy_control(
933 __unused processor_set_t pset,
934 __unused int flavor,
935 __unused processor_set_info_t policy_info,
936 __unused mach_msg_type_number_t count,
937 __unused boolean_t change)
938 {
939 return (KERN_INVALID_ARGUMENT);
940 }
941
942 #undef pset_deallocate
943 void pset_deallocate(processor_set_t pset);
944 void
945 pset_deallocate(
946 __unused processor_set_t pset)
947 {
948 return;
949 }
950
951 #undef pset_reference
952 void pset_reference(processor_set_t pset);
953 void
954 pset_reference(
955 __unused processor_set_t pset)
956 {
957 return;
958 }