]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/processor.c
xnu-1228.3.13.tar.gz
[apple/xnu.git] / osfmk / kern / processor.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 /*
60 * processor.c: processor and processor_set manipulation routines.
61 */
62
63 #include <mach/boolean.h>
64 #include <mach/policy.h>
65 #include <mach/processor.h>
66 #include <mach/processor_info.h>
67 #include <mach/vm_param.h>
68 #include <kern/cpu_number.h>
69 #include <kern/host.h>
70 #include <kern/machine.h>
71 #include <kern/misc_protos.h>
72 #include <kern/processor.h>
73 #include <kern/sched.h>
74 #include <kern/task.h>
75 #include <kern/thread.h>
76 #include <kern/ipc_host.h>
77 #include <kern/ipc_tt.h>
78 #include <ipc/ipc_port.h>
79 #include <kern/kalloc.h>
80
81 /*
82 * Exported interface
83 */
84 #include <mach/mach_host_server.h>
85 #include <mach/processor_set_server.h>
86
87 struct processor_set pset0;
88 struct pset_node pset_node0;
89 decl_simple_lock_data(static,pset_node_lock)
90
91 queue_head_t tasks;
92 int tasks_count;
93 queue_head_t threads;
94 int threads_count;
95 decl_mutex_data(,tasks_threads_lock)
96
97 processor_t processor_list;
98 unsigned int processor_count;
99 static processor_t processor_list_tail;
100 decl_simple_lock_data(,processor_list_lock)
101
102 uint32_t processor_avail_count;
103
104 processor_t master_processor;
105 int master_cpu = 0;
106
107 /* Forwards */
108 kern_return_t processor_set_things(
109 processor_set_t pset,
110 mach_port_t **thing_list,
111 mach_msg_type_number_t *count,
112 int type);
113
void
processor_bootstrap(void)
{
	/*
	 * Early (single-threaded) startup: set up the statically
	 * allocated root processor set and node, the global task and
	 * thread bookkeeping, and initialize the master (boot) processor.
	 */
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	mutex_init(&tasks_threads_lock, 0);
	queue_init(&tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	/* the boot cpu becomes the master processor, assigned to pset0 */
	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}
132
/*
 *	Initialize the given processor for the cpu
 *	indicated by slot_num, and assign to the
 *	specified processor set.
 */
void
processor_init(
	processor_t		p,
	int			slot_num,
	processor_set_t		pset)
{
	run_queue_init(&p->runq);

	p->state = PROCESSOR_OFF_LINE;
	p->active_thread = p->next_thread = p->idle_thread = THREAD_NULL;
	p->processor_set = pset;
	p->current_pri = MINPRI;
	timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);
	p->deadline = UINT64_MAX;		/* no realtime deadline pending */
	p->timeslice = 0;
	p->processor_self = IP_NULL;		/* IPC port created lazily at start time */
	simple_lock_init(&p->lock, 0);
	processor_data_init(p);
	PROCESSOR_DATA(p, slot_num) = slot_num;
	p->processor_list = NULL;

	/* append to the tail of the global singly-linked processor list */
	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = p;
	else
		processor_list_tail->processor_list = p;
	processor_list_tail = p;
	processor_count++;
	simple_unlock(&processor_list_lock);
}
168
169 processor_set_t
170 processor_pset(
171 processor_t processor)
172 {
173 return (processor->processor_set);
174 }
175
176 pset_node_t
177 pset_node_root(void)
178 {
179 return &pset_node0;
180 }
181
/*
 *	pset_create:
 *
 *	Allocate and initialize a new processor set and link it onto
 *	the tail of the node's pset list.  Returns PROCESSOR_SET_NULL
 *	if the allocation fails.
 */
processor_set_t
pset_create(
	pset_node_t		node)
{
	processor_set_t	*prev, pset = kalloc(sizeof (*pset));

	if (pset != PROCESSOR_SET_NULL) {
		pset_init(pset, node);

		simple_lock(&pset_node_lock);

		/* walk to the end of the node's list of psets */
		prev = &node->psets;
		while (*prev != PROCESSOR_SET_NULL)
			prev = &(*prev)->pset_list;

		*prev = pset;

		simple_unlock(&pset_node_lock);
	}

	return (pset);
}
204
/*
 *	Initialize the given processor_set structure.
 */
void
pset_init(
	processor_set_t		pset,
	pset_node_t		node)
{
	queue_init(&pset->active_queue);	/* processors with runnable work */
	queue_init(&pset->idle_queue);		/* processors awaiting work */
	pset->idle_count = 0;
	pset->processor_count = 0;
	pset->high_hint = PROCESSOR_NULL;	/* scheduler placement hints */
	pset->low_hint = PROCESSOR_NULL;
	pset_lock_init(pset);
	pset->pset_self = IP_NULL;		/* IPC ports created lazily */
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
	pset->node = node;
}
225
226 kern_return_t
227 processor_info_count(
228 processor_flavor_t flavor,
229 mach_msg_type_number_t *count)
230 {
231 switch (flavor) {
232
233 case PROCESSOR_BASIC_INFO:
234 *count = PROCESSOR_BASIC_INFO_COUNT;
235 break;
236
237 case PROCESSOR_CPU_LOAD_INFO:
238 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
239 break;
240
241 default:
242 return (cpu_info_count(flavor, count));
243 }
244
245 return (KERN_SUCCESS);
246 }
247
248
/*
 *	processor_info:
 *
 *	Return information about the given processor for the requested
 *	flavor.  On entry *count is the size of the caller's buffer;
 *	on success it is set to the amount actually used and *host is
 *	set to the host the processor belongs to.
 */
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	register int	slot_num, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	slot_num = PROCESSOR_DATA(processor, slot_num);

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t		basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(slot_num);
		basic_info->cpu_subtype = slot_subtype(slot_num);
		state = processor->state;
		/* any state other than explicitly off-line counts as running */
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = slot_num;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		register processor_cpu_load_info_t	cpu_load_info;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		/* convert accumulated per-state time into clock ticks */
		cpu_load_info = (processor_cpu_load_info_t) info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] =
				timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
				timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
				timer_grab(&PROCESSOR_DATA(processor, idle_state)) / hz_tick_interval;
		/* nice time is not accounted separately here */
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		/* machine-dependent flavors */
		result = cpu_info(flavor, slot_num, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}
324
/*
 *	processor_start:
 *
 *	Bring an off-line processor on line: create its idle thread
 *	(and, on first start, a dedicated startup thread), then call
 *	the machine layer to start the cpu.  Any failure rolls the
 *	processor state back to PROCESSOR_OFF_LINE.
 */
kern_return_t
processor_start(
	processor_t			processor)
{
	processor_set_t		pset;
	thread_t			thread;
	kern_return_t		result;
	spl_t				s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	/*
	 * The master processor is started directly; bind the calling
	 * thread to it so that cpu_start() runs in the right context.
	 */
	if (processor == master_processor) {
		processor_t		prev;

		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(PROCESSOR_DATA(processor, slot_num));

		thread_bind(prev);

		return (result);
	}

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	/* claim the processor; every failure path below must undo this */
	processor->state = PROCESSOR_START;
	pset_unlock(pset);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}

		s = splsched();
		thread_lock(thread);
		/* queue the startup thread to run first on this processor */
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		/* release the local reference from kernel_thread_create() */
		thread_deallocate(thread);
	}

	/* create the processor's IPC port on first start */
	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(PROCESSOR_DATA(processor, slot_num));
	if (result != KERN_SUCCESS) {
		s = splsched();
		pset_lock(pset);
		processor->state = PROCESSOR_OFF_LINE;
		timer_call_shutdown(processor);
		pset_unlock(pset);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
428
429 kern_return_t
430 processor_exit(
431 processor_t processor)
432 {
433 if (processor == PROCESSOR_NULL)
434 return(KERN_INVALID_ARGUMENT);
435
436 return(processor_shutdown(processor));
437 }
438
439 kern_return_t
440 processor_control(
441 processor_t processor,
442 processor_info_t info,
443 mach_msg_type_number_t count)
444 {
445 if (processor == PROCESSOR_NULL)
446 return(KERN_INVALID_ARGUMENT);
447
448 return(cpu_control(PROCESSOR_DATA(processor, slot_num), info, count));
449 }
450
451 kern_return_t
452 processor_set_create(
453 __unused host_t host,
454 __unused processor_set_t *new_set,
455 __unused processor_set_t *new_name)
456 {
457 return(KERN_FAILURE);
458 }
459
460 kern_return_t
461 processor_set_destroy(
462 __unused processor_set_t pset)
463 {
464 return(KERN_FAILURE);
465 }
466
467 kern_return_t
468 processor_get_assignment(
469 processor_t processor,
470 processor_set_t *pset)
471 {
472 int state;
473
474 state = processor->state;
475 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
476 return(KERN_FAILURE);
477
478 *pset = &pset0;
479
480 return(KERN_SUCCESS);
481 }
482
483 kern_return_t
484 processor_set_info(
485 processor_set_t pset,
486 int flavor,
487 host_t *host,
488 processor_set_info_t info,
489 mach_msg_type_number_t *count)
490 {
491 if (pset == PROCESSOR_SET_NULL)
492 return(KERN_INVALID_ARGUMENT);
493
494 if (flavor == PROCESSOR_SET_BASIC_INFO) {
495 register processor_set_basic_info_t basic_info;
496
497 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
498 return(KERN_FAILURE);
499
500 basic_info = (processor_set_basic_info_t) info;
501 basic_info->processor_count = processor_avail_count;
502 basic_info->default_policy = POLICY_TIMESHARE;
503
504 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
505 *host = &realhost;
506 return(KERN_SUCCESS);
507 }
508 else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
509 register policy_timeshare_base_t ts_base;
510
511 if (*count < POLICY_TIMESHARE_BASE_COUNT)
512 return(KERN_FAILURE);
513
514 ts_base = (policy_timeshare_base_t) info;
515 ts_base->base_priority = BASEPRI_DEFAULT;
516
517 *count = POLICY_TIMESHARE_BASE_COUNT;
518 *host = &realhost;
519 return(KERN_SUCCESS);
520 }
521 else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
522 register policy_fifo_base_t fifo_base;
523
524 if (*count < POLICY_FIFO_BASE_COUNT)
525 return(KERN_FAILURE);
526
527 fifo_base = (policy_fifo_base_t) info;
528 fifo_base->base_priority = BASEPRI_DEFAULT;
529
530 *count = POLICY_FIFO_BASE_COUNT;
531 *host = &realhost;
532 return(KERN_SUCCESS);
533 }
534 else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
535 register policy_rr_base_t rr_base;
536
537 if (*count < POLICY_RR_BASE_COUNT)
538 return(KERN_FAILURE);
539
540 rr_base = (policy_rr_base_t) info;
541 rr_base->base_priority = BASEPRI_DEFAULT;
542 rr_base->quantum = 1;
543
544 *count = POLICY_RR_BASE_COUNT;
545 *host = &realhost;
546 return(KERN_SUCCESS);
547 }
548 else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
549 register policy_timeshare_limit_t ts_limit;
550
551 if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
552 return(KERN_FAILURE);
553
554 ts_limit = (policy_timeshare_limit_t) info;
555 ts_limit->max_priority = MAXPRI_KERNEL;
556
557 *count = POLICY_TIMESHARE_LIMIT_COUNT;
558 *host = &realhost;
559 return(KERN_SUCCESS);
560 }
561 else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
562 register policy_fifo_limit_t fifo_limit;
563
564 if (*count < POLICY_FIFO_LIMIT_COUNT)
565 return(KERN_FAILURE);
566
567 fifo_limit = (policy_fifo_limit_t) info;
568 fifo_limit->max_priority = MAXPRI_KERNEL;
569
570 *count = POLICY_FIFO_LIMIT_COUNT;
571 *host = &realhost;
572 return(KERN_SUCCESS);
573 }
574 else if (flavor == PROCESSOR_SET_RR_LIMITS) {
575 register policy_rr_limit_t rr_limit;
576
577 if (*count < POLICY_RR_LIMIT_COUNT)
578 return(KERN_FAILURE);
579
580 rr_limit = (policy_rr_limit_t) info;
581 rr_limit->max_priority = MAXPRI_KERNEL;
582
583 *count = POLICY_RR_LIMIT_COUNT;
584 *host = &realhost;
585 return(KERN_SUCCESS);
586 }
587 else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
588 register int *enabled;
589
590 if (*count < (sizeof(*enabled)/sizeof(int)))
591 return(KERN_FAILURE);
592
593 enabled = (int *) info;
594 *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
595
596 *count = sizeof(*enabled)/sizeof(int);
597 *host = &realhost;
598 return(KERN_SUCCESS);
599 }
600
601
602 *host = HOST_NULL;
603 return(KERN_INVALID_ARGUMENT);
604 }
605
606 /*
607 * processor_set_statistics
608 *
609 * Returns scheduling statistics for a processor set.
610 */
611 kern_return_t
612 processor_set_statistics(
613 processor_set_t pset,
614 int flavor,
615 processor_set_info_t info,
616 mach_msg_type_number_t *count)
617 {
618 if (pset == PROCESSOR_SET_NULL || pset != &pset0)
619 return (KERN_INVALID_PROCESSOR_SET);
620
621 if (flavor == PROCESSOR_SET_LOAD_INFO) {
622 register processor_set_load_info_t load_info;
623
624 if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
625 return(KERN_FAILURE);
626
627 load_info = (processor_set_load_info_t) info;
628
629 load_info->mach_factor = sched_mach_factor;
630 load_info->load_average = sched_load_average;
631
632 load_info->task_count = tasks_count;
633 load_info->thread_count = threads_count;
634
635 *count = PROCESSOR_SET_LOAD_INFO_COUNT;
636 return(KERN_SUCCESS);
637 }
638
639 return(KERN_INVALID_ARGUMENT);
640 }
641
642 /*
643 * processor_set_max_priority:
644 *
645 * Specify max priority permitted on processor set. This affects
646 * newly created and assigned threads. Optionally change existing
647 * ones.
648 */
649 kern_return_t
650 processor_set_max_priority(
651 __unused processor_set_t pset,
652 __unused int max_priority,
653 __unused boolean_t change_threads)
654 {
655 return (KERN_INVALID_ARGUMENT);
656 }
657
658 /*
659 * processor_set_policy_enable:
660 *
661 * Allow indicated policy on processor set.
662 */
663
664 kern_return_t
665 processor_set_policy_enable(
666 __unused processor_set_t pset,
667 __unused int policy)
668 {
669 return (KERN_INVALID_ARGUMENT);
670 }
671
672 /*
673 * processor_set_policy_disable:
674 *
675 * Forbid indicated policy on processor set. Time sharing cannot
676 * be forbidden.
677 */
678 kern_return_t
679 processor_set_policy_disable(
680 __unused processor_set_t pset,
681 __unused int policy,
682 __unused boolean_t change_threads)
683 {
684 return (KERN_INVALID_ARGUMENT);
685 }
686
#define THING_TASK	0	/* enumerate tasks */
#define THING_THREAD	1	/* enumerate threads */

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}.
 *	Snapshots the global task or thread list into a freshly
 *	allocated array of referenced ports; the caller (MIG)
 *	owns the returned array and references.
 */
kern_return_t
processor_set_things(
	processor_set_t			pset,
	mach_port_t				**thing_list,
	mach_msg_type_number_t	*count,
	int						type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	/* only the single static processor set is supported */
	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0;
	addr = NULL;

	/*
	 * Allocate-then-recheck loop: the list may grow while the lock
	 * is dropped for allocation, so repeat until the buffer is big
	 * enough while the lock is held.
	 */
	for (;;) {
		mutex_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock and allocate more memory */
		mutex_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the list locked */

	actual = 0;
	switch (type) {

	case THING_TASK: {
		task_t		task, *task_list = (task_t *)addr;

		/* take a reference on each task while the list lock is held */
		for (task = (task_t)queue_first(&tasks);
						!queue_end(&tasks, (queue_entry_t)task);
							task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			/* secure kernels do not expose the kernel task */
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}

		break;
	}

	case THING_THREAD: {
		thread_t	thread, *thread_list = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&threads);
						!queue_end(&threads, (queue_entry_t)thread);
							thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}

	}

	mutex_unlock(&tasks_threads_lock);

	/* fewer things collected than the buffer was sized for */
	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* drop the references taken above before failing */
				switch (type) {

				case THING_TASK: {
					task_t		*task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD: {
					thread_t	*thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}

				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {

		case THING_TASK: {
			task_t		*task_list = (task_t *)addr;

			/* each conversion consumes the task reference taken above */
			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(task_list[i]);
			break;
		}

		case THING_THREAD: {
			thread_t	*thread_list = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(thread_list[i]);
			break;
		}

		}
	}

	return (KERN_SUCCESS);
}
860
861
862 /*
863 * processor_set_tasks:
864 *
865 * List all tasks in the processor set.
866 */
867 kern_return_t
868 processor_set_tasks(
869 processor_set_t pset,
870 task_array_t *task_list,
871 mach_msg_type_number_t *count)
872 {
873 return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
874 }
875
876 /*
877 * processor_set_threads:
878 *
879 * List all threads in the processor set.
880 */
881 #if defined(SECURE_KERNEL)
882 kern_return_t
883 processor_set_threads(
884 __unused processor_set_t pset,
885 __unused thread_array_t *thread_list,
886 __unused mach_msg_type_number_t *count)
887 {
888 return KERN_FAILURE;
889 }
890 #else
891 kern_return_t
892 processor_set_threads(
893 processor_set_t pset,
894 thread_array_t *thread_list,
895 mach_msg_type_number_t *count)
896 {
897 return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
898 }
899 #endif
900
901 /*
902 * processor_set_policy_control
903 *
904 * Controls the scheduling attributes governing the processor set.
905 * Allows control of enabled policies, and per-policy base and limit
906 * priorities.
907 */
908 kern_return_t
909 processor_set_policy_control(
910 __unused processor_set_t pset,
911 __unused int flavor,
912 __unused processor_set_info_t policy_info,
913 __unused mach_msg_type_number_t count,
914 __unused boolean_t change)
915 {
916 return (KERN_INVALID_ARGUMENT);
917 }
918
919 #undef pset_deallocate
920 void pset_deallocate(processor_set_t pset);
921 void
922 pset_deallocate(
923 __unused processor_set_t pset)
924 {
925 return;
926 }
927
928 #undef pset_reference
929 void pset_reference(processor_set_t pset);
930 void
931 pset_reference(
932 __unused processor_set_t pset)
933 {
934 return;
935 }