]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/processor.c
518891c79c9de2144f1babd19ea97f1a765c9fc7
[apple/xnu.git] / osfmk / kern / processor.c
1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 /*
60 * processor.c: processor and processor_set manipulation routines.
61 */
62
63 #include <mach/boolean.h>
64 #include <mach/policy.h>
65 #include <mach/processor.h>
66 #include <mach/processor_info.h>
67 #include <mach/vm_param.h>
68 #include <kern/cpu_number.h>
69 #include <kern/host.h>
70 #include <kern/machine.h>
71 #include <kern/misc_protos.h>
72 #include <kern/processor.h>
73 #include <kern/sched.h>
74 #include <kern/task.h>
75 #include <kern/thread.h>
76 #include <kern/ipc_host.h>
77 #include <kern/ipc_tt.h>
78 #include <ipc/ipc_port.h>
79 #include <kern/kalloc.h>
80
81 /*
82 * Exported interface
83 */
84 #include <mach/mach_host_server.h>
85 #include <mach/processor_set_server.h>
86
87 struct processor_set pset0;
88 struct pset_node pset_node0;
89 decl_simple_lock_data(static,pset_node_lock)
90
91 queue_head_t tasks;
92 int tasks_count;
93 queue_head_t threads;
94 int threads_count;
95 decl_mutex_data(,tasks_threads_lock)
96
97 processor_t processor_list;
98 unsigned int processor_count;
99 static processor_t processor_list_tail;
100 decl_simple_lock_data(,processor_list_lock)
101
102 uint32_t processor_avail_count;
103
104 processor_t master_processor;
105 int master_cpu = 0;
106
107 /* Forwards */
108 kern_return_t processor_set_things(
109 processor_set_t pset,
110 mach_port_t **thing_list,
111 mach_msg_type_number_t *count,
112 int type);
113
/*
 *	processor_bootstrap:
 *
 *	One-time startup initialization of the boot pset/node
 *	topology, the global task/thread lists, and the master
 *	processor.  Ordering matters: pset0 must be initialized
 *	before the master processor is assigned to it.
 */
void
processor_bootstrap(void)
{
	/* pset0 lives on pset_node0; link them to each other */
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	/* global lists of all tasks and threads, guarded by one mutex */
	mutex_init(&tasks_threads_lock, 0);
	queue_init(&tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	master_processor = cpu_to_processor(master_cpu);

	/* the boot cpu joins pset0 like any other processor */
	processor_init(master_processor, master_cpu, &pset0);
}
132
/*
 *	Initialize the given processor for the cpu
 *	indicated by slot_num, and assign to the
 *	specified processor set.
 */
void
processor_init(
	processor_t		p,
	int				slot_num,
	processor_set_t	pset)
{
	run_queue_init(&p->runq);

	p->state = PROCESSOR_OFF_LINE;
	p->active_thread = p->next_thread = p->idle_thread = THREAD_NULL;
	p->processor_set = pset;
	p->current_pri = MINPRI;
	timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);
	p->deadline = UINT64_MAX;			/* no realtime deadline pending */
	p->timeslice = 0;
	p->processor_self = IP_NULL;		/* IPC port created lazily on start */
	simple_lock_init(&p->lock, 0);
	processor_data_init(p);
	PROCESSOR_DATA(p, slot_num) = slot_num;
	p->processor_list = NULL;

	/* append to the global singly-linked list of all processors */
	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = p;
	else
		processor_list_tail->processor_list = p;
	processor_list_tail = p;
	processor_count++;
	simple_unlock(&processor_list_lock);
}
168
169 processor_set_t
170 processor_pset(
171 processor_t processor)
172 {
173 return (processor->processor_set);
174 }
175
176 pset_node_t
177 pset_node_root(void)
178 {
179 return &pset_node0;
180 }
181
182 processor_set_t
183 pset_create(
184 pset_node_t node)
185 {
186 processor_set_t *prev, pset = kalloc(sizeof (*pset));
187
188 if (pset != PROCESSOR_SET_NULL) {
189 pset_init(pset, node);
190
191 simple_lock(&pset_node_lock);
192
193 prev = &node->psets;
194 while (*prev != PROCESSOR_SET_NULL)
195 prev = &(*prev)->pset_list;
196
197 *prev = pset;
198
199 simple_unlock(&pset_node_lock);
200 }
201
202 return (pset);
203 }
204
205 /*
206 * Initialize the given processor_set structure.
207 */
208 void
209 pset_init(
210 processor_set_t pset,
211 pset_node_t node)
212 {
213 queue_init(&pset->active_queue);
214 queue_init(&pset->idle_queue);
215 pset->idle_count = 0;
216 pset->processor_count = 0;
217 pset->low_pri = PROCESSOR_NULL;
218 pset_lock_init(pset);
219 pset->pset_self = IP_NULL;
220 pset->pset_name_self = IP_NULL;
221 pset->pset_list = PROCESSOR_SET_NULL;
222 pset->node = node;
223 }
224
225 kern_return_t
226 processor_info_count(
227 processor_flavor_t flavor,
228 mach_msg_type_number_t *count)
229 {
230 switch (flavor) {
231
232 case PROCESSOR_BASIC_INFO:
233 *count = PROCESSOR_BASIC_INFO_COUNT;
234 break;
235
236 case PROCESSOR_CPU_LOAD_INFO:
237 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
238 break;
239
240 default:
241 return (cpu_info_count(flavor, count));
242 }
243
244 return (KERN_SUCCESS);
245 }
246
247
/*
 *	processor_info:
 *
 *	Return information about the given processor for the
 *	requested flavor.  On success also returns the host
 *	(realhost) and sets *count to the size actually filled.
 *	Unknown flavors are forwarded to the machine-dependent
 *	cpu_info().
 */
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t		flavor,
	host_t					*host,
	processor_info_t		info,
	mach_msg_type_number_t	*count)
{
	register int	slot_num, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	slot_num = PROCESSOR_DATA(processor, slot_num);

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t		basic_info;

		/* caller's buffer must be large enough */
		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(slot_num);
		basic_info->cpu_subtype = slot_subtype(slot_num);
		state = processor->state;
		/* any state other than OFF_LINE is reported as running */
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = slot_num;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		register processor_cpu_load_info_t	cpu_load_info;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		/* convert accumulated per-state timers to clock ticks */
		cpu_load_info->cpu_ticks[CPU_STATE_USER] =
					timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
					timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
					timer_grab(&PROCESSOR_DATA(processor, idle_state)) / hz_tick_interval;
		/* nice time is not accounted separately */
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		/* machine-dependent flavors */
		result = cpu_info(flavor, slot_num, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}
323
/*
 *	processor_start:
 *
 *	Bring a processor online.  The master processor is a
 *	special case (a restart): bind the calling thread to it
 *	and invoke cpu_start() directly.  For any other processor:
 *	transition OFF_LINE -> START, lazily create the idle
 *	thread and (on first start only) a dedicated start-up
 *	thread, create the IPC port if needed, then start the cpu.
 *	Any failure rolls the state back to PROCESSOR_OFF_LINE.
 */
kern_return_t
processor_start(
	processor_t			processor)
{
	processor_set_t		pset;
	thread_t			thread;
	kern_return_t		result;
	spl_t				s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		processor_t		prev;

		/* run on the master processor while restarting it */
		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(PROCESSOR_DATA(processor, slot_num));

		thread_bind(prev);

		return (result);
	}

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	/* only an offline processor may be started */
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	pset_unlock(pset);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			/* roll back to OFF_LINE on failure */
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			/* roll back to OFF_LINE on failure */
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}

		s = splsched();
		thread_lock(thread);
		/* queue the start-up thread as the processor's first thread */
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		/* drop the creation reference; the processor now holds one */
		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(PROCESSOR_DATA(processor, slot_num));
	if (result != KERN_SUCCESS) {
		/* cpu failed to come up: undo state and pending timer */
		s = splsched();
		pset_lock(pset);
		processor->state = PROCESSOR_OFF_LINE;
		timer_call_shutdown(processor);
		pset_unlock(pset);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
427
428 kern_return_t
429 processor_exit(
430 processor_t processor)
431 {
432 if (processor == PROCESSOR_NULL)
433 return(KERN_INVALID_ARGUMENT);
434
435 return(processor_shutdown(processor));
436 }
437
438 kern_return_t
439 processor_control(
440 processor_t processor,
441 processor_info_t info,
442 mach_msg_type_number_t count)
443 {
444 if (processor == PROCESSOR_NULL)
445 return(KERN_INVALID_ARGUMENT);
446
447 return(cpu_control(PROCESSOR_DATA(processor, slot_num), info, count));
448 }
449
450 kern_return_t
451 processor_set_create(
452 __unused host_t host,
453 __unused processor_set_t *new_set,
454 __unused processor_set_t *new_name)
455 {
456 return(KERN_FAILURE);
457 }
458
459 kern_return_t
460 processor_set_destroy(
461 __unused processor_set_t pset)
462 {
463 return(KERN_FAILURE);
464 }
465
466 kern_return_t
467 processor_get_assignment(
468 processor_t processor,
469 processor_set_t *pset)
470 {
471 int state;
472
473 state = processor->state;
474 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
475 return(KERN_FAILURE);
476
477 *pset = &pset0;
478
479 return(KERN_SUCCESS);
480 }
481
482 kern_return_t
483 processor_set_info(
484 processor_set_t pset,
485 int flavor,
486 host_t *host,
487 processor_set_info_t info,
488 mach_msg_type_number_t *count)
489 {
490 if (pset == PROCESSOR_SET_NULL)
491 return(KERN_INVALID_ARGUMENT);
492
493 if (flavor == PROCESSOR_SET_BASIC_INFO) {
494 register processor_set_basic_info_t basic_info;
495
496 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
497 return(KERN_FAILURE);
498
499 basic_info = (processor_set_basic_info_t) info;
500 basic_info->processor_count = processor_avail_count;
501 basic_info->default_policy = POLICY_TIMESHARE;
502
503 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
504 *host = &realhost;
505 return(KERN_SUCCESS);
506 }
507 else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
508 register policy_timeshare_base_t ts_base;
509
510 if (*count < POLICY_TIMESHARE_BASE_COUNT)
511 return(KERN_FAILURE);
512
513 ts_base = (policy_timeshare_base_t) info;
514 ts_base->base_priority = BASEPRI_DEFAULT;
515
516 *count = POLICY_TIMESHARE_BASE_COUNT;
517 *host = &realhost;
518 return(KERN_SUCCESS);
519 }
520 else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
521 register policy_fifo_base_t fifo_base;
522
523 if (*count < POLICY_FIFO_BASE_COUNT)
524 return(KERN_FAILURE);
525
526 fifo_base = (policy_fifo_base_t) info;
527 fifo_base->base_priority = BASEPRI_DEFAULT;
528
529 *count = POLICY_FIFO_BASE_COUNT;
530 *host = &realhost;
531 return(KERN_SUCCESS);
532 }
533 else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
534 register policy_rr_base_t rr_base;
535
536 if (*count < POLICY_RR_BASE_COUNT)
537 return(KERN_FAILURE);
538
539 rr_base = (policy_rr_base_t) info;
540 rr_base->base_priority = BASEPRI_DEFAULT;
541 rr_base->quantum = 1;
542
543 *count = POLICY_RR_BASE_COUNT;
544 *host = &realhost;
545 return(KERN_SUCCESS);
546 }
547 else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
548 register policy_timeshare_limit_t ts_limit;
549
550 if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
551 return(KERN_FAILURE);
552
553 ts_limit = (policy_timeshare_limit_t) info;
554 ts_limit->max_priority = MAXPRI_KERNEL;
555
556 *count = POLICY_TIMESHARE_LIMIT_COUNT;
557 *host = &realhost;
558 return(KERN_SUCCESS);
559 }
560 else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
561 register policy_fifo_limit_t fifo_limit;
562
563 if (*count < POLICY_FIFO_LIMIT_COUNT)
564 return(KERN_FAILURE);
565
566 fifo_limit = (policy_fifo_limit_t) info;
567 fifo_limit->max_priority = MAXPRI_KERNEL;
568
569 *count = POLICY_FIFO_LIMIT_COUNT;
570 *host = &realhost;
571 return(KERN_SUCCESS);
572 }
573 else if (flavor == PROCESSOR_SET_RR_LIMITS) {
574 register policy_rr_limit_t rr_limit;
575
576 if (*count < POLICY_RR_LIMIT_COUNT)
577 return(KERN_FAILURE);
578
579 rr_limit = (policy_rr_limit_t) info;
580 rr_limit->max_priority = MAXPRI_KERNEL;
581
582 *count = POLICY_RR_LIMIT_COUNT;
583 *host = &realhost;
584 return(KERN_SUCCESS);
585 }
586 else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
587 register int *enabled;
588
589 if (*count < (sizeof(*enabled)/sizeof(int)))
590 return(KERN_FAILURE);
591
592 enabled = (int *) info;
593 *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
594
595 *count = sizeof(*enabled)/sizeof(int);
596 *host = &realhost;
597 return(KERN_SUCCESS);
598 }
599
600
601 *host = HOST_NULL;
602 return(KERN_INVALID_ARGUMENT);
603 }
604
605 /*
606 * processor_set_statistics
607 *
608 * Returns scheduling statistics for a processor set.
609 */
610 kern_return_t
611 processor_set_statistics(
612 processor_set_t pset,
613 int flavor,
614 processor_set_info_t info,
615 mach_msg_type_number_t *count)
616 {
617 if (pset == PROCESSOR_SET_NULL || pset != &pset0)
618 return (KERN_INVALID_PROCESSOR_SET);
619
620 if (flavor == PROCESSOR_SET_LOAD_INFO) {
621 register processor_set_load_info_t load_info;
622
623 if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
624 return(KERN_FAILURE);
625
626 load_info = (processor_set_load_info_t) info;
627
628 load_info->mach_factor = sched_mach_factor;
629 load_info->load_average = sched_load_average;
630
631 load_info->task_count = tasks_count;
632 load_info->thread_count = threads_count;
633
634 *count = PROCESSOR_SET_LOAD_INFO_COUNT;
635 return(KERN_SUCCESS);
636 }
637
638 return(KERN_INVALID_ARGUMENT);
639 }
640
641 /*
642 * processor_set_max_priority:
643 *
644 * Specify max priority permitted on processor set. This affects
645 * newly created and assigned threads. Optionally change existing
646 * ones.
647 */
648 kern_return_t
649 processor_set_max_priority(
650 __unused processor_set_t pset,
651 __unused int max_priority,
652 __unused boolean_t change_threads)
653 {
654 return (KERN_INVALID_ARGUMENT);
655 }
656
657 /*
658 * processor_set_policy_enable:
659 *
660 * Allow indicated policy on processor set.
661 */
662
663 kern_return_t
664 processor_set_policy_enable(
665 __unused processor_set_t pset,
666 __unused int policy)
667 {
668 return (KERN_INVALID_ARGUMENT);
669 }
670
671 /*
672 * processor_set_policy_disable:
673 *
674 * Forbid indicated policy on processor set. Time sharing cannot
675 * be forbidden.
676 */
677 kern_return_t
678 processor_set_policy_disable(
679 __unused processor_set_t pset,
680 __unused int policy,
681 __unused boolean_t change_threads)
682 {
683 return (KERN_INVALID_ARGUMENT);
684 }
685
686 #define THING_TASK 0
687 #define THING_THREAD 1
688
/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}.
 *	Snapshots the global task or thread list (depending on
 *	type) into a kalloc'd array, taking a reference on each
 *	element, then converts each element to a send right for
 *	the caller.  Ownership of the array and the rights passes
 *	to the caller (MIG consumes them).
 */
kern_return_t
processor_set_things(
	processor_set_t			pset,
	mach_port_t				**thing_list,
	mach_msg_type_number_t	*count,
	int						type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	/* only the single boot pset is supported */
	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0;
	addr = NULL;

	/*
	 *	Allocate-and-retry: the lists can grow while we are
	 *	unlocked allocating, so loop until the buffer is large
	 *	enough while the lock is held.
	 */
	for (;;) {
		mutex_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock and allocate more memory */
		mutex_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the list locked */

	actual = 0;
	switch (type) {

	case THING_TASK: {
		task_t		task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
						!queue_end(&tasks, (queue_entry_t)task);
							task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			/* secure kernels do not expose the kernel task */
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}

		break;
	}

	case THING_THREAD: {
		thread_t	thread, *thread_list = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&threads);
						!queue_end(&threads, (queue_entry_t)thread);
							thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}

	}

	mutex_unlock(&tasks_threads_lock);

	/* actual may be less than maxthings (e.g. SECURE_KERNEL skip) */
	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* drop the references taken above before failing */
				switch (type) {

				case THING_TASK: {
					task_t		*task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD: {
					thread_t	*thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}

				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */
		/* each convert_*_to_port consumes the reference taken above */

		switch (type) {

		case THING_TASK: {
			task_t		*task_list = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(task_list[i]);
			break;
		}

		case THING_THREAD: {
			thread_t	*thread_list = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(thread_list[i]);
			break;
		}

		}
	}

	return (KERN_SUCCESS);
}
859
860
861 /*
862 * processor_set_tasks:
863 *
864 * List all tasks in the processor set.
865 */
866 kern_return_t
867 processor_set_tasks(
868 processor_set_t pset,
869 task_array_t *task_list,
870 mach_msg_type_number_t *count)
871 {
872 return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
873 }
874
875 /*
876 * processor_set_threads:
877 *
878 * List all threads in the processor set.
879 */
880 #if defined(SECURE_KERNEL)
881 kern_return_t
882 processor_set_threads(
883 __unused processor_set_t pset,
884 __unused thread_array_t *thread_list,
885 __unused mach_msg_type_number_t *count)
886 {
887 return KERN_FAILURE;
888 }
889 #else
890 kern_return_t
891 processor_set_threads(
892 processor_set_t pset,
893 thread_array_t *thread_list,
894 mach_msg_type_number_t *count)
895 {
896 return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
897 }
898 #endif
899
900 /*
901 * processor_set_policy_control
902 *
903 * Controls the scheduling attributes governing the processor set.
904 * Allows control of enabled policies, and per-policy base and limit
905 * priorities.
906 */
907 kern_return_t
908 processor_set_policy_control(
909 __unused processor_set_t pset,
910 __unused int flavor,
911 __unused processor_set_info_t policy_info,
912 __unused mach_msg_type_number_t count,
913 __unused boolean_t change)
914 {
915 return (KERN_INVALID_ARGUMENT);
916 }
917
918 #undef pset_deallocate
919 void pset_deallocate(processor_set_t pset);
920 void
921 pset_deallocate(
922 __unused processor_set_t pset)
923 {
924 return;
925 }
926
927 #undef pset_reference
928 void pset_reference(processor_set_t pset);
929 void
930 pset_reference(
931 __unused processor_set_t pset)
932 {
933 return;
934 }