/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	processor.c: processor and processor_set manipulation routines.
 */
59 #include <mach/boolean.h>
60 #include <mach/policy.h>
61 #include <mach/processor_info.h>
62 #include <mach/vm_param.h>
63 #include <kern/cpu_number.h>
64 #include <kern/host.h>
65 #include <kern/machine.h>
66 #include <kern/misc_protos.h>
67 #include <kern/processor.h>
68 #include <kern/sched.h>
69 #include <kern/task.h>
70 #include <kern/thread.h>
71 #include <kern/ipc_host.h>
72 #include <kern/ipc_tt.h>
73 #include <ipc/ipc_port.h>
74 #include <kern/kalloc.h>
79 #include <mach/mach_host_server.h>
84 struct processor_set default_pset
;
85 struct processor processor_array
[NCPUS
];
87 processor_t master_processor
;
88 processor_t processor_ptr
[NCPUS
];
92 processor_set_t pset
);
95 register processor_t pr
,
99 processor_set_t pset
);
101 kern_return_t
processor_set_base(
102 processor_set_t pset
,
107 kern_return_t
processor_set_limit(
108 processor_set_t pset
,
110 policy_limit_t limit
,
113 kern_return_t
processor_set_things(
114 processor_set_t pset
,
115 mach_port_t
**thing_list
,
116 mach_msg_type_number_t
*count
,
121 * Bootstrap the processor/pset system so the scheduler can run.
124 pset_sys_bootstrap(void)
128 pset_init(&default_pset
);
129 for (i
= 0; i
< NCPUS
; i
++) {
131 * Initialize processor data structures.
132 * Note that cpu_to_processor(i) is processor_ptr[i].
134 processor_ptr
[i
] = &processor_array
[i
];
135 processor_init(processor_ptr
[i
], i
);
137 master_processor
= cpu_to_processor(master_cpu
);
138 default_pset
.active
= TRUE
;
142 * Initialize the given processor_set structure.
146 register processor_set_t pset
)
150 /* setup run-queues */
151 simple_lock_init(&pset
->runq
.lock
, ETAP_THREAD_PSET_RUNQ
);
152 pset
->runq
.count
= 0;
153 for (i
= 0; i
< NRQBM
; i
++) {
154 pset
->runq
.bitmap
[i
] = 0;
156 setbit(MAXPRI
- IDLEPRI
, pset
->runq
.bitmap
);
157 pset
->runq
.highq
= IDLEPRI
;
158 for (i
= 0; i
< NRQS
; i
++) {
159 queue_init(&(pset
->runq
.queues
[i
]));
162 queue_init(&pset
->idle_queue
);
163 pset
->idle_count
= 0;
164 simple_lock_init(&pset
->idle_lock
, ETAP_THREAD_PSET_IDLE
);
165 pset
->mach_factor
= pset
->load_average
= 0;
166 pset
->sched_load
= 0;
167 queue_init(&pset
->processors
);
168 pset
->processor_count
= 0;
169 simple_lock_init(&pset
->processors_lock
, ETAP_THREAD_PSET
);
170 queue_init(&pset
->tasks
);
171 pset
->task_count
= 0;
172 queue_init(&pset
->threads
);
173 pset
->thread_count
= 0;
175 pset
->active
= FALSE
;
176 mutex_init(&pset
->lock
, ETAP_THREAD_PSET
);
177 pset
->pset_self
= IP_NULL
;
178 pset
->pset_name_self
= IP_NULL
;
179 pset
->set_quanta
= 1;
181 for (i
= 0; i
<= NCPUS
; i
++)
182 pset
->machine_quanta
[i
] = 1;
186 * Initialize the given processor structure for the processor in
187 * the slot specified by slot_num.
191 register processor_t pr
,
196 /* setup run-queues */
197 simple_lock_init(&pr
->runq
.lock
, ETAP_THREAD_PROC_RUNQ
);
199 for (i
= 0; i
< NRQBM
; i
++) {
200 pr
->runq
.bitmap
[i
] = 0;
202 setbit(MAXPRI
- IDLEPRI
, pr
->runq
.bitmap
);
203 pr
->runq
.highq
= IDLEPRI
;
204 for (i
= 0; i
< NRQS
; i
++) {
205 queue_init(&(pr
->runq
.queues
[i
]));
208 queue_init(&pr
->processor_queue
);
209 pr
->state
= PROCESSOR_OFF_LINE
;
210 pr
->next_thread
= THREAD_NULL
;
211 pr
->idle_thread
= THREAD_NULL
;
212 timer_call_setup(&pr
->quantum_timer
, thread_quantum_expire
, pr
);
213 pr
->slice_quanta
= 0;
214 pr
->processor_set
= PROCESSOR_SET_NULL
;
215 pr
->processor_set_next
= PROCESSOR_SET_NULL
;
216 queue_init(&pr
->processors
);
217 simple_lock_init(&pr
->lock
, ETAP_THREAD_PROC
);
218 pr
->processor_self
= IP_NULL
;
219 pr
->slot_num
= slot_num
;
223 * pset_remove_processor() removes a processor from a processor_set.
224 * It can only be called on the current processor. Caller must
225 * hold lock on current processor and processor set.
228 pset_remove_processor(
229 processor_set_t pset
,
230 processor_t processor
)
232 if (pset
!= processor
->processor_set
)
233 panic("pset_remove_processor: wrong pset");
235 queue_remove(&pset
->processors
, processor
, processor_t
, processors
);
236 processor
->processor_set
= PROCESSOR_SET_NULL
;
237 pset
->processor_count
--;
238 pset_quanta_set(pset
);
242 * pset_add_processor() adds a processor to a processor_set.
243 * It can only be called on the current processor. Caller must
244 * hold lock on curent processor and on pset. No reference counting on
245 * processors. Processor reference to pset is implicit.
249 processor_set_t pset
,
250 processor_t processor
)
252 queue_enter(&pset
->processors
, processor
, processor_t
, processors
);
253 processor
->processor_set
= pset
;
254 pset
->processor_count
++;
255 pset_quanta_set(pset
);
259 * pset_remove_task() removes a task from a processor_set.
260 * Caller must hold locks on pset and task. Pset reference count
261 * is not decremented; caller must explicitly pset_deallocate.
265 processor_set_t pset
,
268 if (pset
!= task
->processor_set
)
271 queue_remove(&pset
->tasks
, task
, task_t
, pset_tasks
);
272 task
->processor_set
= PROCESSOR_SET_NULL
;
277 * pset_add_task() adds a task to a processor_set.
278 * Caller must hold locks on pset and task. Pset references to
279 * tasks are implicit.
283 processor_set_t pset
,
286 queue_enter(&pset
->tasks
, task
, task_t
, pset_tasks
);
287 task
->processor_set
= pset
;
293 * pset_remove_thread() removes a thread from a processor_set.
294 * Caller must hold locks on pset and thread. Pset reference count
295 * is not decremented; caller must explicitly pset_deallocate.
299 processor_set_t pset
,
302 queue_remove(&pset
->threads
, thread
, thread_t
, pset_threads
);
303 thread
->processor_set
= PROCESSOR_SET_NULL
;
304 pset
->thread_count
--;
308 * pset_add_thread() adds a thread to a processor_set.
309 * Caller must hold locks on pset and thread. Pset references to
310 * threads are implicit.
314 processor_set_t pset
,
317 queue_enter(&pset
->threads
, thread
, thread_t
, pset_threads
);
318 thread
->processor_set
= pset
;
319 pset
->thread_count
++;
324 * thread_change_psets() changes the pset of a thread. Caller must
325 * hold locks on both psets and thread. The old pset must be
326 * explicitly pset_deallocat()'ed by caller.
331 processor_set_t old_pset
,
332 processor_set_t new_pset
)
334 queue_remove(&old_pset
->threads
, thread
, thread_t
, pset_threads
);
335 old_pset
->thread_count
--;
336 queue_enter(&new_pset
->threads
, thread
, thread_t
, pset_threads
);
337 thread
->processor_set
= new_pset
;
338 new_pset
->thread_count
++;
339 new_pset
->ref_count
++;
345 * Remove one reference to the processor set. Destroy processor_set
346 * if this was the last reference.
350 processor_set_t pset
)
352 if (pset
== PROCESSOR_SET_NULL
)
356 if (--pset
->ref_count
> 0) {
361 panic("pset_deallocate: default_pset destroyed");
367 * Add one reference to the processor set.
371 processor_set_t pset
)
380 processor_info_count(
381 processor_flavor_t flavor
,
382 mach_msg_type_number_t
*count
)
387 case PROCESSOR_BASIC_INFO
:
388 *count
= PROCESSOR_BASIC_INFO_COUNT
;
390 case PROCESSOR_CPU_LOAD_INFO
:
391 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
394 kr
= cpu_info_count(flavor
, count
);
402 register processor_t processor
,
403 processor_flavor_t flavor
,
405 processor_info_t info
,
406 mach_msg_type_number_t
*count
)
408 register int i
, slot_num
, state
;
409 register processor_basic_info_t basic_info
;
410 register processor_cpu_load_info_t cpu_load_info
;
413 if (processor
== PROCESSOR_NULL
)
414 return(KERN_INVALID_ARGUMENT
);
416 slot_num
= processor
->slot_num
;
420 case PROCESSOR_BASIC_INFO
:
422 if (*count
< PROCESSOR_BASIC_INFO_COUNT
)
423 return(KERN_FAILURE
);
425 basic_info
= (processor_basic_info_t
) info
;
426 basic_info
->cpu_type
= machine_slot
[slot_num
].cpu_type
;
427 basic_info
->cpu_subtype
= machine_slot
[slot_num
].cpu_subtype
;
428 state
= processor
->state
;
429 if (state
== PROCESSOR_OFF_LINE
)
430 basic_info
->running
= FALSE
;
432 basic_info
->running
= TRUE
;
433 basic_info
->slot_num
= slot_num
;
434 if (processor
== master_processor
)
435 basic_info
->is_master
= TRUE
;
437 basic_info
->is_master
= FALSE
;
439 *count
= PROCESSOR_BASIC_INFO_COUNT
;
441 return(KERN_SUCCESS
);
443 case PROCESSOR_CPU_LOAD_INFO
:
445 if (*count
< PROCESSOR_CPU_LOAD_INFO_COUNT
)
446 return(KERN_FAILURE
);
448 cpu_load_info
= (processor_cpu_load_info_t
) info
;
449 for (i
=0;i
<CPU_STATE_MAX
;i
++)
450 cpu_load_info
->cpu_ticks
[i
] = machine_slot
[slot_num
].cpu_ticks
[i
];
452 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
454 return(KERN_SUCCESS
);
458 kr
=cpu_info(flavor
, slot_num
, info
, count
);
459 if (kr
== KERN_SUCCESS
)
468 processor_t processor
)
474 if (processor
== PROCESSOR_NULL
)
475 return(KERN_INVALID_ARGUMENT
);
477 if (processor
== master_processor
) {
478 thread_bind(current_thread(), processor
);
479 thread_block((void (*)(void)) 0);
480 kr
= cpu_start(processor
->slot_num
);
481 thread_bind(current_thread(), PROCESSOR_NULL
);
487 processor_lock(processor
);
489 state
= processor
->state
;
490 if (state
!= PROCESSOR_OFF_LINE
) {
491 processor_unlock(processor
);
493 return(KERN_FAILURE
);
495 processor
->state
= PROCESSOR_START
;
496 processor_unlock(processor
);
499 if (processor
->next_thread
== THREAD_NULL
) {
501 extern void start_cpu_thread(void);
503 thread
= kernel_thread_with_priority(
504 kernel_task
, MAXPRI_KERNEL
,
505 start_cpu_thread
, TRUE
, FALSE
);
509 thread_bind_locked(thread
, processor
);
510 thread_go_locked(thread
, THREAD_AWAKENED
);
511 (void)rem_runq(thread
);
512 processor
->next_thread
= thread
;
513 thread_unlock(thread
);
517 kr
= cpu_start(processor
->slot_num
);
519 if (kr
!= KERN_SUCCESS
) {
521 processor_lock(processor
);
522 processor
->state
= PROCESSOR_OFF_LINE
;
523 processor_unlock(processor
);
532 processor_t processor
)
534 if (processor
== PROCESSOR_NULL
)
535 return(KERN_INVALID_ARGUMENT
);
537 return(processor_shutdown(processor
));
542 processor_t processor
,
543 processor_info_t info
,
544 mach_msg_type_number_t count
)
546 if (processor
== PROCESSOR_NULL
)
547 return(KERN_INVALID_ARGUMENT
);
549 return(cpu_control(processor
->slot_num
, info
, count
));
553 * Precalculate the appropriate timesharing quanta based on load. The
554 * index into machine_quanta is the number of threads on the
555 * processor set queue. It is limited to the number of processors in
561 processor_set_t pset
)
563 register int i
, ncpus
;
565 ncpus
= pset
->processor_count
;
567 for (i
=1; i
<= ncpus
; i
++)
568 pset
->machine_quanta
[i
] = (ncpus
+ (i
/ 2)) / i
;
570 pset
->machine_quanta
[0] = pset
->machine_quanta
[1];
572 i
= (pset
->runq
.count
> ncpus
)? ncpus
: pset
->runq
.count
;
573 pset
->set_quanta
= pset
->machine_quanta
[i
];
577 processor_set_create(
579 processor_set_t
*new_set
,
580 processor_set_t
*new_name
)
583 host
++; new_set
++; new_name
++;
585 return(KERN_FAILURE
);
589 processor_set_destroy(
590 processor_set_t pset
)
595 return(KERN_FAILURE
);
599 processor_get_assignment(
600 processor_t processor
,
601 processor_set_t
*pset
)
605 state
= processor
->state
;
606 if (state
== PROCESSOR_SHUTDOWN
|| state
== PROCESSOR_OFF_LINE
)
607 return(KERN_FAILURE
);
609 *pset
= processor
->processor_set
;
610 pset_reference(*pset
);
611 return(KERN_SUCCESS
);
616 processor_set_t pset
,
619 processor_set_info_t info
,
620 mach_msg_type_number_t
*count
)
622 if (pset
== PROCESSOR_SET_NULL
)
623 return(KERN_INVALID_ARGUMENT
);
625 if (flavor
== PROCESSOR_SET_BASIC_INFO
) {
626 register processor_set_basic_info_t basic_info
;
628 if (*count
< PROCESSOR_SET_BASIC_INFO_COUNT
)
629 return(KERN_FAILURE
);
631 basic_info
= (processor_set_basic_info_t
) info
;
632 basic_info
->processor_count
= pset
->processor_count
;
633 basic_info
->default_policy
= POLICY_TIMESHARE
;
635 *count
= PROCESSOR_SET_BASIC_INFO_COUNT
;
637 return(KERN_SUCCESS
);
639 else if (flavor
== PROCESSOR_SET_TIMESHARE_DEFAULT
) {
640 register policy_timeshare_base_t ts_base
;
642 if (*count
< POLICY_TIMESHARE_BASE_COUNT
)
643 return(KERN_FAILURE
);
645 ts_base
= (policy_timeshare_base_t
) info
;
646 ts_base
->base_priority
= BASEPRI_DEFAULT
;
648 *count
= POLICY_TIMESHARE_BASE_COUNT
;
650 return(KERN_SUCCESS
);
652 else if (flavor
== PROCESSOR_SET_FIFO_DEFAULT
) {
653 register policy_fifo_base_t fifo_base
;
655 if (*count
< POLICY_FIFO_BASE_COUNT
)
656 return(KERN_FAILURE
);
658 fifo_base
= (policy_fifo_base_t
) info
;
659 fifo_base
->base_priority
= BASEPRI_DEFAULT
;
661 *count
= POLICY_FIFO_BASE_COUNT
;
663 return(KERN_SUCCESS
);
665 else if (flavor
== PROCESSOR_SET_RR_DEFAULT
) {
666 register policy_rr_base_t rr_base
;
668 if (*count
< POLICY_RR_BASE_COUNT
)
669 return(KERN_FAILURE
);
671 rr_base
= (policy_rr_base_t
) info
;
672 rr_base
->base_priority
= BASEPRI_DEFAULT
;
673 rr_base
->quantum
= 1;
675 *count
= POLICY_RR_BASE_COUNT
;
677 return(KERN_SUCCESS
);
679 else if (flavor
== PROCESSOR_SET_TIMESHARE_LIMITS
) {
680 register policy_timeshare_limit_t ts_limit
;
682 if (*count
< POLICY_TIMESHARE_LIMIT_COUNT
)
683 return(KERN_FAILURE
);
685 ts_limit
= (policy_timeshare_limit_t
) info
;
686 ts_limit
->max_priority
= MAXPRI_STANDARD
;
688 *count
= POLICY_TIMESHARE_LIMIT_COUNT
;
690 return(KERN_SUCCESS
);
692 else if (flavor
== PROCESSOR_SET_FIFO_LIMITS
) {
693 register policy_fifo_limit_t fifo_limit
;
695 if (*count
< POLICY_FIFO_LIMIT_COUNT
)
696 return(KERN_FAILURE
);
698 fifo_limit
= (policy_fifo_limit_t
) info
;
699 fifo_limit
->max_priority
= MAXPRI_STANDARD
;
701 *count
= POLICY_FIFO_LIMIT_COUNT
;
703 return(KERN_SUCCESS
);
705 else if (flavor
== PROCESSOR_SET_RR_LIMITS
) {
706 register policy_rr_limit_t rr_limit
;
708 if (*count
< POLICY_RR_LIMIT_COUNT
)
709 return(KERN_FAILURE
);
711 rr_limit
= (policy_rr_limit_t
) info
;
712 rr_limit
->max_priority
= MAXPRI_STANDARD
;
714 *count
= POLICY_RR_LIMIT_COUNT
;
716 return(KERN_SUCCESS
);
718 else if (flavor
== PROCESSOR_SET_ENABLED_POLICIES
) {
719 register int *enabled
;
721 if (*count
< (sizeof(*enabled
)/sizeof(int)))
722 return(KERN_FAILURE
);
724 enabled
= (int *) info
;
725 *enabled
= POLICY_TIMESHARE
| POLICY_RR
| POLICY_FIFO
;
727 *count
= sizeof(*enabled
)/sizeof(int);
729 return(KERN_SUCCESS
);
734 return(KERN_INVALID_ARGUMENT
);
738 * processor_set_statistics
740 * Returns scheduling statistics for a processor set.
743 processor_set_statistics(
744 processor_set_t pset
,
746 processor_set_info_t info
,
747 mach_msg_type_number_t
*count
)
749 if (pset
== PROCESSOR_SET_NULL
)
750 return (KERN_INVALID_PROCESSOR_SET
);
752 if (flavor
== PROCESSOR_SET_LOAD_INFO
) {
753 register processor_set_load_info_t load_info
;
755 if (*count
< PROCESSOR_SET_LOAD_INFO_COUNT
)
756 return(KERN_FAILURE
);
758 load_info
= (processor_set_load_info_t
) info
;
761 load_info
->task_count
= pset
->task_count
;
762 load_info
->thread_count
= pset
->thread_count
;
763 simple_lock(&pset
->processors_lock
);
764 load_info
->mach_factor
= pset
->mach_factor
;
765 load_info
->load_average
= pset
->load_average
;
766 simple_unlock(&pset
->processors_lock
);
769 *count
= PROCESSOR_SET_LOAD_INFO_COUNT
;
770 return(KERN_SUCCESS
);
773 return(KERN_INVALID_ARGUMENT
);
777 * processor_set_max_priority:
779 * Specify max priority permitted on processor set. This affects
780 * newly created and assigned threads. Optionally change existing
784 processor_set_max_priority(
785 processor_set_t pset
,
787 boolean_t change_threads
)
789 return (KERN_INVALID_ARGUMENT
);
793 * processor_set_policy_enable:
795 * Allow indicated policy on processor set.
799 processor_set_policy_enable(
800 processor_set_t pset
,
803 return (KERN_INVALID_ARGUMENT
);
807 * processor_set_policy_disable:
809 * Forbid indicated policy on processor set. Time sharing cannot
813 processor_set_policy_disable(
814 processor_set_t pset
,
816 boolean_t change_threads
)
818 return (KERN_INVALID_ARGUMENT
);
822 #define THING_THREAD 1
825 * processor_set_things:
827 * Common internals for processor_set_{threads,tasks}
830 processor_set_things(
831 processor_set_t pset
,
832 mach_port_t
**thing_list
,
833 mach_msg_type_number_t
*count
,
836 unsigned int actual
; /* this many things */
839 vm_size_t size
, size_needed
;
842 if (pset
== PROCESSOR_SET_NULL
)
843 return KERN_INVALID_ARGUMENT
;
854 if (type
== THING_TASK
)
855 actual
= pset
->task_count
;
857 actual
= pset
->thread_count
;
859 /* do we have the memory we need? */
861 size_needed
= actual
* sizeof(mach_port_t
);
862 if (size_needed
<= size
)
865 /* unlock the pset and allocate more memory */
871 assert(size_needed
> 0);
876 return KERN_RESOURCE_SHORTAGE
;
879 /* OK, have memory and the processor_set is locked & active */
883 task_t
*tasks
= (task_t
*) addr
;
886 for (i
= 0, task
= (task_t
) queue_first(&pset
->tasks
);
888 i
++, task
= (task_t
) queue_next(&task
->pset_tasks
)) {
889 /* take ref for convert_task_to_port */
890 task_reference(task
);
893 assert(queue_end(&pset
->tasks
, (queue_entry_t
) task
));
898 thread_act_t
*thr_acts
= (thread_act_t
*) addr
;
900 thread_act_t thr_act
;
903 list
= &pset
->threads
;
904 thread
= (thread_t
) queue_first(list
);
906 while (i
< actual
&& !queue_end(list
, (queue_entry_t
)thread
)) {
907 thr_act
= thread_lock_act(thread
);
908 if (thr_act
&& thr_act
->ref_count
> 0) {
909 /* take ref for convert_act_to_port */
910 act_locked_act_reference(thr_act
);
911 thr_acts
[i
] = thr_act
;
914 thread_unlock_act(thread
);
915 thread
= (thread_t
) queue_next(&thread
->pset_threads
);
919 size_needed
= actual
* sizeof(mach_port_t
);
925 /* can unlock processor set now that we have the task/thread refs */
929 /* no things, so return null pointer and deallocate memory */
936 /* if we allocated too much, must copy */
938 if (size_needed
< size
) {
941 newaddr
= kalloc(size_needed
);
945 task_t
*tasks
= (task_t
*) addr
;
947 for (i
= 0; i
< actual
; i
++)
948 task_deallocate(tasks
[i
]);
953 thread_t
*threads
= (thread_t
*) addr
;
955 for (i
= 0; i
< actual
; i
++)
956 thread_deallocate(threads
[i
]);
961 return KERN_RESOURCE_SHORTAGE
;
964 bcopy((char *) addr
, (char *) newaddr
, size_needed
);
969 *thing_list
= (mach_port_t
*) addr
;
972 /* do the conversion that Mig should handle */
976 task_t
*tasks
= (task_t
*) addr
;
978 for (i
= 0; i
< actual
; i
++)
979 (*thing_list
)[i
] = convert_task_to_port(tasks
[i
]);
984 thread_act_t
*thr_acts
= (thread_act_t
*) addr
;
986 for (i
= 0; i
< actual
; i
++)
987 (*thing_list
)[i
] = convert_act_to_port(thr_acts
[i
]);
993 return(KERN_SUCCESS
);
998 * processor_set_tasks:
1000 * List all tasks in the processor set.
1003 processor_set_tasks(
1004 processor_set_t pset
,
1005 task_array_t
*task_list
,
1006 mach_msg_type_number_t
*count
)
1008 return(processor_set_things(pset
, (mach_port_t
**)task_list
, count
, THING_TASK
));
1012 * processor_set_threads:
1014 * List all threads in the processor set.
1017 processor_set_threads(
1018 processor_set_t pset
,
1019 thread_array_t
*thread_list
,
1020 mach_msg_type_number_t
*count
)
1022 return(processor_set_things(pset
, (mach_port_t
**)thread_list
, count
, THING_THREAD
));
1026 * processor_set_base:
1028 * Specify per-policy base priority for a processor set. Set processor
1029 * set default policy to the given policy. This affects newly created
1030 * and assigned threads. Optionally change existing ones.
1034 processor_set_t pset
,
1039 return (KERN_INVALID_ARGUMENT
);
1043 * processor_set_limit:
1045 * Specify per-policy limits for a processor set. This affects
1046 * newly created and assigned threads. Optionally change existing
1050 processor_set_limit(
1051 processor_set_t pset
,
1053 policy_limit_t limit
,
1056 return (KERN_POLICY_LIMIT
);
1060 * processor_set_policy_control
1062 * Controls the scheduling attributes governing the processor set.
1063 * Allows control of enabled policies, and per-policy base and limit
1067 processor_set_policy_control(
1068 processor_set_t pset
,
1070 processor_set_info_t policy_info
,
1071 mach_msg_type_number_t count
,
1074 return (KERN_INVALID_ARGUMENT
);