/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	processor.c: processor and processor_set manipulation routines.
 */
59 #include <mach/boolean.h>
60 #include <mach/policy.h>
61 #include <mach/processor_info.h>
62 #include <mach/vm_param.h>
63 #include <kern/cpu_number.h>
64 #include <kern/host.h>
65 #include <kern/machine.h>
66 #include <kern/misc_protos.h>
67 #include <kern/processor.h>
68 #include <kern/sched.h>
69 #include <kern/task.h>
70 #include <kern/thread.h>
71 #include <kern/ipc_host.h>
72 #include <kern/ipc_tt.h>
73 #include <ipc/ipc_port.h>
74 #include <kern/kalloc.h>
79 #include <mach/mach_host_server.h>
84 struct processor_set default_pset
;
85 struct processor processor_array
[NCPUS
];
89 processor_t master_processor
;
90 processor_t processor_ptr
[NCPUS
];
94 processor_set_t pset
);
97 register processor_t pr
,
100 void pset_quanta_setup(
101 processor_set_t pset
);
103 kern_return_t
processor_set_base(
104 processor_set_t pset
,
109 kern_return_t
processor_set_limit(
110 processor_set_t pset
,
112 policy_limit_t limit
,
115 kern_return_t
processor_set_things(
116 processor_set_t pset
,
117 mach_port_t
**thing_list
,
118 mach_msg_type_number_t
*count
,
123 * Bootstrap the processor/pset system so the scheduler can run.
126 pset_sys_bootstrap(void)
130 pset_init(&default_pset
);
132 for (i
= 0; i
< NCPUS
; i
++) {
134 * Initialize processor data structures.
135 * Note that cpu_to_processor(i) is processor_ptr[i].
137 processor_ptr
[i
] = &processor_array
[i
];
138 processor_init(processor_ptr
[i
], i
);
141 master_processor
= cpu_to_processor(master_cpu
);
143 default_pset
.active
= TRUE
;
147 * Initialize the given processor_set structure.
151 register processor_set_t pset
)
155 /* setup run queue */
156 pset
->runq
.highq
= IDLEPRI
;
157 for (i
= 0; i
< NRQBM
; i
++)
158 pset
->runq
.bitmap
[i
] = 0;
159 setbit(MAXPRI
- IDLEPRI
, pset
->runq
.bitmap
);
160 pset
->runq
.urgency
= pset
->runq
.count
= 0;
161 for (i
= 0; i
< NRQS
; i
++)
162 queue_init(&pset
->runq
.queues
[i
]);
164 queue_init(&pset
->idle_queue
);
165 pset
->idle_count
= 0;
166 queue_init(&pset
->active_queue
);
167 simple_lock_init(&pset
->sched_lock
, ETAP_THREAD_PSET_IDLE
);
168 pset
->run_count
= pset
->share_count
= 0;
169 pset
->mach_factor
= pset
->load_average
= 0;
170 pset
->sched_load
= 0;
171 queue_init(&pset
->processors
);
172 pset
->processor_count
= 0;
173 queue_init(&pset
->tasks
);
174 pset
->task_count
= 0;
175 queue_init(&pset
->threads
);
176 pset
->thread_count
= 0;
178 pset
->active
= FALSE
;
179 mutex_init(&pset
->lock
, ETAP_THREAD_PSET
);
180 pset
->pset_self
= IP_NULL
;
181 pset
->pset_name_self
= IP_NULL
;
182 pset
->timeshare_quanta
= 1;
184 for (i
= 0; i
<= NCPUS
; i
++)
185 pset
->quantum_factors
[i
] = 1;
189 * Initialize the given processor structure for the processor in
190 * the slot specified by slot_num.
194 register processor_t p
,
199 /* setup run queue */
200 p
->runq
.highq
= IDLEPRI
;
201 for (i
= 0; i
< NRQBM
; i
++)
202 p
->runq
.bitmap
[i
] = 0;
203 setbit(MAXPRI
- IDLEPRI
, p
->runq
.bitmap
);
204 p
->runq
.urgency
= p
->runq
.count
= 0;
205 for (i
= 0; i
< NRQS
; i
++)
206 queue_init(&p
->runq
.queues
[i
]);
208 p
->state
= PROCESSOR_OFF_LINE
;
209 p
->active_thread
= p
->next_thread
= p
->idle_thread
= THREAD_NULL
;
210 p
->processor_set
= PROCESSOR_SET_NULL
;
211 p
->current_pri
= MINPRI
;
212 timer_call_setup(&p
->quantum_timer
, thread_quantum_expire
, p
);
214 p
->deadline
= UINT64_MAX
;
215 simple_lock_init(&p
->lock
, ETAP_THREAD_PROC
);
216 p
->processor_self
= IP_NULL
;
217 p
->slot_num
= slot_num
;
223 * Remove one reference to the processor set. Destroy processor_set
224 * if this was the last reference.
228 processor_set_t pset
)
230 if (pset
== PROCESSOR_SET_NULL
)
233 assert(pset
== &default_pset
);
240 * Add one reference to the processor set.
244 processor_set_t pset
)
246 assert(pset
== &default_pset
);
249 #define pset_reference_locked(pset) assert(pset == &default_pset)
252 * pset_remove_processor() removes a processor from a processor_set.
253 * It can only be called on the current processor. Caller must
254 * hold lock on current processor and processor set.
257 pset_remove_processor(
258 processor_set_t pset
,
259 processor_t processor
)
261 if (pset
!= processor
->processor_set
)
262 panic("pset_remove_processor: wrong pset");
264 queue_remove(&pset
->processors
, processor
, processor_t
, processors
);
265 processor
->processor_set
= PROCESSOR_SET_NULL
;
266 pset
->processor_count
--;
267 pset_quanta_setup(pset
);
271 * pset_add_processor() adds a processor to a processor_set.
272 * It can only be called on the current processor. Caller must
273 * hold lock on curent processor and on pset. No reference counting on
274 * processors. Processor reference to pset is implicit.
278 processor_set_t pset
,
279 processor_t processor
)
281 queue_enter(&pset
->processors
, processor
, processor_t
, processors
);
282 processor
->processor_set
= pset
;
283 pset
->processor_count
++;
284 pset_quanta_setup(pset
);
288 * pset_remove_task() removes a task from a processor_set.
289 * Caller must hold locks on pset and task (unless task has
290 * no references left, in which case just the pset lock is
291 * needed). Pset reference count is not decremented;
292 * caller must explicitly pset_deallocate.
296 processor_set_t pset
,
299 if (pset
!= task
->processor_set
)
302 queue_remove(&pset
->tasks
, task
, task_t
, pset_tasks
);
303 task
->processor_set
= PROCESSOR_SET_NULL
;
308 * pset_add_task() adds a task to a processor_set.
309 * Caller must hold locks on pset and task. Pset references to
310 * tasks are implicit.
314 processor_set_t pset
,
317 queue_enter(&pset
->tasks
, task
, task_t
, pset_tasks
);
318 task
->processor_set
= pset
;
320 pset_reference_locked(pset
);
324 * pset_remove_thread() removes a thread from a processor_set.
325 * Caller must hold locks on pset and thread (but only if thread
326 * has outstanding references that could be used to lookup the pset).
327 * The pset reference count is not decremented; caller must explicitly
332 processor_set_t pset
,
335 queue_remove(&pset
->threads
, thread
, thread_t
, pset_threads
);
336 thread
->processor_set
= PROCESSOR_SET_NULL
;
337 pset
->thread_count
--;
341 * pset_add_thread() adds a thread to a processor_set.
342 * Caller must hold locks on pset and thread. Pset references to
343 * threads are implicit.
347 processor_set_t pset
,
350 queue_enter(&pset
->threads
, thread
, thread_t
, pset_threads
);
351 thread
->processor_set
= pset
;
352 pset
->thread_count
++;
353 pset_reference_locked(pset
);
357 * thread_change_psets() changes the pset of a thread. Caller must
358 * hold locks on both psets and thread. The old pset must be
359 * explicitly pset_deallocat()'ed by caller.
364 processor_set_t old_pset
,
365 processor_set_t new_pset
)
367 queue_remove(&old_pset
->threads
, thread
, thread_t
, pset_threads
);
368 old_pset
->thread_count
--;
369 queue_enter(&new_pset
->threads
, thread
, thread_t
, pset_threads
);
370 thread
->processor_set
= new_pset
;
371 new_pset
->thread_count
++;
372 pset_reference_locked(new_pset
);
377 processor_info_count(
378 processor_flavor_t flavor
,
379 mach_msg_type_number_t
*count
)
384 case PROCESSOR_BASIC_INFO
:
385 *count
= PROCESSOR_BASIC_INFO_COUNT
;
387 case PROCESSOR_CPU_LOAD_INFO
:
388 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
391 kr
= cpu_info_count(flavor
, count
);
399 register processor_t processor
,
400 processor_flavor_t flavor
,
402 processor_info_t info
,
403 mach_msg_type_number_t
*count
)
405 register int i
, slot_num
, state
;
406 register processor_basic_info_t basic_info
;
407 register processor_cpu_load_info_t cpu_load_info
;
410 if (processor
== PROCESSOR_NULL
)
411 return(KERN_INVALID_ARGUMENT
);
413 slot_num
= processor
->slot_num
;
417 case PROCESSOR_BASIC_INFO
:
419 if (*count
< PROCESSOR_BASIC_INFO_COUNT
)
420 return(KERN_FAILURE
);
422 basic_info
= (processor_basic_info_t
) info
;
423 basic_info
->cpu_type
= machine_slot
[slot_num
].cpu_type
;
424 basic_info
->cpu_subtype
= machine_slot
[slot_num
].cpu_subtype
;
425 state
= processor
->state
;
426 if (state
== PROCESSOR_OFF_LINE
)
427 basic_info
->running
= FALSE
;
429 basic_info
->running
= TRUE
;
430 basic_info
->slot_num
= slot_num
;
431 if (processor
== master_processor
)
432 basic_info
->is_master
= TRUE
;
434 basic_info
->is_master
= FALSE
;
436 *count
= PROCESSOR_BASIC_INFO_COUNT
;
438 return(KERN_SUCCESS
);
440 case PROCESSOR_CPU_LOAD_INFO
:
442 if (*count
< PROCESSOR_CPU_LOAD_INFO_COUNT
)
443 return(KERN_FAILURE
);
445 cpu_load_info
= (processor_cpu_load_info_t
) info
;
446 for (i
=0;i
<CPU_STATE_MAX
;i
++)
447 cpu_load_info
->cpu_ticks
[i
] = machine_slot
[slot_num
].cpu_ticks
[i
];
449 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
451 return(KERN_SUCCESS
);
455 kr
=cpu_info(flavor
, slot_num
, info
, count
);
456 if (kr
== KERN_SUCCESS
)
465 processor_t processor
)
467 kern_return_t result
;
470 if (processor
== PROCESSOR_NULL
)
471 return(KERN_INVALID_ARGUMENT
);
473 if (processor
== master_processor
) {
476 prev
= thread_bind(current_thread(), processor
);
477 thread_block(THREAD_CONTINUE_NULL
);
479 result
= cpu_start(processor
->slot_num
);
481 thread_bind(current_thread(), prev
);
487 processor_lock(processor
);
488 if (processor
->state
!= PROCESSOR_OFF_LINE
) {
489 processor_unlock(processor
);
492 return (KERN_FAILURE
);
495 processor
->state
= PROCESSOR_START
;
496 processor_unlock(processor
);
499 if (processor
->next_thread
== THREAD_NULL
) {
501 extern void start_cpu_thread(void);
503 thread
= kernel_thread_create(start_cpu_thread
, MAXPRI_KERNEL
);
507 thread
->bound_processor
= processor
;
508 processor
->next_thread
= thread
;
509 thread
->state
= TH_RUN
;
510 pset_run_incr(thread
->processor_set
);
511 thread_unlock(thread
);
515 if (processor
->processor_self
== IP_NULL
)
516 ipc_processor_init(processor
);
518 result
= cpu_start(processor
->slot_num
);
519 if (result
!= KERN_SUCCESS
) {
521 processor_lock(processor
);
522 processor
->state
= PROCESSOR_OFF_LINE
;
523 processor_unlock(processor
);
529 ipc_processor_enable(processor
);
531 return (KERN_SUCCESS
);
536 processor_t processor
)
538 if (processor
== PROCESSOR_NULL
)
539 return(KERN_INVALID_ARGUMENT
);
541 return(processor_shutdown(processor
));
546 processor_t processor
,
547 processor_info_t info
,
548 mach_msg_type_number_t count
)
550 if (processor
== PROCESSOR_NULL
)
551 return(KERN_INVALID_ARGUMENT
);
553 return(cpu_control(processor
->slot_num
, info
, count
));
557 * Precalculate the appropriate timesharing quanta based on load. The
558 * index into quantum_factors[] is the number of threads on the
559 * processor set queue. It is limited to the number of processors in
565 processor_set_t pset
)
567 register int i
, count
= pset
->processor_count
;
569 for (i
= 1; i
<= count
; i
++)
570 pset
->quantum_factors
[i
] = (count
+ (i
/ 2)) / i
;
572 pset
->quantum_factors
[0] = pset
->quantum_factors
[1];
574 timeshare_quanta_update(pset
);
578 processor_set_create(
580 processor_set_t
*new_set
,
581 processor_set_t
*new_name
)
584 host
++; new_set
++; new_name
++;
586 return(KERN_FAILURE
);
590 processor_set_destroy(
591 processor_set_t pset
)
596 return(KERN_FAILURE
);
600 processor_get_assignment(
601 processor_t processor
,
602 processor_set_t
*pset
)
606 state
= processor
->state
;
607 if (state
== PROCESSOR_SHUTDOWN
|| state
== PROCESSOR_OFF_LINE
)
608 return(KERN_FAILURE
);
610 *pset
= processor
->processor_set
;
611 pset_reference(*pset
);
612 return(KERN_SUCCESS
);
617 processor_set_t pset
,
620 processor_set_info_t info
,
621 mach_msg_type_number_t
*count
)
623 if (pset
== PROCESSOR_SET_NULL
)
624 return(KERN_INVALID_ARGUMENT
);
626 if (flavor
== PROCESSOR_SET_BASIC_INFO
) {
627 register processor_set_basic_info_t basic_info
;
629 if (*count
< PROCESSOR_SET_BASIC_INFO_COUNT
)
630 return(KERN_FAILURE
);
632 basic_info
= (processor_set_basic_info_t
) info
;
633 basic_info
->processor_count
= pset
->processor_count
;
634 basic_info
->default_policy
= POLICY_TIMESHARE
;
636 *count
= PROCESSOR_SET_BASIC_INFO_COUNT
;
638 return(KERN_SUCCESS
);
640 else if (flavor
== PROCESSOR_SET_TIMESHARE_DEFAULT
) {
641 register policy_timeshare_base_t ts_base
;
643 if (*count
< POLICY_TIMESHARE_BASE_COUNT
)
644 return(KERN_FAILURE
);
646 ts_base
= (policy_timeshare_base_t
) info
;
647 ts_base
->base_priority
= BASEPRI_DEFAULT
;
649 *count
= POLICY_TIMESHARE_BASE_COUNT
;
651 return(KERN_SUCCESS
);
653 else if (flavor
== PROCESSOR_SET_FIFO_DEFAULT
) {
654 register policy_fifo_base_t fifo_base
;
656 if (*count
< POLICY_FIFO_BASE_COUNT
)
657 return(KERN_FAILURE
);
659 fifo_base
= (policy_fifo_base_t
) info
;
660 fifo_base
->base_priority
= BASEPRI_DEFAULT
;
662 *count
= POLICY_FIFO_BASE_COUNT
;
664 return(KERN_SUCCESS
);
666 else if (flavor
== PROCESSOR_SET_RR_DEFAULT
) {
667 register policy_rr_base_t rr_base
;
669 if (*count
< POLICY_RR_BASE_COUNT
)
670 return(KERN_FAILURE
);
672 rr_base
= (policy_rr_base_t
) info
;
673 rr_base
->base_priority
= BASEPRI_DEFAULT
;
674 rr_base
->quantum
= 1;
676 *count
= POLICY_RR_BASE_COUNT
;
678 return(KERN_SUCCESS
);
680 else if (flavor
== PROCESSOR_SET_TIMESHARE_LIMITS
) {
681 register policy_timeshare_limit_t ts_limit
;
683 if (*count
< POLICY_TIMESHARE_LIMIT_COUNT
)
684 return(KERN_FAILURE
);
686 ts_limit
= (policy_timeshare_limit_t
) info
;
687 ts_limit
->max_priority
= MAXPRI_STANDARD
;
689 *count
= POLICY_TIMESHARE_LIMIT_COUNT
;
691 return(KERN_SUCCESS
);
693 else if (flavor
== PROCESSOR_SET_FIFO_LIMITS
) {
694 register policy_fifo_limit_t fifo_limit
;
696 if (*count
< POLICY_FIFO_LIMIT_COUNT
)
697 return(KERN_FAILURE
);
699 fifo_limit
= (policy_fifo_limit_t
) info
;
700 fifo_limit
->max_priority
= MAXPRI_STANDARD
;
702 *count
= POLICY_FIFO_LIMIT_COUNT
;
704 return(KERN_SUCCESS
);
706 else if (flavor
== PROCESSOR_SET_RR_LIMITS
) {
707 register policy_rr_limit_t rr_limit
;
709 if (*count
< POLICY_RR_LIMIT_COUNT
)
710 return(KERN_FAILURE
);
712 rr_limit
= (policy_rr_limit_t
) info
;
713 rr_limit
->max_priority
= MAXPRI_STANDARD
;
715 *count
= POLICY_RR_LIMIT_COUNT
;
717 return(KERN_SUCCESS
);
719 else if (flavor
== PROCESSOR_SET_ENABLED_POLICIES
) {
720 register int *enabled
;
722 if (*count
< (sizeof(*enabled
)/sizeof(int)))
723 return(KERN_FAILURE
);
725 enabled
= (int *) info
;
726 *enabled
= POLICY_TIMESHARE
| POLICY_RR
| POLICY_FIFO
;
728 *count
= sizeof(*enabled
)/sizeof(int);
730 return(KERN_SUCCESS
);
735 return(KERN_INVALID_ARGUMENT
);
739 * processor_set_statistics
741 * Returns scheduling statistics for a processor set.
744 processor_set_statistics(
745 processor_set_t pset
,
747 processor_set_info_t info
,
748 mach_msg_type_number_t
*count
)
750 if (pset
== PROCESSOR_SET_NULL
)
751 return (KERN_INVALID_PROCESSOR_SET
);
753 if (flavor
== PROCESSOR_SET_LOAD_INFO
) {
754 register processor_set_load_info_t load_info
;
756 if (*count
< PROCESSOR_SET_LOAD_INFO_COUNT
)
757 return(KERN_FAILURE
);
759 load_info
= (processor_set_load_info_t
) info
;
762 load_info
->task_count
= pset
->task_count
;
763 load_info
->thread_count
= pset
->thread_count
;
764 load_info
->mach_factor
= pset
->mach_factor
;
765 load_info
->load_average
= pset
->load_average
;
768 *count
= PROCESSOR_SET_LOAD_INFO_COUNT
;
769 return(KERN_SUCCESS
);
772 return(KERN_INVALID_ARGUMENT
);
776 * processor_set_max_priority:
778 * Specify max priority permitted on processor set. This affects
779 * newly created and assigned threads. Optionally change existing
783 processor_set_max_priority(
784 processor_set_t pset
,
786 boolean_t change_threads
)
788 return (KERN_INVALID_ARGUMENT
);
792 * processor_set_policy_enable:
794 * Allow indicated policy on processor set.
798 processor_set_policy_enable(
799 processor_set_t pset
,
802 return (KERN_INVALID_ARGUMENT
);
806 * processor_set_policy_disable:
808 * Forbid indicated policy on processor set. Time sharing cannot
812 processor_set_policy_disable(
813 processor_set_t pset
,
815 boolean_t change_threads
)
817 return (KERN_INVALID_ARGUMENT
);
821 #define THING_THREAD 1
824 * processor_set_things:
826 * Common internals for processor_set_{threads,tasks}
829 processor_set_things(
830 processor_set_t pset
,
831 mach_port_t
**thing_list
,
832 mach_msg_type_number_t
*count
,
835 unsigned int actual
; /* this many things */
838 vm_size_t size
, size_needed
;
841 if (pset
== PROCESSOR_SET_NULL
)
842 return KERN_INVALID_ARGUMENT
;
853 if (type
== THING_TASK
)
854 actual
= pset
->task_count
;
856 actual
= pset
->thread_count
;
858 /* do we have the memory we need? */
860 size_needed
= actual
* sizeof(mach_port_t
);
861 if (size_needed
<= size
)
864 /* unlock the pset and allocate more memory */
870 assert(size_needed
> 0);
875 return KERN_RESOURCE_SHORTAGE
;
878 /* OK, have memory and the processor_set is locked & active */
882 task_t
*tasks
= (task_t
*) addr
;
885 for (i
= 0, task
= (task_t
) queue_first(&pset
->tasks
);
886 !queue_end(&pset
->tasks
, (queue_entry_t
) task
);
887 task
= (task_t
) queue_next(&task
->pset_tasks
)) {
890 if (task
->ref_count
> 0) {
891 /* take ref for convert_task_to_port */
892 task_reference_locked(task
);
901 thread_act_t
*thr_acts
= (thread_act_t
*) addr
;
903 thread_act_t thr_act
;
905 for (i
= 0, thread
= (thread_t
) queue_first(&pset
->threads
);
906 !queue_end(&pset
->threads
, (queue_entry_t
)thread
);
907 thread
= (thread_t
) queue_next(&thread
->pset_threads
)) {
909 thr_act
= thread_lock_act(thread
);
910 if (thr_act
&& thr_act
->act_ref_count
> 0) {
911 /* take ref for convert_act_to_port */
912 act_reference_locked(thr_act
);
913 thr_acts
[i
++] = thr_act
;
915 thread_unlock_act(thread
);
921 /* can unlock processor set now that we have the task/thread refs */
926 size_needed
= actual
* sizeof(mach_port_t
);
931 /* no things, so return null pointer and deallocate memory */
938 /* if we allocated too much, must copy */
940 if (size_needed
< size
) {
943 newaddr
= kalloc(size_needed
);
947 task_t
*tasks
= (task_t
*) addr
;
949 for (i
= 0; i
< actual
; i
++)
950 task_deallocate(tasks
[i
]);
955 thread_act_t
*acts
= (thread_act_t
*) addr
;
957 for (i
= 0; i
< actual
; i
++)
958 act_deallocate(acts
[i
]);
963 return KERN_RESOURCE_SHORTAGE
;
966 bcopy((char *) addr
, (char *) newaddr
, size_needed
);
971 *thing_list
= (mach_port_t
*) addr
;
974 /* do the conversion that Mig should handle */
978 task_t
*tasks
= (task_t
*) addr
;
980 for (i
= 0; i
< actual
; i
++)
981 (*thing_list
)[i
] = convert_task_to_port(tasks
[i
]);
986 thread_act_t
*thr_acts
= (thread_act_t
*) addr
;
988 for (i
= 0; i
< actual
; i
++)
989 (*thing_list
)[i
] = convert_act_to_port(thr_acts
[i
]);
995 return(KERN_SUCCESS
);
1000 * processor_set_tasks:
1002 * List all tasks in the processor set.
1005 processor_set_tasks(
1006 processor_set_t pset
,
1007 task_array_t
*task_list
,
1008 mach_msg_type_number_t
*count
)
1010 return(processor_set_things(pset
, (mach_port_t
**)task_list
, count
, THING_TASK
));
1014 * processor_set_threads:
1016 * List all threads in the processor set.
1019 processor_set_threads(
1020 processor_set_t pset
,
1021 thread_array_t
*thread_list
,
1022 mach_msg_type_number_t
*count
)
1024 return(processor_set_things(pset
, (mach_port_t
**)thread_list
, count
, THING_THREAD
));
1028 * processor_set_base:
1030 * Specify per-policy base priority for a processor set. Set processor
1031 * set default policy to the given policy. This affects newly created
1032 * and assigned threads. Optionally change existing ones.
1036 processor_set_t pset
,
1041 return (KERN_INVALID_ARGUMENT
);
1045 * processor_set_limit:
1047 * Specify per-policy limits for a processor set. This affects
1048 * newly created and assigned threads. Optionally change existing
1052 processor_set_limit(
1053 processor_set_t pset
,
1055 policy_limit_t limit
,
1058 return (KERN_POLICY_LIMIT
);
1062 * processor_set_policy_control
1064 * Controls the scheduling attributes governing the processor set.
1065 * Allows control of enabled policies, and per-policy base and limit
1069 processor_set_policy_control(
1070 processor_set_t pset
,
1072 processor_set_info_t policy_info
,
1073 mach_msg_type_number_t count
,
1076 return (KERN_INVALID_ARGUMENT
);