2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
54 * processor.c: processor and processor_set manipulation routines.
59 #include <mach/boolean.h>
60 #include <mach/policy.h>
61 #include <mach/processor_info.h>
62 #include <mach/vm_param.h>
63 #include <kern/cpu_number.h>
64 #include <kern/host.h>
65 #include <kern/machine.h>
66 #include <kern/misc_protos.h>
67 #include <kern/processor.h>
68 #include <kern/sched.h>
69 #include <kern/task.h>
70 #include <kern/thread.h>
71 #include <kern/ipc_host.h>
72 #include <kern/ipc_tt.h>
73 #include <ipc/ipc_port.h>
74 #include <kern/kalloc.h>
79 #include <mach/mach_host_server.h>
84 struct processor_set default_pset
;
85 struct processor processor_array
[NCPUS
];
89 processor_t master_processor
;
90 processor_t processor_ptr
[NCPUS
];
94 processor_set_t pset
);
97 register processor_t pr
,
100 void pset_quanta_set(
101 processor_set_t pset
);
103 kern_return_t
processor_set_base(
104 processor_set_t pset
,
109 kern_return_t
processor_set_limit(
110 processor_set_t pset
,
112 policy_limit_t limit
,
115 kern_return_t
processor_set_things(
116 processor_set_t pset
,
117 mach_port_t
**thing_list
,
118 mach_msg_type_number_t
*count
,
123 * Bootstrap the processor/pset system so the scheduler can run.
126 pset_sys_bootstrap(void)
130 pset_init(&default_pset
);
131 for (i
= 0; i
< NCPUS
; i
++) {
133 * Initialize processor data structures.
134 * Note that cpu_to_processor(i) is processor_ptr[i].
136 processor_ptr
[i
] = &processor_array
[i
];
137 processor_init(processor_ptr
[i
], i
);
139 master_processor
= cpu_to_processor(master_cpu
);
140 master_processor
->cpu_data
= get_cpu_data();
141 default_pset
.active
= TRUE
;
145 * Initialize the given processor_set structure.
149 register processor_set_t pset
)
153 /* setup run queue */
154 simple_lock_init(&pset
->runq
.lock
, ETAP_THREAD_PSET_RUNQ
);
155 for (i
= 0; i
< NRQBM
; i
++)
156 pset
->runq
.bitmap
[i
] = 0;
157 setbit(MAXPRI
- IDLEPRI
, pset
->runq
.bitmap
);
158 pset
->runq
.highq
= IDLEPRI
;
159 pset
->runq
.urgency
= pset
->runq
.count
= 0;
160 for (i
= 0; i
< NRQS
; i
++)
161 queue_init(&pset
->runq
.queues
[i
]);
163 queue_init(&pset
->idle_queue
);
164 pset
->idle_count
= 0;
165 queue_init(&pset
->active_queue
);
166 simple_lock_init(&pset
->sched_lock
, ETAP_THREAD_PSET_IDLE
);
168 pset
->mach_factor
= pset
->load_average
= 0;
169 pset
->sched_load
= 0;
170 queue_init(&pset
->processors
);
171 pset
->processor_count
= 0;
172 simple_lock_init(&pset
->processors_lock
, ETAP_THREAD_PSET
);
173 queue_init(&pset
->tasks
);
174 pset
->task_count
= 0;
175 queue_init(&pset
->threads
);
176 pset
->thread_count
= 0;
178 pset
->active
= FALSE
;
179 mutex_init(&pset
->lock
, ETAP_THREAD_PSET
);
180 pset
->pset_self
= IP_NULL
;
181 pset
->pset_name_self
= IP_NULL
;
182 pset
->set_quanta
= 1;
184 for (i
= 0; i
<= NCPUS
; i
++)
185 pset
->machine_quanta
[i
] = 1;
189 * Initialize the given processor structure for the processor in
190 * the slot specified by slot_num.
194 register processor_t p
,
199 /* setup run queue */
200 simple_lock_init(&p
->runq
.lock
, ETAP_THREAD_PROC_RUNQ
);
201 for (i
= 0; i
< NRQBM
; i
++)
202 p
->runq
.bitmap
[i
] = 0;
203 setbit(MAXPRI
- IDLEPRI
, p
->runq
.bitmap
);
204 p
->runq
.highq
= IDLEPRI
;
205 p
->runq
.urgency
= p
->runq
.count
= 0;
206 for (i
= 0; i
< NRQS
; i
++)
207 queue_init(&p
->runq
.queues
[i
]);
209 p
->state
= PROCESSOR_OFF_LINE
;
210 p
->current_pri
= MINPRI
;
211 p
->next_thread
= THREAD_NULL
;
212 p
->idle_thread
= THREAD_NULL
;
213 timer_call_setup(&p
->quantum_timer
, thread_quantum_expire
, p
);
215 p
->processor_set
= PROCESSOR_SET_NULL
;
216 p
->processor_set_next
= PROCESSOR_SET_NULL
;
217 simple_lock_init(&p
->lock
, ETAP_THREAD_PROC
);
218 p
->processor_self
= IP_NULL
;
219 p
->slot_num
= slot_num
;
225 * Remove one reference to the processor set. Destroy processor_set
226 * if this was the last reference.
230 processor_set_t pset
)
232 if (pset
== PROCESSOR_SET_NULL
)
235 assert(pset
== &default_pset
);
242 * Add one reference to the processor set.
246 processor_set_t pset
)
248 assert(pset
== &default_pset
);
251 #define pset_reference_locked(pset) assert(pset == &default_pset)
254 * pset_remove_processor() removes a processor from a processor_set.
255 * It can only be called on the current processor. Caller must
256 * hold lock on current processor and processor set.
259 pset_remove_processor(
260 processor_set_t pset
,
261 processor_t processor
)
263 if (pset
!= processor
->processor_set
)
264 panic("pset_remove_processor: wrong pset");
266 queue_remove(&pset
->processors
, processor
, processor_t
, processors
);
267 processor
->processor_set
= PROCESSOR_SET_NULL
;
268 pset
->processor_count
--;
269 pset_quanta_set(pset
);
273 * pset_add_processor() adds a processor to a processor_set.
274 * It can only be called on the current processor. Caller must
275 * hold lock on curent processor and on pset. No reference counting on
276 * processors. Processor reference to pset is implicit.
280 processor_set_t pset
,
281 processor_t processor
)
283 queue_enter(&pset
->processors
, processor
, processor_t
, processors
);
284 processor
->processor_set
= pset
;
285 pset
->processor_count
++;
286 pset_quanta_set(pset
);
290 * pset_remove_task() removes a task from a processor_set.
291 * Caller must hold locks on pset and task (unless task has
292 * no references left, in which case just the pset lock is
293 * needed). Pset reference count is not decremented;
294 * caller must explicitly pset_deallocate.
298 processor_set_t pset
,
301 if (pset
!= task
->processor_set
)
304 queue_remove(&pset
->tasks
, task
, task_t
, pset_tasks
);
305 task
->processor_set
= PROCESSOR_SET_NULL
;
310 * pset_add_task() adds a task to a processor_set.
311 * Caller must hold locks on pset and task. Pset references to
312 * tasks are implicit.
316 processor_set_t pset
,
319 queue_enter(&pset
->tasks
, task
, task_t
, pset_tasks
);
320 task
->processor_set
= pset
;
322 pset_reference_locked(pset
);
326 * pset_remove_thread() removes a thread from a processor_set.
327 * Caller must hold locks on pset and thread (but only if thread
328 * has outstanding references that could be used to lookup the pset).
329 * The pset reference count is not decremented; caller must explicitly
334 processor_set_t pset
,
337 queue_remove(&pset
->threads
, thread
, thread_t
, pset_threads
);
338 thread
->processor_set
= PROCESSOR_SET_NULL
;
339 pset
->thread_count
--;
343 * pset_add_thread() adds a thread to a processor_set.
344 * Caller must hold locks on pset and thread. Pset references to
345 * threads are implicit.
349 processor_set_t pset
,
352 queue_enter(&pset
->threads
, thread
, thread_t
, pset_threads
);
353 thread
->processor_set
= pset
;
354 pset
->thread_count
++;
355 pset_reference_locked(pset
);
359 * thread_change_psets() changes the pset of a thread. Caller must
360 * hold locks on both psets and thread. The old pset must be
361 * explicitly pset_deallocat()'ed by caller.
366 processor_set_t old_pset
,
367 processor_set_t new_pset
)
369 queue_remove(&old_pset
->threads
, thread
, thread_t
, pset_threads
);
370 old_pset
->thread_count
--;
371 queue_enter(&new_pset
->threads
, thread
, thread_t
, pset_threads
);
372 thread
->processor_set
= new_pset
;
373 new_pset
->thread_count
++;
374 pset_reference_locked(new_pset
);
379 processor_info_count(
380 processor_flavor_t flavor
,
381 mach_msg_type_number_t
*count
)
386 case PROCESSOR_BASIC_INFO
:
387 *count
= PROCESSOR_BASIC_INFO_COUNT
;
389 case PROCESSOR_CPU_LOAD_INFO
:
390 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
393 kr
= cpu_info_count(flavor
, count
);
401 register processor_t processor
,
402 processor_flavor_t flavor
,
404 processor_info_t info
,
405 mach_msg_type_number_t
*count
)
407 register int i
, slot_num
, state
;
408 register processor_basic_info_t basic_info
;
409 register processor_cpu_load_info_t cpu_load_info
;
412 if (processor
== PROCESSOR_NULL
)
413 return(KERN_INVALID_ARGUMENT
);
415 slot_num
= processor
->slot_num
;
419 case PROCESSOR_BASIC_INFO
:
421 if (*count
< PROCESSOR_BASIC_INFO_COUNT
)
422 return(KERN_FAILURE
);
424 basic_info
= (processor_basic_info_t
) info
;
425 basic_info
->cpu_type
= machine_slot
[slot_num
].cpu_type
;
426 basic_info
->cpu_subtype
= machine_slot
[slot_num
].cpu_subtype
;
427 state
= processor
->state
;
428 if (state
== PROCESSOR_OFF_LINE
)
429 basic_info
->running
= FALSE
;
431 basic_info
->running
= TRUE
;
432 basic_info
->slot_num
= slot_num
;
433 if (processor
== master_processor
)
434 basic_info
->is_master
= TRUE
;
436 basic_info
->is_master
= FALSE
;
438 *count
= PROCESSOR_BASIC_INFO_COUNT
;
440 return(KERN_SUCCESS
);
442 case PROCESSOR_CPU_LOAD_INFO
:
444 if (*count
< PROCESSOR_CPU_LOAD_INFO_COUNT
)
445 return(KERN_FAILURE
);
447 cpu_load_info
= (processor_cpu_load_info_t
) info
;
448 for (i
=0;i
<CPU_STATE_MAX
;i
++)
449 cpu_load_info
->cpu_ticks
[i
] = machine_slot
[slot_num
].cpu_ticks
[i
];
451 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
453 return(KERN_SUCCESS
);
457 kr
=cpu_info(flavor
, slot_num
, info
, count
);
458 if (kr
== KERN_SUCCESS
)
467 processor_t processor
)
473 if (processor
== PROCESSOR_NULL
)
474 return(KERN_INVALID_ARGUMENT
);
476 if (processor
== master_processor
) {
477 thread_bind(current_thread(), processor
);
478 thread_block(THREAD_CONTINUE_NULL
);
479 kr
= cpu_start(processor
->slot_num
);
480 thread_bind(current_thread(), PROCESSOR_NULL
);
486 processor_lock(processor
);
488 state
= processor
->state
;
489 if (state
!= PROCESSOR_OFF_LINE
) {
490 processor_unlock(processor
);
492 return(KERN_FAILURE
);
494 processor
->state
= PROCESSOR_START
;
495 processor_unlock(processor
);
498 if (processor
->next_thread
== THREAD_NULL
) {
500 extern void start_cpu_thread(void);
502 thread
= kernel_thread_with_priority(
503 kernel_task
, MAXPRI_KERNEL
,
504 start_cpu_thread
, TRUE
, FALSE
);
508 thread_bind_locked(thread
, processor
);
509 thread_go_locked(thread
, THREAD_AWAKENED
);
510 (void)rem_runq(thread
);
511 processor
->next_thread
= thread
;
512 thread_unlock(thread
);
516 kr
= cpu_start(processor
->slot_num
);
518 if (kr
!= KERN_SUCCESS
) {
520 processor_lock(processor
);
521 processor
->state
= PROCESSOR_OFF_LINE
;
522 processor_unlock(processor
);
531 processor_t processor
)
533 if (processor
== PROCESSOR_NULL
)
534 return(KERN_INVALID_ARGUMENT
);
536 return(processor_shutdown(processor
));
541 processor_t processor
,
542 processor_info_t info
,
543 mach_msg_type_number_t count
)
545 if (processor
== PROCESSOR_NULL
)
546 return(KERN_INVALID_ARGUMENT
);
548 return(cpu_control(processor
->slot_num
, info
, count
));
552 * Precalculate the appropriate timesharing quanta based on load. The
553 * index into machine_quanta is the number of threads on the
554 * processor set queue. It is limited to the number of processors in
560 processor_set_t pset
)
562 register int i
, count
= pset
->processor_count
;
564 for (i
= 1; i
<= count
; i
++)
565 pset
->machine_quanta
[i
] = (count
+ (i
/ 2)) / i
;
567 pset
->machine_quanta
[0] = pset
->machine_quanta
[1];
569 pset_quanta_update(pset
);
573 processor_set_create(
575 processor_set_t
*new_set
,
576 processor_set_t
*new_name
)
579 host
++; new_set
++; new_name
++;
581 return(KERN_FAILURE
);
585 processor_set_destroy(
586 processor_set_t pset
)
591 return(KERN_FAILURE
);
595 processor_get_assignment(
596 processor_t processor
,
597 processor_set_t
*pset
)
601 state
= processor
->state
;
602 if (state
== PROCESSOR_SHUTDOWN
|| state
== PROCESSOR_OFF_LINE
)
603 return(KERN_FAILURE
);
605 *pset
= processor
->processor_set
;
606 pset_reference(*pset
);
607 return(KERN_SUCCESS
);
612 processor_set_t pset
,
615 processor_set_info_t info
,
616 mach_msg_type_number_t
*count
)
618 if (pset
== PROCESSOR_SET_NULL
)
619 return(KERN_INVALID_ARGUMENT
);
621 if (flavor
== PROCESSOR_SET_BASIC_INFO
) {
622 register processor_set_basic_info_t basic_info
;
624 if (*count
< PROCESSOR_SET_BASIC_INFO_COUNT
)
625 return(KERN_FAILURE
);
627 basic_info
= (processor_set_basic_info_t
) info
;
628 basic_info
->processor_count
= pset
->processor_count
;
629 basic_info
->default_policy
= POLICY_TIMESHARE
;
631 *count
= PROCESSOR_SET_BASIC_INFO_COUNT
;
633 return(KERN_SUCCESS
);
635 else if (flavor
== PROCESSOR_SET_TIMESHARE_DEFAULT
) {
636 register policy_timeshare_base_t ts_base
;
638 if (*count
< POLICY_TIMESHARE_BASE_COUNT
)
639 return(KERN_FAILURE
);
641 ts_base
= (policy_timeshare_base_t
) info
;
642 ts_base
->base_priority
= BASEPRI_DEFAULT
;
644 *count
= POLICY_TIMESHARE_BASE_COUNT
;
646 return(KERN_SUCCESS
);
648 else if (flavor
== PROCESSOR_SET_FIFO_DEFAULT
) {
649 register policy_fifo_base_t fifo_base
;
651 if (*count
< POLICY_FIFO_BASE_COUNT
)
652 return(KERN_FAILURE
);
654 fifo_base
= (policy_fifo_base_t
) info
;
655 fifo_base
->base_priority
= BASEPRI_DEFAULT
;
657 *count
= POLICY_FIFO_BASE_COUNT
;
659 return(KERN_SUCCESS
);
661 else if (flavor
== PROCESSOR_SET_RR_DEFAULT
) {
662 register policy_rr_base_t rr_base
;
664 if (*count
< POLICY_RR_BASE_COUNT
)
665 return(KERN_FAILURE
);
667 rr_base
= (policy_rr_base_t
) info
;
668 rr_base
->base_priority
= BASEPRI_DEFAULT
;
669 rr_base
->quantum
= 1;
671 *count
= POLICY_RR_BASE_COUNT
;
673 return(KERN_SUCCESS
);
675 else if (flavor
== PROCESSOR_SET_TIMESHARE_LIMITS
) {
676 register policy_timeshare_limit_t ts_limit
;
678 if (*count
< POLICY_TIMESHARE_LIMIT_COUNT
)
679 return(KERN_FAILURE
);
681 ts_limit
= (policy_timeshare_limit_t
) info
;
682 ts_limit
->max_priority
= MAXPRI_STANDARD
;
684 *count
= POLICY_TIMESHARE_LIMIT_COUNT
;
686 return(KERN_SUCCESS
);
688 else if (flavor
== PROCESSOR_SET_FIFO_LIMITS
) {
689 register policy_fifo_limit_t fifo_limit
;
691 if (*count
< POLICY_FIFO_LIMIT_COUNT
)
692 return(KERN_FAILURE
);
694 fifo_limit
= (policy_fifo_limit_t
) info
;
695 fifo_limit
->max_priority
= MAXPRI_STANDARD
;
697 *count
= POLICY_FIFO_LIMIT_COUNT
;
699 return(KERN_SUCCESS
);
701 else if (flavor
== PROCESSOR_SET_RR_LIMITS
) {
702 register policy_rr_limit_t rr_limit
;
704 if (*count
< POLICY_RR_LIMIT_COUNT
)
705 return(KERN_FAILURE
);
707 rr_limit
= (policy_rr_limit_t
) info
;
708 rr_limit
->max_priority
= MAXPRI_STANDARD
;
710 *count
= POLICY_RR_LIMIT_COUNT
;
712 return(KERN_SUCCESS
);
714 else if (flavor
== PROCESSOR_SET_ENABLED_POLICIES
) {
715 register int *enabled
;
717 if (*count
< (sizeof(*enabled
)/sizeof(int)))
718 return(KERN_FAILURE
);
720 enabled
= (int *) info
;
721 *enabled
= POLICY_TIMESHARE
| POLICY_RR
| POLICY_FIFO
;
723 *count
= sizeof(*enabled
)/sizeof(int);
725 return(KERN_SUCCESS
);
730 return(KERN_INVALID_ARGUMENT
);
734 * processor_set_statistics
736 * Returns scheduling statistics for a processor set.
739 processor_set_statistics(
740 processor_set_t pset
,
742 processor_set_info_t info
,
743 mach_msg_type_number_t
*count
)
745 if (pset
== PROCESSOR_SET_NULL
)
746 return (KERN_INVALID_PROCESSOR_SET
);
748 if (flavor
== PROCESSOR_SET_LOAD_INFO
) {
749 register processor_set_load_info_t load_info
;
751 if (*count
< PROCESSOR_SET_LOAD_INFO_COUNT
)
752 return(KERN_FAILURE
);
754 load_info
= (processor_set_load_info_t
) info
;
757 load_info
->task_count
= pset
->task_count
;
758 load_info
->thread_count
= pset
->thread_count
;
759 load_info
->mach_factor
= pset
->mach_factor
;
760 load_info
->load_average
= pset
->load_average
;
763 *count
= PROCESSOR_SET_LOAD_INFO_COUNT
;
764 return(KERN_SUCCESS
);
767 return(KERN_INVALID_ARGUMENT
);
771 * processor_set_max_priority:
773 * Specify max priority permitted on processor set. This affects
774 * newly created and assigned threads. Optionally change existing
778 processor_set_max_priority(
779 processor_set_t pset
,
781 boolean_t change_threads
)
783 return (KERN_INVALID_ARGUMENT
);
787 * processor_set_policy_enable:
789 * Allow indicated policy on processor set.
793 processor_set_policy_enable(
794 processor_set_t pset
,
797 return (KERN_INVALID_ARGUMENT
);
801 * processor_set_policy_disable:
803 * Forbid indicated policy on processor set. Time sharing cannot
807 processor_set_policy_disable(
808 processor_set_t pset
,
810 boolean_t change_threads
)
812 return (KERN_INVALID_ARGUMENT
);
/* Tag values for processor_set_things(): which queue to enumerate.
 * THING_TASK restored — it is referenced below but its #define was
 * missing from this text. */
#define	THING_TASK	0
#define	THING_THREAD	1
819 * processor_set_things:
821 * Common internals for processor_set_{threads,tasks}
824 processor_set_things(
825 processor_set_t pset
,
826 mach_port_t
**thing_list
,
827 mach_msg_type_number_t
*count
,
830 unsigned int actual
; /* this many things */
833 vm_size_t size
, size_needed
;
836 if (pset
== PROCESSOR_SET_NULL
)
837 return KERN_INVALID_ARGUMENT
;
848 if (type
== THING_TASK
)
849 actual
= pset
->task_count
;
851 actual
= pset
->thread_count
;
853 /* do we have the memory we need? */
855 size_needed
= actual
* sizeof(mach_port_t
);
856 if (size_needed
<= size
)
859 /* unlock the pset and allocate more memory */
865 assert(size_needed
> 0);
870 return KERN_RESOURCE_SHORTAGE
;
873 /* OK, have memory and the processor_set is locked & active */
877 task_t
*tasks
= (task_t
*) addr
;
880 for (i
= 0, task
= (task_t
) queue_first(&pset
->tasks
);
881 !queue_end(&pset
->tasks
, (queue_entry_t
) task
);
882 task
= (task_t
) queue_next(&task
->pset_tasks
)) {
885 if (task
->ref_count
> 0) {
886 /* take ref for convert_task_to_port */
887 task_reference_locked(task
);
896 thread_act_t
*thr_acts
= (thread_act_t
*) addr
;
898 thread_act_t thr_act
;
900 for (i
= 0, thread
= (thread_t
) queue_first(&pset
->threads
);
901 !queue_end(&pset
->threads
, (queue_entry_t
)thread
);
902 thread
= (thread_t
) queue_next(&thread
->pset_threads
)) {
904 thr_act
= thread_lock_act(thread
);
905 if (thr_act
&& thr_act
->ref_count
> 0) {
906 /* take ref for convert_act_to_port */
907 act_locked_act_reference(thr_act
);
908 thr_acts
[i
++] = thr_act
;
910 thread_unlock_act(thread
);
916 /* can unlock processor set now that we have the task/thread refs */
921 size_needed
= actual
* sizeof(mach_port_t
);
926 /* no things, so return null pointer and deallocate memory */
933 /* if we allocated too much, must copy */
935 if (size_needed
< size
) {
938 newaddr
= kalloc(size_needed
);
942 task_t
*tasks
= (task_t
*) addr
;
944 for (i
= 0; i
< actual
; i
++)
945 task_deallocate(tasks
[i
]);
950 thread_act_t
*acts
= (thread_act_t
*) addr
;
952 for (i
= 0; i
< actual
; i
++)
953 act_deallocate(acts
[i
]);
958 return KERN_RESOURCE_SHORTAGE
;
961 bcopy((char *) addr
, (char *) newaddr
, size_needed
);
966 *thing_list
= (mach_port_t
*) addr
;
969 /* do the conversion that Mig should handle */
973 task_t
*tasks
= (task_t
*) addr
;
975 for (i
= 0; i
< actual
; i
++)
976 (*thing_list
)[i
] = convert_task_to_port(tasks
[i
]);
981 thread_act_t
*thr_acts
= (thread_act_t
*) addr
;
983 for (i
= 0; i
< actual
; i
++)
984 (*thing_list
)[i
] = convert_act_to_port(thr_acts
[i
]);
990 return(KERN_SUCCESS
);
995 * processor_set_tasks:
997 * List all tasks in the processor set.
1000 processor_set_tasks(
1001 processor_set_t pset
,
1002 task_array_t
*task_list
,
1003 mach_msg_type_number_t
*count
)
1005 return(processor_set_things(pset
, (mach_port_t
**)task_list
, count
, THING_TASK
));
1009 * processor_set_threads:
1011 * List all threads in the processor set.
1014 processor_set_threads(
1015 processor_set_t pset
,
1016 thread_array_t
*thread_list
,
1017 mach_msg_type_number_t
*count
)
1019 return(processor_set_things(pset
, (mach_port_t
**)thread_list
, count
, THING_THREAD
));
1023 * processor_set_base:
1025 * Specify per-policy base priority for a processor set. Set processor
1026 * set default policy to the given policy. This affects newly created
1027 * and assigned threads. Optionally change existing ones.
1031 processor_set_t pset
,
1036 return (KERN_INVALID_ARGUMENT
);
1040 * processor_set_limit:
1042 * Specify per-policy limits for a processor set. This affects
1043 * newly created and assigned threads. Optionally change existing
1047 processor_set_limit(
1048 processor_set_t pset
,
1050 policy_limit_t limit
,
1053 return (KERN_POLICY_LIMIT
);
1057 * processor_set_policy_control
1059 * Controls the scheduling attributes governing the processor set.
1060 * Allows control of enabled policies, and per-policy base and limit
1064 processor_set_policy_control(
1065 processor_set_t pset
,
1067 processor_set_info_t policy_info
,
1068 mach_msg_type_number_t count
,
1071 return (KERN_INVALID_ARGUMENT
);