/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	processor.c: processor and processor_set manipulation routines.
 */
62 #include <mach/boolean.h>
63 #include <mach/policy.h>
64 #include <mach/processor_info.h>
65 #include <mach/vm_param.h>
66 #include <kern/cpu_number.h>
67 #include <kern/host.h>
68 #include <kern/machine.h>
69 #include <kern/misc_protos.h>
70 #include <kern/processor.h>
71 #include <kern/sched.h>
72 #include <kern/task.h>
73 #include <kern/thread.h>
74 #include <kern/ipc_host.h>
75 #include <kern/ipc_tt.h>
76 #include <ipc/ipc_port.h>
77 #include <kern/kalloc.h>
82 #include <mach/mach_host_server.h>
87 struct processor_set default_pset
;
88 struct processor processor_array
[NCPUS
];
92 processor_t master_processor
;
93 processor_t processor_ptr
[NCPUS
];
97 processor_set_t pset
);
100 register processor_t pr
,
103 void pset_quanta_set(
104 processor_set_t pset
);
106 kern_return_t
processor_set_base(
107 processor_set_t pset
,
112 kern_return_t
processor_set_limit(
113 processor_set_t pset
,
115 policy_limit_t limit
,
118 kern_return_t
processor_set_things(
119 processor_set_t pset
,
120 mach_port_t
**thing_list
,
121 mach_msg_type_number_t
*count
,
126 * Bootstrap the processor/pset system so the scheduler can run.
129 pset_sys_bootstrap(void)
133 pset_init(&default_pset
);
134 for (i
= 0; i
< NCPUS
; i
++) {
136 * Initialize processor data structures.
137 * Note that cpu_to_processor(i) is processor_ptr[i].
139 processor_ptr
[i
] = &processor_array
[i
];
140 processor_init(processor_ptr
[i
], i
);
142 master_processor
= cpu_to_processor(master_cpu
);
143 master_processor
->cpu_data
= get_cpu_data();
144 default_pset
.active
= TRUE
;
148 * Initialize the given processor_set structure.
152 register processor_set_t pset
)
156 /* setup run queue */
157 simple_lock_init(&pset
->runq
.lock
, ETAP_THREAD_PSET_RUNQ
);
158 for (i
= 0; i
< NRQBM
; i
++)
159 pset
->runq
.bitmap
[i
] = 0;
160 setbit(MAXPRI
- IDLEPRI
, pset
->runq
.bitmap
);
161 pset
->runq
.highq
= IDLEPRI
;
162 pset
->runq
.urgency
= pset
->runq
.count
= 0;
163 for (i
= 0; i
< NRQS
; i
++)
164 queue_init(&pset
->runq
.queues
[i
]);
166 queue_init(&pset
->idle_queue
);
167 pset
->idle_count
= 0;
168 queue_init(&pset
->active_queue
);
169 simple_lock_init(&pset
->sched_lock
, ETAP_THREAD_PSET_IDLE
);
171 pset
->mach_factor
= pset
->load_average
= 0;
172 pset
->sched_load
= 0;
173 queue_init(&pset
->processors
);
174 pset
->processor_count
= 0;
175 simple_lock_init(&pset
->processors_lock
, ETAP_THREAD_PSET
);
176 queue_init(&pset
->tasks
);
177 pset
->task_count
= 0;
178 queue_init(&pset
->threads
);
179 pset
->thread_count
= 0;
181 pset
->active
= FALSE
;
182 mutex_init(&pset
->lock
, ETAP_THREAD_PSET
);
183 pset
->pset_self
= IP_NULL
;
184 pset
->pset_name_self
= IP_NULL
;
185 pset
->set_quanta
= 1;
187 for (i
= 0; i
<= NCPUS
; i
++)
188 pset
->machine_quanta
[i
] = 1;
192 * Initialize the given processor structure for the processor in
193 * the slot specified by slot_num.
197 register processor_t p
,
202 /* setup run queue */
203 simple_lock_init(&p
->runq
.lock
, ETAP_THREAD_PROC_RUNQ
);
204 for (i
= 0; i
< NRQBM
; i
++)
205 p
->runq
.bitmap
[i
] = 0;
206 setbit(MAXPRI
- IDLEPRI
, p
->runq
.bitmap
);
207 p
->runq
.highq
= IDLEPRI
;
208 p
->runq
.urgency
= p
->runq
.count
= 0;
209 for (i
= 0; i
< NRQS
; i
++)
210 queue_init(&p
->runq
.queues
[i
]);
212 p
->state
= PROCESSOR_OFF_LINE
;
213 p
->current_pri
= MINPRI
;
214 p
->next_thread
= THREAD_NULL
;
215 p
->idle_thread
= THREAD_NULL
;
216 timer_call_setup(&p
->quantum_timer
, thread_quantum_expire
, p
);
218 p
->processor_set
= PROCESSOR_SET_NULL
;
219 p
->processor_set_next
= PROCESSOR_SET_NULL
;
220 simple_lock_init(&p
->lock
, ETAP_THREAD_PROC
);
221 p
->processor_self
= IP_NULL
;
222 p
->slot_num
= slot_num
;
228 * Remove one reference to the processor set. Destroy processor_set
229 * if this was the last reference.
233 processor_set_t pset
)
235 if (pset
== PROCESSOR_SET_NULL
)
238 assert(pset
== &default_pset
);
245 * Add one reference to the processor set.
249 processor_set_t pset
)
251 assert(pset
== &default_pset
);
254 #define pset_reference_locked(pset) assert(pset == &default_pset)
257 * pset_remove_processor() removes a processor from a processor_set.
258 * It can only be called on the current processor. Caller must
259 * hold lock on current processor and processor set.
262 pset_remove_processor(
263 processor_set_t pset
,
264 processor_t processor
)
266 if (pset
!= processor
->processor_set
)
267 panic("pset_remove_processor: wrong pset");
269 queue_remove(&pset
->processors
, processor
, processor_t
, processors
);
270 processor
->processor_set
= PROCESSOR_SET_NULL
;
271 pset
->processor_count
--;
272 pset_quanta_set(pset
);
276 * pset_add_processor() adds a processor to a processor_set.
277 * It can only be called on the current processor. Caller must
278 * hold lock on curent processor and on pset. No reference counting on
279 * processors. Processor reference to pset is implicit.
283 processor_set_t pset
,
284 processor_t processor
)
286 queue_enter(&pset
->processors
, processor
, processor_t
, processors
);
287 processor
->processor_set
= pset
;
288 pset
->processor_count
++;
289 pset_quanta_set(pset
);
293 * pset_remove_task() removes a task from a processor_set.
294 * Caller must hold locks on pset and task (unless task has
295 * no references left, in which case just the pset lock is
296 * needed). Pset reference count is not decremented;
297 * caller must explicitly pset_deallocate.
301 processor_set_t pset
,
304 if (pset
!= task
->processor_set
)
307 queue_remove(&pset
->tasks
, task
, task_t
, pset_tasks
);
308 task
->processor_set
= PROCESSOR_SET_NULL
;
313 * pset_add_task() adds a task to a processor_set.
314 * Caller must hold locks on pset and task. Pset references to
315 * tasks are implicit.
319 processor_set_t pset
,
322 queue_enter(&pset
->tasks
, task
, task_t
, pset_tasks
);
323 task
->processor_set
= pset
;
325 pset_reference_locked(pset
);
329 * pset_remove_thread() removes a thread from a processor_set.
330 * Caller must hold locks on pset and thread (but only if thread
331 * has outstanding references that could be used to lookup the pset).
332 * The pset reference count is not decremented; caller must explicitly
337 processor_set_t pset
,
340 queue_remove(&pset
->threads
, thread
, thread_t
, pset_threads
);
341 thread
->processor_set
= PROCESSOR_SET_NULL
;
342 pset
->thread_count
--;
346 * pset_add_thread() adds a thread to a processor_set.
347 * Caller must hold locks on pset and thread. Pset references to
348 * threads are implicit.
352 processor_set_t pset
,
355 queue_enter(&pset
->threads
, thread
, thread_t
, pset_threads
);
356 thread
->processor_set
= pset
;
357 pset
->thread_count
++;
358 pset_reference_locked(pset
);
362 * thread_change_psets() changes the pset of a thread. Caller must
363 * hold locks on both psets and thread. The old pset must be
364 * explicitly pset_deallocat()'ed by caller.
369 processor_set_t old_pset
,
370 processor_set_t new_pset
)
372 queue_remove(&old_pset
->threads
, thread
, thread_t
, pset_threads
);
373 old_pset
->thread_count
--;
374 queue_enter(&new_pset
->threads
, thread
, thread_t
, pset_threads
);
375 thread
->processor_set
= new_pset
;
376 new_pset
->thread_count
++;
377 pset_reference_locked(new_pset
);
382 processor_info_count(
383 processor_flavor_t flavor
,
384 mach_msg_type_number_t
*count
)
389 case PROCESSOR_BASIC_INFO
:
390 *count
= PROCESSOR_BASIC_INFO_COUNT
;
392 case PROCESSOR_CPU_LOAD_INFO
:
393 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
396 kr
= cpu_info_count(flavor
, count
);
404 register processor_t processor
,
405 processor_flavor_t flavor
,
407 processor_info_t info
,
408 mach_msg_type_number_t
*count
)
410 register int i
, slot_num
, state
;
411 register processor_basic_info_t basic_info
;
412 register processor_cpu_load_info_t cpu_load_info
;
415 if (processor
== PROCESSOR_NULL
)
416 return(KERN_INVALID_ARGUMENT
);
418 slot_num
= processor
->slot_num
;
422 case PROCESSOR_BASIC_INFO
:
424 if (*count
< PROCESSOR_BASIC_INFO_COUNT
)
425 return(KERN_FAILURE
);
427 basic_info
= (processor_basic_info_t
) info
;
428 basic_info
->cpu_type
= machine_slot
[slot_num
].cpu_type
;
429 basic_info
->cpu_subtype
= machine_slot
[slot_num
].cpu_subtype
;
430 state
= processor
->state
;
431 if (state
== PROCESSOR_OFF_LINE
)
432 basic_info
->running
= FALSE
;
434 basic_info
->running
= TRUE
;
435 basic_info
->slot_num
= slot_num
;
436 if (processor
== master_processor
)
437 basic_info
->is_master
= TRUE
;
439 basic_info
->is_master
= FALSE
;
441 *count
= PROCESSOR_BASIC_INFO_COUNT
;
443 return(KERN_SUCCESS
);
445 case PROCESSOR_CPU_LOAD_INFO
:
447 if (*count
< PROCESSOR_CPU_LOAD_INFO_COUNT
)
448 return(KERN_FAILURE
);
450 cpu_load_info
= (processor_cpu_load_info_t
) info
;
451 for (i
=0;i
<CPU_STATE_MAX
;i
++)
452 cpu_load_info
->cpu_ticks
[i
] = machine_slot
[slot_num
].cpu_ticks
[i
];
454 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
456 return(KERN_SUCCESS
);
460 kr
=cpu_info(flavor
, slot_num
, info
, count
);
461 if (kr
== KERN_SUCCESS
)
470 processor_t processor
)
476 if (processor
== PROCESSOR_NULL
)
477 return(KERN_INVALID_ARGUMENT
);
479 if (processor
== master_processor
) {
480 thread_bind(current_thread(), processor
);
481 thread_block(THREAD_CONTINUE_NULL
);
482 kr
= cpu_start(processor
->slot_num
);
483 thread_bind(current_thread(), PROCESSOR_NULL
);
489 processor_lock(processor
);
491 state
= processor
->state
;
492 if (state
!= PROCESSOR_OFF_LINE
) {
493 processor_unlock(processor
);
495 return(KERN_FAILURE
);
497 processor
->state
= PROCESSOR_START
;
498 processor_unlock(processor
);
501 if (processor
->next_thread
== THREAD_NULL
) {
503 extern void start_cpu_thread(void);
505 thread
= kernel_thread_with_priority(
506 kernel_task
, MAXPRI_KERNEL
,
507 start_cpu_thread
, TRUE
, FALSE
);
511 thread_bind_locked(thread
, processor
);
512 thread_go_locked(thread
, THREAD_AWAKENED
);
513 (void)rem_runq(thread
);
514 processor
->next_thread
= thread
;
515 thread_unlock(thread
);
519 kr
= cpu_start(processor
->slot_num
);
521 if (kr
!= KERN_SUCCESS
) {
523 processor_lock(processor
);
524 processor
->state
= PROCESSOR_OFF_LINE
;
525 processor_unlock(processor
);
534 processor_t processor
)
536 if (processor
== PROCESSOR_NULL
)
537 return(KERN_INVALID_ARGUMENT
);
539 return(processor_shutdown(processor
));
544 processor_t processor
,
545 processor_info_t info
,
546 mach_msg_type_number_t count
)
548 if (processor
== PROCESSOR_NULL
)
549 return(KERN_INVALID_ARGUMENT
);
551 return(cpu_control(processor
->slot_num
, info
, count
));
555 * Precalculate the appropriate timesharing quanta based on load. The
556 * index into machine_quanta is the number of threads on the
557 * processor set queue. It is limited to the number of processors in
563 processor_set_t pset
)
565 register int i
, count
= pset
->processor_count
;
567 for (i
= 1; i
<= count
; i
++)
568 pset
->machine_quanta
[i
] = (count
+ (i
/ 2)) / i
;
570 pset
->machine_quanta
[0] = pset
->machine_quanta
[1];
572 pset_quanta_update(pset
);
576 processor_set_create(
578 processor_set_t
*new_set
,
579 processor_set_t
*new_name
)
582 host
++; new_set
++; new_name
++;
584 return(KERN_FAILURE
);
588 processor_set_destroy(
589 processor_set_t pset
)
594 return(KERN_FAILURE
);
598 processor_get_assignment(
599 processor_t processor
,
600 processor_set_t
*pset
)
604 state
= processor
->state
;
605 if (state
== PROCESSOR_SHUTDOWN
|| state
== PROCESSOR_OFF_LINE
)
606 return(KERN_FAILURE
);
608 *pset
= processor
->processor_set
;
609 pset_reference(*pset
);
610 return(KERN_SUCCESS
);
615 processor_set_t pset
,
618 processor_set_info_t info
,
619 mach_msg_type_number_t
*count
)
621 if (pset
== PROCESSOR_SET_NULL
)
622 return(KERN_INVALID_ARGUMENT
);
624 if (flavor
== PROCESSOR_SET_BASIC_INFO
) {
625 register processor_set_basic_info_t basic_info
;
627 if (*count
< PROCESSOR_SET_BASIC_INFO_COUNT
)
628 return(KERN_FAILURE
);
630 basic_info
= (processor_set_basic_info_t
) info
;
631 basic_info
->processor_count
= pset
->processor_count
;
632 basic_info
->default_policy
= POLICY_TIMESHARE
;
634 *count
= PROCESSOR_SET_BASIC_INFO_COUNT
;
636 return(KERN_SUCCESS
);
638 else if (flavor
== PROCESSOR_SET_TIMESHARE_DEFAULT
) {
639 register policy_timeshare_base_t ts_base
;
641 if (*count
< POLICY_TIMESHARE_BASE_COUNT
)
642 return(KERN_FAILURE
);
644 ts_base
= (policy_timeshare_base_t
) info
;
645 ts_base
->base_priority
= BASEPRI_DEFAULT
;
647 *count
= POLICY_TIMESHARE_BASE_COUNT
;
649 return(KERN_SUCCESS
);
651 else if (flavor
== PROCESSOR_SET_FIFO_DEFAULT
) {
652 register policy_fifo_base_t fifo_base
;
654 if (*count
< POLICY_FIFO_BASE_COUNT
)
655 return(KERN_FAILURE
);
657 fifo_base
= (policy_fifo_base_t
) info
;
658 fifo_base
->base_priority
= BASEPRI_DEFAULT
;
660 *count
= POLICY_FIFO_BASE_COUNT
;
662 return(KERN_SUCCESS
);
664 else if (flavor
== PROCESSOR_SET_RR_DEFAULT
) {
665 register policy_rr_base_t rr_base
;
667 if (*count
< POLICY_RR_BASE_COUNT
)
668 return(KERN_FAILURE
);
670 rr_base
= (policy_rr_base_t
) info
;
671 rr_base
->base_priority
= BASEPRI_DEFAULT
;
672 rr_base
->quantum
= 1;
674 *count
= POLICY_RR_BASE_COUNT
;
676 return(KERN_SUCCESS
);
678 else if (flavor
== PROCESSOR_SET_TIMESHARE_LIMITS
) {
679 register policy_timeshare_limit_t ts_limit
;
681 if (*count
< POLICY_TIMESHARE_LIMIT_COUNT
)
682 return(KERN_FAILURE
);
684 ts_limit
= (policy_timeshare_limit_t
) info
;
685 ts_limit
->max_priority
= MAXPRI_STANDARD
;
687 *count
= POLICY_TIMESHARE_LIMIT_COUNT
;
689 return(KERN_SUCCESS
);
691 else if (flavor
== PROCESSOR_SET_FIFO_LIMITS
) {
692 register policy_fifo_limit_t fifo_limit
;
694 if (*count
< POLICY_FIFO_LIMIT_COUNT
)
695 return(KERN_FAILURE
);
697 fifo_limit
= (policy_fifo_limit_t
) info
;
698 fifo_limit
->max_priority
= MAXPRI_STANDARD
;
700 *count
= POLICY_FIFO_LIMIT_COUNT
;
702 return(KERN_SUCCESS
);
704 else if (flavor
== PROCESSOR_SET_RR_LIMITS
) {
705 register policy_rr_limit_t rr_limit
;
707 if (*count
< POLICY_RR_LIMIT_COUNT
)
708 return(KERN_FAILURE
);
710 rr_limit
= (policy_rr_limit_t
) info
;
711 rr_limit
->max_priority
= MAXPRI_STANDARD
;
713 *count
= POLICY_RR_LIMIT_COUNT
;
715 return(KERN_SUCCESS
);
717 else if (flavor
== PROCESSOR_SET_ENABLED_POLICIES
) {
718 register int *enabled
;
720 if (*count
< (sizeof(*enabled
)/sizeof(int)))
721 return(KERN_FAILURE
);
723 enabled
= (int *) info
;
724 *enabled
= POLICY_TIMESHARE
| POLICY_RR
| POLICY_FIFO
;
726 *count
= sizeof(*enabled
)/sizeof(int);
728 return(KERN_SUCCESS
);
733 return(KERN_INVALID_ARGUMENT
);
737 * processor_set_statistics
739 * Returns scheduling statistics for a processor set.
742 processor_set_statistics(
743 processor_set_t pset
,
745 processor_set_info_t info
,
746 mach_msg_type_number_t
*count
)
748 if (pset
== PROCESSOR_SET_NULL
)
749 return (KERN_INVALID_PROCESSOR_SET
);
751 if (flavor
== PROCESSOR_SET_LOAD_INFO
) {
752 register processor_set_load_info_t load_info
;
754 if (*count
< PROCESSOR_SET_LOAD_INFO_COUNT
)
755 return(KERN_FAILURE
);
757 load_info
= (processor_set_load_info_t
) info
;
760 load_info
->task_count
= pset
->task_count
;
761 load_info
->thread_count
= pset
->thread_count
;
762 load_info
->mach_factor
= pset
->mach_factor
;
763 load_info
->load_average
= pset
->load_average
;
766 *count
= PROCESSOR_SET_LOAD_INFO_COUNT
;
767 return(KERN_SUCCESS
);
770 return(KERN_INVALID_ARGUMENT
);
774 * processor_set_max_priority:
776 * Specify max priority permitted on processor set. This affects
777 * newly created and assigned threads. Optionally change existing
781 processor_set_max_priority(
782 processor_set_t pset
,
784 boolean_t change_threads
)
786 return (KERN_INVALID_ARGUMENT
);
790 * processor_set_policy_enable:
792 * Allow indicated policy on processor set.
796 processor_set_policy_enable(
797 processor_set_t pset
,
800 return (KERN_INVALID_ARGUMENT
);
804 * processor_set_policy_disable:
806 * Forbid indicated policy on processor set. Time sharing cannot
810 processor_set_policy_disable(
811 processor_set_t pset
,
813 boolean_t change_threads
)
815 return (KERN_INVALID_ARGUMENT
);
819 #define THING_THREAD 1
822 * processor_set_things:
824 * Common internals for processor_set_{threads,tasks}
827 processor_set_things(
828 processor_set_t pset
,
829 mach_port_t
**thing_list
,
830 mach_msg_type_number_t
*count
,
833 unsigned int actual
; /* this many things */
836 vm_size_t size
, size_needed
;
839 if (pset
== PROCESSOR_SET_NULL
)
840 return KERN_INVALID_ARGUMENT
;
851 if (type
== THING_TASK
)
852 actual
= pset
->task_count
;
854 actual
= pset
->thread_count
;
856 /* do we have the memory we need? */
858 size_needed
= actual
* sizeof(mach_port_t
);
859 if (size_needed
<= size
)
862 /* unlock the pset and allocate more memory */
868 assert(size_needed
> 0);
873 return KERN_RESOURCE_SHORTAGE
;
876 /* OK, have memory and the processor_set is locked & active */
880 task_t
*tasks
= (task_t
*) addr
;
883 for (i
= 0, task
= (task_t
) queue_first(&pset
->tasks
);
884 !queue_end(&pset
->tasks
, (queue_entry_t
) task
);
885 task
= (task_t
) queue_next(&task
->pset_tasks
)) {
888 if (task
->ref_count
> 0) {
889 /* take ref for convert_task_to_port */
890 task_reference_locked(task
);
899 thread_act_t
*thr_acts
= (thread_act_t
*) addr
;
901 thread_act_t thr_act
;
903 for (i
= 0, thread
= (thread_t
) queue_first(&pset
->threads
);
904 !queue_end(&pset
->threads
, (queue_entry_t
)thread
);
905 thread
= (thread_t
) queue_next(&thread
->pset_threads
)) {
907 thr_act
= thread_lock_act(thread
);
908 if (thr_act
&& thr_act
->ref_count
> 0) {
909 /* take ref for convert_act_to_port */
910 act_locked_act_reference(thr_act
);
911 thr_acts
[i
++] = thr_act
;
913 thread_unlock_act(thread
);
919 /* can unlock processor set now that we have the task/thread refs */
924 size_needed
= actual
* sizeof(mach_port_t
);
929 /* no things, so return null pointer and deallocate memory */
936 /* if we allocated too much, must copy */
938 if (size_needed
< size
) {
941 newaddr
= kalloc(size_needed
);
945 task_t
*tasks
= (task_t
*) addr
;
947 for (i
= 0; i
< actual
; i
++)
948 task_deallocate(tasks
[i
]);
953 thread_act_t
*acts
= (thread_act_t
*) addr
;
955 for (i
= 0; i
< actual
; i
++)
956 act_deallocate(acts
[i
]);
961 return KERN_RESOURCE_SHORTAGE
;
964 bcopy((char *) addr
, (char *) newaddr
, size_needed
);
969 *thing_list
= (mach_port_t
*) addr
;
972 /* do the conversion that Mig should handle */
976 task_t
*tasks
= (task_t
*) addr
;
978 for (i
= 0; i
< actual
; i
++)
979 (*thing_list
)[i
] = convert_task_to_port(tasks
[i
]);
984 thread_act_t
*thr_acts
= (thread_act_t
*) addr
;
986 for (i
= 0; i
< actual
; i
++)
987 (*thing_list
)[i
] = convert_act_to_port(thr_acts
[i
]);
993 return(KERN_SUCCESS
);
998 * processor_set_tasks:
1000 * List all tasks in the processor set.
1003 processor_set_tasks(
1004 processor_set_t pset
,
1005 task_array_t
*task_list
,
1006 mach_msg_type_number_t
*count
)
1008 return(processor_set_things(pset
, (mach_port_t
**)task_list
, count
, THING_TASK
));
1012 * processor_set_threads:
1014 * List all threads in the processor set.
1017 processor_set_threads(
1018 processor_set_t pset
,
1019 thread_array_t
*thread_list
,
1020 mach_msg_type_number_t
*count
)
1022 return(processor_set_things(pset
, (mach_port_t
**)thread_list
, count
, THING_THREAD
));
1026 * processor_set_base:
1028 * Specify per-policy base priority for a processor set. Set processor
1029 * set default policy to the given policy. This affects newly created
1030 * and assigned threads. Optionally change existing ones.
1034 processor_set_t pset
,
1039 return (KERN_INVALID_ARGUMENT
);
1043 * processor_set_limit:
1045 * Specify per-policy limits for a processor set. This affects
1046 * newly created and assigned threads. Optionally change existing
1050 processor_set_limit(
1051 processor_set_t pset
,
1053 policy_limit_t limit
,
1056 return (KERN_POLICY_LIMIT
);
1060 * processor_set_policy_control
1062 * Controls the scheduling attributes governing the processor set.
1063 * Allows control of enabled policies, and per-policy base and limit
1067 processor_set_policy_control(
1068 processor_set_t pset
,
1070 processor_set_info_t policy_info
,
1071 mach_msg_type_number_t count
,
1074 return (KERN_INVALID_ARGUMENT
);