/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	processor.c: processor and processor_set manipulation routines.
 */
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>
struct	processor_set	default_pset;

processor_t		processor_list;
unsigned int		processor_count;
static processor_t	processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

processor_t	master_processor;
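
/*
 * This configuration has a single, statically allocated processor set
 * (default_pset).  Every processor structure is linked on
 * processor_list, protected by processor_list_lock, and
 * master_processor identifies the bootstrap processor.
 */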
/* Forwards */
kern_return_t	processor_set_base(
		    processor_set_t		pset,
		    policy_t			policy,
		    policy_base_t		base,
		    boolean_t			change);

kern_return_t	processor_set_limit(
		    processor_set_t		pset,
		    policy_t			policy,
		    policy_limit_t		limit,
		    boolean_t			change);

kern_return_t	processor_set_things(
		    processor_set_t		pset,
		    mach_port_t			**thing_list,
		    mach_msg_type_number_t	*count,
		    int				type);
void
processor_bootstrap(void)
{
	simple_lock_init(&processor_list_lock, 0);

	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu);
}
/*
 *	Initialize the given processor_set structure.
 */
void
pset_init(
	register processor_set_t	pset)
{
	register int	i;

	/* setup run queue */
	pset->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		pset->runq.bitmap[i] = 0;
	setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);
	pset->runq.urgency = pset->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&pset->runq.queues[i]);

	queue_init(&pset->idle_queue);
	pset->idle_count = 0;
	queue_init(&pset->active_queue);
	simple_lock_init(&pset->sched_lock, 0);
	pset->run_count = pset->share_count = 0;
	pset->mach_factor = pset->load_average = 0;
	pset->pri_shift = INT8_MAX;
	queue_init(&pset->processors);
	pset->processor_count = 0;
	queue_init(&pset->tasks);
	pset->task_count = 0;
	queue_init(&pset->threads);
	pset->thread_count = 0;

	mutex_init(&pset->lock, 0);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->timeshare_quanta = 1;
}
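
/*
 * Note: pset_init() leaves pri_shift at INT8_MAX (which in effect
 * disables load-based priority depression) and timeshare_quanta at 1;
 * both are starting values.  timeshare_quanta is recomputed by
 * timeshare_quanta_update() below as processors come and go, and
 * pri_shift is recalculated from the set's load average elsewhere in
 * the scheduler.
 */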
/*
 *	Initialize the given processor structure for the processor in
 *	the slot specified by slot_num.
 */
void
processor_init(
	register processor_t	p,
	int			slot_num)
{
	register int	i;

	/* setup run queue */
	p->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		p->runq.bitmap[i] = 0;
	setbit(MAXPRI - IDLEPRI, p->runq.bitmap);
	p->runq.urgency = p->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&p->runq.queues[i]);

	p->state = PROCESSOR_OFF_LINE;
	p->active_thread = p->next_thread = p->idle_thread = THREAD_NULL;
	p->processor_set = PROCESSOR_SET_NULL;
	p->current_pri = MINPRI;
	p->deadline = UINT64_MAX;
	timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);

	simple_lock_init(&p->lock, 0);
	p->processor_self = IP_NULL;
	processor_data_init(p);
	PROCESSOR_DATA(p, slot_num) = slot_num;

	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = p;
	else
		processor_list_tail->processor_list = p;
	processor_list_tail = p;
	processor_count++;
	p->processor_list = NULL;
	simple_unlock(&processor_list_lock);
}
/*
 *	Remove one reference to the processor set.  Destroy processor_set
 *	if this was the last reference.
 */
void
pset_deallocate(
	processor_set_t	pset)
{
	if (pset == PROCESSOR_SET_NULL)
		return;

	assert(pset == &default_pset);
}
/*
 *	Add one reference to the processor set.
 */
void
pset_reference(
	processor_set_t	pset)
{
	if (pset == PROCESSOR_SET_NULL)
		return;

	assert(pset == &default_pset);
}

#define pset_reference_locked(pset)	assert(pset == &default_pset)
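
/*
 * Since the default processor set is the only set that can exist,
 * reference counting collapses to the assertions above and the set is
 * never destroyed.
 */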
/*
 *	pset_remove_processor() removes a processor from a processor_set.
 *	It can only be called on the current processor.  Caller must
 *	hold lock on current processor and processor set.
 */
void
pset_remove_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	if (pset != processor->processor_set)
		panic("pset_remove_processor: wrong pset");

	queue_remove(&pset->processors, processor, processor_t, processors);
	processor->processor_set = PROCESSOR_SET_NULL;
	pset->processor_count--;
	timeshare_quanta_update(pset);
}
/*
 *	pset_add_processor() adds a processor to a processor_set.
 *	It can only be called on the current processor.  Caller must
 *	hold lock on current processor and on pset.  No reference counting on
 *	processors.  Processor reference to pset is implicit.
 */
void
pset_add_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	queue_enter(&pset->processors, processor, processor_t, processors);
	processor->processor_set = pset;
	pset->processor_count++;
	timeshare_quanta_update(pset);
}
/*
 *	pset_remove_task() removes a task from a processor_set.
 *	Caller must hold locks on pset and task (unless task has
 *	no references left, in which case just the pset lock is
 *	needed).  Pset reference count is not decremented;
 *	caller must explicitly pset_deallocate.
 */
void
pset_remove_task(
	processor_set_t	pset,
	task_t		task)
{
	if (pset != task->processor_set)
		return;

	queue_remove(&pset->tasks, task, task_t, pset_tasks);
	pset->task_count--;
}
/*
 *	pset_add_task() adds a task to a processor_set.
 *	Caller must hold locks on pset and task.  Pset references to
 *	tasks are implicit.
 */
void
pset_add_task(
	processor_set_t	pset,
	task_t		task)
{
	queue_enter(&pset->tasks, task, task_t, pset_tasks);
	task->processor_set = pset;
	pset->task_count++;
	pset_reference_locked(pset);
}
/*
 *	pset_remove_thread() removes a thread from a processor_set.
 *	Caller must hold locks on pset and thread (but only if thread
 *	has outstanding references that could be used to lookup the pset).
 *	The pset reference count is not decremented; caller must explicitly
 *	pset_deallocate.
 */
void
pset_remove_thread(
	processor_set_t	pset,
	thread_t	thread)
{
	queue_remove(&pset->threads, thread, thread_t, pset_threads);
	pset->thread_count--;
}
/*
 *	pset_add_thread() adds a thread to a processor_set.
 *	Caller must hold locks on pset and thread.  Pset references to
 *	threads are implicit.
 */
void
pset_add_thread(
	processor_set_t	pset,
	thread_t	thread)
{
	queue_enter(&pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = pset;
	pset->thread_count++;
	pset_reference_locked(pset);
}
/*
 *	thread_change_psets() changes the pset of a thread.  Caller must
 *	hold locks on both psets and thread.  The old pset must be
 *	explicitly pset_deallocate()'d by caller.
 */
void
thread_change_psets(
	thread_t		thread,
	processor_set_t		old_pset,
	processor_set_t		new_pset)
{
	queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
	old_pset->thread_count--;
	queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = new_pset;
	new_pset->thread_count++;
	pset_reference_locked(new_pset);
}
kern_return_t
processor_info_count(
	processor_flavor_t	flavor,
	mach_msg_type_number_t	*count)
{
	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
		*count = PROCESSOR_BASIC_INFO_COUNT;
		break;

	case PROCESSOR_CPU_LOAD_INFO:
		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		break;

	default:
		return (cpu_info_count(flavor, count));
	}

	return (KERN_SUCCESS);
}
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	register int	i, slot_num, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	slot_num = PROCESSOR_DATA(processor, slot_num);

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t		basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(slot_num);
		basic_info->cpu_subtype = slot_subtype(slot_num);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = slot_num;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		register processor_cpu_load_info_t	cpu_load_info;
		register integer_t			*cpu_ticks;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		cpu_ticks = PROCESSOR_DATA(processor, cpu_ticks);
		for (i = 0; i < CPU_STATE_MAX; i++)
			cpu_load_info->cpu_ticks[i] = cpu_ticks[i];

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		result = cpu_info(flavor, slot_num, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}
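
/*
 * Illustrative user-space usage (a sketch, not part of this file):
 * obtain processor ports with host_processors() on the privileged
 * host port, then query each one, for example with the
 * PROCESSOR_BASIC_INFO flavor of processor_info() handled above.
 */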
kern_return_t
processor_start(
	processor_t		processor)
{
	kern_return_t	result;
	thread_t		thread;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		thread_t	self = current_thread();
		processor_t	prev;

		prev = thread_bind(self, processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(PROCESSOR_DATA(processor, slot_num));

		thread_bind(self, prev);

		return (result);
	}

	processor_lock(processor);
	if (processor->state != PROCESSOR_OFF_LINE) {
		processor_unlock(processor);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	processor_unlock(processor);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			processor_lock(processor);
			processor->state = PROCESSOR_OFF_LINE;
			processor_unlock(processor);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread,
											NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			processor_lock(processor);
			processor->state = PROCESSOR_OFF_LINE;
			processor_unlock(processor);

			return (result);
		}

		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);

		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(PROCESSOR_DATA(processor, slot_num));
	if (result != KERN_SUCCESS) {
		processor_lock(processor);
		processor->state = PROCESSOR_OFF_LINE;
		timer_call_shutdown(processor);
		processor_unlock(processor);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
kern_return_t
processor_exit(
	processor_t	processor)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(processor_shutdown(processor));
}
kern_return_t
processor_control(
	processor_t		processor,
	processor_info_t	info,
	mach_msg_type_number_t	count)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(cpu_control(PROCESSOR_DATA(processor, slot_num), info, count));
}
/*
 *	Calculate the appropriate timesharing quanta based on set load.
 */
void
timeshare_quanta_update(
	processor_set_t		pset)
{
	int		pcount = pset->processor_count;
	int		i = pset->runq.count;

	if (i >= pcount)
		i = 1;
	else
	if (i <= 1)
		i = pcount;
	else
		i = (pcount + (i / 2)) / i;

	pset->timeshare_quanta = i;
}
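
/*
 * Example: a set with 4 processors and 2 runnable threads gets
 * (4 + 1) / 2 = 2 quanta per timeshare thread; once the run count
 * reaches the processor count the multiplier drops back to 1, and an
 * idle or nearly idle set gets the full processor count.
 */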
kern_return_t
processor_set_create(
	__unused host_t			host,
	__unused processor_set_t	*new_set,
	__unused processor_set_t	*new_name)
{
	return(KERN_FAILURE);
}

kern_return_t
processor_set_destroy(
	__unused processor_set_t	pset)
{
	return(KERN_FAILURE);
}
kern_return_t
processor_get_assignment(
	processor_t		processor,
	processor_set_t		*pset)
{
	int	state;

	state = processor->state;
	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
		return(KERN_FAILURE);

	*pset = processor->processor_set;
	pset_reference(*pset);
	return(KERN_SUCCESS);
}
kern_return_t
processor_set_info(
	processor_set_t		pset,
	int			flavor,
	host_t			*host,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		register processor_set_basic_info_t	basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_set_basic_info_t) info;
		basic_info->processor_count = pset->processor_count;
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		register policy_timeshare_base_t	ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		register policy_fifo_base_t		fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return(KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		register policy_rr_base_t		rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return(KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		register policy_timeshare_limit_t	ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return(KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		register policy_fifo_limit_t		fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return(KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		register policy_rr_limit_t		rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return(KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		register int	*enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return(KERN_FAILURE);

		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled)/sizeof(int);
		*host = &realhost;
		return(KERN_SUCCESS);
	}

	return(KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
	processor_set_t		pset,
	int			flavor,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_PROCESSOR_SET);

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		register processor_set_load_info_t	load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		load_info = (processor_set_load_info_t) info;

		load_info->task_count = pset->task_count;
		load_info->thread_count = pset->thread_count;
		load_info->mach_factor = pset->mach_factor;
		load_info->load_average = pset->load_average;

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return(KERN_SUCCESS);
	}

	return(KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_max_priority(
	__unused processor_set_t	pset,
	__unused int			max_priority,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_policy_enable:
 *
 *	Allow indicated policy on processor set.
 */
kern_return_t
processor_set_policy_enable(
	__unused processor_set_t	pset,
	__unused int			policy)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 */
kern_return_t
processor_set_policy_disable(
	__unused processor_set_t	pset,
	__unused int			policy,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
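
/*
 * Per-set scheduling policy control is not supported with a single
 * fixed processor set, so the set creation, priority and policy
 * manipulation routines here and below are stubs that simply fail.
 */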
#define THING_TASK	0
#define THING_THREAD	1

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = 0;

	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);

			return (KERN_FAILURE);
		}

		if (type == THING_TASK)
			maxthings = pset->task_count;
		else
			maxthings = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the processor_set is locked & active */

	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t		task, *tasks = (task_t *)addr;

		for (task = (task_t)queue_first(&pset->tasks);
				!queue_end(&pset->tasks, (queue_entry_t)task);
					task = (task_t)queue_next(&task->pset_tasks)) {
			task_reference_internal(task);
			tasks[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t	thread, *threads = (thread_t *)addr;

		for (i = 0, thread = (thread_t)queue_first(&pset->threads);
				!queue_end(&pset->threads, (queue_entry_t)thread);
					thread = (thread_t)queue_next(&thread->pset_threads)) {
			thread_reference_internal(thread);
			threads[actual++] = thread;
		}

		break;
	}
	}

	pset_unlock(pset);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				switch (type) {

				case THING_TASK:
				{
					task_t		*tasks = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t	*threads = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(threads[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {

		case THING_TASK:
		{
			task_t		*tasks = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(tasks[i]);
			break;
		}

		case THING_THREAD:
		{
			thread_t	*threads = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(threads[i]);
			break;
		}
		}
	}

	return (KERN_SUCCESS);
}
/*
 *	processor_set_tasks:
 *
 *	List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
	processor_set_t		pset,
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
}

/*
 *	processor_set_threads:
 *
 *	List all threads in the processor set.
 */
kern_return_t
processor_set_threads(
	processor_set_t		pset,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}
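
/*
 * Illustrative user-space usage (a sketch, not part of this file;
 * assumes the caller already holds the privileged host port host_priv):
 *
 *	processor_set_name_t	name;
 *	processor_set_t		pset;
 *	task_array_t		tasks;
 *	mach_msg_type_number_t	count;
 *
 *	processor_set_default(mach_host_self(), &name);
 *	host_processor_set_priv(host_priv, name, &pset);
 *	processor_set_tasks(pset, &tasks, &count);
 */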
/*
 *	processor_set_base:
 *
 *	Specify per-policy base priority for a processor set.  Set processor
 *	set default policy to the given policy.  This affects newly created
 *	and assigned threads.  Optionally change existing ones.
 */
kern_return_t
processor_set_base(
	__unused processor_set_t	pset,
	__unused policy_t		policy,
	__unused policy_base_t		base,
	__unused boolean_t		change)
{
	return (KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_limit:
 *
 *	Specify per-policy limits for a processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_limit(
	__unused processor_set_t	pset,
	__unused policy_t		policy,
	__unused policy_limit_t		limit,
	__unused boolean_t		change)
{
	return (KERN_POLICY_LIMIT);
}
/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 */
kern_return_t
processor_set_policy_control(
	__unused processor_set_t	pset,
	__unused int			flavor,
	__unused processor_set_info_t	policy_info,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t		change)
{
	return (KERN_INVALID_ARGUMENT);
}