/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	processor.c: processor and processor_set manipulation routines.
 */
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>
struct processor_set	default_pset;

processor_t		processor_list;
unsigned int		processor_count;
static processor_t	processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

processor_t		master_processor;
/* Forward declarations */
kern_return_t
processor_set_base(
	processor_set_t		pset,
	policy_t		policy,
	policy_base_t		base,
	boolean_t		change);

kern_return_t
processor_set_limit(
	processor_set_t		pset,
	policy_t		policy,
	policy_limit_t		limit,
	boolean_t		change);

kern_return_t
processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type);
void
processor_bootstrap(void)
{
	simple_lock_init(&processor_list_lock, 0);

	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu);
}
/*
 *	Initialize the given processor_set structure.
 */
void
pset_init(
	register processor_set_t	pset)
{
	register int	i;

	/* setup run queue */
	pset->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		pset->runq.bitmap[i] = 0;
	setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);
	pset->runq.urgency = pset->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&pset->runq.queues[i]);

	queue_init(&pset->idle_queue);
	pset->idle_count = 0;
	queue_init(&pset->active_queue);
	simple_lock_init(&pset->sched_lock, 0);
	pset->run_count = pset->share_count = 0;
	pset->mach_factor = pset->load_average = 0;
	pset->pri_shift = INT8_MAX;
	queue_init(&pset->processors);
	pset->processor_count = 0;
	queue_init(&pset->tasks);
	pset->task_count = 0;
	queue_init(&pset->threads);
	pset->thread_count = 0;
	mutex_init(&pset->lock, 0);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->timeshare_quanta = 1;
}
/*
 *	Initialize the given processor structure for the processor in
 *	the slot specified by slot_num.
 */
void
processor_init(
	register processor_t	p,
	int			slot_num)
{
	register int	i;

	/* setup run queue */
	p->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		p->runq.bitmap[i] = 0;
	setbit(MAXPRI - IDLEPRI, p->runq.bitmap);
	p->runq.urgency = p->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&p->runq.queues[i]);

	p->state = PROCESSOR_OFF_LINE;
	p->active_thread = p->next_thread = p->idle_thread = THREAD_NULL;
	p->processor_set = PROCESSOR_SET_NULL;
	p->current_pri = MINPRI;
	p->deadline = UINT64_MAX;
	timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);
	simple_lock_init(&p->lock, 0);
	p->processor_self = IP_NULL;
	processor_data_init(p);
	PROCESSOR_DATA(p, slot_num) = slot_num;

	/* append the processor to the global processor list */
	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = p;
	else
		processor_list_tail->processor_list = p;
	processor_list_tail = p;
	processor_count++;
	p->processor_list = NULL;
	simple_unlock(&processor_list_lock);
}
/*
 *	Remove one reference to the processor set.  Destroy processor_set
 *	if this was the last reference.
 */
void
pset_deallocate(
	processor_set_t	pset)
{
	if (pset == PROCESSOR_SET_NULL)
		return;

	assert(pset == &default_pset);
	return;
}
/*
 *	Add one reference to the processor set.
 */
void
pset_reference(
	processor_set_t	pset)
{
	if (pset == PROCESSOR_SET_NULL)
		return;

	assert(pset == &default_pset);
}

#define pset_reference_locked(pset) assert(pset == &default_pset)
/*
 *	pset_remove_processor() removes a processor from a processor_set.
 *	It can only be called on the current processor.  Caller must
 *	hold lock on current processor and processor set.
 */
void
pset_remove_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	if (pset != processor->processor_set)
		panic("pset_remove_processor: wrong pset");

	queue_remove(&pset->processors, processor, processor_t, processors);
	processor->processor_set = PROCESSOR_SET_NULL;
	pset->processor_count--;
	timeshare_quanta_update(pset);
}
/*
 *	pset_add_processor() adds a processor to a processor_set.
 *	It can only be called on the current processor.  Caller must
 *	hold lock on current processor and on pset.  No reference counting on
 *	processors.  Processor reference to pset is implicit.
 */
void
pset_add_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	queue_enter(&pset->processors, processor, processor_t, processors);
	processor->processor_set = pset;
	pset->processor_count++;
	timeshare_quanta_update(pset);
}
/*
 *	pset_remove_task() removes a task from a processor_set.
 *	Caller must hold locks on pset and task (unless task has
 *	no references left, in which case just the pset lock is
 *	needed).  Pset reference count is not decremented;
 *	caller must explicitly pset_deallocate.
 */
void
pset_remove_task(
	processor_set_t	pset,
	task_t		task)
{
	if (pset != task->processor_set)
		return;

	queue_remove(&pset->tasks, task, task_t, pset_tasks);
	task->processor_set = PROCESSOR_SET_NULL;
	pset->task_count--;
}
/*
 *	pset_add_task() adds a task to a processor_set.
 *	Caller must hold locks on pset and task.  Pset references to
 *	tasks are implicit.
 */
void
pset_add_task(
	processor_set_t	pset,
	task_t		task)
{
	queue_enter(&pset->tasks, task, task_t, pset_tasks);
	task->processor_set = pset;
	pset->task_count++;
	pset_reference_locked(pset);
}
/*
 *	pset_remove_thread() removes a thread from a processor_set.
 *	Caller must hold locks on pset and thread (but only if thread
 *	has outstanding references that could be used to lookup the pset).
 *	The pset reference count is not decremented; caller must explicitly
 *	pset_deallocate.
 */
void
pset_remove_thread(
	processor_set_t	pset,
	thread_t	thread)
{
	queue_remove(&pset->threads, thread, thread_t, pset_threads);
	pset->thread_count--;
}
/*
 *	pset_add_thread() adds a thread to a processor_set.
 *	Caller must hold locks on pset and thread.  Pset references to
 *	threads are implicit.
 */
void
pset_add_thread(
	processor_set_t	pset,
	thread_t	thread)
{
	queue_enter(&pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = pset;
	pset->thread_count++;
	pset_reference_locked(pset);
}
/*
 *	thread_change_psets() changes the pset of a thread.  Caller must
 *	hold locks on both psets and thread.  The old pset must be
 *	explicitly pset_deallocate()'ed by caller.
 */
void
thread_change_psets(
	thread_t		thread,
	processor_set_t		old_pset,
	processor_set_t		new_pset)
{
	queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
	old_pset->thread_count--;
	queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = new_pset;
	new_pset->thread_count++;
	pset_reference_locked(new_pset);
}
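
/*
 * Illustrative sketch (not part of the build): the call pattern implied by
 * the comment above -- the caller locks both psets and the thread, performs
 * the move, and then drops its reference on the old pset.  The pset_lock/
 * pset_unlock and thread_lock/thread_unlock primitives named here are
 * assumptions for illustration only.
 *
 *	pset_lock(old_pset);
 *	pset_lock(new_pset);
 *	thread_lock(thread);
 *	thread_change_psets(thread, old_pset, new_pset);
 *	thread_unlock(thread);
 *	pset_unlock(new_pset);
 *	pset_unlock(old_pset);
 *	pset_deallocate(old_pset);
 */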
kern_return_t
processor_info_count(
	processor_flavor_t	flavor,
	mach_msg_type_number_t	*count)
{
	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
		*count = PROCESSOR_BASIC_INFO_COUNT;
		break;

	case PROCESSOR_CPU_LOAD_INFO:
		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		break;

	default:
		return (cpu_info_count(flavor, count));
	}

	return (KERN_SUCCESS);
}
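
/*
 * Illustrative sketch (not part of the build): a caller would normally use
 * processor_info_count() to learn how large a buffer a given flavor needs
 * before calling processor_info() with that same flavor.  The variable
 * names below are assumptions for illustration only.
 *
 *	processor_info_data_t	data;
 *	mach_msg_type_number_t	count;
 *	host_t			host;
 *
 *	if (processor_info_count(PROCESSOR_CPU_LOAD_INFO, &count) == KERN_SUCCESS)
 *		(void) processor_info(master_processor, PROCESSOR_CPU_LOAD_INFO,
 *					&host, (processor_info_t) data, &count);
 */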
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	register int	i, slot_num, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	slot_num = PROCESSOR_DATA(processor, slot_num);

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t	basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(slot_num);
		basic_info->cpu_subtype = slot_subtype(slot_num);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = slot_num;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		register processor_cpu_load_info_t	cpu_load_info;
		register integer_t	*cpu_ticks;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		cpu_ticks = PROCESSOR_DATA(processor, cpu_ticks);
		for (i = 0; i < CPU_STATE_MAX; i++)
			cpu_load_info->cpu_ticks[i] = cpu_ticks[i];

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		result = cpu_info(flavor, slot_num, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}
kern_return_t
processor_start(
	processor_t		processor)
{
	kern_return_t		result;
	thread_t		thread;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		thread_t	self = current_thread();
		processor_t	prev;

		prev = thread_bind(self, processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(PROCESSOR_DATA(processor, slot_num));

		thread_bind(self, prev);

		return (result);
	}

	processor_lock(processor);
	if (processor->state != PROCESSOR_OFF_LINE) {
		processor_unlock(processor);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	processor_unlock(processor);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			processor_lock(processor);
			processor->state = PROCESSOR_OFF_LINE;
			processor_unlock(processor);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread,
							NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			processor_lock(processor);
			processor->state = PROCESSOR_OFF_LINE;
			processor_unlock(processor);

			return (result);
		}

		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);

		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(PROCESSOR_DATA(processor, slot_num));
	if (result != KERN_SUCCESS) {
		processor_lock(processor);
		processor->state = PROCESSOR_OFF_LINE;
		timer_call_shutdown(processor);
		processor_unlock(processor);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
kern_return_t
processor_exit(
	processor_t	processor)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(processor_shutdown(processor));
}
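
/*
 * Illustrative sketch (not part of the build): processor_start() and
 * processor_exit() are normally reached through the host-privileged MIG
 * interface.  A privileged client might take a processor offline and bring
 * it back roughly as follows; the index used is only an example and error
 * handling is omitted.
 *
 *	processor_array_t	procs;
 *	mach_msg_type_number_t	nprocs;
 *
 *	host_processors(host_priv, &procs, &nprocs);
 *	processor_exit(procs[1]);	/ * request shutdown of the processor * /
 *	processor_start(procs[1]);	/ * ... then start it again * /
 */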
kern_return_t
processor_control(
	processor_t		processor,
	processor_info_t	info,
	mach_msg_type_number_t	count)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(cpu_control(PROCESSOR_DATA(processor, slot_num), info, count));
}
/*
 *	Calculate the appropriate timesharing quanta based on set load.
 */
void
timeshare_quanta_update(
	processor_set_t		pset)
{
	int	pcount = pset->processor_count;
	int	i = pset->runq.count;

	if (i >= pcount)
		i = 1;
	else
	if (i <= 1)
		i = pcount;
	else
		i = (pcount + (i / 2)) / i;

	pset->timeshare_quanta = i;
}
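
/*
 * Worked example of the calculation above (illustration only): with
 * processor_count == 4 and runq.count == 3 neither boundary case applies,
 * so quanta = (4 + 3/2) / 3 = 5/3 = 1; with runq.count <= 1 every runnable
 * thread can have its own processor and quanta = processor_count; with
 * runq.count >= processor_count the set is saturated and quanta = 1.
 */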
kern_return_t
processor_set_create(
	__unused host_t			host,
	__unused processor_set_t	*new_set,
	__unused processor_set_t	*new_name)
{
	return(KERN_FAILURE);
}
kern_return_t
processor_set_destroy(
	__unused processor_set_t	pset)
{
	return(KERN_FAILURE);
}
kern_return_t
processor_get_assignment(
	processor_t		processor,
	processor_set_t		*pset)
{
	int	state;

	state = processor->state;
	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
		return(KERN_FAILURE);

	*pset = processor->processor_set;
	pset_reference(*pset);
	return(KERN_SUCCESS);
}
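
/*
 * Illustrative sketch (not part of the build): processor_get_assignment()
 * returns the pset with an extra reference taken via pset_reference(), so
 * the caller is expected to drop it with pset_deallocate() when finished:
 *
 *	processor_set_t		pset;
 *
 *	if (processor_get_assignment(processor, &pset) == KERN_SUCCESS) {
 *		... use pset ...
 *		pset_deallocate(pset);
 *	}
 */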
kern_return_t
processor_set_info(
	processor_set_t		pset,
	int			flavor,
	host_t			*host,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		register processor_set_basic_info_t	basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_set_basic_info_t) info;
		basic_info->processor_count = pset->processor_count;
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		register policy_timeshare_base_t	ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		register policy_fifo_base_t	fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return(KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		register policy_rr_base_t	rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return(KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		register policy_timeshare_limit_t	ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return(KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		register policy_fifo_limit_t	fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return(KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		register policy_rr_limit_t	rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return(KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		register int	*enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return(KERN_FAILURE);

		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled)/sizeof(int);
		*host = &realhost;
		return(KERN_SUCCESS);
	}

	*host = HOST_NULL;
	return(KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
	processor_set_t		pset,
	int			flavor,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_PROCESSOR_SET);

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		register processor_set_load_info_t	load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		load_info = (processor_set_load_info_t) info;

		load_info->task_count = pset->task_count;
		load_info->thread_count = pset->thread_count;
		load_info->mach_factor = pset->mach_factor;
		load_info->load_average = pset->load_average;

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return(KERN_SUCCESS);
	}

	return(KERN_INVALID_ARGUMENT);
}
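
/*
 * Illustrative sketch (not part of the build): querying the load statistics
 * of the default processor set through the routine above.  Variable names
 * are assumptions for illustration only.
 *
 *	processor_set_load_info_data_t	load;
 *	mach_msg_type_number_t		count = PROCESSOR_SET_LOAD_INFO_COUNT;
 *
 *	if (processor_set_statistics(&default_pset, PROCESSOR_SET_LOAD_INFO,
 *			(processor_set_info_t) &load, &count) == KERN_SUCCESS)
 *		printf("tasks %d threads %d\n", load.task_count, load.thread_count);
 */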
/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_max_priority(
	__unused processor_set_t	pset,
	__unused int			max_priority,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_policy_enable:
 *
 *	Allow indicated policy on processor set.
 */
kern_return_t
processor_set_policy_enable(
	__unused processor_set_t	pset,
	__unused int			policy)
{
	return (KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 */
kern_return_t
processor_set_policy_disable(
	__unused processor_set_t	pset,
	__unused int			policy,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
#define THING_TASK	0
#define THING_THREAD	1

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = 0;

	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);

			return (KERN_FAILURE);
		}

		if (type == THING_TASK)
			maxthings = pset->task_count;
		else
			maxthings = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the processor_set is locked & active */

	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t	task, *tasks = (task_t *)addr;

		for (task = (task_t)queue_first(&pset->tasks);
				!queue_end(&pset->tasks, (queue_entry_t)task);
				task = (task_t)queue_next(&task->pset_tasks)) {
			task_reference_internal(task);
			tasks[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t	thread, *threads = (thread_t *)addr;

		for (i = 0, thread = (thread_t)queue_first(&pset->threads);
				!queue_end(&pset->threads, (queue_entry_t)thread);
				thread = (thread_t)queue_next(&thread->pset_threads)) {
			thread_reference_internal(thread);
			threads[actual++] = thread;
		}

		break;
	}
	}

	pset_unlock(pset);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				switch (type) {

				case THING_TASK:
				{
					task_t	*tasks = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t	*threads = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(threads[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {

		case THING_TASK:
		{
			task_t	*tasks = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(tasks[i]);
			break;
		}

		case THING_THREAD:
		{
			thread_t	*threads = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(threads[i]);
			break;
		}
		}
	}

	return (KERN_SUCCESS);
}
/*
 *	processor_set_tasks:
 *
 *	List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
	processor_set_t		pset,
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
}
/*
 *	processor_set_threads:
 *
 *	List all threads in the processor set.
 */
kern_return_t
processor_set_threads(
	processor_set_t		pset,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}
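
/*
 * Illustrative sketch (not part of the build): processor_set_tasks() and
 * processor_set_threads() return an array of send rights backed by memory
 * from kalloc().  A privileged user-space client would typically reach them
 * like this (error handling omitted, names illustrative):
 *
 *	processor_set_name_t	name;
 *	processor_set_t		pset;
 *	task_array_t		tasks;
 *	mach_msg_type_number_t	ntasks;
 *
 *	processor_set_default(mach_host_self(), &name);
 *	host_processor_set_priv(host_priv, name, &pset);
 *	processor_set_tasks(pset, &tasks, &ntasks);
 */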
/*
 *	processor_set_base:
 *
 *	Specify per-policy base priority for a processor set.  Set processor
 *	set default policy to the given policy.  This affects newly created
 *	and assigned threads.  Optionally change existing ones.
 */
kern_return_t
processor_set_base(
	__unused processor_set_t	pset,
	__unused policy_t		policy,
	__unused policy_base_t		base,
	__unused boolean_t		change)
{
	return (KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_limit:
 *
 *	Specify per-policy limits for a processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_limit(
	__unused processor_set_t	pset,
	__unused policy_t		policy,
	__unused policy_limit_t		limit,
	__unused boolean_t		change)
{
	return (KERN_POLICY_LIMIT);
}
/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 */
kern_return_t
processor_set_policy_control(
	__unused processor_set_t	pset,
	__unused int			flavor,
	__unused processor_set_info_t	policy_info,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t		change)
{
	return (KERN_INVALID_ARGUMENT);
}