/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 *	processor.c: processor and processor_set manipulation routines.
 */
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>
struct processor_set   default_pset;

processor_t            processor_list;
unsigned int           processor_count;
static processor_t     processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

processor_t            master_processor;
/* Forward declarations */
kern_return_t  processor_set_base(
                    processor_set_t         pset,
                    policy_t                policy,
                    policy_base_t           base,
                    boolean_t               change);

kern_return_t  processor_set_limit(
                    processor_set_t         pset,
                    policy_t                policy,
                    policy_limit_t          limit,
                    boolean_t               change);

kern_return_t  processor_set_things(
                    processor_set_t         pset,
                    mach_port_t             **thing_list,
                    mach_msg_type_number_t  *count,
                    int                     type);
void
processor_bootstrap(void)
{
    simple_lock_init(&processor_list_lock, 0);

    master_processor = cpu_to_processor(master_cpu);

    processor_init(master_processor, master_cpu);
}
/*
 *	Initialize the given processor_set structure.
 */
void
pset_init(
    register processor_set_t    pset)
{
    register int    i;

    /* setup run queue */
    pset->runq.highq = IDLEPRI;
    for (i = 0; i < NRQBM; i++)
        pset->runq.bitmap[i] = 0;
    setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);
    pset->runq.urgency = pset->runq.count = 0;
    for (i = 0; i < NRQS; i++)
        queue_init(&pset->runq.queues[i]);

    queue_init(&pset->idle_queue);
    pset->idle_count = 0;
    queue_init(&pset->active_queue);
    simple_lock_init(&pset->sched_lock, 0);
    pset->run_count = pset->share_count = 0;
    pset->mach_factor = pset->load_average = 0;
    pset->pri_shift = INT8_MAX;
    queue_init(&pset->processors);
    pset->processor_count = 0;
    queue_init(&pset->tasks);
    pset->task_count = 0;
    queue_init(&pset->threads);
    pset->thread_count = 0;
    pset->ref_count = 1;
    pset->active = TRUE;
    mutex_init(&pset->lock, 0);
    pset->pset_self = IP_NULL;
    pset->pset_name_self = IP_NULL;
    pset->timeshare_quanta = 1;
}
/*
 *	Initialize the given processor structure for the processor in
 *	the slot specified by slot_num.
 */
void
processor_init(
    register processor_t    p,
    int                     slot_num)
{
    register int    i;

    /* setup run queue */
    p->runq.highq = IDLEPRI;
    for (i = 0; i < NRQBM; i++)
        p->runq.bitmap[i] = 0;
    setbit(MAXPRI - IDLEPRI, p->runq.bitmap);
    p->runq.urgency = p->runq.count = 0;
    for (i = 0; i < NRQS; i++)
        queue_init(&p->runq.queues[i]);

    p->state = PROCESSOR_OFF_LINE;
    p->active_thread = p->next_thread = p->idle_thread = THREAD_NULL;
    p->processor_set = PROCESSOR_SET_NULL;
    p->current_pri = MINPRI;
    p->deadline = UINT64_MAX;
    timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);
    p->timeslice = 0;
    simple_lock_init(&p->lock, 0);
    p->processor_self = IP_NULL;
    processor_data_init(p);
    PROCESSOR_DATA(p, slot_num) = slot_num;

    /* append the processor to the global processor list */
    simple_lock(&processor_list_lock);
    if (processor_list == NULL)
        processor_list = p;
    else
        processor_list_tail->processor_list = p;
    processor_list_tail = p;
    processor_count++;
    p->processor_list = NULL;
    simple_unlock(&processor_list_lock);
}
/*
 *	Remove one reference to the processor set.  Destroy processor_set
 *	if this was the last reference.
 */
void
pset_deallocate(
    processor_set_t     pset)
{
    if (pset == PROCESSOR_SET_NULL)
        return;

    assert(pset == &default_pset);
    return;
}

/*
 *	Add one reference to the processor set.
 */
void
pset_reference(
    processor_set_t     pset)
{
    if (pset == PROCESSOR_SET_NULL)
        return;

    assert(pset == &default_pset);
}

#define pset_reference_locked(pset)	assert(pset == &default_pset)
/*
 *	pset_remove_processor() removes a processor from a processor_set.
 *	It can only be called on the current processor.  Caller must
 *	hold lock on current processor and processor set.
 */
void
pset_remove_processor(
    processor_set_t     pset,
    processor_t         processor)
{
    if (pset != processor->processor_set)
        panic("pset_remove_processor: wrong pset");

    queue_remove(&pset->processors, processor, processor_t, processors);
    processor->processor_set = PROCESSOR_SET_NULL;
    pset->processor_count--;
    timeshare_quanta_update(pset);
}
/*
 *	pset_add_processor() adds a processor to a processor_set.
 *	It can only be called on the current processor.  Caller must
 *	hold lock on current processor and on pset.  No reference counting on
 *	processors.  Processor reference to pset is implicit.
 */
void
pset_add_processor(
    processor_set_t     pset,
    processor_t         processor)
{
    queue_enter(&pset->processors, processor, processor_t, processors);
    processor->processor_set = pset;
    pset->processor_count++;
    timeshare_quanta_update(pset);
}
/*
 *	pset_remove_task() removes a task from a processor_set.
 *	Caller must hold locks on pset and task (unless task has
 *	no references left, in which case just the pset lock is
 *	needed).  Pset reference count is not decremented;
 *	caller must explicitly pset_deallocate.
 */
void
pset_remove_task(
    processor_set_t     pset,
    task_t              task)
{
    if (pset != task->processor_set)
        return;

    queue_remove(&pset->tasks, task, task_t, pset_tasks);
    pset->task_count--;
}
/*
 *	pset_add_task() adds a task to a processor_set.
 *	Caller must hold locks on pset and task.  Pset references to
 *	tasks are implicit.
 */
void
pset_add_task(
    processor_set_t     pset,
    task_t              task)
{
    queue_enter(&pset->tasks, task, task_t, pset_tasks);
    task->processor_set = pset;
    pset->task_count++;
    pset_reference_locked(pset);
}
/*
 *	pset_remove_thread() removes a thread from a processor_set.
 *	Caller must hold locks on pset and thread (but only if thread
 *	has outstanding references that could be used to lookup the pset).
 *	The pset reference count is not decremented; caller must explicitly
 *	pset_deallocate.
 */
void
pset_remove_thread(
    processor_set_t     pset,
    thread_t            thread)
{
    queue_remove(&pset->threads, thread, thread_t, pset_threads);
    pset->thread_count--;
}
/*
 *	pset_add_thread() adds a thread to a processor_set.
 *	Caller must hold locks on pset and thread.  Pset references to
 *	threads are implicit.
 */
void
pset_add_thread(
    processor_set_t     pset,
    thread_t            thread)
{
    queue_enter(&pset->threads, thread, thread_t, pset_threads);
    thread->processor_set = pset;
    pset->thread_count++;
    pset_reference_locked(pset);
}
/*
 *	thread_change_psets() changes the pset of a thread.  Caller must
 *	hold locks on both psets and thread.  The old pset must be
 *	explicitly pset_deallocate()'ed by caller.
 */
void
thread_change_psets(
    thread_t            thread,
    processor_set_t     old_pset,
    processor_set_t     new_pset)
{
    queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
    old_pset->thread_count--;
    queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
    thread->processor_set = new_pset;
    new_pset->thread_count++;
    pset_reference_locked(new_pset);
}
kern_return_t
processor_info_count(
    processor_flavor_t      flavor,
    mach_msg_type_number_t  *count)
{
    switch (flavor) {

    case PROCESSOR_BASIC_INFO:
        *count = PROCESSOR_BASIC_INFO_COUNT;
        break;

    case PROCESSOR_CPU_LOAD_INFO:
        *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
        break;

    default:
        return (cpu_info_count(flavor, count));
    }

    return (KERN_SUCCESS);
}
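/*
 * Illustrative usage only, not part of the original file: a user-level
 * caller normally sizes its buffer from the counts reported above and
 * then invokes the processor_info() MIG routine.  Here "proc" is assumed
 * to be a processor port obtained via host_processors() on the privileged
 * host port:
 *
 *	processor_basic_info_data_t  basic;
 *	mach_msg_type_number_t       count = PROCESSOR_BASIC_INFO_COUNT;
 *	host_t                       host;
 *	kern_return_t                kr;
 *
 *	kr = processor_info(proc, PROCESSOR_BASIC_INFO, &host,
 *	                    (processor_info_t)&basic, &count);
 */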
kern_return_t
processor_info(
    register processor_t    processor,
    processor_flavor_t      flavor,
    host_t                  *host,
    processor_info_t        info,
    mach_msg_type_number_t  *count)
{
    register int    i, slot_num, state;
    kern_return_t   result;

    if (processor == PROCESSOR_NULL)
        return (KERN_INVALID_ARGUMENT);

    slot_num = PROCESSOR_DATA(processor, slot_num);

    switch (flavor) {

    case PROCESSOR_BASIC_INFO:
    {
        register processor_basic_info_t     basic_info;

        if (*count < PROCESSOR_BASIC_INFO_COUNT)
            return (KERN_FAILURE);

        basic_info = (processor_basic_info_t) info;
        basic_info->cpu_type = slot_type(slot_num);
        basic_info->cpu_subtype = slot_subtype(slot_num);
        state = processor->state;
        if (state == PROCESSOR_OFF_LINE)
            basic_info->running = FALSE;
        else
            basic_info->running = TRUE;
        basic_info->slot_num = slot_num;
        if (processor == master_processor)
            basic_info->is_master = TRUE;
        else
            basic_info->is_master = FALSE;

        *count = PROCESSOR_BASIC_INFO_COUNT;
        *host = &realhost;

        return (KERN_SUCCESS);
    }

    case PROCESSOR_CPU_LOAD_INFO:
    {
        register processor_cpu_load_info_t  cpu_load_info;
        register integer_t  *cpu_ticks;

        if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
            return (KERN_FAILURE);

        cpu_load_info = (processor_cpu_load_info_t) info;
        cpu_ticks = PROCESSOR_DATA(processor, cpu_ticks);
        for (i = 0; i < CPU_STATE_MAX; i++)
            cpu_load_info->cpu_ticks[i] = cpu_ticks[i];

        *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
        *host = &realhost;

        return (KERN_SUCCESS);
    }

    default:
        result = cpu_info(flavor, slot_num, info, count);
        if (result == KERN_SUCCESS)
            *host = &realhost;

        return (result);
    }
}
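/*
 * Illustrative arithmetic, not part of the original file: the cpu_ticks
 * counters returned for PROCESSOR_CPU_LOAD_INFO are cumulative, so a
 * monitor typically differences two snapshots and computes, for the
 * sampling interval,
 *
 *	busy = d(CPU_STATE_USER) + d(CPU_STATE_SYSTEM) + d(CPU_STATE_NICE)
 *	utilization = busy / (busy + d(CPU_STATE_IDLE))
 *
 * where d() denotes the per-state tick delta between the two snapshots.
 */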
kern_return_t
processor_start(
    processor_t         processor)
{
    kern_return_t       result;
    thread_t            thread;
    spl_t               s;

    if (processor == PROCESSOR_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (processor == master_processor) {
        thread_t        self = current_thread();
        processor_t     prev;

        prev = thread_bind(self, processor);
        thread_block(THREAD_CONTINUE_NULL);

        result = cpu_start(PROCESSOR_DATA(processor, slot_num));

        thread_bind(self, prev);

        return (result);
    }

    s = splsched();
    processor_lock(processor);
    if (processor->state != PROCESSOR_OFF_LINE) {
        processor_unlock(processor);
        splx(s);

        return (KERN_FAILURE);
    }

    processor->state = PROCESSOR_START;
    processor_unlock(processor);
    splx(s);

    /*
     *	Create the idle processor thread.
     */
    if (processor->idle_thread == THREAD_NULL) {
        result = idle_thread_create(processor);
        if (result != KERN_SUCCESS) {
            s = splsched();
            processor_lock(processor);
            processor->state = PROCESSOR_OFF_LINE;
            processor_unlock(processor);
            splx(s);

            return (result);
        }
    }

    /*
     *	If there is no active thread, the processor
     *	has never been started.  Create a dedicated
     *	start up thread.
     */
    if (processor->active_thread == THREAD_NULL &&
        processor->next_thread == THREAD_NULL) {
        result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
        if (result != KERN_SUCCESS) {
            s = splsched();
            processor_lock(processor);
            processor->state = PROCESSOR_OFF_LINE;
            processor_unlock(processor);
            splx(s);

            return (result);
        }

        s = splsched();
        thread_lock(thread);
        thread->bound_processor = processor;
        processor->next_thread = thread;
        thread->state = TH_RUN;
        thread_unlock(thread);
        splx(s);

        thread_deallocate(thread);
    }

    if (processor->processor_self == IP_NULL)
        ipc_processor_init(processor);

    result = cpu_start(PROCESSOR_DATA(processor, slot_num));
    if (result != KERN_SUCCESS) {
        s = splsched();
        processor_lock(processor);
        processor->state = PROCESSOR_OFF_LINE;
        timer_call_shutdown(processor);
        processor_unlock(processor);
        splx(s);

        return (result);
    }

    ipc_processor_enable(processor);

    return (KERN_SUCCESS);
}
kern_return_t
processor_exit(
    processor_t         processor)
{
    if (processor == PROCESSOR_NULL)
        return(KERN_INVALID_ARGUMENT);

    return(processor_shutdown(processor));
}
kern_return_t
processor_control(
    processor_t             processor,
    processor_info_t        info,
    mach_msg_type_number_t  count)
{
    if (processor == PROCESSOR_NULL)
        return(KERN_INVALID_ARGUMENT);

    return(cpu_control(PROCESSOR_DATA(processor, slot_num), info, count));
}
/*
 *	Calculate the appropriate timesharing quanta based on set load.
 */
void
timeshare_quanta_update(
    processor_set_t     pset)
{
    int     pcount = pset->processor_count;
    int     i = pset->runq.count;

    if (i >= pcount)
        i = 1;
    else
    if (i <= 1)
        i = pcount;
    else
        i = (pcount + (i / 2)) / i;

    pset->timeshare_quanta = i;
}
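/*
 * Illustrative arithmetic only, not part of the original source: with
 * 4 processors and a run queue depth of 3 the expression above yields
 * (4 + 3/2) / 3 = (4 + 1) / 3 = 1 quantum, while 8 processors at the
 * same depth yield (8 + 1) / 3 = 3 quanta, so a lightly loaded set
 * hands out proportionally longer timesharing quanta.
 */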
kern_return_t
processor_set_create(
    __unused host_t             host,
    __unused processor_set_t    *new_set,
    __unused processor_set_t    *new_name)
{
    return(KERN_FAILURE);
}

kern_return_t
processor_set_destroy(
    __unused processor_set_t    pset)
{
    return(KERN_FAILURE);
}
kern_return_t
processor_get_assignment(
    processor_t         processor,
    processor_set_t     *pset)
{
    int     state;

    state = processor->state;
    if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
        return(KERN_FAILURE);

    *pset = processor->processor_set;
    pset_reference(*pset);
    return(KERN_SUCCESS);
}
kern_return_t
processor_set_info(
    processor_set_t         pset,
    int                     flavor,
    host_t                  *host,
    processor_set_info_t    info,
    mach_msg_type_number_t  *count)
{
    if (pset == PROCESSOR_SET_NULL)
        return(KERN_INVALID_ARGUMENT);

    if (flavor == PROCESSOR_SET_BASIC_INFO) {
        register processor_set_basic_info_t basic_info;

        if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
            return(KERN_FAILURE);

        basic_info = (processor_set_basic_info_t) info;
        basic_info->processor_count = pset->processor_count;
        basic_info->default_policy = POLICY_TIMESHARE;

        *count = PROCESSOR_SET_BASIC_INFO_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
        register policy_timeshare_base_t    ts_base;

        if (*count < POLICY_TIMESHARE_BASE_COUNT)
            return(KERN_FAILURE);

        ts_base = (policy_timeshare_base_t) info;
        ts_base->base_priority = BASEPRI_DEFAULT;

        *count = POLICY_TIMESHARE_BASE_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
        register policy_fifo_base_t     fifo_base;

        if (*count < POLICY_FIFO_BASE_COUNT)
            return(KERN_FAILURE);

        fifo_base = (policy_fifo_base_t) info;
        fifo_base->base_priority = BASEPRI_DEFAULT;

        *count = POLICY_FIFO_BASE_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
        register policy_rr_base_t       rr_base;

        if (*count < POLICY_RR_BASE_COUNT)
            return(KERN_FAILURE);

        rr_base = (policy_rr_base_t) info;
        rr_base->base_priority = BASEPRI_DEFAULT;
        rr_base->quantum = 1;

        *count = POLICY_RR_BASE_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
        register policy_timeshare_limit_t   ts_limit;

        if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
            return(KERN_FAILURE);

        ts_limit = (policy_timeshare_limit_t) info;
        ts_limit->max_priority = MAXPRI_KERNEL;

        *count = POLICY_TIMESHARE_LIMIT_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
        register policy_fifo_limit_t    fifo_limit;

        if (*count < POLICY_FIFO_LIMIT_COUNT)
            return(KERN_FAILURE);

        fifo_limit = (policy_fifo_limit_t) info;
        fifo_limit->max_priority = MAXPRI_KERNEL;

        *count = POLICY_FIFO_LIMIT_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_RR_LIMITS) {
        register policy_rr_limit_t      rr_limit;

        if (*count < POLICY_RR_LIMIT_COUNT)
            return(KERN_FAILURE);

        rr_limit = (policy_rr_limit_t) info;
        rr_limit->max_priority = MAXPRI_KERNEL;

        *count = POLICY_RR_LIMIT_COUNT;
        *host = &realhost;
        return(KERN_SUCCESS);
    }
    else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
        register int    *enabled;

        if (*count < (sizeof(*enabled)/sizeof(int)))
            return(KERN_FAILURE);

        enabled = (int *) info;
        *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

        *count = sizeof(*enabled)/sizeof(int);
        *host = &realhost;
        return(KERN_SUCCESS);
    }

    *host = HOST_NULL;
    return(KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
    processor_set_t         pset,
    int                     flavor,
    processor_set_info_t    info,
    mach_msg_type_number_t  *count)
{
    if (pset == PROCESSOR_SET_NULL)
        return (KERN_INVALID_PROCESSOR_SET);

    if (flavor == PROCESSOR_SET_LOAD_INFO) {
        register processor_set_load_info_t  load_info;

        if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
            return(KERN_FAILURE);

        load_info = (processor_set_load_info_t) info;

        load_info->task_count = pset->task_count;
        load_info->thread_count = pset->thread_count;
        load_info->mach_factor = pset->mach_factor;
        load_info->load_average = pset->load_average;

        *count = PROCESSOR_SET_LOAD_INFO_COUNT;
        return(KERN_SUCCESS);
    }

    return(KERN_INVALID_ARGUMENT);
}
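/*
 * Interpretive note (an assumption, not stated in this file): mach_factor
 * and load_average are reported as fixed-point values scaled by LOAD_SCALE
 * (1000), so a load_average of 2300 corresponds to a conventional load
 * average of roughly 2.3.
 */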
/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_max_priority(
    __unused processor_set_t    pset,
    __unused int                max_priority,
    __unused boolean_t          change_threads)
{
    return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_policy_enable:
 *
 *	Allow indicated policy on processor set.
 */
kern_return_t
processor_set_policy_enable(
    __unused processor_set_t    pset,
    __unused int                policy)
{
    return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 */
kern_return_t
processor_set_policy_disable(
    __unused processor_set_t    pset,
    __unused int                policy,
    __unused boolean_t          change_threads)
{
    return (KERN_INVALID_ARGUMENT);
}
#define THING_TASK      0
#define THING_THREAD    1

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
    processor_set_t         pset,
    mach_port_t             **thing_list,
    mach_msg_type_number_t  *count,
    int                     type)
{
    unsigned int    actual;     /* this many things */
    unsigned int    maxthings;
    unsigned int    i;

    vm_size_t   size, size_needed;
    void        *addr;

    if (pset == PROCESSOR_SET_NULL)
        return (KERN_INVALID_ARGUMENT);

    size = 0; addr = 0;

    for (;;) {
        pset_lock(pset);
        if (!pset->active) {
            pset_unlock(pset);

            return (KERN_FAILURE);
        }

        if (type == THING_TASK)
            maxthings = pset->task_count;
        else
            maxthings = pset->thread_count;

        /* do we have the memory we need? */

        size_needed = maxthings * sizeof (mach_port_t);
        if (size_needed <= size)
            break;

        /* unlock the pset and allocate more memory */
        pset_unlock(pset);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return (KERN_RESOURCE_SHORTAGE);
    }

    /* OK, have memory and the processor_set is locked & active */

    actual = 0;
    switch (type) {

    case THING_TASK:
    {
        task_t      task, *tasks = (task_t *)addr;

        for (task = (task_t)queue_first(&pset->tasks);
                !queue_end(&pset->tasks, (queue_entry_t)task);
                    task = (task_t)queue_next(&task->pset_tasks)) {
            task_reference_internal(task);
            tasks[actual++] = task;
        }

        break;
    }

    case THING_THREAD:
    {
        thread_t    thread, *threads = (thread_t *)addr;

        for (i = 0, thread = (thread_t)queue_first(&pset->threads);
                !queue_end(&pset->threads, (queue_entry_t)thread);
                    thread = (thread_t)queue_next(&thread->pset_threads)) {
            thread_reference_internal(thread);
            threads[actual++] = thread;
        }

        break;
    }
    }

    pset_unlock(pset);

    if (actual < maxthings)
        size_needed = actual * sizeof (mach_port_t);

    if (actual == 0) {
        /* no things, so return null pointer and deallocate memory */
        *thing_list = 0;
        *count = 0;

        if (size != 0)
            kfree(addr, size);
    }
    else {
        /* if we allocated too much, must copy */

        if (size_needed < size) {
            void    *newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                switch (type) {

                case THING_TASK:
                {
                    task_t      *tasks = (task_t *)addr;

                    for (i = 0; i < actual; i++)
                        task_deallocate(tasks[i]);
                    break;
                }

                case THING_THREAD:
                {
                    thread_t    *threads = (thread_t *)addr;

                    for (i = 0; i < actual; i++)
                        thread_deallocate(threads[i]);
                    break;
                }
                }

                kfree(addr, size);
                return (KERN_RESOURCE_SHORTAGE);
            }

            bcopy((void *) addr, (void *) newaddr, size_needed);
            kfree(addr, size);
            addr = newaddr;
        }

        *thing_list = (mach_port_t *)addr;
        *count = actual;

        /* do the conversion that Mig should handle */

        switch (type) {

        case THING_TASK:
        {
            task_t      *tasks = (task_t *)addr;

            for (i = 0; i < actual; i++)
                (*thing_list)[i] = convert_task_to_port(tasks[i]);
            break;
        }

        case THING_THREAD:
        {
            thread_t    *threads = (thread_t *)addr;

            for (i = 0; i < actual; i++)
                (*thing_list)[i] = convert_thread_to_port(threads[i]);
            break;
        }
        }
    }

    return (KERN_SUCCESS);
}
/*
 *	processor_set_tasks:
 *
 *	List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
    processor_set_t         pset,
    task_array_t            *task_list,
    mach_msg_type_number_t  *count)
{
    return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
}

/*
 *	processor_set_threads:
 *
 *	List all threads in the processor set.
 */
kern_return_t
processor_set_threads(
    processor_set_t         pset,
    thread_array_t          *thread_list,
    mach_msg_type_number_t  *count)
{
    return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}
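/*
 * Illustrative user-space path, not part of the original file: these MIG
 * routines are normally reached through the host's processor set ports,
 * roughly as follows ("host_priv" is assumed to be a host privileged port
 * obtained elsewhere, and each returned task port must eventually be
 * deallocated by the caller):
 *
 *	processor_set_name_array_t  psets;
 *	mach_msg_type_number_t      pset_count, task_count;
 *	processor_set_t             pset;
 *	task_array_t                tasks;
 *
 *	host_processor_sets(mach_host_self(), &psets, &pset_count);
 *	host_processor_set_priv(host_priv, psets[0], &pset);
 *	processor_set_tasks(pset, &tasks, &task_count);
 */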
/*
 *	processor_set_base:
 *
 *	Specify per-policy base priority for a processor set.  Set processor
 *	set default policy to the given policy.  This affects newly created
 *	and assigned threads.  Optionally change existing ones.
 */
kern_return_t
processor_set_base(
    __unused processor_set_t    pset,
    __unused policy_t           policy,
    __unused policy_base_t      base,
    __unused boolean_t          change)
{
    return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_limit:
 *
 *	Specify per-policy limits for a processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_limit(
    __unused processor_set_t    pset,
    __unused policy_t           policy,
    __unused policy_limit_t     limit,
    __unused boolean_t          change)
{
    return (KERN_POLICY_LIMIT);
}

/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 */
kern_return_t
processor_set_policy_control(
    __unused processor_set_t        pset,
    __unused int                    flavor,
    __unused processor_set_info_t   policy_info,
    __unused mach_msg_type_number_t count,
    __unused boolean_t              change)
{
    return (KERN_INVALID_ARGUMENT);
}