/*
 * osfmk/kern/processor.c
 * (apple/xnu source, blob 0a994754e5d5734639349bfa2ed265addf6daf06)
 */
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58 /*
59 */
60
61 /*
62 * processor.c: processor and processor_set manipulation routines.
63 */
64
65 #include <mach/boolean.h>
66 #include <mach/policy.h>
67 #include <mach/processor.h>
68 #include <mach/processor_info.h>
69 #include <mach/vm_param.h>
70 #include <kern/cpu_number.h>
71 #include <kern/host.h>
72 #include <kern/machine.h>
73 #include <kern/misc_protos.h>
74 #include <kern/processor.h>
75 #include <kern/sched.h>
76 #include <kern/task.h>
77 #include <kern/thread.h>
78 #include <kern/ipc_host.h>
79 #include <kern/ipc_tt.h>
80 #include <ipc/ipc_port.h>
81 #include <kern/kalloc.h>
82
83 /*
84 * Exported interface
85 */
86 #include <mach/mach_host_server.h>
87 #include <mach/processor_set_server.h>
88
89 /*
90 * Exported variables.
91 */
92 struct processor_set default_pset;
93
94 processor_t processor_list;
95 unsigned int processor_count;
96 static processor_t processor_list_tail;
97 decl_simple_lock_data(,processor_list_lock)
98
99 processor_t master_processor;
100 int master_cpu = 0;
101
102 /* Forwards */
103 kern_return_t processor_set_base(
104 processor_set_t pset,
105 policy_t policy,
106 policy_base_t base,
107 boolean_t change);
108
109 kern_return_t processor_set_limit(
110 processor_set_t pset,
111 policy_t policy,
112 policy_limit_t limit,
113 boolean_t change);
114
115 kern_return_t processor_set_things(
116 processor_set_t pset,
117 mach_port_t **thing_list,
118 mach_msg_type_number_t *count,
119 int type);
120
/*
 *	processor_bootstrap:
 *
 *	Early bootstrap of processor bookkeeping.  Initializes the
 *	global processor-list lock, resolves the master (boot) cpu's
 *	processor structure, and initializes it.  The lock must exist
 *	first because processor_init() appends to the global list
 *	under processor_list_lock.
 */
void
processor_bootstrap(void)
{
	simple_lock_init(&processor_list_lock, 0);

	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu);
}
130
131 /*
132 * Initialize the given processor_set structure.
133 */
134
135 void
136 pset_init(
137 register processor_set_t pset)
138 {
139 register int i;
140
141 /* setup run queue */
142 pset->runq.highq = IDLEPRI;
143 for (i = 0; i < NRQBM; i++)
144 pset->runq.bitmap[i] = 0;
145 setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);
146 pset->runq.urgency = pset->runq.count = 0;
147 for (i = 0; i < NRQS; i++)
148 queue_init(&pset->runq.queues[i]);
149
150 queue_init(&pset->idle_queue);
151 pset->idle_count = 0;
152 queue_init(&pset->active_queue);
153 simple_lock_init(&pset->sched_lock, 0);
154 pset->run_count = pset->share_count = 0;
155 pset->mach_factor = pset->load_average = 0;
156 pset->pri_shift = INT8_MAX;
157 queue_init(&pset->processors);
158 pset->processor_count = 0;
159 queue_init(&pset->tasks);
160 pset->task_count = 0;
161 queue_init(&pset->threads);
162 pset->thread_count = 0;
163 pset->ref_count = 1;
164 pset->active = TRUE;
165 mutex_init(&pset->lock, 0);
166 pset->pset_self = IP_NULL;
167 pset->pset_name_self = IP_NULL;
168 pset->timeshare_quanta = 1;
169 }
170
/*
 *	processor_init:
 *
 *	Initialize the given processor structure for the processor in
 *	the slot specified by slot_num, and append it to the global
 *	processor list under processor_list_lock.
 */
void
processor_init(
	register processor_t	p,
	int			slot_num)
{
	register int	i;

	/* setup run queue: empty, with only the idle level marked */
	p->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		p->runq.bitmap[i] = 0;
	setbit(MAXPRI - IDLEPRI, p->runq.bitmap);
	p->runq.urgency = p->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&p->runq.queues[i]);

	/* the processor comes up off line with no threads and no pset */
	p->state = PROCESSOR_OFF_LINE;
	p->active_thread = p->next_thread = p->idle_thread = THREAD_NULL;
	p->processor_set = PROCESSOR_SET_NULL;
	p->current_pri = MINPRI;
	p->deadline = UINT64_MAX;
	timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);
	p->timeslice = 0;
	simple_lock_init(&p->lock, 0);
	p->processor_self = IP_NULL;
	processor_data_init(p);
	PROCESSOR_DATA(p, slot_num) = slot_num;

	/* append to the singly-linked global processor list */
	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = p;
	else
		processor_list_tail->processor_list = p;
	processor_list_tail = p;
	processor_count++;
	p->processor_list = NULL;
	simple_unlock(&processor_list_lock);
}
213
214 /*
215 * pset_deallocate:
216 *
217 * Remove one reference to the processor set. Destroy processor_set
218 * if this was the last reference.
219 */
220 void
221 pset_deallocate(
222 processor_set_t pset)
223 {
224 if (pset == PROCESSOR_SET_NULL)
225 return;
226
227 assert(pset == &default_pset);
228 return;
229 }
230
231 /*
232 * pset_reference:
233 *
234 * Add one reference to the processor set.
235 */
236 void
237 pset_reference(
238 processor_set_t pset)
239 {
240 if (pset == PROCESSOR_SET_NULL)
241 return;
242
243 assert(pset == &default_pset);
244 }
245
246 #define pset_reference_locked(pset) assert(pset == &default_pset)
247
248 /*
249 * pset_remove_processor() removes a processor from a processor_set.
250 * It can only be called on the current processor. Caller must
251 * hold lock on current processor and processor set.
252 */
253 void
254 pset_remove_processor(
255 processor_set_t pset,
256 processor_t processor)
257 {
258 if (pset != processor->processor_set)
259 panic("pset_remove_processor: wrong pset");
260
261 queue_remove(&pset->processors, processor, processor_t, processors);
262 processor->processor_set = PROCESSOR_SET_NULL;
263 pset->processor_count--;
264 timeshare_quanta_update(pset);
265 }
266
267 /*
268 * pset_add_processor() adds a processor to a processor_set.
269 * It can only be called on the current processor. Caller must
270 * hold lock on curent processor and on pset. No reference counting on
271 * processors. Processor reference to pset is implicit.
272 */
273 void
274 pset_add_processor(
275 processor_set_t pset,
276 processor_t processor)
277 {
278 queue_enter(&pset->processors, processor, processor_t, processors);
279 processor->processor_set = pset;
280 pset->processor_count++;
281 timeshare_quanta_update(pset);
282 }
283
284 /*
285 * pset_remove_task() removes a task from a processor_set.
286 * Caller must hold locks on pset and task (unless task has
287 * no references left, in which case just the pset lock is
288 * needed). Pset reference count is not decremented;
289 * caller must explicitly pset_deallocate.
290 */
291 void
292 pset_remove_task(
293 processor_set_t pset,
294 task_t task)
295 {
296 if (pset != task->processor_set)
297 return;
298
299 queue_remove(&pset->tasks, task, task_t, pset_tasks);
300 pset->task_count--;
301 }
302
303 /*
304 * pset_add_task() adds a task to a processor_set.
305 * Caller must hold locks on pset and task. Pset references to
306 * tasks are implicit.
307 */
308 void
309 pset_add_task(
310 processor_set_t pset,
311 task_t task)
312 {
313 queue_enter(&pset->tasks, task, task_t, pset_tasks);
314 task->processor_set = pset;
315 pset->task_count++;
316 pset_reference_locked(pset);
317 }
318
319 /*
320 * pset_remove_thread() removes a thread from a processor_set.
321 * Caller must hold locks on pset and thread (but only if thread
322 * has outstanding references that could be used to lookup the pset).
323 * The pset reference count is not decremented; caller must explicitly
324 * pset_deallocate.
325 */
326 void
327 pset_remove_thread(
328 processor_set_t pset,
329 thread_t thread)
330 {
331 queue_remove(&pset->threads, thread, thread_t, pset_threads);
332 pset->thread_count--;
333 }
334
335 /*
336 * pset_add_thread() adds a thread to a processor_set.
337 * Caller must hold locks on pset and thread. Pset references to
338 * threads are implicit.
339 */
340 void
341 pset_add_thread(
342 processor_set_t pset,
343 thread_t thread)
344 {
345 queue_enter(&pset->threads, thread, thread_t, pset_threads);
346 thread->processor_set = pset;
347 pset->thread_count++;
348 pset_reference_locked(pset);
349 }
350
351 /*
352 * thread_change_psets() changes the pset of a thread. Caller must
353 * hold locks on both psets and thread. The old pset must be
354 * explicitly pset_deallocat()'ed by caller.
355 */
356 void
357 thread_change_psets(
358 thread_t thread,
359 processor_set_t old_pset,
360 processor_set_t new_pset)
361 {
362 queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
363 old_pset->thread_count--;
364 queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
365 thread->processor_set = new_pset;
366 new_pset->thread_count++;
367 pset_reference_locked(new_pset);
368 }
369
370
371 kern_return_t
372 processor_info_count(
373 processor_flavor_t flavor,
374 mach_msg_type_number_t *count)
375 {
376 switch (flavor) {
377
378 case PROCESSOR_BASIC_INFO:
379 *count = PROCESSOR_BASIC_INFO_COUNT;
380 break;
381
382 case PROCESSOR_CPU_LOAD_INFO:
383 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
384 break;
385
386 default:
387 return (cpu_info_count(flavor, count));
388 }
389
390 return (KERN_SUCCESS);
391 }
392
393
394 kern_return_t
395 processor_info(
396 register processor_t processor,
397 processor_flavor_t flavor,
398 host_t *host,
399 processor_info_t info,
400 mach_msg_type_number_t *count)
401 {
402 register int i, slot_num, state;
403 kern_return_t result;
404
405 if (processor == PROCESSOR_NULL)
406 return (KERN_INVALID_ARGUMENT);
407
408 slot_num = PROCESSOR_DATA(processor, slot_num);
409
410 switch (flavor) {
411
412 case PROCESSOR_BASIC_INFO:
413 {
414 register processor_basic_info_t basic_info;
415
416 if (*count < PROCESSOR_BASIC_INFO_COUNT)
417 return (KERN_FAILURE);
418
419 basic_info = (processor_basic_info_t) info;
420 basic_info->cpu_type = slot_type(slot_num);
421 basic_info->cpu_subtype = slot_subtype(slot_num);
422 state = processor->state;
423 if (state == PROCESSOR_OFF_LINE)
424 basic_info->running = FALSE;
425 else
426 basic_info->running = TRUE;
427 basic_info->slot_num = slot_num;
428 if (processor == master_processor)
429 basic_info->is_master = TRUE;
430 else
431 basic_info->is_master = FALSE;
432
433 *count = PROCESSOR_BASIC_INFO_COUNT;
434 *host = &realhost;
435
436 return (KERN_SUCCESS);
437 }
438
439 case PROCESSOR_CPU_LOAD_INFO:
440 {
441 register processor_cpu_load_info_t cpu_load_info;
442 register integer_t *cpu_ticks;
443
444 if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
445 return (KERN_FAILURE);
446
447 cpu_load_info = (processor_cpu_load_info_t) info;
448 cpu_ticks = PROCESSOR_DATA(processor, cpu_ticks);
449 for (i=0; i < CPU_STATE_MAX; i++)
450 cpu_load_info->cpu_ticks[i] = cpu_ticks[i];
451
452 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
453 *host = &realhost;
454
455 return (KERN_SUCCESS);
456 }
457
458 default:
459 result = cpu_info(flavor, slot_num, info, count);
460 if (result == KERN_SUCCESS)
461 *host = &realhost;
462
463 return (result);
464 }
465 }
466
/*
 *	processor_start:
 *
 *	Bring a processor online.  The master processor is handled
 *	specially: the calling thread binds to it and issues
 *	cpu_start() directly.  Any other processor is moved from
 *	PROCESSOR_OFF_LINE to PROCESSOR_START, gets an idle thread
 *	and (on first start) a dedicated start-up thread, and then
 *	the cpu is started.  Every failure path rolls the state back
 *	to PROCESSOR_OFF_LINE so a later start can retry.
 */
kern_return_t
processor_start(
	processor_t			processor)
{
	kern_return_t		result;
	thread_t			thread;
	spl_t				s;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		thread_t		self = current_thread();
		processor_t		prev;

		/* run on the master itself while (re)starting it */
		prev = thread_bind(self, processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(PROCESSOR_DATA(processor, slot_num));

		thread_bind(self, prev);

		return (result);
	}

	/* claim the processor: only an off-line processor may be started */
	s = splsched();
	processor_lock(processor);
	if (processor->state != PROCESSOR_OFF_LINE) {
		processor_unlock(processor);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	processor_unlock(processor);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			/* roll back to off line so a later start can retry */
			s = splsched();
			processor_lock(processor);
			processor->state = PROCESSOR_OFF_LINE;
			processor_unlock(processor);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			/* roll back to off line so a later start can retry */
			s = splsched();
			processor_lock(processor);
			processor->state = PROCESSOR_OFF_LINE;
			processor_unlock(processor);
			splx(s);

			return (result);
		}

		/* stage the start-up thread as the processor's first dispatch */
		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		/* release the reference returned by kernel_thread_create() */
		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(PROCESSOR_DATA(processor, slot_num));
	if (result != KERN_SUCCESS) {
		/* cpu refused to start: undo state and quantum timer */
		s = splsched();
		processor_lock(processor);
		processor->state = PROCESSOR_OFF_LINE;
		timer_call_shutdown(processor);
		processor_unlock(processor);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
569
570 kern_return_t
571 processor_exit(
572 processor_t processor)
573 {
574 if (processor == PROCESSOR_NULL)
575 return(KERN_INVALID_ARGUMENT);
576
577 return(processor_shutdown(processor));
578 }
579
580 kern_return_t
581 processor_control(
582 processor_t processor,
583 processor_info_t info,
584 mach_msg_type_number_t count)
585 {
586 if (processor == PROCESSOR_NULL)
587 return(KERN_INVALID_ARGUMENT);
588
589 return(cpu_control(PROCESSOR_DATA(processor, slot_num), info, count));
590 }
591
592 /*
593 * Calculate the appropriate timesharing quanta based on set load.
594 */
595
596 void
597 timeshare_quanta_update(
598 processor_set_t pset)
599 {
600 int pcount = pset->processor_count;
601 int i = pset->runq.count;
602
603 if (i >= pcount)
604 i = 1;
605 else
606 if (i <= 1)
607 i = pcount;
608 else
609 i = (pcount + (i / 2)) / i;
610
611 pset->timeshare_quanta = i;
612 }
613
614 kern_return_t
615 processor_set_create(
616 __unused host_t host,
617 __unused processor_set_t *new_set,
618 __unused processor_set_t *new_name)
619 {
620 return(KERN_FAILURE);
621 }
622
623 kern_return_t
624 processor_set_destroy(
625 __unused processor_set_t pset)
626 {
627 return(KERN_FAILURE);
628 }
629
630 kern_return_t
631 processor_get_assignment(
632 processor_t processor,
633 processor_set_t *pset)
634 {
635 int state;
636
637 state = processor->state;
638 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
639 return(KERN_FAILURE);
640
641 *pset = processor->processor_set;
642 pset_reference(*pset);
643 return(KERN_SUCCESS);
644 }
645
646 kern_return_t
647 processor_set_info(
648 processor_set_t pset,
649 int flavor,
650 host_t *host,
651 processor_set_info_t info,
652 mach_msg_type_number_t *count)
653 {
654 if (pset == PROCESSOR_SET_NULL)
655 return(KERN_INVALID_ARGUMENT);
656
657 if (flavor == PROCESSOR_SET_BASIC_INFO) {
658 register processor_set_basic_info_t basic_info;
659
660 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
661 return(KERN_FAILURE);
662
663 basic_info = (processor_set_basic_info_t) info;
664 basic_info->processor_count = pset->processor_count;
665 basic_info->default_policy = POLICY_TIMESHARE;
666
667 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
668 *host = &realhost;
669 return(KERN_SUCCESS);
670 }
671 else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
672 register policy_timeshare_base_t ts_base;
673
674 if (*count < POLICY_TIMESHARE_BASE_COUNT)
675 return(KERN_FAILURE);
676
677 ts_base = (policy_timeshare_base_t) info;
678 ts_base->base_priority = BASEPRI_DEFAULT;
679
680 *count = POLICY_TIMESHARE_BASE_COUNT;
681 *host = &realhost;
682 return(KERN_SUCCESS);
683 }
684 else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
685 register policy_fifo_base_t fifo_base;
686
687 if (*count < POLICY_FIFO_BASE_COUNT)
688 return(KERN_FAILURE);
689
690 fifo_base = (policy_fifo_base_t) info;
691 fifo_base->base_priority = BASEPRI_DEFAULT;
692
693 *count = POLICY_FIFO_BASE_COUNT;
694 *host = &realhost;
695 return(KERN_SUCCESS);
696 }
697 else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
698 register policy_rr_base_t rr_base;
699
700 if (*count < POLICY_RR_BASE_COUNT)
701 return(KERN_FAILURE);
702
703 rr_base = (policy_rr_base_t) info;
704 rr_base->base_priority = BASEPRI_DEFAULT;
705 rr_base->quantum = 1;
706
707 *count = POLICY_RR_BASE_COUNT;
708 *host = &realhost;
709 return(KERN_SUCCESS);
710 }
711 else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
712 register policy_timeshare_limit_t ts_limit;
713
714 if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
715 return(KERN_FAILURE);
716
717 ts_limit = (policy_timeshare_limit_t) info;
718 ts_limit->max_priority = MAXPRI_KERNEL;
719
720 *count = POLICY_TIMESHARE_LIMIT_COUNT;
721 *host = &realhost;
722 return(KERN_SUCCESS);
723 }
724 else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
725 register policy_fifo_limit_t fifo_limit;
726
727 if (*count < POLICY_FIFO_LIMIT_COUNT)
728 return(KERN_FAILURE);
729
730 fifo_limit = (policy_fifo_limit_t) info;
731 fifo_limit->max_priority = MAXPRI_KERNEL;
732
733 *count = POLICY_FIFO_LIMIT_COUNT;
734 *host = &realhost;
735 return(KERN_SUCCESS);
736 }
737 else if (flavor == PROCESSOR_SET_RR_LIMITS) {
738 register policy_rr_limit_t rr_limit;
739
740 if (*count < POLICY_RR_LIMIT_COUNT)
741 return(KERN_FAILURE);
742
743 rr_limit = (policy_rr_limit_t) info;
744 rr_limit->max_priority = MAXPRI_KERNEL;
745
746 *count = POLICY_RR_LIMIT_COUNT;
747 *host = &realhost;
748 return(KERN_SUCCESS);
749 }
750 else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
751 register int *enabled;
752
753 if (*count < (sizeof(*enabled)/sizeof(int)))
754 return(KERN_FAILURE);
755
756 enabled = (int *) info;
757 *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
758
759 *count = sizeof(*enabled)/sizeof(int);
760 *host = &realhost;
761 return(KERN_SUCCESS);
762 }
763
764
765 *host = HOST_NULL;
766 return(KERN_INVALID_ARGUMENT);
767 }
768
769 /*
770 * processor_set_statistics
771 *
772 * Returns scheduling statistics for a processor set.
773 */
774 kern_return_t
775 processor_set_statistics(
776 processor_set_t pset,
777 int flavor,
778 processor_set_info_t info,
779 mach_msg_type_number_t *count)
780 {
781 if (pset == PROCESSOR_SET_NULL)
782 return (KERN_INVALID_PROCESSOR_SET);
783
784 if (flavor == PROCESSOR_SET_LOAD_INFO) {
785 register processor_set_load_info_t load_info;
786
787 if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
788 return(KERN_FAILURE);
789
790 load_info = (processor_set_load_info_t) info;
791
792 pset_lock(pset);
793 load_info->task_count = pset->task_count;
794 load_info->thread_count = pset->thread_count;
795 load_info->mach_factor = pset->mach_factor;
796 load_info->load_average = pset->load_average;
797 pset_unlock(pset);
798
799 *count = PROCESSOR_SET_LOAD_INFO_COUNT;
800 return(KERN_SUCCESS);
801 }
802
803 return(KERN_INVALID_ARGUMENT);
804 }
805
806 /*
807 * processor_set_max_priority:
808 *
809 * Specify max priority permitted on processor set. This affects
810 * newly created and assigned threads. Optionally change existing
811 * ones.
812 */
813 kern_return_t
814 processor_set_max_priority(
815 __unused processor_set_t pset,
816 __unused int max_priority,
817 __unused boolean_t change_threads)
818 {
819 return (KERN_INVALID_ARGUMENT);
820 }
821
822 /*
823 * processor_set_policy_enable:
824 *
825 * Allow indicated policy on processor set.
826 */
827
828 kern_return_t
829 processor_set_policy_enable(
830 __unused processor_set_t pset,
831 __unused int policy)
832 {
833 return (KERN_INVALID_ARGUMENT);
834 }
835
836 /*
837 * processor_set_policy_disable:
838 *
839 * Forbid indicated policy on processor set. Time sharing cannot
840 * be forbidden.
841 */
842 kern_return_t
843 processor_set_policy_disable(
844 __unused processor_set_t pset,
845 __unused int policy,
846 __unused boolean_t change_threads)
847 {
848 return (KERN_INVALID_ARGUMENT);
849 }
850
851 #define THING_TASK 0
852 #define THING_THREAD 1
853
/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}.
 *
 *	Snapshots either the task list or the thread list of a pset
 *	into a kalloc'ed array, taking a reference on each element,
 *	then converts the elements in place to send rights for the
 *	caller.  On success, *thing_list/*count describe the array;
 *	ownership of the array and the ports passes to the caller
 *	(normally consumed by the MIG reply).
 */
kern_return_t
processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = 0;

	/*
	 *	Allocation loop: size the buffer from the current count,
	 *	allocate with the pset unlocked, then re-check under the
	 *	lock — the count may have grown while we were allocating.
	 */
	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);

			return (KERN_FAILURE);
		}

		if (type == THING_TASK)
			maxthings = pset->task_count;
		else
			maxthings = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the processor_set is locked & active */

	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t		task, *tasks = (task_t *)addr;

		/* take a reference on each task as we record it */
		for (task = (task_t)queue_first(&pset->tasks);
				!queue_end(&pset->tasks, (queue_entry_t)task);
					task = (task_t)queue_next(&task->pset_tasks)) {
			task_reference_internal(task);
			tasks[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t	thread, *threads = (thread_t *)addr;

		/* take a reference on each thread as we record it */
		for (i = 0, thread = (thread_t)queue_first(&pset->threads);
				!queue_end(&pset->threads, (queue_entry_t)thread);
					thread = (thread_t)queue_next(&thread->pset_threads)) {
			thread_reference_internal(thread);
			threads[actual++] = thread;
		}

		break;
	}
	}

	pset_unlock(pset);

	/* some things may have gone away while we were unlocked */
	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* out of memory: drop the references taken above */
				switch (type) {

				case THING_TASK:
				{
					task_t		*tasks = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t	*threads = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(threads[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {

		case THING_TASK:
		{
			task_t		*tasks = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(tasks[i]);
			break;
		}

		case THING_THREAD:
		{
			thread_t	*threads = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(threads[i]);
			break;
		}
		}
	}

	return (KERN_SUCCESS);
}
1025
1026
1027 /*
1028 * processor_set_tasks:
1029 *
1030 * List all tasks in the processor set.
1031 */
1032 kern_return_t
1033 processor_set_tasks(
1034 processor_set_t pset,
1035 task_array_t *task_list,
1036 mach_msg_type_number_t *count)
1037 {
1038 return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
1039 }
1040
1041 /*
1042 * processor_set_threads:
1043 *
1044 * List all threads in the processor set.
1045 */
1046 kern_return_t
1047 processor_set_threads(
1048 processor_set_t pset,
1049 thread_array_t *thread_list,
1050 mach_msg_type_number_t *count)
1051 {
1052 return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
1053 }
1054
1055 /*
1056 * processor_set_base:
1057 *
1058 * Specify per-policy base priority for a processor set. Set processor
1059 * set default policy to the given policy. This affects newly created
1060 * and assigned threads. Optionally change existing ones.
1061 */
1062 kern_return_t
1063 processor_set_base(
1064 __unused processor_set_t pset,
1065 __unused policy_t policy,
1066 __unused policy_base_t base,
1067 __unused boolean_t change)
1068 {
1069 return (KERN_INVALID_ARGUMENT);
1070 }
1071
1072 /*
1073 * processor_set_limit:
1074 *
1075 * Specify per-policy limits for a processor set. This affects
1076 * newly created and assigned threads. Optionally change existing
1077 * ones.
1078 */
1079 kern_return_t
1080 processor_set_limit(
1081 __unused processor_set_t pset,
1082 __unused policy_t policy,
1083 __unused policy_limit_t limit,
1084 __unused boolean_t change)
1085 {
1086 return (KERN_POLICY_LIMIT);
1087 }
1088
1089 /*
1090 * processor_set_policy_control
1091 *
1092 * Controls the scheduling attributes governing the processor set.
1093 * Allows control of enabled policies, and per-policy base and limit
1094 * priorities.
1095 */
1096 kern_return_t
1097 processor_set_policy_control(
1098 __unused processor_set_t pset,
1099 __unused int flavor,
1100 __unused processor_set_info_t policy_info,
1101 __unused mach_msg_type_number_t count,
1102 __unused boolean_t change)
1103 {
1104 return (KERN_INVALID_ARGUMENT);
1105 }