]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/processor.c
xnu-792.18.15.tar.gz
[apple/xnu.git] / osfmk / kern / processor.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 /*
60 * processor.c: processor and processor_set manipulation routines.
61 */
62
63 #include <mach/boolean.h>
64 #include <mach/policy.h>
65 #include <mach/processor.h>
66 #include <mach/processor_info.h>
67 #include <mach/vm_param.h>
68 #include <kern/cpu_number.h>
69 #include <kern/host.h>
70 #include <kern/machine.h>
71 #include <kern/misc_protos.h>
72 #include <kern/processor.h>
73 #include <kern/sched.h>
74 #include <kern/task.h>
75 #include <kern/thread.h>
76 #include <kern/ipc_host.h>
77 #include <kern/ipc_tt.h>
78 #include <ipc/ipc_port.h>
79 #include <kern/kalloc.h>
80
81 /*
82 * Exported interface
83 */
84 #include <mach/mach_host_server.h>
85 #include <mach/processor_set_server.h>
86
87 /*
88 * Exported variables.
89 */
90 struct processor_set default_pset;
91
92 processor_t processor_list;
93 unsigned int processor_count;
94 static processor_t processor_list_tail;
95 decl_simple_lock_data(,processor_list_lock)
96
97 processor_t master_processor;
98 int master_cpu = 0;
99
100 /* Forwards */
101 kern_return_t processor_set_base(
102 processor_set_t pset,
103 policy_t policy,
104 policy_base_t base,
105 boolean_t change);
106
107 kern_return_t processor_set_limit(
108 processor_set_t pset,
109 policy_t policy,
110 policy_limit_t limit,
111 boolean_t change);
112
113 kern_return_t processor_set_things(
114 processor_set_t pset,
115 mach_port_t **thing_list,
116 mach_msg_type_number_t *count,
117 int type);
118
/*
 *	processor_bootstrap:
 *
 *	One-time early-boot setup: initialize the global processor-list
 *	lock, resolve the master (boot) processor structure, and
 *	initialize its per-processor state.
 */
void
processor_bootstrap(void)
{
	/* must precede processor_init(), which takes this lock */
	simple_lock_init(&processor_list_lock, 0);

	/* master_cpu is the boot processor's slot number */
	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu);
}
128
/*
 *	Initialize the given processor_set structure.
 *
 *	Sets up the set's run queue (empty except for the idle level),
 *	processor/task/thread queues and counts, scheduling statistics,
 *	locks, and IPC ports.  The set starts active with one reference.
 */
void
pset_init(
	register processor_set_t	pset)
{
	register int	i;

	/* setup run queue */
	pset->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		pset->runq.bitmap[i] = 0;
	/* mark only the idle priority level as occupied in the bitmap */
	setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);
	pset->runq.urgency = pset->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&pset->runq.queues[i]);

	queue_init(&pset->idle_queue);
	pset->idle_count = 0;
	queue_init(&pset->active_queue);
	simple_lock_init(&pset->sched_lock, 0);
	pset->run_count = pset->share_count = 0;
	pset->mach_factor = pset->load_average = 0;
	/* INT8_MAX disables timeshare priority depression until load exists */
	pset->pri_shift = INT8_MAX;
	queue_init(&pset->processors);
	pset->processor_count = 0;
	queue_init(&pset->tasks);
	pset->task_count = 0;
	queue_init(&pset->threads);
	pset->thread_count = 0;
	pset->ref_count = 1;
	pset->active = TRUE;
	mutex_init(&pset->lock, 0);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->timeshare_quanta = 1;
}
168
/*
 *	Initialize the given processor structure for the processor in
 *	the slot specified by slot_num.
 *
 *	Sets up the processor's local run queue and scheduling state,
 *	then appends it to the global singly-linked processor list
 *	under processor_list_lock.
 */
void
processor_init(
	register processor_t	p,
	int			slot_num)
{
	register int	i;

	/* setup run queue (empty except for the idle level) */
	p->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		p->runq.bitmap[i] = 0;
	setbit(MAXPRI - IDLEPRI, p->runq.bitmap);
	p->runq.urgency = p->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&p->runq.queues[i]);

	p->state = PROCESSOR_OFF_LINE;
	p->active_thread = p->next_thread = p->idle_thread = THREAD_NULL;
	p->processor_set = PROCESSOR_SET_NULL;
	p->current_pri = MINPRI;
	/* no realtime deadline outstanding */
	p->deadline = UINT64_MAX;
	timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);
	p->timeslice = 0;
	simple_lock_init(&p->lock, 0);
	p->processor_self = IP_NULL;
	processor_data_init(p);
	PROCESSOR_DATA(p, slot_num) = slot_num;

	/* append to the global processor list (tail insertion) */
	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = p;
	else
		processor_list_tail->processor_list = p;
	processor_list_tail = p;
	processor_count++;
	p->processor_list = NULL;
	simple_unlock(&processor_list_lock);
}
211
212 /*
213 * pset_deallocate:
214 *
215 * Remove one reference to the processor set. Destroy processor_set
216 * if this was the last reference.
217 */
218 void
219 pset_deallocate(
220 processor_set_t pset)
221 {
222 if (pset == PROCESSOR_SET_NULL)
223 return;
224
225 assert(pset == &default_pset);
226 return;
227 }
228
229 /*
230 * pset_reference:
231 *
232 * Add one reference to the processor set.
233 */
234 void
235 pset_reference(
236 processor_set_t pset)
237 {
238 if (pset == PROCESSOR_SET_NULL)
239 return;
240
241 assert(pset == &default_pset);
242 }
243
/*
 * Locked-context variant of pset_reference(): with only the default
 * pset in existence, taking a reference reduces to asserting the
 * invariant.
 */
#define pset_reference_locked(pset) assert(pset == &default_pset)
245
/*
 *	pset_remove_processor() removes a processor from a processor_set.
 *	It can only be called on the current processor.  Caller must
 *	hold lock on current processor and processor set.
 */
void
pset_remove_processor(
	processor_set_t		pset,
	processor_t		processor)
{
	/* the processor must actually belong to this set */
	if (pset != processor->processor_set)
		panic("pset_remove_processor: wrong pset");

	queue_remove(&pset->processors, processor, processor_t, processors);
	processor->processor_set = PROCESSOR_SET_NULL;
	pset->processor_count--;
	/* processor count changed; recompute timesharing quanta */
	timeshare_quanta_update(pset);
}
264
/*
 *	pset_add_processor() adds a processor to a processor_set.
 *	It can only be called on the current processor.  Caller must
 *	hold lock on curent processor and on pset.  No reference counting on
 *	processors.  Processor reference to pset is implicit.
 */
void
pset_add_processor(
	processor_set_t		pset,
	processor_t		processor)
{
	queue_enter(&pset->processors, processor, processor_t, processors);
	processor->processor_set = pset;
	pset->processor_count++;
	/* processor count changed; recompute timesharing quanta */
	timeshare_quanta_update(pset);
}
281
282 /*
283 * pset_remove_task() removes a task from a processor_set.
284 * Caller must hold locks on pset and task (unless task has
285 * no references left, in which case just the pset lock is
286 * needed). Pset reference count is not decremented;
287 * caller must explicitly pset_deallocate.
288 */
289 void
290 pset_remove_task(
291 processor_set_t pset,
292 task_t task)
293 {
294 if (pset != task->processor_set)
295 return;
296
297 queue_remove(&pset->tasks, task, task_t, pset_tasks);
298 pset->task_count--;
299 }
300
/*
 *	pset_add_task() adds a task to a processor_set.
 *	Caller must hold locks on pset and task.  Pset references to
 *	tasks are implicit.
 */
void
pset_add_task(
	processor_set_t		pset,
	task_t			task)
{
	queue_enter(&pset->tasks, task, task_t, pset_tasks);
	task->processor_set = pset;
	pset->task_count++;
	/* accounts for the task's implicit reference on the pset */
	pset_reference_locked(pset);
}
316
/*
 *	pset_remove_thread() removes a thread from a processor_set.
 *	Caller must hold locks on pset and thread (but only if thread
 *	has outstanding references that could be used to lookup the pset).
 *	The pset reference count is not decremented; caller must explicitly
 *	pset_deallocate.
 *
 *	Note: thread->processor_set is deliberately left unchanged;
 *	the caller is responsible for re-homing or destroying the thread.
 */
void
pset_remove_thread(
	processor_set_t		pset,
	thread_t		thread)
{
	queue_remove(&pset->threads, thread, thread_t, pset_threads);
	pset->thread_count--;
}
332
/*
 *	pset_add_thread() adds a thread to a processor_set.
 *	Caller must hold locks on pset and thread.  Pset references to
 *	threads are implicit.
 */
void
pset_add_thread(
	processor_set_t		pset,
	thread_t		thread)
{
	queue_enter(&pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = pset;
	pset->thread_count++;
	/* accounts for the thread's implicit reference on the pset */
	pset_reference_locked(pset);
}
348
/*
 *	thread_change_psets() changes the pset of a thread.  Caller must
 *	hold locks on both psets and thread.  The old pset must be
 *	explicitly pset_deallocat()'ed by caller.
 */
void
thread_change_psets(
	thread_t		thread,
	processor_set_t		old_pset,
	processor_set_t		new_pset)
{
	/* unlink from the old set first, then link into the new one */
	queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
	old_pset->thread_count--;
	queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = new_pset;
	new_pset->thread_count++;
	/* new set gains the thread's implicit reference */
	pset_reference_locked(new_pset);
}
367
368
369 kern_return_t
370 processor_info_count(
371 processor_flavor_t flavor,
372 mach_msg_type_number_t *count)
373 {
374 switch (flavor) {
375
376 case PROCESSOR_BASIC_INFO:
377 *count = PROCESSOR_BASIC_INFO_COUNT;
378 break;
379
380 case PROCESSOR_CPU_LOAD_INFO:
381 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
382 break;
383
384 default:
385 return (cpu_info_count(flavor, count));
386 }
387
388 return (KERN_SUCCESS);
389 }
390
391
392 kern_return_t
393 processor_info(
394 register processor_t processor,
395 processor_flavor_t flavor,
396 host_t *host,
397 processor_info_t info,
398 mach_msg_type_number_t *count)
399 {
400 register int i, slot_num, state;
401 kern_return_t result;
402
403 if (processor == PROCESSOR_NULL)
404 return (KERN_INVALID_ARGUMENT);
405
406 slot_num = PROCESSOR_DATA(processor, slot_num);
407
408 switch (flavor) {
409
410 case PROCESSOR_BASIC_INFO:
411 {
412 register processor_basic_info_t basic_info;
413
414 if (*count < PROCESSOR_BASIC_INFO_COUNT)
415 return (KERN_FAILURE);
416
417 basic_info = (processor_basic_info_t) info;
418 basic_info->cpu_type = slot_type(slot_num);
419 basic_info->cpu_subtype = slot_subtype(slot_num);
420 state = processor->state;
421 if (state == PROCESSOR_OFF_LINE)
422 basic_info->running = FALSE;
423 else
424 basic_info->running = TRUE;
425 basic_info->slot_num = slot_num;
426 if (processor == master_processor)
427 basic_info->is_master = TRUE;
428 else
429 basic_info->is_master = FALSE;
430
431 *count = PROCESSOR_BASIC_INFO_COUNT;
432 *host = &realhost;
433
434 return (KERN_SUCCESS);
435 }
436
437 case PROCESSOR_CPU_LOAD_INFO:
438 {
439 register processor_cpu_load_info_t cpu_load_info;
440 register integer_t *cpu_ticks;
441
442 if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
443 return (KERN_FAILURE);
444
445 cpu_load_info = (processor_cpu_load_info_t) info;
446 cpu_ticks = PROCESSOR_DATA(processor, cpu_ticks);
447 for (i=0; i < CPU_STATE_MAX; i++)
448 cpu_load_info->cpu_ticks[i] = cpu_ticks[i];
449
450 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
451 *host = &realhost;
452
453 return (KERN_SUCCESS);
454 }
455
456 default:
457 result = cpu_info(flavor, slot_num, info, count);
458 if (result == KERN_SUCCESS)
459 *host = &realhost;
460
461 return (result);
462 }
463 }
464
/*
 *	processor_start:
 *
 *	Bring a processor on line.  For the master processor this is just
 *	a (re)start of the underlying cpu, performed while bound to it.
 *	For any other processor: transition OFF_LINE -> START, create the
 *	idle thread and (on first start) a dedicated start-up thread, set
 *	up the processor's IPC port, and finally start the cpu.  On any
 *	failure the processor is rolled back to PROCESSOR_OFF_LINE.
 *
 *	Returns KERN_INVALID_ARGUMENT for a null processor, KERN_FAILURE
 *	if the processor is not off line, or the error from thread
 *	creation / cpu_start().
 */
kern_return_t
processor_start(
	processor_t	processor)
{
	kern_return_t	result;
	thread_t	thread;
	spl_t		s;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		thread_t		self = current_thread();
		processor_t		prev;

		/* run on the master so cpu_start() executes there */
		prev = thread_bind(self, processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(PROCESSOR_DATA(processor, slot_num));

		/* restore previous binding (may be PROCESSOR_NULL) */
		thread_bind(self, prev);

		return (result);
	}

	/* claim the processor: only an off-line processor may be started */
	s = splsched();
	processor_lock(processor);
	if (processor->state != PROCESSOR_OFF_LINE) {
		processor_unlock(processor);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	processor_unlock(processor);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			/* roll back to off line on failure */
			s = splsched();
			processor_lock(processor);
			processor->state = PROCESSOR_OFF_LINE;
			processor_unlock(processor);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			/* roll back to off line on failure */
			s = splsched();
			processor_lock(processor);
			processor->state = PROCESSOR_OFF_LINE;
			processor_unlock(processor);
			splx(s);

			return (result);
		}

		/* hand the start-up thread directly to the processor */
		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		/* drop the creation reference; the processor now holds it */
		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(PROCESSOR_DATA(processor, slot_num));
	if (result != KERN_SUCCESS) {
		/* roll back and cancel any pending quantum timer */
		s = splsched();
		processor_lock(processor);
		processor->state = PROCESSOR_OFF_LINE;
		timer_call_shutdown(processor);
		processor_unlock(processor);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
567
568 kern_return_t
569 processor_exit(
570 processor_t processor)
571 {
572 if (processor == PROCESSOR_NULL)
573 return(KERN_INVALID_ARGUMENT);
574
575 return(processor_shutdown(processor));
576 }
577
578 kern_return_t
579 processor_control(
580 processor_t processor,
581 processor_info_t info,
582 mach_msg_type_number_t count)
583 {
584 if (processor == PROCESSOR_NULL)
585 return(KERN_INVALID_ARGUMENT);
586
587 return(cpu_control(PROCESSOR_DATA(processor, slot_num), info, count));
588 }
589
590 /*
591 * Calculate the appropriate timesharing quanta based on set load.
592 */
593
594 void
595 timeshare_quanta_update(
596 processor_set_t pset)
597 {
598 int pcount = pset->processor_count;
599 int i = pset->runq.count;
600
601 if (i >= pcount)
602 i = 1;
603 else
604 if (i <= 1)
605 i = pcount;
606 else
607 i = (pcount + (i / 2)) / i;
608
609 pset->timeshare_quanta = i;
610 }
611
612 kern_return_t
613 processor_set_create(
614 __unused host_t host,
615 __unused processor_set_t *new_set,
616 __unused processor_set_t *new_name)
617 {
618 return(KERN_FAILURE);
619 }
620
621 kern_return_t
622 processor_set_destroy(
623 __unused processor_set_t pset)
624 {
625 return(KERN_FAILURE);
626 }
627
628 kern_return_t
629 processor_get_assignment(
630 processor_t processor,
631 processor_set_t *pset)
632 {
633 int state;
634
635 state = processor->state;
636 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
637 return(KERN_FAILURE);
638
639 *pset = processor->processor_set;
640 pset_reference(*pset);
641 return(KERN_SUCCESS);
642 }
643
644 kern_return_t
645 processor_set_info(
646 processor_set_t pset,
647 int flavor,
648 host_t *host,
649 processor_set_info_t info,
650 mach_msg_type_number_t *count)
651 {
652 if (pset == PROCESSOR_SET_NULL)
653 return(KERN_INVALID_ARGUMENT);
654
655 if (flavor == PROCESSOR_SET_BASIC_INFO) {
656 register processor_set_basic_info_t basic_info;
657
658 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
659 return(KERN_FAILURE);
660
661 basic_info = (processor_set_basic_info_t) info;
662 basic_info->processor_count = pset->processor_count;
663 basic_info->default_policy = POLICY_TIMESHARE;
664
665 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
666 *host = &realhost;
667 return(KERN_SUCCESS);
668 }
669 else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
670 register policy_timeshare_base_t ts_base;
671
672 if (*count < POLICY_TIMESHARE_BASE_COUNT)
673 return(KERN_FAILURE);
674
675 ts_base = (policy_timeshare_base_t) info;
676 ts_base->base_priority = BASEPRI_DEFAULT;
677
678 *count = POLICY_TIMESHARE_BASE_COUNT;
679 *host = &realhost;
680 return(KERN_SUCCESS);
681 }
682 else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
683 register policy_fifo_base_t fifo_base;
684
685 if (*count < POLICY_FIFO_BASE_COUNT)
686 return(KERN_FAILURE);
687
688 fifo_base = (policy_fifo_base_t) info;
689 fifo_base->base_priority = BASEPRI_DEFAULT;
690
691 *count = POLICY_FIFO_BASE_COUNT;
692 *host = &realhost;
693 return(KERN_SUCCESS);
694 }
695 else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
696 register policy_rr_base_t rr_base;
697
698 if (*count < POLICY_RR_BASE_COUNT)
699 return(KERN_FAILURE);
700
701 rr_base = (policy_rr_base_t) info;
702 rr_base->base_priority = BASEPRI_DEFAULT;
703 rr_base->quantum = 1;
704
705 *count = POLICY_RR_BASE_COUNT;
706 *host = &realhost;
707 return(KERN_SUCCESS);
708 }
709 else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
710 register policy_timeshare_limit_t ts_limit;
711
712 if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
713 return(KERN_FAILURE);
714
715 ts_limit = (policy_timeshare_limit_t) info;
716 ts_limit->max_priority = MAXPRI_KERNEL;
717
718 *count = POLICY_TIMESHARE_LIMIT_COUNT;
719 *host = &realhost;
720 return(KERN_SUCCESS);
721 }
722 else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
723 register policy_fifo_limit_t fifo_limit;
724
725 if (*count < POLICY_FIFO_LIMIT_COUNT)
726 return(KERN_FAILURE);
727
728 fifo_limit = (policy_fifo_limit_t) info;
729 fifo_limit->max_priority = MAXPRI_KERNEL;
730
731 *count = POLICY_FIFO_LIMIT_COUNT;
732 *host = &realhost;
733 return(KERN_SUCCESS);
734 }
735 else if (flavor == PROCESSOR_SET_RR_LIMITS) {
736 register policy_rr_limit_t rr_limit;
737
738 if (*count < POLICY_RR_LIMIT_COUNT)
739 return(KERN_FAILURE);
740
741 rr_limit = (policy_rr_limit_t) info;
742 rr_limit->max_priority = MAXPRI_KERNEL;
743
744 *count = POLICY_RR_LIMIT_COUNT;
745 *host = &realhost;
746 return(KERN_SUCCESS);
747 }
748 else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
749 register int *enabled;
750
751 if (*count < (sizeof(*enabled)/sizeof(int)))
752 return(KERN_FAILURE);
753
754 enabled = (int *) info;
755 *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
756
757 *count = sizeof(*enabled)/sizeof(int);
758 *host = &realhost;
759 return(KERN_SUCCESS);
760 }
761
762
763 *host = HOST_NULL;
764 return(KERN_INVALID_ARGUMENT);
765 }
766
767 /*
768 * processor_set_statistics
769 *
770 * Returns scheduling statistics for a processor set.
771 */
772 kern_return_t
773 processor_set_statistics(
774 processor_set_t pset,
775 int flavor,
776 processor_set_info_t info,
777 mach_msg_type_number_t *count)
778 {
779 if (pset == PROCESSOR_SET_NULL)
780 return (KERN_INVALID_PROCESSOR_SET);
781
782 if (flavor == PROCESSOR_SET_LOAD_INFO) {
783 register processor_set_load_info_t load_info;
784
785 if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
786 return(KERN_FAILURE);
787
788 load_info = (processor_set_load_info_t) info;
789
790 pset_lock(pset);
791 load_info->task_count = pset->task_count;
792 load_info->thread_count = pset->thread_count;
793 load_info->mach_factor = pset->mach_factor;
794 load_info->load_average = pset->load_average;
795 pset_unlock(pset);
796
797 *count = PROCESSOR_SET_LOAD_INFO_COUNT;
798 return(KERN_SUCCESS);
799 }
800
801 return(KERN_INVALID_ARGUMENT);
802 }
803
804 /*
805 * processor_set_max_priority:
806 *
807 * Specify max priority permitted on processor set. This affects
808 * newly created and assigned threads. Optionally change existing
809 * ones.
810 */
811 kern_return_t
812 processor_set_max_priority(
813 __unused processor_set_t pset,
814 __unused int max_priority,
815 __unused boolean_t change_threads)
816 {
817 return (KERN_INVALID_ARGUMENT);
818 }
819
820 /*
821 * processor_set_policy_enable:
822 *
823 * Allow indicated policy on processor set.
824 */
825
826 kern_return_t
827 processor_set_policy_enable(
828 __unused processor_set_t pset,
829 __unused int policy)
830 {
831 return (KERN_INVALID_ARGUMENT);
832 }
833
834 /*
835 * processor_set_policy_disable:
836 *
837 * Forbid indicated policy on processor set. Time sharing cannot
838 * be forbidden.
839 */
840 kern_return_t
841 processor_set_policy_disable(
842 __unused processor_set_t pset,
843 __unused int policy,
844 __unused boolean_t change_threads)
845 {
846 return (KERN_INVALID_ARGUMENT);
847 }
848
#define THING_TASK	0
#define THING_THREAD	1

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 *
 *	Snapshots the set's task or thread list (selected by 'type')
 *	into a kalloc'd array of referenced pointers, trims the array
 *	if it was over-allocated, then converts the entries in place to
 *	send rights for the caller.  On success the caller owns the
 *	returned port array and its rights; on failure all intermediate
 *	references and memory are released here.
 */
kern_return_t
processor_set_things(
	processor_set_t			pset,
	mach_port_t				**thing_list,
	mach_msg_type_number_t	*count,
	int						type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = 0;

	/*
	 * Allocate-and-retry loop: the count can change while the pset
	 * is unlocked for kalloc, so loop until the buffer is big enough
	 * at the moment the pset lock is (re)acquired.
	 */
	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);

			return (KERN_FAILURE);
		}

		if (type == THING_TASK)
			maxthings = pset->task_count;
		else
			maxthings = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the processor_set is locked & active */

	/* take a reference on each thing so it survives the unlock below */
	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t		task, *tasks = (task_t *)addr;

		for (task = (task_t)queue_first(&pset->tasks);
				!queue_end(&pset->tasks, (queue_entry_t)task);
					task = (task_t)queue_next(&task->pset_tasks)) {
			task_reference_internal(task);
			tasks[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t	thread, *threads = (thread_t *)addr;

		for (i = 0, thread = (thread_t)queue_first(&pset->threads);
				!queue_end(&pset->threads, (queue_entry_t)thread);
					thread = (thread_t)queue_next(&thread->pset_threads)) {
			thread_reference_internal(thread);
			threads[actual++] = thread;
		}

		break;
	}
	}

	pset_unlock(pset);

	/* entries may have disappeared while we allocated; trim the size */
	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* copy failed: drop every reference taken above */
				switch (type) {

				case THING_TASK:
				{
					task_t		*tasks = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t	*threads = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(threads[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		/* each conversion consumes the reference taken above */
		switch (type) {

		case THING_TASK:
		{
			task_t		*tasks = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(tasks[i]);
			break;
		}

		case THING_THREAD:
		{
			thread_t	*threads = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(threads[i]);
			break;
		}
		}
	}

	return (KERN_SUCCESS);
}
1023
1024
1025 /*
1026 * processor_set_tasks:
1027 *
1028 * List all tasks in the processor set.
1029 */
1030 kern_return_t
1031 processor_set_tasks(
1032 processor_set_t pset,
1033 task_array_t *task_list,
1034 mach_msg_type_number_t *count)
1035 {
1036 return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
1037 }
1038
1039 /*
1040 * processor_set_threads:
1041 *
1042 * List all threads in the processor set.
1043 */
1044 kern_return_t
1045 processor_set_threads(
1046 processor_set_t pset,
1047 thread_array_t *thread_list,
1048 mach_msg_type_number_t *count)
1049 {
1050 return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
1051 }
1052
1053 /*
1054 * processor_set_base:
1055 *
1056 * Specify per-policy base priority for a processor set. Set processor
1057 * set default policy to the given policy. This affects newly created
1058 * and assigned threads. Optionally change existing ones.
1059 */
1060 kern_return_t
1061 processor_set_base(
1062 __unused processor_set_t pset,
1063 __unused policy_t policy,
1064 __unused policy_base_t base,
1065 __unused boolean_t change)
1066 {
1067 return (KERN_INVALID_ARGUMENT);
1068 }
1069
1070 /*
1071 * processor_set_limit:
1072 *
1073 * Specify per-policy limits for a processor set. This affects
1074 * newly created and assigned threads. Optionally change existing
1075 * ones.
1076 */
1077 kern_return_t
1078 processor_set_limit(
1079 __unused processor_set_t pset,
1080 __unused policy_t policy,
1081 __unused policy_limit_t limit,
1082 __unused boolean_t change)
1083 {
1084 return (KERN_POLICY_LIMIT);
1085 }
1086
1087 /*
1088 * processor_set_policy_control
1089 *
1090 * Controls the scheduling attributes governing the processor set.
1091 * Allows control of enabled policies, and per-policy base and limit
1092 * priorities.
1093 */
1094 kern_return_t
1095 processor_set_policy_control(
1096 __unused processor_set_t pset,
1097 __unused int flavor,
1098 __unused processor_set_info_t policy_info,
1099 __unused mach_msg_type_number_t count,
1100 __unused boolean_t change)
1101 {
1102 return (KERN_INVALID_ARGUMENT);
1103 }