/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * processor.c: processor and processor_set manipulation routines.
 */

#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

/*
 * Exported interface
 */
#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>

/*
 * Exported variables.
 */
struct processor_set	default_pset;

processor_t		processor_list;
unsigned int		processor_count;
static processor_t	processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

processor_t		master_processor;
int			master_cpu = 0;

/* Forwards */
kern_return_t	processor_set_base(
			processor_set_t		pset,
			policy_t		policy,
			policy_base_t		base,
			boolean_t		change);

kern_return_t	processor_set_limit(
			processor_set_t		pset,
			policy_t		policy,
			policy_limit_t		limit,
			boolean_t		change);

kern_return_t	processor_set_things(
			processor_set_t		pset,
			mach_port_t		**thing_list,
			mach_msg_type_number_t	*count,
			int			type);

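/*
 * processor_bootstrap:
 *
 * Initialize the master processor early in boot: set up the
 * processor list lock and initialize the master's processor
 * structure via processor_init().
 */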
void
processor_bootstrap(void)
{
	simple_lock_init(&processor_list_lock, 0);

	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu);
}

/*
 * Initialize the given processor_set structure.
 */

void
pset_init(
	register processor_set_t	pset)
{
	register int	i;

	/* setup run queue */
	pset->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		pset->runq.bitmap[i] = 0;
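	/*
	 * The bitmap is indexed from MAXPRI downward; mark the idle
	 * level so highq (IDLEPRI) refers to a set bit even when empty.
	 */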
	setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);
	pset->runq.urgency = pset->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&pset->runq.queues[i]);

	queue_init(&pset->idle_queue);
	pset->idle_count = 0;
	queue_init(&pset->active_queue);
	simple_lock_init(&pset->sched_lock, 0);
	pset->run_count = pset->share_count = 0;
	pset->mach_factor = pset->load_average = 0;
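	/* INT8_MAX: no load-based priority shift until averaging runs */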
	pset->pri_shift = INT8_MAX;
	queue_init(&pset->processors);
	pset->processor_count = 0;
	queue_init(&pset->tasks);
	pset->task_count = 0;
	queue_init(&pset->threads);
	pset->thread_count = 0;
	pset->ref_count = 1;
	pset->active = TRUE;
	mutex_init(&pset->lock, 0);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->timeshare_quanta = 1;
}

/*
 * Initialize the given processor structure for the processor in
 * the slot specified by slot_num.
 */
void
processor_init(
	register processor_t	p,
	int			slot_num)
{
	register int	i;

	/* setup run queue */
	p->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		p->runq.bitmap[i] = 0;
	setbit(MAXPRI - IDLEPRI, p->runq.bitmap);
	p->runq.urgency = p->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&p->runq.queues[i]);

	p->state = PROCESSOR_OFF_LINE;
	p->active_thread = p->next_thread = p->idle_thread = THREAD_NULL;
	p->processor_set = PROCESSOR_SET_NULL;
	p->current_pri = MINPRI;
	p->deadline = UINT64_MAX;
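	/* callout used to drive thread_quantum_expire() at quantum end */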
	timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);
	p->timeslice = 0;
	simple_lock_init(&p->lock, 0);
	p->processor_self = IP_NULL;
	processor_data_init(p);
	PROCESSOR_DATA(p, slot_num) = slot_num;

	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = p;
	else
		processor_list_tail->processor_list = p;
	processor_list_tail = p;
	processor_count++;
	p->processor_list = NULL;
	simple_unlock(&processor_list_lock);
}

/*
 * pset_deallocate:
 *
 * Remove one reference to the processor set. Destroy processor_set
 * if this was the last reference.
 */
void
pset_deallocate(
	processor_set_t	pset)
{
	if (pset == PROCESSOR_SET_NULL)
		return;

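	/*
	 * Only the default processor set exists, and it is never
	 * destroyed, so there is nothing to release here.
	 */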
	assert(pset == &default_pset);
	return;
}

/*
 * pset_reference:
 *
 * Add one reference to the processor set.
 */
void
pset_reference(
	processor_set_t	pset)
{
	if (pset == PROCESSOR_SET_NULL)
		return;

	assert(pset == &default_pset);
}

#define pset_reference_locked(pset)	assert(pset == &default_pset)

/*
 * pset_remove_processor() removes a processor from a processor_set.
 * It can only be called on the current processor. Caller must
 * hold lock on current processor and processor set.
 */
void
pset_remove_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	if (pset != processor->processor_set)
		panic("pset_remove_processor: wrong pset");

	queue_remove(&pset->processors, processor, processor_t, processors);
	processor->processor_set = PROCESSOR_SET_NULL;
	pset->processor_count--;
	timeshare_quanta_update(pset);
}

/*
 * pset_add_processor() adds a processor to a processor_set.
 * It can only be called on the current processor. Caller must
 * hold lock on current processor and on pset. No reference counting on
 * processors. Processor reference to pset is implicit.
 */
void
pset_add_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	queue_enter(&pset->processors, processor, processor_t, processors);
	processor->processor_set = pset;
	pset->processor_count++;
	timeshare_quanta_update(pset);
}

/*
 * pset_remove_task() removes a task from a processor_set.
 * Caller must hold locks on pset and task (unless task has
 * no references left, in which case just the pset lock is
 * needed). Pset reference count is not decremented;
 * caller must explicitly pset_deallocate.
 */
void
pset_remove_task(
	processor_set_t	pset,
	task_t		task)
{
	if (pset != task->processor_set)
		return;

	queue_remove(&pset->tasks, task, task_t, pset_tasks);
	pset->task_count--;
}

/*
 * pset_add_task() adds a task to a processor_set.
 * Caller must hold locks on pset and task. Pset references to
 * tasks are implicit.
 */
void
pset_add_task(
	processor_set_t	pset,
	task_t		task)
{
	queue_enter(&pset->tasks, task, task_t, pset_tasks);
	task->processor_set = pset;
	pset->task_count++;
	pset_reference_locked(pset);
}

/*
 * pset_remove_thread() removes a thread from a processor_set.
 * Caller must hold locks on pset and thread (but only if thread
 * has outstanding references that could be used to lookup the pset).
 * The pset reference count is not decremented; caller must explicitly
 * pset_deallocate.
 */
void
pset_remove_thread(
	processor_set_t	pset,
	thread_t	thread)
{
	queue_remove(&pset->threads, thread, thread_t, pset_threads);
	pset->thread_count--;
}

/*
 * pset_add_thread() adds a thread to a processor_set.
 * Caller must hold locks on pset and thread. Pset references to
 * threads are implicit.
 */
void
pset_add_thread(
	processor_set_t	pset,
	thread_t	thread)
{
	queue_enter(&pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = pset;
	pset->thread_count++;
	pset_reference_locked(pset);
}

/*
 * thread_change_psets() changes the pset of a thread. Caller must
 * hold locks on both psets and thread. The old pset must be
 * explicitly pset_deallocate()'d by caller.
 */
void
thread_change_psets(
	thread_t		thread,
	processor_set_t	old_pset,
	processor_set_t	new_pset)
{
	queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
	old_pset->thread_count--;
	queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = new_pset;
	new_pset->thread_count++;
	pset_reference_locked(new_pset);
}

kern_return_t
processor_info_count(
	processor_flavor_t	flavor,
	mach_msg_type_number_t	*count)
{
	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
		*count = PROCESSOR_BASIC_INFO_COUNT;
		break;

	case PROCESSOR_CPU_LOAD_INFO:
		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		break;

	default:
		return (cpu_info_count(flavor, count));
	}

	return (KERN_SUCCESS);
}

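/*
 * processor_info:
 *
 * Return information about a processor for the requested flavor.
 * An illustrative caller (sketch only; the types and counts come
 * from <mach/processor_info.h>):
 *
 *	processor_basic_info_data_t	info;
 *	mach_msg_type_number_t		count = PROCESSOR_BASIC_INFO_COUNT;
 *	host_t				host;
 *
 *	if (processor_info(processor, PROCESSOR_BASIC_INFO, &host,
 *			(processor_info_t) &info, &count) == KERN_SUCCESS)
 *		... examine info.running, info.slot_num, info.is_master ...
 */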
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	register int	i, slot_num, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	slot_num = PROCESSOR_DATA(processor, slot_num);

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t	basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(slot_num);
		basic_info->cpu_subtype = slot_subtype(slot_num);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = slot_num;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		register processor_cpu_load_info_t	cpu_load_info;
		register integer_t	*cpu_ticks;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		cpu_ticks = PROCESSOR_DATA(processor, cpu_ticks);
		for (i = 0; i < CPU_STATE_MAX; i++)
			cpu_load_info->cpu_ticks[i] = cpu_ticks[i];

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		result = cpu_info(flavor, slot_num, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}

kern_return_t
processor_start(
	processor_t	processor)
{
	kern_return_t	result;
	thread_t	thread;
	spl_t		s;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
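		/*
		 * Must run on the master processor itself to restart it:
		 * bind this thread there, block to migrate, then restore
		 * the previous binding once cpu_start() returns.
		 */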
		thread_t	self = current_thread();
		processor_t	prev;

		prev = thread_bind(self, processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(PROCESSOR_DATA(processor, slot_num));

		thread_bind(self, prev);

		return (result);
	}

	s = splsched();
	processor_lock(processor);
	if (processor->state != PROCESSOR_OFF_LINE) {
		processor_unlock(processor);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	processor_unlock(processor);
	splx(s);

	/*
	 * Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			s = splsched();
			processor_lock(processor);
			processor->state = PROCESSOR_OFF_LINE;
			processor_unlock(processor);
			splx(s);

			return (result);
		}
	}

	/*
	 * If there is no active thread, the processor
	 * has never been started. Create a dedicated
	 * startup thread.
	 */
	if (processor->active_thread == THREAD_NULL &&
	    processor->next_thread == THREAD_NULL) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			s = splsched();
			processor_lock(processor);
			processor->state = PROCESSOR_OFF_LINE;
			processor_unlock(processor);
			splx(s);

			return (result);
		}

		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(PROCESSOR_DATA(processor, slot_num));
	if (result != KERN_SUCCESS) {
		s = splsched();
		processor_lock(processor);
		processor->state = PROCESSOR_OFF_LINE;
		timer_call_shutdown(processor);
		processor_unlock(processor);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}

kern_return_t
processor_exit(
	processor_t	processor)
{
	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	return (processor_shutdown(processor));
}

kern_return_t
processor_control(
	processor_t		processor,
	processor_info_t	info,
	mach_msg_type_number_t	count)
{
	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	return (cpu_control(PROCESSOR_DATA(processor, slot_num), info, count));
}

/*
 * Calculate the appropriate timesharing quanta based on set load.
 */
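/*
 * For example, with a 4-processor set: an empty or singly-occupied
 * run queue yields 4 quanta, two runnable threads yield
 * (4 + 1) / 2 = 2, and four or more runnable threads yield 1.
 */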

void
timeshare_quanta_update(
	processor_set_t	pset)
{
	int	pcount = pset->processor_count;
	int	i = pset->runq.count;

	if (i >= pcount)
		i = 1;
	else if (i <= 1)
		i = pcount;
	else
		i = (pcount + (i / 2)) / i;

	pset->timeshare_quanta = i;
}

kern_return_t
processor_set_create(
	__unused host_t			host,
	__unused processor_set_t	*new_set,
	__unused processor_set_t	*new_name)
{
	return (KERN_FAILURE);
}

kern_return_t
processor_set_destroy(
	__unused processor_set_t	pset)
{
	return (KERN_FAILURE);
}

kern_return_t
processor_get_assignment(
	processor_t	processor,
	processor_set_t	*pset)
{
	int	state;

	state = processor->state;
	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
		return (KERN_FAILURE);

	*pset = processor->processor_set;
	pset_reference(*pset);
	return (KERN_SUCCESS);
}

kern_return_t
processor_set_info(
	processor_set_t		pset,
	int			flavor,
	host_t			*host,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		register processor_set_basic_info_t	basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_set_basic_info_t) info;
		basic_info->processor_count = pset->processor_count;
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		register policy_timeshare_base_t	ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return (KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		register policy_fifo_base_t	fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return (KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		register policy_rr_base_t	rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return (KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		register policy_timeshare_limit_t	ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return (KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		register policy_fifo_limit_t	fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return (KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		register policy_rr_limit_t	rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return (KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		register int	*enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return (KERN_FAILURE);

		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled)/sizeof(int);
		*host = &realhost;
		return (KERN_SUCCESS);
	}

	*host = HOST_NULL;
	return (KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_statistics
 *
 * Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
	processor_set_t		pset,
	int			flavor,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_PROCESSOR_SET);

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		register processor_set_load_info_t	load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		load_info = (processor_set_load_info_t) info;

		pset_lock(pset);
		load_info->task_count = pset->task_count;
		load_info->thread_count = pset->thread_count;
		load_info->mach_factor = pset->mach_factor;
		load_info->load_average = pset->load_average;
		pset_unlock(pset);

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_max_priority:
 *
 * Specify max priority permitted on processor set. This affects
 * newly created and assigned threads. Optionally change existing
 * ones.
 */
kern_return_t
processor_set_max_priority(
	__unused processor_set_t	pset,
	__unused int			max_priority,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_policy_enable:
 *
 * Allow indicated policy on processor set.
 */

kern_return_t
processor_set_policy_enable(
	__unused processor_set_t	pset,
	__unused int			policy)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_policy_disable:
 *
 * Forbid indicated policy on processor set. Time sharing cannot
 * be forbidden.
 */
kern_return_t
processor_set_policy_disable(
	__unused processor_set_t	pset,
	__unused int			policy,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

#define THING_TASK	0
#define THING_THREAD	1

/*
 * processor_set_things:
 *
 * Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int	actual;		/* this many things */
	unsigned int	maxthings;
	unsigned int	i;

	vm_size_t	size, size_needed;
	void		*addr;

	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = 0;

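	/*
	 * Size the buffer from the current count, allocating with the
	 * pset unlocked; retry if the set grew while it was unlocked.
	 */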
	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);

			return (KERN_FAILURE);
		}

		if (type == THING_TASK)
			maxthings = pset->task_count;
		else
			maxthings = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the processor_set is locked & active */

	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t	task, *tasks = (task_t *)addr;

		for (task = (task_t)queue_first(&pset->tasks);
				!queue_end(&pset->tasks, (queue_entry_t)task);
				task = (task_t)queue_next(&task->pset_tasks)) {
			task_reference_internal(task);
			tasks[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t	thread, *threads = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&pset->threads);
				!queue_end(&pset->threads, (queue_entry_t)thread);
				thread = (thread_t)queue_next(&thread->pset_threads)) {
			thread_reference_internal(thread);
			threads[actual++] = thread;
		}

		break;
	}
	}

	pset_unlock(pset);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void	*newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				switch (type) {

				case THING_TASK:
				{
					task_t	*tasks = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t	*threads = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(threads[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {

		case THING_TASK:
		{
			task_t	*tasks = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(tasks[i]);
			break;
		}

		case THING_THREAD:
		{
			thread_t	*threads = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(threads[i]);
			break;
		}
		}
	}

	return (KERN_SUCCESS);
}

/*
 * processor_set_tasks:
 *
 * List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
	processor_set_t		pset,
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return (processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
}

/*
 * processor_set_threads:
 *
 * List all threads in the processor set.
 */
kern_return_t
processor_set_threads(
	processor_set_t		pset,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return (processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}

/*
 * processor_set_base:
 *
 * Specify per-policy base priority for a processor set. Set processor
 * set default policy to the given policy. This affects newly created
 * and assigned threads. Optionally change existing ones.
 */
kern_return_t
processor_set_base(
	__unused processor_set_t	pset,
	__unused policy_t		policy,
	__unused policy_base_t		base,
	__unused boolean_t		change)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_limit:
 *
 * Specify per-policy limits for a processor set. This affects
 * newly created and assigned threads. Optionally change existing
 * ones.
 */
kern_return_t
processor_set_limit(
	__unused processor_set_t	pset,
	__unused policy_t		policy,
	__unused policy_limit_t		limit,
	__unused boolean_t		change)
{
	return (KERN_POLICY_LIMIT);
}

/*
 * processor_set_policy_control
 *
 * Controls the scheduling attributes governing the processor set.
 * Allows control of enabled policies, and per-policy base and limit
 * priorities.
 */
kern_return_t
processor_set_policy_control(
	__unused processor_set_t	pset,
	__unused int			flavor,
	__unused processor_set_info_t	policy_info,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t		change)
{
	return (KERN_INVALID_ARGUMENT);
}