/* osfmk/kern/processor.c — Apple xnu-792.6.56 */
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * @OSF_COPYRIGHT@
25 */
26 /*
27 * Mach Operating System
28 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
29 * All Rights Reserved.
30 *
31 * Permission to use, copy, modify and distribute this software and its
32 * documentation is hereby granted, provided that both the copyright
33 * notice and this permission notice appear in all copies of the
34 * software, derivative works or modified versions, and any portions
35 * thereof, and that both notices appear in supporting documentation.
36 *
37 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
38 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
39 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 *
41 * Carnegie Mellon requests users of this software to return to
42 *
43 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
44 * School of Computer Science
45 * Carnegie Mellon University
46 * Pittsburgh PA 15213-3890
47 *
48 * any improvements or extensions that they make and grant Carnegie Mellon
49 * the rights to redistribute these changes.
50 */
51 /*
52 */
53
54 /*
55 * processor.c: processor and processor_set manipulation routines.
56 */
57
58 #include <mach/boolean.h>
59 #include <mach/policy.h>
60 #include <mach/processor.h>
61 #include <mach/processor_info.h>
62 #include <mach/vm_param.h>
63 #include <kern/cpu_number.h>
64 #include <kern/host.h>
65 #include <kern/machine.h>
66 #include <kern/misc_protos.h>
67 #include <kern/processor.h>
68 #include <kern/sched.h>
69 #include <kern/task.h>
70 #include <kern/thread.h>
71 #include <kern/ipc_host.h>
72 #include <kern/ipc_tt.h>
73 #include <ipc/ipc_port.h>
74 #include <kern/kalloc.h>
75
76 /*
77 * Exported interface
78 */
79 #include <mach/mach_host_server.h>
80 #include <mach/processor_set_server.h>
81
82 /*
83 * Exported variables.
84 */
struct processor_set	default_pset;		/* the one and only processor set */

processor_t		processor_list;		/* head of singly-linked list of all processors */
unsigned int		processor_count;	/* number of processors on that list */
static processor_t	processor_list_tail;	/* list tail, for O(1) append in processor_init() */
decl_simple_lock_data(,processor_list_lock)	/* guards processor_list/_tail/_count */

processor_t	master_processor;		/* the boot processor */
int 		master_cpu = 0;			/* slot number of the boot processor */
94
95 /* Forwards */
96 kern_return_t processor_set_base(
97 processor_set_t pset,
98 policy_t policy,
99 policy_base_t base,
100 boolean_t change);
101
102 kern_return_t processor_set_limit(
103 processor_set_t pset,
104 policy_t policy,
105 policy_limit_t limit,
106 boolean_t change);
107
108 kern_return_t processor_set_things(
109 processor_set_t pset,
110 mach_port_t **thing_list,
111 mach_msg_type_number_t *count,
112 int type);
113
/*
 *	processor_bootstrap:
 *
 *	Early startup: initialize the global processor-list lock and
 *	bring the boot (master) processor's structure to a known state.
 *	Runs before any other processors exist.
 */
void
processor_bootstrap(void)
{
	simple_lock_init(&processor_list_lock, 0);

	/* The boot CPU's slot (master_cpu) becomes the master processor. */
	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu);
}
123
124 /*
125 * Initialize the given processor_set structure.
126 */
127
void
pset_init(
	register processor_set_t	pset)
{
	register int	i;

	/* setup run queue: empty, with only the idle bucket marked */
	pset->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		pset->runq.bitmap[i] = 0;
	setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);	/* idle bucket is always set */
	pset->runq.urgency = pset->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&pset->runq.queues[i]);

	/* processor bookkeeping and scheduler state */
	queue_init(&pset->idle_queue);
	pset->idle_count = 0;
	queue_init(&pset->active_queue);
	simple_lock_init(&pset->sched_lock, 0);
	pset->run_count = pset->share_count = 0;
	pset->mach_factor = pset->load_average = 0;
	pset->pri_shift = INT8_MAX;		/* no timeshare load adjustment yet */
	queue_init(&pset->processors);
	pset->processor_count = 0;

	/* task / thread membership lists */
	queue_init(&pset->tasks);
	pset->task_count = 0;
	queue_init(&pset->threads);
	pset->thread_count = 0;

	pset->ref_count = 1;			/* caller holds the initial reference */
	pset->active = TRUE;
	mutex_init(&pset->lock, 0);

	/* IPC ports are created lazily by the IPC layer */
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->timeshare_quanta = 1;
}
163
164 /*
165 * Initialize the given processor structure for the processor in
166 * the slot specified by slot_num.
167 */
void
processor_init(
	register processor_t	p,
	int			slot_num)
{
	register int	i;

	/* setup run queue: empty, with only the idle bucket marked */
	p->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		p->runq.bitmap[i] = 0;
	setbit(MAXPRI - IDLEPRI, p->runq.bitmap);	/* idle bucket is always set */
	p->runq.urgency = p->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&p->runq.queues[i]);

	p->state = PROCESSOR_OFF_LINE;		/* not running until processor_start() */
	p->active_thread = p->next_thread = p->idle_thread = THREAD_NULL;
	p->processor_set = PROCESSOR_SET_NULL;
	p->current_pri = MINPRI;
	p->deadline = UINT64_MAX;		/* no realtime deadline pending */
	timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);
	p->timeslice = 0;
	simple_lock_init(&p->lock, 0);
	p->processor_self = IP_NULL;		/* IPC port created lazily */
	processor_data_init(p);
	PROCESSOR_DATA(p, slot_num) = slot_num;

	/* append to the global processor list, under the list lock */
	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = p;
	else
		processor_list_tail->processor_list = p;
	processor_list_tail = p;
	processor_count++;
	p->processor_list = NULL;		/* new tail has no successor */
	simple_unlock(&processor_list_lock);
}
206
207 /*
208 * pset_deallocate:
209 *
210 * Remove one reference to the processor set. Destroy processor_set
211 * if this was the last reference.
212 */
213 void
214 pset_deallocate(
215 processor_set_t pset)
216 {
217 if (pset == PROCESSOR_SET_NULL)
218 return;
219
220 assert(pset == &default_pset);
221 return;
222 }
223
224 /*
225 * pset_reference:
226 *
227 * Add one reference to the processor set.
228 */
229 void
230 pset_reference(
231 processor_set_t pset)
232 {
233 if (pset == PROCESSOR_SET_NULL)
234 return;
235
236 assert(pset == &default_pset);
237 }
238
239 #define pset_reference_locked(pset) assert(pset == &default_pset)
240
241 /*
242 * pset_remove_processor() removes a processor from a processor_set.
243 * It can only be called on the current processor. Caller must
244 * hold lock on current processor and processor set.
245 */
void
pset_remove_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	/* Sanity: the processor must actually belong to this set. */
	if (pset != processor->processor_set)
		panic("pset_remove_processor: wrong pset");

	queue_remove(&pset->processors, processor, processor_t, processors);
	processor->processor_set = PROCESSOR_SET_NULL;
	pset->processor_count--;
	/* Set load changed; recompute the timesharing quantum. */
	timeshare_quanta_update(pset);
}
259
260 /*
261 * pset_add_processor() adds a processor to a processor_set.
262 * It can only be called on the current processor. Caller must
263 * hold lock on curent processor and on pset. No reference counting on
264 * processors. Processor reference to pset is implicit.
265 */
void
pset_add_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	queue_enter(&pset->processors, processor, processor_t, processors);
	processor->processor_set = pset;
	pset->processor_count++;
	/* Set load changed; recompute the timesharing quantum. */
	timeshare_quanta_update(pset);
}
276
277 /*
278 * pset_remove_task() removes a task from a processor_set.
279 * Caller must hold locks on pset and task (unless task has
280 * no references left, in which case just the pset lock is
281 * needed). Pset reference count is not decremented;
282 * caller must explicitly pset_deallocate.
283 */
void
pset_remove_task(
	processor_set_t	pset,
	task_t		task)
{
	/* Nothing to do if the task isn't a member of this set. */
	if (pset != task->processor_set)
		return;

	queue_remove(&pset->tasks, task, task_t, pset_tasks);
	pset->task_count--;
	/* NOTE: task->processor_set is deliberately left pointing at pset. */
}
295
296 /*
297 * pset_add_task() adds a task to a processor_set.
298 * Caller must hold locks on pset and task. Pset references to
299 * tasks are implicit.
300 */
void
pset_add_task(
	processor_set_t	pset,
	task_t		task)
{
	queue_enter(&pset->tasks, task, task_t, pset_tasks);
	task->processor_set = pset;
	pset->task_count++;
	/* Take the (asserted-only) pset reference for the membership. */
	pset_reference_locked(pset);
}
311
312 /*
313 * pset_remove_thread() removes a thread from a processor_set.
314 * Caller must hold locks on pset and thread (but only if thread
315 * has outstanding references that could be used to lookup the pset).
316 * The pset reference count is not decremented; caller must explicitly
317 * pset_deallocate.
318 */
void
pset_remove_thread(
	processor_set_t	pset,
	thread_t	thread)
{
	queue_remove(&pset->threads, thread, thread_t, pset_threads);
	pset->thread_count--;
	/* NOTE: thread->processor_set is deliberately left unchanged. */
}
327
328 /*
329 * pset_add_thread() adds a thread to a processor_set.
330 * Caller must hold locks on pset and thread. Pset references to
331 * threads are implicit.
332 */
void
pset_add_thread(
	processor_set_t	pset,
	thread_t	thread)
{
	queue_enter(&pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = pset;
	pset->thread_count++;
	/* Take the (asserted-only) pset reference for the membership. */
	pset_reference_locked(pset);
}
343
344 /*
345 * thread_change_psets() changes the pset of a thread. Caller must
346 * hold locks on both psets and thread. The old pset must be
347 * explicitly pset_deallocat()'ed by caller.
348 */
void
thread_change_psets(
	thread_t	thread,
	processor_set_t old_pset,
	processor_set_t new_pset)
{
	/* Unlink from the old set (old set's reference dropped by caller). */
	queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
	old_pset->thread_count--;
	/* Link into the new set and take its (asserted-only) reference. */
	queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = new_pset;
	new_pset->thread_count++;
	pset_reference_locked(new_pset);
}
362
363
364 kern_return_t
365 processor_info_count(
366 processor_flavor_t flavor,
367 mach_msg_type_number_t *count)
368 {
369 switch (flavor) {
370
371 case PROCESSOR_BASIC_INFO:
372 *count = PROCESSOR_BASIC_INFO_COUNT;
373 break;
374
375 case PROCESSOR_CPU_LOAD_INFO:
376 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
377 break;
378
379 default:
380 return (cpu_info_count(flavor, count));
381 }
382
383 return (KERN_SUCCESS);
384 }
385
386
/*
 *	processor_info:
 *
 *	Return information about a processor for the requested flavor.
 *	*count is in/out: on entry the caller's buffer size in integers,
 *	on success the number of integers actually filled in.  *host is
 *	set to the real host on success.
 */
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t		flavor,
	host_t					*host,
	processor_info_t		info,
	mach_msg_type_number_t	*count)
{
	register int	i, slot_num, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	slot_num = PROCESSOR_DATA(processor, slot_num);

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t		basic_info;

		/* Caller's buffer must be large enough. */
		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(slot_num);
		basic_info->cpu_subtype = slot_subtype(slot_num);
		state = processor->state;		/* unlocked snapshot */
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = slot_num;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		register processor_cpu_load_info_t	cpu_load_info;
		register integer_t	*cpu_ticks;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		/* Copy out the per-CPU tick counters (unlocked snapshot). */
		cpu_load_info = (processor_cpu_load_info_t) info;
		cpu_ticks = PROCESSOR_DATA(processor, cpu_ticks);
		for (i=0; i < CPU_STATE_MAX; i++)
			cpu_load_info->cpu_ticks[i] = cpu_ticks[i];

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		/* Unknown flavors are handled by the machine-dependent layer. */
		result = cpu_info(flavor, slot_num, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}
459
/*
 *	processor_start:
 *
 *	Bring a processor on line.  For the master processor this just
 *	re-runs cpu_start() from the master itself; for others it moves
 *	the processor OFF_LINE -> START, creates the idle thread and
 *	(on first start) a dedicated startup thread, then starts the cpu.
 *	Any failure rolls the state back to PROCESSOR_OFF_LINE.
 */
kern_return_t
processor_start(
	processor_t	processor)
{
	kern_return_t	result;
	thread_t		thread;
	spl_t			s;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		thread_t		self = current_thread();
		processor_t		prev;

		/* Run cpu_start() on the master processor itself. */
		prev = thread_bind(self, processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(PROCESSOR_DATA(processor, slot_num));

		thread_bind(self, prev);	/* restore previous binding */

		return (result);
	}

	/* Claim the processor: only an OFF_LINE processor may be started. */
	s = splsched();
	processor_lock(processor);
	if (processor->state != PROCESSOR_OFF_LINE) {
		processor_unlock(processor);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	processor_unlock(processor);
	splx(s);

	/*
	 * Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			/* Roll back to OFF_LINE on failure. */
			s = splsched();
			processor_lock(processor);
			processor->state = PROCESSOR_OFF_LINE;
			processor_unlock(processor);
			splx(s);

			return (result);
		}
	}

	/*
	 * If there is no active thread, the processor
	 * has never been started.  Create a dedicated
	 * start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			/* Roll back to OFF_LINE on failure. */
			s = splsched();
			processor_lock(processor);
			processor->state = PROCESSOR_OFF_LINE;
			processor_unlock(processor);
			splx(s);

			return (result);
		}

		/* Bind the startup thread and stage it as next to run. */
		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);	/* processor holds the reference now */
	}

	/* Create the processor's IPC port on first start. */
	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(PROCESSOR_DATA(processor, slot_num));
	if (result != KERN_SUCCESS) {
		/* Roll back and tear down the quantum timer. */
		s = splsched();
		processor_lock(processor);
		processor->state = PROCESSOR_OFF_LINE;
		timer_call_shutdown(processor);
		processor_unlock(processor);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
562
563 kern_return_t
564 processor_exit(
565 processor_t processor)
566 {
567 if (processor == PROCESSOR_NULL)
568 return(KERN_INVALID_ARGUMENT);
569
570 return(processor_shutdown(processor));
571 }
572
573 kern_return_t
574 processor_control(
575 processor_t processor,
576 processor_info_t info,
577 mach_msg_type_number_t count)
578 {
579 if (processor == PROCESSOR_NULL)
580 return(KERN_INVALID_ARGUMENT);
581
582 return(cpu_control(PROCESSOR_DATA(processor, slot_num), info, count));
583 }
584
585 /*
586 * Calculate the appropriate timesharing quanta based on set load.
587 */
588
589 void
590 timeshare_quanta_update(
591 processor_set_t pset)
592 {
593 int pcount = pset->processor_count;
594 int i = pset->runq.count;
595
596 if (i >= pcount)
597 i = 1;
598 else
599 if (i <= 1)
600 i = pcount;
601 else
602 i = (pcount + (i / 2)) / i;
603
604 pset->timeshare_quanta = i;
605 }
606
/*
 *	processor_set_create:
 *
 *	Not supported: only the statically allocated default
 *	processor set exists.
 */
kern_return_t
processor_set_create(
	__unused host_t		host,
	__unused processor_set_t	*new_set,
	__unused processor_set_t	*new_name)
{
	return(KERN_FAILURE);
}
615
/*
 *	processor_set_destroy:
 *
 *	Not supported: the default processor set cannot be destroyed.
 */
kern_return_t
processor_set_destroy(
	__unused processor_set_t	pset)
{
	return(KERN_FAILURE);
}
622
623 kern_return_t
624 processor_get_assignment(
625 processor_t processor,
626 processor_set_t *pset)
627 {
628 int state;
629
630 state = processor->state;
631 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
632 return(KERN_FAILURE);
633
634 *pset = processor->processor_set;
635 pset_reference(*pset);
636 return(KERN_SUCCESS);
637 }
638
639 kern_return_t
640 processor_set_info(
641 processor_set_t pset,
642 int flavor,
643 host_t *host,
644 processor_set_info_t info,
645 mach_msg_type_number_t *count)
646 {
647 if (pset == PROCESSOR_SET_NULL)
648 return(KERN_INVALID_ARGUMENT);
649
650 if (flavor == PROCESSOR_SET_BASIC_INFO) {
651 register processor_set_basic_info_t basic_info;
652
653 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
654 return(KERN_FAILURE);
655
656 basic_info = (processor_set_basic_info_t) info;
657 basic_info->processor_count = pset->processor_count;
658 basic_info->default_policy = POLICY_TIMESHARE;
659
660 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
661 *host = &realhost;
662 return(KERN_SUCCESS);
663 }
664 else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
665 register policy_timeshare_base_t ts_base;
666
667 if (*count < POLICY_TIMESHARE_BASE_COUNT)
668 return(KERN_FAILURE);
669
670 ts_base = (policy_timeshare_base_t) info;
671 ts_base->base_priority = BASEPRI_DEFAULT;
672
673 *count = POLICY_TIMESHARE_BASE_COUNT;
674 *host = &realhost;
675 return(KERN_SUCCESS);
676 }
677 else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
678 register policy_fifo_base_t fifo_base;
679
680 if (*count < POLICY_FIFO_BASE_COUNT)
681 return(KERN_FAILURE);
682
683 fifo_base = (policy_fifo_base_t) info;
684 fifo_base->base_priority = BASEPRI_DEFAULT;
685
686 *count = POLICY_FIFO_BASE_COUNT;
687 *host = &realhost;
688 return(KERN_SUCCESS);
689 }
690 else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
691 register policy_rr_base_t rr_base;
692
693 if (*count < POLICY_RR_BASE_COUNT)
694 return(KERN_FAILURE);
695
696 rr_base = (policy_rr_base_t) info;
697 rr_base->base_priority = BASEPRI_DEFAULT;
698 rr_base->quantum = 1;
699
700 *count = POLICY_RR_BASE_COUNT;
701 *host = &realhost;
702 return(KERN_SUCCESS);
703 }
704 else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
705 register policy_timeshare_limit_t ts_limit;
706
707 if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
708 return(KERN_FAILURE);
709
710 ts_limit = (policy_timeshare_limit_t) info;
711 ts_limit->max_priority = MAXPRI_KERNEL;
712
713 *count = POLICY_TIMESHARE_LIMIT_COUNT;
714 *host = &realhost;
715 return(KERN_SUCCESS);
716 }
717 else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
718 register policy_fifo_limit_t fifo_limit;
719
720 if (*count < POLICY_FIFO_LIMIT_COUNT)
721 return(KERN_FAILURE);
722
723 fifo_limit = (policy_fifo_limit_t) info;
724 fifo_limit->max_priority = MAXPRI_KERNEL;
725
726 *count = POLICY_FIFO_LIMIT_COUNT;
727 *host = &realhost;
728 return(KERN_SUCCESS);
729 }
730 else if (flavor == PROCESSOR_SET_RR_LIMITS) {
731 register policy_rr_limit_t rr_limit;
732
733 if (*count < POLICY_RR_LIMIT_COUNT)
734 return(KERN_FAILURE);
735
736 rr_limit = (policy_rr_limit_t) info;
737 rr_limit->max_priority = MAXPRI_KERNEL;
738
739 *count = POLICY_RR_LIMIT_COUNT;
740 *host = &realhost;
741 return(KERN_SUCCESS);
742 }
743 else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
744 register int *enabled;
745
746 if (*count < (sizeof(*enabled)/sizeof(int)))
747 return(KERN_FAILURE);
748
749 enabled = (int *) info;
750 *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
751
752 *count = sizeof(*enabled)/sizeof(int);
753 *host = &realhost;
754 return(KERN_SUCCESS);
755 }
756
757
758 *host = HOST_NULL;
759 return(KERN_INVALID_ARGUMENT);
760 }
761
762 /*
763 * processor_set_statistics
764 *
765 * Returns scheduling statistics for a processor set.
766 */
767 kern_return_t
768 processor_set_statistics(
769 processor_set_t pset,
770 int flavor,
771 processor_set_info_t info,
772 mach_msg_type_number_t *count)
773 {
774 if (pset == PROCESSOR_SET_NULL)
775 return (KERN_INVALID_PROCESSOR_SET);
776
777 if (flavor == PROCESSOR_SET_LOAD_INFO) {
778 register processor_set_load_info_t load_info;
779
780 if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
781 return(KERN_FAILURE);
782
783 load_info = (processor_set_load_info_t) info;
784
785 pset_lock(pset);
786 load_info->task_count = pset->task_count;
787 load_info->thread_count = pset->thread_count;
788 load_info->mach_factor = pset->mach_factor;
789 load_info->load_average = pset->load_average;
790 pset_unlock(pset);
791
792 *count = PROCESSOR_SET_LOAD_INFO_COUNT;
793 return(KERN_SUCCESS);
794 }
795
796 return(KERN_INVALID_ARGUMENT);
797 }
798
799 /*
800 * processor_set_max_priority:
801 *
802 * Specify max priority permitted on processor set. This affects
803 * newly created and assigned threads. Optionally change existing
804 * ones.
805 */
/* Not supported: priority limits on the default set are fixed. */
kern_return_t
processor_set_max_priority(
	__unused processor_set_t	pset,
	__unused int			max_priority,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
814
815 /*
816 * processor_set_policy_enable:
817 *
818 * Allow indicated policy on processor set.
819 */
820
/* Not supported: the enabled-policy set is fixed. */
kern_return_t
processor_set_policy_enable(
	__unused processor_set_t	pset,
	__unused int			policy)
{
	return (KERN_INVALID_ARGUMENT);
}
828
829 /*
830 * processor_set_policy_disable:
831 *
832 * Forbid indicated policy on processor set. Time sharing cannot
833 * be forbidden.
834 */
/* Not supported: the enabled-policy set is fixed. */
kern_return_t
processor_set_policy_disable(
	__unused processor_set_t	pset,
	__unused int			policy,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
843
844 #define THING_TASK 0
845 #define THING_THREAD 1
846
847 /*
848 * processor_set_things:
849 *
850 * Common internals for processor_set_{threads,tasks}
851 */
/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}.  Snapshots
 *	the set's task or thread list (type selects which), taking a
 *	reference on each element, converts the references to send
 *	rights, and returns the kalloc'd array in *thing_list/*count.
 *	Ownership of the array and the ports passes to the caller.
 */
kern_return_t
processor_set_things(
	processor_set_t			pset,
	mach_port_t				**thing_list,
	mach_msg_type_number_t	*count,
	int						type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = 0;

	/*
	 * Lock the set, size the buffer from its current count, and
	 * retry if the count grew while we were allocating unlocked.
	 */
	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);

			return (KERN_FAILURE);
		}

		if (type == THING_TASK)
			maxthings = pset->task_count;
		else
			maxthings = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the processor_set is locked & active */

	/* Snapshot the list, taking a reference on every element. */
	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t		task, *tasks = (task_t *)addr;

		for (task = (task_t)queue_first(&pset->tasks);
				!queue_end(&pset->tasks, (queue_entry_t)task);
					task = (task_t)queue_next(&task->pset_tasks)) {
			task_reference_internal(task);
			tasks[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t	thread, *threads = (thread_t *)addr;

		for (i = 0, thread = (thread_t)queue_first(&pset->threads);
				!queue_end(&pset->threads, (queue_entry_t)thread);
					thread = (thread_t)queue_next(&thread->pset_threads)) {
			thread_reference_internal(thread);
			threads[actual++] = thread;
		}

		break;
	}
	}

	pset_unlock(pset);

	/* The count can only have shrunk since the buffer was sized. */
	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* Undo the references taken above before failing. */
				switch (type) {

				case THING_TASK:
				{
					task_t		*tasks = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t	*threads = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(threads[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		/* Each convert_* consumes its reference and yields a port. */
		switch (type) {

		case THING_TASK:
		{
			task_t		*tasks = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(tasks[i]);
			break;
		}

		case THING_THREAD:
		{
			thread_t	*threads = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(threads[i]);
			break;
		}
		}
	}

	return (KERN_SUCCESS);
}
1018
1019
1020 /*
1021 * processor_set_tasks:
1022 *
1023 * List all tasks in the processor set.
1024 */
/* Returns a kalloc'd array of task send rights; caller owns it. */
kern_return_t
processor_set_tasks(
	processor_set_t		pset,
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
}
1033
1034 /*
1035 * processor_set_threads:
1036 *
1037 * List all threads in the processor set.
1038 */
/* Returns a kalloc'd array of thread send rights; caller owns it. */
kern_return_t
processor_set_threads(
	processor_set_t		pset,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}
1047
1048 /*
1049 * processor_set_base:
1050 *
1051 * Specify per-policy base priority for a processor set. Set processor
1052 * set default policy to the given policy. This affects newly created
1053 * and assigned threads. Optionally change existing ones.
1054 */
/* Not supported: per-policy base priorities cannot be changed. */
kern_return_t
processor_set_base(
	__unused processor_set_t 	pset,
	__unused policy_t        	policy,
	__unused policy_base_t   	base,
	__unused boolean_t       	change)
{
	return (KERN_INVALID_ARGUMENT);
}
1064
1065 /*
1066 * processor_set_limit:
1067 *
1068 * Specify per-policy limits for a processor set. This affects
1069 * newly created and assigned threads. Optionally change existing
1070 * ones.
1071 */
/*
 * Not supported: per-policy limits cannot be changed.
 * NOTE: returns KERN_POLICY_LIMIT (not KERN_INVALID_ARGUMENT like
 * the other stubs) — callers distinguish this case.
 */
kern_return_t
processor_set_limit(
	__unused processor_set_t 	pset,
	__unused policy_t		policy,
	__unused policy_limit_t		limit,
	__unused boolean_t		change)
{
	return (KERN_POLICY_LIMIT);
}
1081
1082 /*
1083 * processor_set_policy_control
1084 *
1085 * Controls the scheduling attributes governing the processor set.
1086 * Allows control of enabled policies, and per-policy base and limit
1087 * priorities.
1088 */
/* Not supported: scheduling attributes of the set are fixed. */
kern_return_t
processor_set_policy_control(
	__unused processor_set_t		pset,
	__unused int				flavor,
	__unused processor_set_info_t	policy_info,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t			change)
{
	return (KERN_INVALID_ARGUMENT);
}