/*
 * (Removed extraction artifact: git web-viewer header lines for
 * apple/xnu.git, tag xnu-517.9.5, blob osfmk/kern/processor.c.)
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52
53 /*
54 * processor.c: processor and processor_set manipulation routines.
55 */
56
57 #include <cpus.h>
58
59 #include <mach/boolean.h>
60 #include <mach/policy.h>
61 #include <mach/processor_info.h>
62 #include <mach/vm_param.h>
63 #include <kern/cpu_number.h>
64 #include <kern/host.h>
65 #include <kern/machine.h>
66 #include <kern/misc_protos.h>
67 #include <kern/processor.h>
68 #include <kern/sched.h>
69 #include <kern/task.h>
70 #include <kern/thread.h>
71 #include <kern/ipc_host.h>
72 #include <kern/ipc_tt.h>
73 #include <ipc/ipc_port.h>
74 #include <kern/kalloc.h>
75
76 /*
77 * Exported interface
78 */
79 #include <mach/mach_host_server.h>
80
/*
 * Exported variables.
 */
struct processor_set	default_pset;			/* the single system-wide processor set */
struct processor	processor_array[NCPUS];		/* static storage for one processor per CPU slot */

int	master_cpu = 0;					/* slot number of the boot processor */

processor_t	master_processor;			/* set by pset_sys_bootstrap() */
processor_t	processor_ptr[NCPUS];			/* cpu_to_processor(i) == processor_ptr[i] */
92 /* Forwards */
93 void pset_init(
94 processor_set_t pset);
95
96 void processor_init(
97 register processor_t pr,
98 int slot_num);
99
100 void pset_quanta_setup(
101 processor_set_t pset);
102
103 kern_return_t processor_set_base(
104 processor_set_t pset,
105 policy_t policy,
106 policy_base_t base,
107 boolean_t change);
108
109 kern_return_t processor_set_limit(
110 processor_set_t pset,
111 policy_t policy,
112 policy_limit_t limit,
113 boolean_t change);
114
115 kern_return_t processor_set_things(
116 processor_set_t pset,
117 mach_port_t **thing_list,
118 mach_msg_type_number_t *count,
119 int type);
120
121
122 /*
123 * Bootstrap the processor/pset system so the scheduler can run.
124 */
125 void
126 pset_sys_bootstrap(void)
127 {
128 register int i;
129
130 pset_init(&default_pset);
131
132 for (i = 0; i < NCPUS; i++) {
133 /*
134 * Initialize processor data structures.
135 * Note that cpu_to_processor(i) is processor_ptr[i].
136 */
137 processor_ptr[i] = &processor_array[i];
138 processor_init(processor_ptr[i], i);
139 }
140
141 master_processor = cpu_to_processor(master_cpu);
142
143 default_pset.active = TRUE;
144 }
145
/*
 * Initialize the given processor_set structure.
 *
 * Builds an empty pset: empty run queue, no idle or active
 * processors, no tasks or threads, and default scheduling state.
 * The pset is left inactive; pset_sys_bootstrap() activates it.
 */

void	pset_init(
	register processor_set_t	pset)
{
	register int	i;

	/* setup run queue: empty at every priority level */
	pset->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		pset->runq.bitmap[i] = 0;
	/* bitmap bits count down from MAXPRI; mark the IDLEPRI queue occupied */
	setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);
	pset->runq.urgency = pset->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&pset->runq.queues[i]);

	queue_init(&pset->idle_queue);
	pset->idle_count = 0;
	queue_init(&pset->active_queue);
	simple_lock_init(&pset->sched_lock, ETAP_THREAD_PSET_IDLE);
	pset->run_count = pset->share_count = 0;
	pset->mach_factor = pset->load_average = 0;
	pset->sched_load = 0;
	queue_init(&pset->processors);
	pset->processor_count = 0;
	queue_init(&pset->tasks);
	pset->task_count = 0;
	queue_init(&pset->threads);
	pset->thread_count = 0;
	pset->ref_count = 1;		/* implicit system reference, never dropped */
	pset->active = FALSE;
	mutex_init(&pset->lock, ETAP_THREAD_PSET);
	pset->pset_self = IP_NULL;	/* IPC ports created lazily elsewhere */
	pset->pset_name_self = IP_NULL;
	pset->timeshare_quanta = 1;

	/*
	 * quantum_factors[] is indexed by run count, 0..NCPUS inclusive
	 * (see pset_quanta_setup()), hence the <= bound here.
	 */
	for (i = 0; i <= NCPUS; i++)
		pset->quantum_factors[i] = 1;
}
187
/*
 * Initialize the given processor structure for the processor in
 * the slot specified by slot_num.
 *
 * The processor starts off-line, with an empty local run queue,
 * no bound threads, and no processor set assignment (it is added
 * to a pset later via pset_add_processor()).
 */
void
processor_init(
	register processor_t	p,
	int			slot_num)
{
	register int	i;

	/* setup run queue: empty at every priority level */
	p->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		p->runq.bitmap[i] = 0;
	/* bitmap bits count down from MAXPRI; mark the IDLEPRI queue occupied */
	setbit(MAXPRI - IDLEPRI, p->runq.bitmap);
	p->runq.urgency = p->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&p->runq.queues[i]);

	p->state = PROCESSOR_OFF_LINE;
	p->active_thread = p->next_thread = p->idle_thread = THREAD_NULL;
	p->processor_set = PROCESSOR_SET_NULL;
	p->current_pri = MINPRI;
	/* per-processor timer used to expire the running thread's quantum */
	timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);
	p->timeslice = 0;
	p->deadline = UINT64_MAX;	/* no realtime deadline pending */
	simple_lock_init(&p->lock, ETAP_THREAD_PROC);
	p->processor_self = IP_NULL;	/* IPC port created by ipc_processor_init() */
	p->slot_num = slot_num;
}
219
220 /*
221 * pset_deallocate:
222 *
223 * Remove one reference to the processor set. Destroy processor_set
224 * if this was the last reference.
225 */
226 void
227 pset_deallocate(
228 processor_set_t pset)
229 {
230 if (pset == PROCESSOR_SET_NULL)
231 return;
232
233 assert(pset == &default_pset);
234 return;
235 }
236
237 /*
238 * pset_reference:
239 *
240 * Add one reference to the processor set.
241 */
242 void
243 pset_reference(
244 processor_set_t pset)
245 {
246 assert(pset == &default_pset);
247 }
248
/* Locked variant of pset_reference(); same no-op sanity check. */
#define pset_reference_locked(pset) assert(pset == &default_pset)
250
251 /*
252 * pset_remove_processor() removes a processor from a processor_set.
253 * It can only be called on the current processor. Caller must
254 * hold lock on current processor and processor set.
255 */
256 void
257 pset_remove_processor(
258 processor_set_t pset,
259 processor_t processor)
260 {
261 if (pset != processor->processor_set)
262 panic("pset_remove_processor: wrong pset");
263
264 queue_remove(&pset->processors, processor, processor_t, processors);
265 processor->processor_set = PROCESSOR_SET_NULL;
266 pset->processor_count--;
267 pset_quanta_setup(pset);
268 }
269
270 /*
271 * pset_add_processor() adds a processor to a processor_set.
272 * It can only be called on the current processor. Caller must
273 * hold lock on curent processor and on pset. No reference counting on
274 * processors. Processor reference to pset is implicit.
275 */
276 void
277 pset_add_processor(
278 processor_set_t pset,
279 processor_t processor)
280 {
281 queue_enter(&pset->processors, processor, processor_t, processors);
282 processor->processor_set = pset;
283 pset->processor_count++;
284 pset_quanta_setup(pset);
285 }
286
287 /*
288 * pset_remove_task() removes a task from a processor_set.
289 * Caller must hold locks on pset and task (unless task has
290 * no references left, in which case just the pset lock is
291 * needed). Pset reference count is not decremented;
292 * caller must explicitly pset_deallocate.
293 */
294 void
295 pset_remove_task(
296 processor_set_t pset,
297 task_t task)
298 {
299 if (pset != task->processor_set)
300 return;
301
302 queue_remove(&pset->tasks, task, task_t, pset_tasks);
303 task->processor_set = PROCESSOR_SET_NULL;
304 pset->task_count--;
305 }
306
307 /*
308 * pset_add_task() adds a task to a processor_set.
309 * Caller must hold locks on pset and task. Pset references to
310 * tasks are implicit.
311 */
312 void
313 pset_add_task(
314 processor_set_t pset,
315 task_t task)
316 {
317 queue_enter(&pset->tasks, task, task_t, pset_tasks);
318 task->processor_set = pset;
319 pset->task_count++;
320 pset_reference_locked(pset);
321 }
322
323 /*
324 * pset_remove_thread() removes a thread from a processor_set.
325 * Caller must hold locks on pset and thread (but only if thread
326 * has outstanding references that could be used to lookup the pset).
327 * The pset reference count is not decremented; caller must explicitly
328 * pset_deallocate.
329 */
330 void
331 pset_remove_thread(
332 processor_set_t pset,
333 thread_t thread)
334 {
335 queue_remove(&pset->threads, thread, thread_t, pset_threads);
336 thread->processor_set = PROCESSOR_SET_NULL;
337 pset->thread_count--;
338 }
339
340 /*
341 * pset_add_thread() adds a thread to a processor_set.
342 * Caller must hold locks on pset and thread. Pset references to
343 * threads are implicit.
344 */
345 void
346 pset_add_thread(
347 processor_set_t pset,
348 thread_t thread)
349 {
350 queue_enter(&pset->threads, thread, thread_t, pset_threads);
351 thread->processor_set = pset;
352 pset->thread_count++;
353 pset_reference_locked(pset);
354 }
355
356 /*
357 * thread_change_psets() changes the pset of a thread. Caller must
358 * hold locks on both psets and thread. The old pset must be
359 * explicitly pset_deallocat()'ed by caller.
360 */
361 void
362 thread_change_psets(
363 thread_t thread,
364 processor_set_t old_pset,
365 processor_set_t new_pset)
366 {
367 queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
368 old_pset->thread_count--;
369 queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
370 thread->processor_set = new_pset;
371 new_pset->thread_count++;
372 pset_reference_locked(new_pset);
373 }
374
375
376 kern_return_t
377 processor_info_count(
378 processor_flavor_t flavor,
379 mach_msg_type_number_t *count)
380 {
381 kern_return_t kr;
382
383 switch (flavor) {
384 case PROCESSOR_BASIC_INFO:
385 *count = PROCESSOR_BASIC_INFO_COUNT;
386 return KERN_SUCCESS;
387 case PROCESSOR_CPU_LOAD_INFO:
388 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
389 return KERN_SUCCESS;
390 default:
391 kr = cpu_info_count(flavor, count);
392 return kr;
393 }
394 }
395
396
/*
 * processor_info:
 *
 * Return information of the given flavor about a processor.
 * On success *count is set to the number of integers returned in
 * info[] and *host is set to the host the processor belongs to.
 * Unknown flavors are delegated to the machine-dependent cpu_info().
 */
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	register int	i, slot_num, state;
	register processor_basic_info_t		basic_info;
	register processor_cpu_load_info_t	cpu_load_info;
	kern_return_t	kr;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	slot_num = processor->slot_num;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		/* caller's buffer must be large enough */
		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = machine_slot[slot_num].cpu_type;
		basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
		/* "running" is any state other than off-line (unlocked snapshot) */
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = slot_num;
		if (processor == master_processor) 
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	case PROCESSOR_CPU_LOAD_INFO:
	{
		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		/* copy out per-state tick counters for this slot */
		cpu_load_info = (processor_cpu_load_info_t) info;
		for (i=0;i<CPU_STATE_MAX;i++)
			cpu_load_info->cpu_ticks[i] = machine_slot[slot_num].cpu_ticks[i];

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	default:
	{
		/* machine-dependent flavor; cpu_info() validates it */
		kr=cpu_info(flavor, slot_num, info, count);
		if (kr == KERN_SUCCESS)
			*host = &realhost;		   
		return(kr);
	}
	}
}
462
/*
 * processor_start:
 *
 * Bring a processor on line.  For the master processor the calling
 * thread binds itself there and invokes cpu_start() directly; for
 * any other processor the state machine is moved OFF_LINE -> START,
 * a startup thread is staged as next_thread if none is pending, and
 * cpu_start() boots the slot.  On failure the processor is returned
 * to PROCESSOR_OFF_LINE.
 */
kern_return_t
processor_start(
	processor_t	processor)
{
	kern_return_t	result;
	spl_t		s;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		processor_t		prev;

		/* run on the master itself while (re)starting it */
		prev = thread_bind(current_thread(), processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->slot_num);

		thread_bind(current_thread(), prev);

		return (result);
	}

	/* only an off-line processor may be started */
	s = splsched();
	processor_lock(processor);
	if (processor->state != PROCESSOR_OFF_LINE) {
		processor_unlock(processor);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	processor_unlock(processor);
	splx(s);

	/*
	 * Stage a kernel thread for the new CPU to run first,
	 * unless a previous (failed) start already left one queued.
	 */
	if (processor->next_thread == THREAD_NULL) {
		thread_t		thread;	  
		extern void		start_cpu_thread(void);

		thread = kernel_thread_create(start_cpu_thread, MAXPRI_KERNEL);

		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		pset_run_incr(thread->processor_set);
		thread_unlock(thread);
		splx(s);
	}

	/* make sure the processor's control port exists before enabling IPC */
	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->slot_num);	
	if (result != KERN_SUCCESS) {
		/*
		 * Roll the state back.  NOTE(review): the staged
		 * next_thread is left in place — presumably reused by
		 * a subsequent start attempt; verify before changing.
		 */
		s = splsched();
		processor_lock(processor);
		processor->state = PROCESSOR_OFF_LINE;
		processor_unlock(processor);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
533
534 kern_return_t
535 processor_exit(
536 processor_t processor)
537 {
538 if (processor == PROCESSOR_NULL)
539 return(KERN_INVALID_ARGUMENT);
540
541 return(processor_shutdown(processor));
542 }
543
544 kern_return_t
545 processor_control(
546 processor_t processor,
547 processor_info_t info,
548 mach_msg_type_number_t count)
549 {
550 if (processor == PROCESSOR_NULL)
551 return(KERN_INVALID_ARGUMENT);
552
553 return(cpu_control(processor->slot_num, info, count));
554 }
555
/*
 * Precalculate the appropriate timesharing quanta based on load.  The
 * index into quantum_factors[] is the number of threads on the
 * processor set queue.  It is limited to the number of processors in
 * the set.
 */

void
pset_quanta_setup(
	processor_set_t		pset)
{
	register int	i, count = pset->processor_count;

	/*
	 * quantum_factors[i] = round(count / i): with i runnable
	 * threads on count processors, each thread gets a quantum
	 * scaled up by this factor.
	 */
	for (i = 1; i <= count; i++)
		pset->quantum_factors[i] = (count + (i / 2)) / i;

	/* index 0 (empty run queue) mirrors the single-thread factor */
	pset->quantum_factors[0] = pset->quantum_factors[1];

	timeshare_quanta_update(pset);
}
576
577 kern_return_t
578 processor_set_create(
579 host_t host,
580 processor_set_t *new_set,
581 processor_set_t *new_name)
582 {
583 #ifdef lint
584 host++; new_set++; new_name++;
585 #endif /* lint */
586 return(KERN_FAILURE);
587 }
588
589 kern_return_t
590 processor_set_destroy(
591 processor_set_t pset)
592 {
593 #ifdef lint
594 pset++;
595 #endif /* lint */
596 return(KERN_FAILURE);
597 }
598
599 kern_return_t
600 processor_get_assignment(
601 processor_t processor,
602 processor_set_t *pset)
603 {
604 int state;
605
606 state = processor->state;
607 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
608 return(KERN_FAILURE);
609
610 *pset = processor->processor_set;
611 pset_reference(*pset);
612 return(KERN_SUCCESS);
613 }
614
/*
 * processor_set_info:
 *
 * Return information of the given flavor about a processor set.
 * Each recognized flavor validates the caller-supplied buffer size
 * via *count, fills in the fixed (policy parameters are immutable
 * in this kernel) values, and sets *count and *host on success.
 * Unknown flavors return KERN_INVALID_ARGUMENT with *host cleared.
 */
kern_return_t
processor_set_info(
	processor_set_t		pset,
	int			flavor,
	host_t			*host,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		register processor_set_basic_info_t	basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		/* unlocked snapshot of the processor count */
		basic_info = (processor_set_basic_info_t) info;
		basic_info->processor_count = pset->processor_count;
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		register policy_timeshare_base_t	ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		register policy_fifo_base_t		fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return(KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		register policy_rr_base_t		rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return(KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		register policy_timeshare_limit_t	ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return(KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_STANDARD;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		register policy_fifo_limit_t		fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return(KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_STANDARD;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		register policy_rr_limit_t		rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return(KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_STANDARD;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		register int				*enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return(KERN_FAILURE);

		/* all three scheduling policies are nominally enabled */
		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled)/sizeof(int);
		*host = &realhost;
		return(KERN_SUCCESS);
	}


	*host = HOST_NULL;
	return(KERN_INVALID_ARGUMENT);
}
737
/*
 *	processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set. 
 */
kern_return_t 
processor_set_statistics(
	processor_set_t         pset,
	int                     flavor,
	processor_set_info_t    info,
	mach_msg_type_number_t	*count)
{
        if (pset == PROCESSOR_SET_NULL)
                return (KERN_INVALID_PROCESSOR_SET);

        if (flavor == PROCESSOR_SET_LOAD_INFO) {
                register processor_set_load_info_t     load_info;

                if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
                        return(KERN_FAILURE);

                load_info = (processor_set_load_info_t) info;

		/* take the pset lock so the four fields form a consistent snapshot */
                pset_lock(pset);
                load_info->task_count = pset->task_count;
                load_info->thread_count = pset->thread_count;
                load_info->mach_factor = pset->mach_factor;
                load_info->load_average = pset->load_average;
                pset_unlock(pset);

                *count = PROCESSOR_SET_LOAD_INFO_COUNT;
                return(KERN_SUCCESS);
        }

	/* only PROCESSOR_SET_LOAD_INFO is supported */
        return(KERN_INVALID_ARGUMENT);
}
774
775 /*
776 * processor_set_max_priority:
777 *
778 * Specify max priority permitted on processor set. This affects
779 * newly created and assigned threads. Optionally change existing
780 * ones.
781 */
782 kern_return_t
783 processor_set_max_priority(
784 processor_set_t pset,
785 int max_priority,
786 boolean_t change_threads)
787 {
788 return (KERN_INVALID_ARGUMENT);
789 }
790
791 /*
792 * processor_set_policy_enable:
793 *
794 * Allow indicated policy on processor set.
795 */
796
797 kern_return_t
798 processor_set_policy_enable(
799 processor_set_t pset,
800 int policy)
801 {
802 return (KERN_INVALID_ARGUMENT);
803 }
804
805 /*
806 * processor_set_policy_disable:
807 *
808 * Forbid indicated policy on processor set. Time sharing cannot
809 * be forbidden.
810 */
811 kern_return_t
812 processor_set_policy_disable(
813 processor_set_t pset,
814 int policy,
815 boolean_t change_threads)
816 {
817 return (KERN_INVALID_ARGUMENT);
818 }
819
/* discriminator for processor_set_things() */
#define THING_TASK	0
#define THING_THREAD	1

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}.
 *
 *	Snapshots the pset's task or thread list (per `type`) into a
 *	kalloc'd array of send rights, returned via *thing_list/*count.
 *	The caller receives ownership of the array and one port
 *	reference per entry.  Uses the classic allocate/lock/recheck
 *	loop because the population can change while unlocked.
 */
kern_return_t
processor_set_things(
	processor_set_t			pset,
	mach_port_t			**thing_list,
	mach_msg_type_number_t		*count,
	int				type)
{
	unsigned int actual;	/* this many things */
	int i;

	vm_size_t size, size_needed;
	vm_offset_t addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_FAILURE;
		}

		if (type == THING_TASK)
			actual = pset->task_count;
		else
			actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */

	switch (type) {
	case THING_TASK: {
		task_t *tasks = (task_t *) addr;
		task_t task;

		for (i = 0, task = (task_t) queue_first(&pset->tasks);
			 !queue_end(&pset->tasks, (queue_entry_t) task);
			 task = (task_t) queue_next(&task->pset_tasks)) {

			/* skip dying tasks; take a ref on the rest */
			task_lock(task);
			if (task->ref_count > 0) {
				/* take ref for convert_task_to_port */
				task_reference_locked(task);
				tasks[i++] = task;
			}
			task_unlock(task);
		}
		break;
	}

	case THING_THREAD: {
		thread_act_t *thr_acts = (thread_act_t *) addr;
		thread_t thread;
		thread_act_t thr_act;

		for (i = 0, thread = (thread_t) queue_first(&pset->threads);
			 !queue_end(&pset->threads, (queue_entry_t)thread);
			 thread = (thread_t) queue_next(&thread->pset_threads)) {

			/* skip threads with no activation or a dying one */
			thr_act = thread_lock_act(thread);
			if (thr_act && thr_act->act_ref_count > 0) {
				/* take ref for convert_act_to_port */
				act_reference_locked(thr_act);
				thr_acts[i++] = thr_act;
			}
			thread_unlock_act(thread);
		}
		break;
	}
	}
	
	/* can unlock processor set now that we have the task/thread refs */
	pset_unlock(pset);

	/* some entries may have been skipped above, so i may trail actual */
	if (i < actual) {
		actual = i;
		size_needed = actual * sizeof(mach_port_t);
	}
	assert(i == actual);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			vm_offset_t newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* drop all the refs taken above before bailing out */
				switch (type) {
				case THING_TASK: {
					task_t *tasks = (task_t *) addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				}

				case THING_THREAD: {
					thread_act_t *acts = (thread_act_t *) addr;

					for (i = 0; i < actual; i++)
						act_deallocate(acts[i]);
					break;
				}
				}
				kfree(addr, size);
				return KERN_RESOURCE_SHORTAGE;
			}

			bcopy((char *) addr, (char *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *) addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		/* convert the refs in place into send rights for the caller */
		switch (type) {
		case THING_TASK: {
			task_t *tasks = (task_t *) addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(tasks[i]);
			break;
		}

		case THING_THREAD: {
			thread_act_t *thr_acts = (thread_act_t *) addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_act_to_port(thr_acts[i]);
			break;
		}
		}
	}

	return(KERN_SUCCESS);
}
997
998
999 /*
1000 * processor_set_tasks:
1001 *
1002 * List all tasks in the processor set.
1003 */
1004 kern_return_t
1005 processor_set_tasks(
1006 processor_set_t pset,
1007 task_array_t *task_list,
1008 mach_msg_type_number_t *count)
1009 {
1010 return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
1011 }
1012
1013 /*
1014 * processor_set_threads:
1015 *
1016 * List all threads in the processor set.
1017 */
1018 kern_return_t
1019 processor_set_threads(
1020 processor_set_t pset,
1021 thread_array_t *thread_list,
1022 mach_msg_type_number_t *count)
1023 {
1024 return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
1025 }
1026
1027 /*
1028 * processor_set_base:
1029 *
1030 * Specify per-policy base priority for a processor set. Set processor
1031 * set default policy to the given policy. This affects newly created
1032 * and assigned threads. Optionally change existing ones.
1033 */
1034 kern_return_t
1035 processor_set_base(
1036 processor_set_t pset,
1037 policy_t policy,
1038 policy_base_t base,
1039 boolean_t change)
1040 {
1041 return (KERN_INVALID_ARGUMENT);
1042 }
1043
1044 /*
1045 * processor_set_limit:
1046 *
1047 * Specify per-policy limits for a processor set. This affects
1048 * newly created and assigned threads. Optionally change existing
1049 * ones.
1050 */
1051 kern_return_t
1052 processor_set_limit(
1053 processor_set_t pset,
1054 policy_t policy,
1055 policy_limit_t limit,
1056 boolean_t change)
1057 {
1058 return (KERN_POLICY_LIMIT);
1059 }
1060
1061 /*
1062 * processor_set_policy_control
1063 *
1064 * Controls the scheduling attributes governing the processor set.
1065 * Allows control of enabled policies, and per-policy base and limit
1066 * priorities.
1067 */
1068 kern_return_t
1069 processor_set_policy_control(
1070 processor_set_t pset,
1071 int flavor,
1072 processor_set_info_t policy_info,
1073 mach_msg_type_number_t count,
1074 boolean_t change)
1075 {
1076 return (KERN_INVALID_ARGUMENT);
1077 }