/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * processor.c: processor and processor_set manipulation routines.
 */

#include <cpus.h>

#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

/*
 * Exported interface
 */
#include <mach/mach_host_server.h>

/*
 * Exported variables.
 */
struct processor_set default_pset;
struct processor processor_array[NCPUS];

processor_t master_processor;
processor_t processor_ptr[NCPUS];

/* Forwards */
void pset_init(
	processor_set_t pset);

void processor_init(
	register processor_t pr,
	int slot_num);

void pset_quanta_set(
	processor_set_t pset);

kern_return_t processor_set_base(
	processor_set_t pset,
	policy_t policy,
	policy_base_t base,
	boolean_t change);

kern_return_t processor_set_limit(
	processor_set_t pset,
	policy_t policy,
	policy_limit_t limit,
	boolean_t change);

kern_return_t processor_set_things(
	processor_set_t pset,
	mach_port_t **thing_list,
	mach_msg_type_number_t *count,
	int type);

/*
 * Bootstrap the processor/pset system so the scheduler can run.
 */
void
pset_sys_bootstrap(void)
{
	register int i;

	pset_init(&default_pset);
	for (i = 0; i < NCPUS; i++) {
		/*
		 * Initialize processor data structures.
		 * Note that cpu_to_processor(i) is processor_ptr[i].
		 */
		processor_ptr[i] = &processor_array[i];
		processor_init(processor_ptr[i], i);
	}
	master_processor = cpu_to_processor(master_cpu);
	default_pset.active = TRUE;
}

/*
 * Initialize the given processor_set structure.
 */

void pset_init(
	register processor_set_t pset)
{
	int i;

	/* setup run-queues */
	simple_lock_init(&pset->runq.lock, ETAP_THREAD_PSET_RUNQ);
	pset->runq.count = 0;
	for (i = 0; i < NRQBM; i++) {
		pset->runq.bitmap[i] = 0;
	}
	setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);
	pset->runq.highq = IDLEPRI;
	for (i = 0; i < NRQS; i++) {
		queue_init(&(pset->runq.queues[i]));
	}

	queue_init(&pset->idle_queue);
	pset->idle_count = 0;
	simple_lock_init(&pset->idle_lock, ETAP_THREAD_PSET_IDLE);
	pset->mach_factor = pset->load_average = 0;
	pset->sched_load = 0;
	queue_init(&pset->processors);
	pset->processor_count = 0;
	simple_lock_init(&pset->processors_lock, ETAP_THREAD_PSET);
	queue_init(&pset->tasks);
	pset->task_count = 0;
	queue_init(&pset->threads);
	pset->thread_count = 0;
	pset->ref_count = 1;
	pset->active = FALSE;
	mutex_init(&pset->lock, ETAP_THREAD_PSET);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->set_quanta = 1;

	for (i = 0; i <= NCPUS; i++)
		pset->machine_quanta[i] = 1;
}

/*
 * Initialize the given processor structure for the processor in
 * the slot specified by slot_num.
 */
void
processor_init(
	register processor_t pr,
	int slot_num)
{
	int i;

	/* setup run-queues */
	simple_lock_init(&pr->runq.lock, ETAP_THREAD_PROC_RUNQ);
	pr->runq.count = 0;
	for (i = 0; i < NRQBM; i++) {
		pr->runq.bitmap[i] = 0;
	}
	setbit(MAXPRI - IDLEPRI, pr->runq.bitmap);
	pr->runq.highq = IDLEPRI;
	for (i = 0; i < NRQS; i++) {
		queue_init(&(pr->runq.queues[i]));
	}

	queue_init(&pr->processor_queue);
	pr->state = PROCESSOR_OFF_LINE;
	pr->next_thread = THREAD_NULL;
	pr->idle_thread = THREAD_NULL;
	timer_call_setup(&pr->quantum_timer, thread_quantum_expire, pr);
	pr->slice_quanta = 0;
	pr->processor_set = PROCESSOR_SET_NULL;
	pr->processor_set_next = PROCESSOR_SET_NULL;
	queue_init(&pr->processors);
	simple_lock_init(&pr->lock, ETAP_THREAD_PROC);
	pr->processor_self = IP_NULL;
	pr->slot_num = slot_num;
}

/*
 * pset_remove_processor() removes a processor from a processor_set.
 * It can only be called on the current processor. Caller must
 * hold lock on current processor and processor set.
 */
void
pset_remove_processor(
	processor_set_t pset,
	processor_t processor)
{
	if (pset != processor->processor_set)
		panic("pset_remove_processor: wrong pset");

	queue_remove(&pset->processors, processor, processor_t, processors);
	processor->processor_set = PROCESSOR_SET_NULL;
	pset->processor_count--;
	pset_quanta_set(pset);
}

/*
 * pset_add_processor() adds a processor to a processor_set.
 * It can only be called on the current processor. Caller must
 * hold lock on current processor and on pset. No reference counting on
 * processors. Processor reference to pset is implicit.
 */
void
pset_add_processor(
	processor_set_t pset,
	processor_t processor)
{
	queue_enter(&pset->processors, processor, processor_t, processors);
	processor->processor_set = pset;
	pset->processor_count++;
	pset_quanta_set(pset);
}

/*
 * pset_remove_task() removes a task from a processor_set.
 * Caller must hold locks on pset and task. Pset reference count
 * is not decremented; caller must explicitly pset_deallocate.
 */
void
pset_remove_task(
	processor_set_t pset,
	task_t task)
{
	if (pset != task->processor_set)
		return;

	queue_remove(&pset->tasks, task, task_t, pset_tasks);
	task->processor_set = PROCESSOR_SET_NULL;
	pset->task_count--;
}

/*
 * pset_add_task() adds a task to a processor_set.
 * Caller must hold locks on pset and task. Pset references to
 * tasks are implicit.
 */
void
pset_add_task(
	processor_set_t pset,
	task_t task)
{
	queue_enter(&pset->tasks, task, task_t, pset_tasks);
	task->processor_set = pset;
	pset->task_count++;
	pset->ref_count++;
}

/*
 * pset_remove_thread() removes a thread from a processor_set.
 * Caller must hold locks on pset and thread. Pset reference count
 * is not decremented; caller must explicitly pset_deallocate.
 */
void
pset_remove_thread(
	processor_set_t pset,
	thread_t thread)
{
	queue_remove(&pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = PROCESSOR_SET_NULL;
	pset->thread_count--;
}

/*
 * pset_add_thread() adds a thread to a processor_set.
 * Caller must hold locks on pset and thread. Pset references to
 * threads are implicit.
 */
void
pset_add_thread(
	processor_set_t pset,
	thread_t thread)
{
	queue_enter(&pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = pset;
	pset->thread_count++;
	pset->ref_count++;
}

/*
 * thread_change_psets() changes the pset of a thread. Caller must
 * hold locks on both psets and thread. The old pset must be
 * explicitly pset_deallocate()'d by caller.
 */
void
thread_change_psets(
	thread_t thread,
	processor_set_t old_pset,
	processor_set_t new_pset)
{
	queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
	old_pset->thread_count--;
	queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = new_pset;
	new_pset->thread_count++;
	new_pset->ref_count++;
}

/*
 * pset_deallocate:
 *
 * Remove one reference to the processor set. Destroy processor_set
 * if this was the last reference.
 */
void
pset_deallocate(
	processor_set_t pset)
{
	if (pset == PROCESSOR_SET_NULL)
		return;

	pset_lock(pset);
	if (--pset->ref_count > 0) {
		pset_unlock(pset);
		return;
	}

	panic("pset_deallocate: default_pset destroyed");
}

/*
 * pset_reference:
 *
 * Add one reference to the processor set.
 */
void
pset_reference(
	processor_set_t pset)
{
	pset_lock(pset);
	pset->ref_count++;
	pset_unlock(pset);
}

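/*
 * processor_info_count:
 *
 * Return the number of info elements for the given processor info
 * flavor; machine-dependent flavors are handled by cpu_info_count().
 */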
kern_return_t
processor_info_count(
	processor_flavor_t flavor,
	mach_msg_type_number_t *count)
{
	kern_return_t kr;

	switch (flavor) {
	case PROCESSOR_BASIC_INFO:
		*count = PROCESSOR_BASIC_INFO_COUNT;
		return KERN_SUCCESS;
	case PROCESSOR_CPU_LOAD_INFO:
		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		return KERN_SUCCESS;
	default:
		kr = cpu_info_count(flavor, count);
		return kr;
	}
}

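/*
 * processor_info:
 *
 * Return information about a processor for the requested flavor;
 * unrecognized flavors are passed through to the machine-dependent
 * cpu_info().
 */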
kern_return_t
processor_info(
	register processor_t processor,
	processor_flavor_t flavor,
	host_t *host,
	processor_info_t info,
	mach_msg_type_number_t *count)
{
	register int i, slot_num, state;
	register processor_basic_info_t basic_info;
	register processor_cpu_load_info_t cpu_load_info;
	kern_return_t kr;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	slot_num = processor->slot_num;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = machine_slot[slot_num].cpu_type;
		basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = slot_num;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	case PROCESSOR_CPU_LOAD_INFO:
	{
		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		for (i = 0; i < CPU_STATE_MAX; i++)
			cpu_load_info->cpu_ticks[i] = machine_slot[slot_num].cpu_ticks[i];

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	default:
	{
		kr = cpu_info(flavor, slot_num, info, count);
		if (kr == KERN_SUCCESS)
			*host = &realhost;
		return(kr);
	}
	}
}

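/*
 * processor_start:
 *
 * Bring a processor on line. Starting the master processor simply
 * binds the calling thread to it and calls cpu_start(); for any other
 * processor a start-up thread is created and bound before cpu_start()
 * is called.
 */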
kern_return_t
processor_start(
	processor_t processor)
{
	int state;
	spl_t s;
	kern_return_t kr;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		thread_bind(current_thread(), processor);
		thread_block((void (*)(void)) 0);
		kr = cpu_start(processor->slot_num);
		thread_bind(current_thread(), PROCESSOR_NULL);

		return(kr);
	}

	s = splsched();
	processor_lock(processor);

	state = processor->state;
	if (state != PROCESSOR_OFF_LINE) {
		processor_unlock(processor);
		splx(s);
		return(KERN_FAILURE);
	}
	processor->state = PROCESSOR_START;
	processor_unlock(processor);
	splx(s);

	if (processor->next_thread == THREAD_NULL) {
		thread_t thread;
		extern void start_cpu_thread(void);

		thread = kernel_thread_with_priority(
			kernel_task, MAXPRI_KERNEL,
			start_cpu_thread, TRUE, FALSE);

		s = splsched();
		thread_lock(thread);
		thread_bind_locked(thread, processor);
		thread_go_locked(thread, THREAD_AWAKENED);
		(void)rem_runq(thread);
		processor->next_thread = thread;
		thread_unlock(thread);
		splx(s);
	}

	kr = cpu_start(processor->slot_num);

	if (kr != KERN_SUCCESS) {
		s = splsched();
		processor_lock(processor);
		processor->state = PROCESSOR_OFF_LINE;
		processor_unlock(processor);
		splx(s);
	}

	return(kr);
}

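/*
 * processor_exit:
 *
 * Shut down the given processor via processor_shutdown().
 */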
kern_return_t
processor_exit(
	processor_t processor)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(processor_shutdown(processor));
}

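/*
 * processor_control:
 *
 * Pass machine-dependent control commands through to cpu_control().
 */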
kern_return_t
processor_control(
	processor_t processor,
	processor_info_t info,
	mach_msg_type_number_t count)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(cpu_control(processor->slot_num, info, count));
}

/*
 * Precalculate the appropriate timesharing quanta based on load. The
 * index into machine_quanta is the number of threads on the
 * processor set queue. It is limited to the number of processors in
 * the set.
 */

void
pset_quanta_set(
	processor_set_t pset)
{
	register int i, ncpus;

	ncpus = pset->processor_count;

	for (i = 1; i <= ncpus; i++)
		pset->machine_quanta[i] = (ncpus + (i / 2)) / i;

	pset->machine_quanta[0] = pset->machine_quanta[1];

	i = (pset->runq.count > ncpus) ? ncpus : pset->runq.count;
	pset->set_quanta = pset->machine_quanta[i];
}

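/*
 * processor_set_create, processor_set_destroy:
 *
 * Creating and destroying processor sets is not supported; only the
 * default processor set exists.
 */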
kern_return_t
processor_set_create(
	host_t host,
	processor_set_t *new_set,
	processor_set_t *new_name)
{
#ifdef lint
	host++; new_set++; new_name++;
#endif /* lint */
	return(KERN_FAILURE);
}

kern_return_t
processor_set_destroy(
	processor_set_t pset)
{
#ifdef lint
	pset++;
#endif /* lint */
	return(KERN_FAILURE);
}

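/*
 * processor_get_assignment:
 *
 * Return (with a reference) the processor set to which a running
 * processor is currently assigned.
 */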
kern_return_t
processor_get_assignment(
	processor_t processor,
	processor_set_t *pset)
{
	int state;

	state = processor->state;
	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
		return(KERN_FAILURE);

	*pset = processor->processor_set;
	pset_reference(*pset);
	return(KERN_SUCCESS);
}

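/*
 * processor_set_info:
 *
 * Return information about a processor set: basic counts, or the
 * default base, limit, and enabled-policy data for the scheduling
 * policies.
 */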
kern_return_t
processor_set_info(
	processor_set_t pset,
	int flavor,
	host_t *host,
	processor_set_info_t info,
	mach_msg_type_number_t *count)
{
	if (pset == PROCESSOR_SET_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		register processor_set_basic_info_t basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_set_basic_info_t) info;
		basic_info->processor_count = pset->processor_count;
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		register policy_timeshare_base_t ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		register policy_fifo_base_t fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return(KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		register policy_rr_base_t rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return(KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		register policy_timeshare_limit_t ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return(KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_STANDARD;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		register policy_fifo_limit_t fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return(KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_STANDARD;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		register policy_rr_limit_t rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return(KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_STANDARD;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		register int *enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return(KERN_FAILURE);

		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled)/sizeof(int);
		*host = &realhost;
		return(KERN_SUCCESS);
	}

	*host = HOST_NULL;
	return(KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_statistics
 *
 * Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
	processor_set_t pset,
	int flavor,
	processor_set_info_t info,
	mach_msg_type_number_t *count)
{
	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_PROCESSOR_SET);

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		register processor_set_load_info_t load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		load_info = (processor_set_load_info_t) info;

		pset_lock(pset);
		load_info->task_count = pset->task_count;
		load_info->thread_count = pset->thread_count;
		simple_lock(&pset->processors_lock);
		load_info->mach_factor = pset->mach_factor;
		load_info->load_average = pset->load_average;
		simple_unlock(&pset->processors_lock);
		pset_unlock(pset);

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return(KERN_SUCCESS);
	}

	return(KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_max_priority:
 *
 * Specify max priority permitted on processor set. This affects
 * newly created and assigned threads. Optionally change existing
 * ones.
 */
kern_return_t
processor_set_max_priority(
	processor_set_t pset,
	int max_priority,
	boolean_t change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_policy_enable:
 *
 * Allow indicated policy on processor set.
 */

kern_return_t
processor_set_policy_enable(
	processor_set_t pset,
	int policy)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_policy_disable:
 *
 * Forbid indicated policy on processor set. Time sharing cannot
 * be forbidden.
 */
kern_return_t
processor_set_policy_disable(
	processor_set_t pset,
	int policy,
	boolean_t change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

#define THING_TASK	0
#define THING_THREAD	1

/*
 * processor_set_things:
 *
 * Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
	processor_set_t pset,
	mach_port_t **thing_list,
	mach_msg_type_number_t *count,
	int type)
{
	unsigned int actual;	/* this many things */
	int i;

	vm_size_t size, size_needed;
	vm_offset_t addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_FAILURE;
		}

		if (type == THING_TASK)
			actual = pset->task_count;
		else
			actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */

	switch (type) {
	case THING_TASK: {
		task_t *tasks = (task_t *) addr;
		task_t task;

		for (i = 0, task = (task_t) queue_first(&pset->tasks);
		     i < actual;
		     i++, task = (task_t) queue_next(&task->pset_tasks)) {
			/* take ref for convert_task_to_port */
			task_reference(task);
			tasks[i] = task;
		}
		assert(queue_end(&pset->tasks, (queue_entry_t) task));
		break;
	}

	case THING_THREAD: {
		thread_act_t *thr_acts = (thread_act_t *) addr;
		thread_t thread;
		thread_act_t thr_act;
		queue_head_t *list;

		list = &pset->threads;
		thread = (thread_t) queue_first(list);
		i = 0;
		while (i < actual && !queue_end(list, (queue_entry_t) thread)) {
			thr_act = thread_lock_act(thread);
			if (thr_act && thr_act->ref_count > 0) {
				/* take ref for convert_act_to_port */
				act_locked_act_reference(thr_act);
				thr_acts[i] = thr_act;
				i++;
			}
			thread_unlock_act(thread);
			thread = (thread_t) queue_next(&thread->pset_threads);
		}
		if (i < actual) {
			actual = i;
			size_needed = actual * sizeof(mach_port_t);
		}
		break;
	}
	}

	/* can unlock processor set now that we have the task/thread refs */
	pset_unlock(pset);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	} else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			vm_offset_t newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				switch (type) {
				case THING_TASK: {
					task_t *tasks = (task_t *) addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				}

				case THING_THREAD: {
					thread_t *threads = (thread_t *) addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(threads[i]);
					break;
				}
				}
				kfree(addr, size);
				return KERN_RESOURCE_SHORTAGE;
			}

			bcopy((char *) addr, (char *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *) addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {
		case THING_TASK: {
			task_t *tasks = (task_t *) addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(tasks[i]);
			break;
		}

		case THING_THREAD: {
			thread_act_t *thr_acts = (thread_act_t *) addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_act_to_port(thr_acts[i]);
			break;
		}
		}
	}

	return(KERN_SUCCESS);
}

/*
 * processor_set_tasks:
 *
 * List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
	processor_set_t pset,
	task_array_t *task_list,
	mach_msg_type_number_t *count)
{
	return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
}

/*
 * processor_set_threads:
 *
 * List all threads in the processor set.
 */
kern_return_t
processor_set_threads(
	processor_set_t pset,
	thread_array_t *thread_list,
	mach_msg_type_number_t *count)
{
	return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}

/*
 * processor_set_base:
 *
 * Specify per-policy base priority for a processor set. Set processor
 * set default policy to the given policy. This affects newly created
 * and assigned threads. Optionally change existing ones.
 */
kern_return_t
processor_set_base(
	processor_set_t pset,
	policy_t policy,
	policy_base_t base,
	boolean_t change)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_limit:
 *
 * Specify per-policy limits for a processor set. This affects
 * newly created and assigned threads. Optionally change existing
 * ones.
 */
kern_return_t
processor_set_limit(
	processor_set_t pset,
	policy_t policy,
	policy_limit_t limit,
	boolean_t change)
{
	return (KERN_POLICY_LIMIT);
}

/*
 * processor_set_policy_control
 *
 * Controls the scheduling attributes governing the processor set.
 * Allows control of enabled policies, and per-policy base and limit
 * priorities.
 */
kern_return_t
processor_set_policy_control(
	processor_set_t pset,
	int flavor,
	processor_set_info_t policy_info,
	mach_msg_type_number_t count,
	boolean_t change)
{
	return (KERN_INVALID_ARGUMENT);
}