/* osfmk/kern/processor.c */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52
53 /*
54 * processor.c: processor and processor_set manipulation routines.
55 */
56
57 #include <cpus.h>
58
59 #include <mach/boolean.h>
60 #include <mach/policy.h>
61 #include <mach/processor_info.h>
62 #include <mach/vm_param.h>
63 #include <kern/cpu_number.h>
64 #include <kern/host.h>
65 #include <kern/machine.h>
66 #include <kern/misc_protos.h>
67 #include <kern/processor.h>
68 #include <kern/sched.h>
69 #include <kern/task.h>
70 #include <kern/thread.h>
71 #include <kern/ipc_host.h>
72 #include <kern/ipc_tt.h>
73 #include <ipc/ipc_port.h>
74 #include <kern/kalloc.h>
75
76 /*
77 * Exported interface
78 */
79 #include <mach/mach_host_server.h>
80
81 /*
82 * Exported variables.
83 */
84 struct processor_set default_pset;
85 struct processor processor_array[NCPUS];
86
87 int master_cpu = 0;
88
89 processor_t master_processor;
90 processor_t processor_ptr[NCPUS];
91
92 /* Forwards */
93 void pset_init(
94 processor_set_t pset);
95
96 void processor_init(
97 register processor_t pr,
98 int slot_num);
99
100 void pset_quanta_set(
101 processor_set_t pset);
102
103 kern_return_t processor_set_base(
104 processor_set_t pset,
105 policy_t policy,
106 policy_base_t base,
107 boolean_t change);
108
109 kern_return_t processor_set_limit(
110 processor_set_t pset,
111 policy_t policy,
112 policy_limit_t limit,
113 boolean_t change);
114
115 kern_return_t processor_set_things(
116 processor_set_t pset,
117 mach_port_t **thing_list,
118 mach_msg_type_number_t *count,
119 int type);
120
121
122 /*
123 * Bootstrap the processor/pset system so the scheduler can run.
124 */
125 void
126 pset_sys_bootstrap(void)
127 {
128 register int i;
129
130 pset_init(&default_pset);
131 for (i = 0; i < NCPUS; i++) {
132 /*
133 * Initialize processor data structures.
134 * Note that cpu_to_processor(i) is processor_ptr[i].
135 */
136 processor_ptr[i] = &processor_array[i];
137 processor_init(processor_ptr[i], i);
138 }
139 master_processor = cpu_to_processor(master_cpu);
140 master_processor->cpu_data = get_cpu_data();
141 default_pset.active = TRUE;
142 }
143
/*
 *	Initialize the given processor_set structure.
 */

void	pset_init(
	register processor_set_t	pset)
{
	register int	i;

	/* setup run queue */
	simple_lock_init(&pset->runq.lock, ETAP_THREAD_PSET_RUNQ);
	for (i = 0; i < NRQBM; i++)
	    pset->runq.bitmap[i] = 0;
	/* the idle priority level is always marked runnable in the bitmap */
	setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);
	pset->runq.highq = IDLEPRI;
	pset->runq.urgency = pset->runq.count = 0;
	for (i = 0; i < NRQS; i++)
	    queue_init(&pset->runq.queues[i]);

	/* scheduler state: idle/active processor queues and load figures */
	queue_init(&pset->idle_queue);
	pset->idle_count = 0;
	queue_init(&pset->active_queue);
	simple_lock_init(&pset->sched_lock, ETAP_THREAD_PSET_IDLE);
	pset->run_count = 0;
	pset->mach_factor = pset->load_average = 0;
	pset->sched_load = 0;

	/* membership: processors, tasks, and threads assigned to this set */
	queue_init(&pset->processors);
	pset->processor_count = 0;
	simple_lock_init(&pset->processors_lock, ETAP_THREAD_PSET);
	queue_init(&pset->tasks);
	pset->task_count = 0;
	queue_init(&pset->threads);
	pset->thread_count = 0;
	pset->ref_count = 1;		/* caller holds the initial reference */
	pset->active = FALSE;		/* not schedulable until bootstrap enables it */
	mutex_init(&pset->lock, ETAP_THREAD_PSET);
	pset->pset_self = IP_NULL;	/* no IPC ports yet */
	pset->pset_name_self = IP_NULL;
	pset->set_quanta = 1;

	/* quanta table appears to be indexed 0..NCPUS inclusive, hence <= */
	for (i = 0; i <= NCPUS; i++)
	    pset->machine_quanta[i] = 1;
}
187
/*
 *	Initialize the given processor structure for the processor in
 *	the slot specified by slot_num.
 */
void
processor_init(
	register processor_t	p,
	int			slot_num)
{
	register int	i;

	/* setup run queue */
	simple_lock_init(&p->runq.lock, ETAP_THREAD_PROC_RUNQ);
	for (i = 0; i < NRQBM; i++)
	    p->runq.bitmap[i] = 0;
	/* the idle priority level is always marked runnable in the bitmap */
	setbit(MAXPRI - IDLEPRI, p->runq.bitmap);
	p->runq.highq = IDLEPRI;
	p->runq.urgency = p->runq.count = 0;
	for (i = 0; i < NRQS; i++)
	    queue_init(&p->runq.queues[i]);

	/* processor comes up offline, unassigned, with no staged thread */
	p->state = PROCESSOR_OFF_LINE;
	p->current_pri = MINPRI;
	p->next_thread = THREAD_NULL;
	p->idle_thread = THREAD_NULL;
	timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);
	p->slice_quanta = 0;
	p->processor_set = PROCESSOR_SET_NULL;
	p->processor_set_next = PROCESSOR_SET_NULL;
	simple_lock_init(&p->lock, ETAP_THREAD_PROC);
	p->processor_self = IP_NULL;	/* no IPC port yet */
	p->slot_num = slot_num;
}
221
222 /*
223 * pset_deallocate:
224 *
225 * Remove one reference to the processor set. Destroy processor_set
226 * if this was the last reference.
227 */
228 void
229 pset_deallocate(
230 processor_set_t pset)
231 {
232 if (pset == PROCESSOR_SET_NULL)
233 return;
234
235 assert(pset == &default_pset);
236 return;
237 }
238
/*
 *	pset_reference:
 *
 *	Add one reference to the processor set.
 *
 *	Only the default pset exists and is never destroyed, so no
 *	count is actually maintained; this is just a sanity check.
 */
void
pset_reference(
	processor_set_t	pset)
{
	assert(pset == &default_pset);
}
250
/*
 * Locked variant of pset_reference(); same sanity-check-only behavior
 * since only the default pset exists.
 */
#define pset_reference_locked(pset)	assert(pset == &default_pset)
252
/*
 *	pset_remove_processor() removes a processor from a processor_set.
 *	It can only be called on the current processor.  Caller must
 *	hold lock on current processor and processor set.
 */
void
pset_remove_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	if (pset != processor->processor_set)
		panic("pset_remove_processor: wrong pset");

	queue_remove(&pset->processors, processor, processor_t, processors);
	processor->processor_set = PROCESSOR_SET_NULL;
	pset->processor_count--;

	/* recompute the timesharing quanta table for the new cpu count */
	pset_quanta_set(pset);
}
271
/*
 *	pset_add_processor() adds a processor to a processor_set.
 *	It can only be called on the current processor.  Caller must
 *	hold lock on curent processor and on pset.  No reference counting on
 *	processors.  Processor reference to pset is implicit.
 */
void
pset_add_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	queue_enter(&pset->processors, processor, processor_t, processors);
	processor->processor_set = pset;
	pset->processor_count++;

	/* recompute the timesharing quanta table for the new cpu count */
	pset_quanta_set(pset);
}
288
/*
 *	pset_remove_task() removes a task from a processor_set.
 *	Caller must hold locks on pset and task (unless task has
 *	no references left, in which case just the pset lock is
 *	needed).  Pset reference count is not decremented;
 *	caller must explicitly pset_deallocate.
 */
void
pset_remove_task(
	processor_set_t	pset,
	task_t		task)
{
	/* no-op if the task is not assigned to this pset */
	if (pset != task->processor_set)
		return;

	queue_remove(&pset->tasks, task, task_t, pset_tasks);
	task->processor_set = PROCESSOR_SET_NULL;
	pset->task_count--;
}
308
/*
 *	pset_add_task() adds a task to a processor_set.
 *	Caller must hold locks on pset and task.  Pset references to
 *	tasks are implicit.
 */
void
pset_add_task(
	processor_set_t	pset,
	task_t		task)
{
	queue_enter(&pset->tasks, task, task_t, pset_tasks);
	task->processor_set = pset;
	pset->task_count++;
	pset_reference_locked(pset);	/* reduces to a sanity check; see macro above */
}
324
/*
 *	pset_remove_thread() removes a thread from a processor_set.
 *	Caller must hold locks on pset and thread (but only if thread
 *	has outstanding references that could be used to lookup the pset).
 *	The pset reference count is not decremented; caller must explicitly
 *	pset_deallocate.
 */
void
pset_remove_thread(
	processor_set_t	pset,
	thread_t	thread)
{
	queue_remove(&pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = PROCESSOR_SET_NULL;
	pset->thread_count--;
}
341
/*
 *	pset_add_thread() adds a thread to a processor_set.
 *	Caller must hold locks on pset and thread.  Pset references to
 *	threads are implicit.
 */
void
pset_add_thread(
	processor_set_t	pset,
	thread_t	thread)
{
	queue_enter(&pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = pset;
	pset->thread_count++;
	pset_reference_locked(pset);	/* reduces to a sanity check; see macro above */
}
357
/*
 *	thread_change_psets() changes the pset of a thread.  Caller must
 *	hold locks on both psets and thread.  The old pset must be
 *	explicitly pset_deallocate()'ed by caller.
 */
void
thread_change_psets(
	thread_t	thread,
	processor_set_t old_pset,
	processor_set_t new_pset)
{
	queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
	old_pset->thread_count--;
	queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = new_pset;
	new_pset->thread_count++;
	pset_reference_locked(new_pset);	/* reduces to a sanity check */
}
376
377
378 kern_return_t
379 processor_info_count(
380 processor_flavor_t flavor,
381 mach_msg_type_number_t *count)
382 {
383 kern_return_t kr;
384
385 switch (flavor) {
386 case PROCESSOR_BASIC_INFO:
387 *count = PROCESSOR_BASIC_INFO_COUNT;
388 return KERN_SUCCESS;
389 case PROCESSOR_CPU_LOAD_INFO:
390 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
391 return KERN_SUCCESS;
392 default:
393 kr = cpu_info_count(flavor, count);
394 return kr;
395 }
396 }
397
398
/*
 *	processor_info:
 *
 *	Return information about a processor.  The flavor selects which
 *	structure is written into info; *count is the caller's buffer
 *	size (in integers) and is updated to the amount returned.
 *	Unrecognized flavors are passed through to the machine-dependent
 *	cpu_info().
 */
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	register int	i, slot_num, state;
	register processor_basic_info_t		basic_info;
	register processor_cpu_load_info_t	cpu_load_info;
	kern_return_t	kr;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	slot_num = processor->slot_num;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		/* static description: cpu type/subtype, slot, running, master */
		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = machine_slot[slot_num].cpu_type;
		basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
		state = processor->state;
		/* any state other than OFF_LINE counts as running */
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = slot_num;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	case PROCESSOR_CPU_LOAD_INFO:
	{
		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		/* copy out the per-state tick counters for this slot */
		cpu_load_info = (processor_cpu_load_info_t) info;
		for (i=0;i<CPU_STATE_MAX;i++)
			cpu_load_info->cpu_ticks[i] = machine_slot[slot_num].cpu_ticks[i];

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	default:
	{
		/* machine-dependent flavor; *host only set on success */
		kr=cpu_info(flavor, slot_num, info, count);
		if (kr == KERN_SUCCESS)
			*host = &realhost;
		return(kr);
	}
	}
}
464
/*
 *	processor_start:
 *
 *	Bring a processor online.  For the master processor this just
 *	runs cpu_start() while temporarily bound to it.  Any other
 *	processor must currently be OFF_LINE; it is marked START and a
 *	kernel thread running start_cpu_thread() is staged as the first
 *	thing the new processor will execute.
 */
kern_return_t
processor_start(
	processor_t	processor)
{
	int	state;
	spl_t	s;
	kern_return_t	kr;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		/*
		 * Bind ourselves to the master and block so that the
		 * cpu_start() call below executes on that processor.
		 */
		thread_bind(current_thread(), processor);
		thread_block(THREAD_CONTINUE_NULL);
		kr = cpu_start(processor->slot_num);
		thread_bind(current_thread(), PROCESSOR_NULL);

		return(kr);
	}

	s = splsched();
	processor_lock(processor);

	/* only an offline processor may be started */
	state = processor->state;
	if (state != PROCESSOR_OFF_LINE) {
		processor_unlock(processor);
		splx(s);
		return(KERN_FAILURE);
	}
	processor->state = PROCESSOR_START;
	processor_unlock(processor);
	splx(s);

	if (processor->next_thread == THREAD_NULL) {
		thread_t thread;
		extern void start_cpu_thread(void);

		/* create the thread the new processor will run first */
		thread = kernel_thread_with_priority(
					kernel_task, MAXPRI_KERNEL,
					start_cpu_thread, TRUE, FALSE);

		s = splsched();
		thread_lock(thread);
		thread_bind_locked(thread, processor);
		thread_go_locked(thread, THREAD_AWAKENED);
		/*
		 * Pull the now-runnable thread back off the run queue;
		 * it is handed to the processor via next_thread instead.
		 */
		(void)rem_runq(thread);
		processor->next_thread = thread;
		thread_unlock(thread);
		splx(s);
	}

	kr = cpu_start(processor->slot_num);

	if (kr != KERN_SUCCESS) {
		/* start failed: return the processor to the offline state */
		s = splsched();
		processor_lock(processor);
		processor->state = PROCESSOR_OFF_LINE;
		processor_unlock(processor);
		splx(s);
	}

	return(kr);
}
528
529 kern_return_t
530 processor_exit(
531 processor_t processor)
532 {
533 if (processor == PROCESSOR_NULL)
534 return(KERN_INVALID_ARGUMENT);
535
536 return(processor_shutdown(processor));
537 }
538
539 kern_return_t
540 processor_control(
541 processor_t processor,
542 processor_info_t info,
543 mach_msg_type_number_t count)
544 {
545 if (processor == PROCESSOR_NULL)
546 return(KERN_INVALID_ARGUMENT);
547
548 return(cpu_control(processor->slot_num, info, count));
549 }
550
551 /*
552 * Precalculate the appropriate timesharing quanta based on load. The
553 * index into machine_quanta is the number of threads on the
554 * processor set queue. It is limited to the number of processors in
555 * the set.
556 */
557
558 void
559 pset_quanta_set(
560 processor_set_t pset)
561 {
562 register int i, count = pset->processor_count;
563
564 for (i = 1; i <= count; i++)
565 pset->machine_quanta[i] = (count + (i / 2)) / i;
566
567 pset->machine_quanta[0] = pset->machine_quanta[1];
568
569 pset_quanta_update(pset);
570 }
571
572 kern_return_t
573 processor_set_create(
574 host_t host,
575 processor_set_t *new_set,
576 processor_set_t *new_name)
577 {
578 #ifdef lint
579 host++; new_set++; new_name++;
580 #endif /* lint */
581 return(KERN_FAILURE);
582 }
583
584 kern_return_t
585 processor_set_destroy(
586 processor_set_t pset)
587 {
588 #ifdef lint
589 pset++;
590 #endif /* lint */
591 return(KERN_FAILURE);
592 }
593
594 kern_return_t
595 processor_get_assignment(
596 processor_t processor,
597 processor_set_t *pset)
598 {
599 int state;
600
601 state = processor->state;
602 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
603 return(KERN_FAILURE);
604
605 *pset = processor->processor_set;
606 pset_reference(*pset);
607 return(KERN_SUCCESS);
608 }
609
610 kern_return_t
611 processor_set_info(
612 processor_set_t pset,
613 int flavor,
614 host_t *host,
615 processor_set_info_t info,
616 mach_msg_type_number_t *count)
617 {
618 if (pset == PROCESSOR_SET_NULL)
619 return(KERN_INVALID_ARGUMENT);
620
621 if (flavor == PROCESSOR_SET_BASIC_INFO) {
622 register processor_set_basic_info_t basic_info;
623
624 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
625 return(KERN_FAILURE);
626
627 basic_info = (processor_set_basic_info_t) info;
628 basic_info->processor_count = pset->processor_count;
629 basic_info->default_policy = POLICY_TIMESHARE;
630
631 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
632 *host = &realhost;
633 return(KERN_SUCCESS);
634 }
635 else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
636 register policy_timeshare_base_t ts_base;
637
638 if (*count < POLICY_TIMESHARE_BASE_COUNT)
639 return(KERN_FAILURE);
640
641 ts_base = (policy_timeshare_base_t) info;
642 ts_base->base_priority = BASEPRI_DEFAULT;
643
644 *count = POLICY_TIMESHARE_BASE_COUNT;
645 *host = &realhost;
646 return(KERN_SUCCESS);
647 }
648 else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
649 register policy_fifo_base_t fifo_base;
650
651 if (*count < POLICY_FIFO_BASE_COUNT)
652 return(KERN_FAILURE);
653
654 fifo_base = (policy_fifo_base_t) info;
655 fifo_base->base_priority = BASEPRI_DEFAULT;
656
657 *count = POLICY_FIFO_BASE_COUNT;
658 *host = &realhost;
659 return(KERN_SUCCESS);
660 }
661 else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
662 register policy_rr_base_t rr_base;
663
664 if (*count < POLICY_RR_BASE_COUNT)
665 return(KERN_FAILURE);
666
667 rr_base = (policy_rr_base_t) info;
668 rr_base->base_priority = BASEPRI_DEFAULT;
669 rr_base->quantum = 1;
670
671 *count = POLICY_RR_BASE_COUNT;
672 *host = &realhost;
673 return(KERN_SUCCESS);
674 }
675 else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
676 register policy_timeshare_limit_t ts_limit;
677
678 if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
679 return(KERN_FAILURE);
680
681 ts_limit = (policy_timeshare_limit_t) info;
682 ts_limit->max_priority = MAXPRI_STANDARD;
683
684 *count = POLICY_TIMESHARE_LIMIT_COUNT;
685 *host = &realhost;
686 return(KERN_SUCCESS);
687 }
688 else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
689 register policy_fifo_limit_t fifo_limit;
690
691 if (*count < POLICY_FIFO_LIMIT_COUNT)
692 return(KERN_FAILURE);
693
694 fifo_limit = (policy_fifo_limit_t) info;
695 fifo_limit->max_priority = MAXPRI_STANDARD;
696
697 *count = POLICY_FIFO_LIMIT_COUNT;
698 *host = &realhost;
699 return(KERN_SUCCESS);
700 }
701 else if (flavor == PROCESSOR_SET_RR_LIMITS) {
702 register policy_rr_limit_t rr_limit;
703
704 if (*count < POLICY_RR_LIMIT_COUNT)
705 return(KERN_FAILURE);
706
707 rr_limit = (policy_rr_limit_t) info;
708 rr_limit->max_priority = MAXPRI_STANDARD;
709
710 *count = POLICY_RR_LIMIT_COUNT;
711 *host = &realhost;
712 return(KERN_SUCCESS);
713 }
714 else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
715 register int *enabled;
716
717 if (*count < (sizeof(*enabled)/sizeof(int)))
718 return(KERN_FAILURE);
719
720 enabled = (int *) info;
721 *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
722
723 *count = sizeof(*enabled)/sizeof(int);
724 *host = &realhost;
725 return(KERN_SUCCESS);
726 }
727
728
729 *host = HOST_NULL;
730 return(KERN_INVALID_ARGUMENT);
731 }
732
733 /*
734 * processor_set_statistics
735 *
736 * Returns scheduling statistics for a processor set.
737 */
738 kern_return_t
739 processor_set_statistics(
740 processor_set_t pset,
741 int flavor,
742 processor_set_info_t info,
743 mach_msg_type_number_t *count)
744 {
745 if (pset == PROCESSOR_SET_NULL)
746 return (KERN_INVALID_PROCESSOR_SET);
747
748 if (flavor == PROCESSOR_SET_LOAD_INFO) {
749 register processor_set_load_info_t load_info;
750
751 if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
752 return(KERN_FAILURE);
753
754 load_info = (processor_set_load_info_t) info;
755
756 pset_lock(pset);
757 load_info->task_count = pset->task_count;
758 load_info->thread_count = pset->thread_count;
759 load_info->mach_factor = pset->mach_factor;
760 load_info->load_average = pset->load_average;
761 pset_unlock(pset);
762
763 *count = PROCESSOR_SET_LOAD_INFO_COUNT;
764 return(KERN_SUCCESS);
765 }
766
767 return(KERN_INVALID_ARGUMENT);
768 }
769
/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 *
 *	Not supported by this kernel; always rejected.
 */
kern_return_t
processor_set_max_priority(
	processor_set_t	pset,
	int		max_priority,
	boolean_t	change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
785
/*
 *	processor_set_policy_enable:
 *
 *	Allow indicated policy on processor set.
 *
 *	Not supported by this kernel; always rejected.
 */

kern_return_t
processor_set_policy_enable(
	processor_set_t	pset,
	int		policy)
{
	return (KERN_INVALID_ARGUMENT);
}
799
/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 *
 *	Not supported by this kernel; always rejected.
 */
kern_return_t
processor_set_policy_disable(
	processor_set_t	pset,
	int		policy,
	boolean_t	change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
814
#define THING_TASK	0	/* processor_set_things(): list tasks */
#define THING_THREAD	1	/* processor_set_things(): list threads */
817
/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}:  snapshot
 *	the tasks or threads (selected by type) assigned to pset, take
 *	a reference on each, and return them converted to ports in a
 *	kalloc'ed list.
 */
kern_return_t
processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	int i;

	vm_size_t size, size_needed;
	vm_offset_t addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	/*
	 * Allocate-then-recheck loop: the count can change while the
	 * pset is unlocked for kalloc(), so retry until the buffer is
	 * big enough while the pset is held locked.
	 */
	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_FAILURE;
		}

		if (type == THING_TASK)
			actual = pset->task_count;
		else
			actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */

	switch (type) {
	    case THING_TASK: {
		task_t *tasks = (task_t *) addr;
		task_t task;

		for (i = 0, task = (task_t) queue_first(&pset->tasks);
		     !queue_end(&pset->tasks, (queue_entry_t) task);
		     task = (task_t) queue_next(&task->pset_tasks)) {

			task_lock(task);
			/* skip tasks with no references left */
			if (task->ref_count > 0) {
				/* take ref for convert_task_to_port */
				task_reference_locked(task);
				tasks[i++] = task;
			}
			task_unlock(task);
		}
		break;
	    }

	    case THING_THREAD: {
		thread_act_t *thr_acts = (thread_act_t *) addr;
		thread_t thread;
		thread_act_t thr_act;

		for (i = 0, thread = (thread_t) queue_first(&pset->threads);
		     !queue_end(&pset->threads, (queue_entry_t)thread);
		     thread = (thread_t) queue_next(&thread->pset_threads)) {

			thr_act = thread_lock_act(thread);
			/* skip threads with no activation or no refs left */
			if (thr_act && thr_act->ref_count > 0) {
				/* take ref for convert_act_to_port */
				act_locked_act_reference(thr_act);
				thr_acts[i++] = thr_act;
			}
			thread_unlock_act(thread);
		}
		break;
	    }
	}

	/* can unlock processor set now that we have the task/thread refs */
	pset_unlock(pset);

	/* entries may have been skipped above; shrink the logical count */
	if (i < actual) {
		actual = i;
		size_needed = actual * sizeof(mach_port_t);
	}
	assert(i == actual);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	} else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			vm_offset_t newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* drop the refs taken above before failing */
				switch (type) {
				    case THING_TASK: {
					task_t *tasks = (task_t *) addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				    }

				    case THING_THREAD: {
					thread_act_t *acts = (thread_act_t *) addr;

					for (i = 0; i < actual; i++)
						act_deallocate(acts[i]);
					break;
				    }
				}
				kfree(addr, size);
				return KERN_RESOURCE_SHORTAGE;
			}

			bcopy((char *) addr, (char *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *) addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {
		    case THING_TASK: {
			task_t *tasks = (task_t *) addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(tasks[i]);
			break;
		    }

		    case THING_THREAD: {
			thread_act_t *thr_acts = (thread_act_t *) addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_act_to_port(thr_acts[i]);
			break;
		    }
		}
	}

	return(KERN_SUCCESS);
}
992
993
994 /*
995 * processor_set_tasks:
996 *
997 * List all tasks in the processor set.
998 */
999 kern_return_t
1000 processor_set_tasks(
1001 processor_set_t pset,
1002 task_array_t *task_list,
1003 mach_msg_type_number_t *count)
1004 {
1005 return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
1006 }
1007
1008 /*
1009 * processor_set_threads:
1010 *
1011 * List all threads in the processor set.
1012 */
1013 kern_return_t
1014 processor_set_threads(
1015 processor_set_t pset,
1016 thread_array_t *thread_list,
1017 mach_msg_type_number_t *count)
1018 {
1019 return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
1020 }
1021
/*
 *	processor_set_base:
 *
 *	Specify per-policy base priority for a processor set.  Set processor
 *	set default policy to the given policy.  This affects newly created
 *	and assigned threads.  Optionally change existing ones.
 *
 *	Not supported by this kernel; always rejected.
 */
kern_return_t
processor_set_base(
	processor_set_t	pset,
	policy_t	policy,
	policy_base_t	base,
	boolean_t	change)
{
	return (KERN_INVALID_ARGUMENT);
}
1038
/*
 *	processor_set_limit:
 *
 *	Specify per-policy limits for a processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 *
 *	Not supported by this kernel; note it rejects with
 *	KERN_POLICY_LIMIT rather than KERN_INVALID_ARGUMENT like the
 *	other policy stubs.
 */
kern_return_t
processor_set_limit(
	processor_set_t	pset,
	policy_t	policy,
	policy_limit_t	limit,
	boolean_t	change)
{
	return (KERN_POLICY_LIMIT);
}
1055
/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 *
 *	Not supported by this kernel; always rejected.
 */
kern_return_t
processor_set_policy_control(
	processor_set_t		pset,
	int			flavor,
	processor_set_info_t	policy_info,
	mach_msg_type_number_t	count,
	boolean_t		change)
{
	return (KERN_INVALID_ARGUMENT);
}