/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * processor.c: processor and processor_set manipulation routines.
 */

#include <cpus.h>

#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

/*
 * Exported interface
 */
#include <mach/mach_host_server.h>

/*
 * Exported variables.
 */
struct processor_set	default_pset;
struct processor	processor_array[NCPUS];

int		master_cpu = 0;

processor_t	master_processor;
processor_t	processor_ptr[NCPUS];

/* Forwards */
void	pset_init(
		processor_set_t		pset);

void	processor_init(
		register processor_t	pr,
		int			slot_num);

void	pset_quanta_setup(
		processor_set_t		pset);

kern_return_t	processor_set_base(
		processor_set_t		pset,
		policy_t		policy,
		policy_base_t		base,
		boolean_t		change);

kern_return_t	processor_set_limit(
		processor_set_t		pset,
		policy_t		policy,
		policy_limit_t		limit,
		boolean_t		change);

kern_return_t	processor_set_things(
		processor_set_t		pset,
		mach_port_t		**thing_list,
		mach_msg_type_number_t	*count,
		int			type);


/*
 * Bootstrap the processor/pset system so the scheduler can run.
 */
void
pset_sys_bootstrap(void)
{
	register int	i;

	pset_init(&default_pset);

	for (i = 0; i < NCPUS; i++) {
		/*
		 * Initialize processor data structures.
		 * Note that cpu_to_processor(i) is processor_ptr[i].
		 */
		processor_ptr[i] = &processor_array[i];
		processor_init(processor_ptr[i], i);
	}

	master_processor = cpu_to_processor(master_cpu);

	default_pset.active = TRUE;
}

/*
 * Initialize the given processor_set structure.
 */

void pset_init(
	register processor_set_t	pset)
{
	register int	i;

	/* setup run queue */
	pset->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		pset->runq.bitmap[i] = 0;
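	/*
	 * Note: the run-queue bitmap appears to be indexed by (MAXPRI - pri),
	 * so the bit set below corresponds to IDLEPRI and matches the
	 * runq.highq value assigned above (same pattern as processor_init()
	 * below); this is an inference from the setbit() usage, not a
	 * documented invariant.
	 */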
	setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);
	pset->runq.urgency = pset->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&pset->runq.queues[i]);

	queue_init(&pset->idle_queue);
	pset->idle_count = 0;
	queue_init(&pset->active_queue);
	simple_lock_init(&pset->sched_lock, ETAP_THREAD_PSET_IDLE);
	pset->run_count = pset->share_count = 0;
	pset->mach_factor = pset->load_average = 0;
	pset->sched_load = 0;
	queue_init(&pset->processors);
	pset->processor_count = 0;
	queue_init(&pset->tasks);
	pset->task_count = 0;
	queue_init(&pset->threads);
	pset->thread_count = 0;
	pset->ref_count = 1;
	pset->active = FALSE;
	mutex_init(&pset->lock, ETAP_THREAD_PSET);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->timeshare_quanta = 1;

	for (i = 0; i <= NCPUS; i++)
		pset->quantum_factors[i] = 1;
}

/*
 * Initialize the given processor structure for the processor in
 * the slot specified by slot_num.
 */
void
processor_init(
	register processor_t	p,
	int			slot_num)
{
	register int	i;

	/* setup run queue */
	p->runq.highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		p->runq.bitmap[i] = 0;
	setbit(MAXPRI - IDLEPRI, p->runq.bitmap);
	p->runq.urgency = p->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&p->runq.queues[i]);

	p->state = PROCESSOR_OFF_LINE;
	p->active_thread = p->next_thread = p->idle_thread = THREAD_NULL;
	p->processor_set = PROCESSOR_SET_NULL;
	p->current_pri = MINPRI;
	timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);
	p->timeslice = 0;
	p->deadline = UINT64_MAX;
	simple_lock_init(&p->lock, ETAP_THREAD_PROC);
	p->processor_self = IP_NULL;
	p->slot_num = slot_num;
}

/*
 * pset_deallocate:
 *
 * Remove one reference to the processor set. Destroy processor_set
 * if this was the last reference.
 */
void
pset_deallocate(
	processor_set_t	pset)
{
	if (pset == PROCESSOR_SET_NULL)
		return;

	assert(pset == &default_pset);
	return;
}

/*
 * pset_reference:
 *
 * Add one reference to the processor set.
 */
void
pset_reference(
	processor_set_t	pset)
{
	assert(pset == &default_pset);
}

#define pset_reference_locked(pset) assert(pset == &default_pset)

/*
 * pset_remove_processor() removes a processor from a processor_set.
 * It can only be called on the current processor. Caller must
 * hold lock on current processor and processor set.
 */
void
pset_remove_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	if (pset != processor->processor_set)
		panic("pset_remove_processor: wrong pset");

	queue_remove(&pset->processors, processor, processor_t, processors);
	processor->processor_set = PROCESSOR_SET_NULL;
	pset->processor_count--;
	pset_quanta_setup(pset);
}

/*
 * pset_add_processor() adds a processor to a processor_set.
 * It can only be called on the current processor. Caller must
 * hold lock on current processor and on pset. No reference counting on
 * processors. Processor reference to pset is implicit.
 */
void
pset_add_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	queue_enter(&pset->processors, processor, processor_t, processors);
	processor->processor_set = pset;
	pset->processor_count++;
	pset_quanta_setup(pset);
}

/*
 * pset_remove_task() removes a task from a processor_set.
 * Caller must hold locks on pset and task (unless task has
 * no references left, in which case just the pset lock is
 * needed). Pset reference count is not decremented;
 * caller must explicitly pset_deallocate.
 */
void
pset_remove_task(
	processor_set_t	pset,
	task_t		task)
{
	if (pset != task->processor_set)
		return;

	queue_remove(&pset->tasks, task, task_t, pset_tasks);
	task->processor_set = PROCESSOR_SET_NULL;
	pset->task_count--;
}

/*
 * pset_add_task() adds a task to a processor_set.
 * Caller must hold locks on pset and task. Pset references to
 * tasks are implicit.
 */
void
pset_add_task(
	processor_set_t	pset,
	task_t		task)
{
	queue_enter(&pset->tasks, task, task_t, pset_tasks);
	task->processor_set = pset;
	pset->task_count++;
	pset_reference_locked(pset);
}

/*
 * pset_remove_thread() removes a thread from a processor_set.
 * Caller must hold locks on pset and thread (but only if thread
 * has outstanding references that could be used to lookup the pset).
 * The pset reference count is not decremented; caller must explicitly
 * pset_deallocate.
 */
void
pset_remove_thread(
	processor_set_t	pset,
	thread_t	thread)
{
	queue_remove(&pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = PROCESSOR_SET_NULL;
	pset->thread_count--;
}

/*
 * pset_add_thread() adds a thread to a processor_set.
 * Caller must hold locks on pset and thread. Pset references to
 * threads are implicit.
 */
void
pset_add_thread(
	processor_set_t	pset,
	thread_t	thread)
{
	queue_enter(&pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = pset;
	pset->thread_count++;
	pset_reference_locked(pset);
}

/*
 * thread_change_psets() changes the pset of a thread. Caller must
 * hold locks on both psets and thread. The old pset must be
 * explicitly pset_deallocate()'d by the caller.
 */
void
thread_change_psets(
	thread_t	thread,
	processor_set_t	old_pset,
	processor_set_t	new_pset)
{
	queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
	old_pset->thread_count--;
	queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
	thread->processor_set = new_pset;
	new_pset->thread_count++;
	pset_reference_locked(new_pset);
}


kern_return_t
processor_info_count(
	processor_flavor_t	flavor,
	mach_msg_type_number_t	*count)
{
	kern_return_t	kr;

	switch (flavor) {
	case PROCESSOR_BASIC_INFO:
		*count = PROCESSOR_BASIC_INFO_COUNT;
		return KERN_SUCCESS;
	case PROCESSOR_CPU_LOAD_INFO:
		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		return KERN_SUCCESS;
	default:
		kr = cpu_info_count(flavor, count);
		return kr;
	}
}


kern_return_t
processor_info(
	register processor_t		processor,
	processor_flavor_t		flavor,
	host_t				*host,
	processor_info_t		info,
	mach_msg_type_number_t		*count)
{
	register int	i, slot_num, state;
	register processor_basic_info_t		basic_info;
	register processor_cpu_load_info_t	cpu_load_info;
	kern_return_t	kr;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	slot_num = processor->slot_num;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = machine_slot[slot_num].cpu_type;
		basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = slot_num;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	case PROCESSOR_CPU_LOAD_INFO:
	{
		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		for (i = 0; i < CPU_STATE_MAX; i++)
			cpu_load_info->cpu_ticks[i] = machine_slot[slot_num].cpu_ticks[i];

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	default:
	{
		kr = cpu_info(flavor, slot_num, info, count);
		if (kr == KERN_SUCCESS)
			*host = &realhost;
		return(kr);
	}
	}
}

kern_return_t
processor_start(
	processor_t	processor)
{
	kern_return_t	result;
	spl_t		s;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		processor_t	prev;

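		/*
		 * Migrate the calling thread onto the master processor via
		 * thread_bind()/thread_block(), presumably so that cpu_start()
		 * executes there, then restore the previous binding below.
		 */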
		prev = thread_bind(current_thread(), processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->slot_num);

		thread_bind(current_thread(), prev);

		return (result);
	}

	s = splsched();
	processor_lock(processor);
	if (processor->state != PROCESSOR_OFF_LINE) {
		processor_unlock(processor);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	processor_unlock(processor);
	splx(s);

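	/*
	 * If nothing is staged for this processor yet, create a kernel
	 * thread running start_cpu_thread and bind it here so the
	 * processor has a thread to dispatch once it comes up.
	 */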
	if (processor->next_thread == THREAD_NULL) {
		thread_t		thread;
		extern void		start_cpu_thread(void);

		thread = kernel_thread_create(start_cpu_thread, MAXPRI_KERNEL);

		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		pset_run_incr(thread->processor_set);
		thread_unlock(thread);
		splx(s);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->slot_num);
	if (result != KERN_SUCCESS) {
		s = splsched();
		processor_lock(processor);
		processor->state = PROCESSOR_OFF_LINE;
		processor_unlock(processor);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}

kern_return_t
processor_exit(
	processor_t	processor)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(processor_shutdown(processor));
}

kern_return_t
processor_control(
	processor_t		processor,
	processor_info_t	info,
	mach_msg_type_number_t	count)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(cpu_control(processor->slot_num, info, count));
}

/*
 * Precalculate the appropriate timesharing quanta based on load. The
 * index into quantum_factors[] is the number of threads on the
 * processor set queue. It is limited to the number of processors in
 * the set.
 */
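/*
 * Worked example of the rounding below: for a set of 4 processors,
 * quantum_factors[i] = (4 + i/2) / i yields { 4, 2, 1, 1 } for i = 1..4
 * (and quantum_factors[0] mirrors quantum_factors[1]), so the factor
 * shrinks toward 1 as the number of queued threads approaches the
 * processor count.
 */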

void
pset_quanta_setup(
	processor_set_t	pset)
{
	register int	i, count = pset->processor_count;

	for (i = 1; i <= count; i++)
		pset->quantum_factors[i] = (count + (i / 2)) / i;

	pset->quantum_factors[0] = pset->quantum_factors[1];

	timeshare_quanta_update(pset);
}

kern_return_t
processor_set_create(
	host_t			host,
	processor_set_t		*new_set,
	processor_set_t		*new_name)
{
#ifdef	lint
	host++; new_set++; new_name++;
#endif	/* lint */
	return(KERN_FAILURE);
}

kern_return_t
processor_set_destroy(
	processor_set_t	pset)
{
#ifdef	lint
	pset++;
#endif	/* lint */
	return(KERN_FAILURE);
}

kern_return_t
processor_get_assignment(
	processor_t	processor,
	processor_set_t	*pset)
{
	int	state;

	state = processor->state;
	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
		return(KERN_FAILURE);

	*pset = processor->processor_set;
	pset_reference(*pset);
	return(KERN_SUCCESS);
}

kern_return_t
processor_set_info(
	processor_set_t		pset,
	int			flavor,
	host_t			*host,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		register processor_set_basic_info_t	basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_set_basic_info_t) info;
		basic_info->processor_count = pset->processor_count;
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		register policy_timeshare_base_t	ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		register policy_fifo_base_t		fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return(KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		register policy_rr_base_t		rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return(KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		register policy_timeshare_limit_t	ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return(KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_STANDARD;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		register policy_fifo_limit_t		fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return(KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_STANDARD;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		register policy_rr_limit_t		rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return(KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_STANDARD;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		register int	*enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return(KERN_FAILURE);

		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled)/sizeof(int);
		*host = &realhost;
		return(KERN_SUCCESS);
	}

	*host = HOST_NULL;
	return(KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_statistics
 *
 * Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
	processor_set_t		pset,
	int			flavor,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_PROCESSOR_SET);

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		register processor_set_load_info_t	load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		load_info = (processor_set_load_info_t) info;

		pset_lock(pset);
		load_info->task_count = pset->task_count;
		load_info->thread_count = pset->thread_count;
		load_info->mach_factor = pset->mach_factor;
		load_info->load_average = pset->load_average;
		pset_unlock(pset);

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return(KERN_SUCCESS);
	}

	return(KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_max_priority:
 *
 * Specify max priority permitted on processor set. This affects
 * newly created and assigned threads. Optionally change existing
 * ones.
 */
kern_return_t
processor_set_max_priority(
	processor_set_t	pset,
	int		max_priority,
	boolean_t	change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_policy_enable:
 *
 * Allow indicated policy on processor set.
 */

kern_return_t
processor_set_policy_enable(
	processor_set_t	pset,
	int		policy)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_policy_disable:
 *
 * Forbid indicated policy on processor set. Time sharing cannot
 * be forbidden.
 */
kern_return_t
processor_set_policy_disable(
	processor_set_t	pset,
	int		policy,
	boolean_t	change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

#define THING_TASK	0
#define THING_THREAD	1

/*
 * processor_set_things:
 *
 * Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	int i;

	vm_size_t size, size_needed;
	vm_offset_t addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;
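	/*
	 * Size and allocate the buffer outside the pset lock: take the
	 * lock, compute the space needed from the current count, and if
	 * the buffer on hand is too small, drop the lock, grow it, retry.
	 */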
	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_FAILURE;
		}

		if (type == THING_TASK)
			actual = pset->task_count;
		else
			actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */

	switch (type) {
	case THING_TASK: {
		task_t *tasks = (task_t *) addr;
		task_t task;

		for (i = 0, task = (task_t) queue_first(&pset->tasks);
			 !queue_end(&pset->tasks, (queue_entry_t) task);
			 task = (task_t) queue_next(&task->pset_tasks)) {

			task_lock(task);
			if (task->ref_count > 0) {
				/* take ref for convert_task_to_port */
				task_reference_locked(task);
				tasks[i++] = task;
			}
			task_unlock(task);
		}
		break;
	}

	case THING_THREAD: {
		thread_act_t *thr_acts = (thread_act_t *) addr;
		thread_t thread;
		thread_act_t thr_act;

		for (i = 0, thread = (thread_t) queue_first(&pset->threads);
			 !queue_end(&pset->threads, (queue_entry_t)thread);
			 thread = (thread_t) queue_next(&thread->pset_threads)) {

			thr_act = thread_lock_act(thread);
			if (thr_act && thr_act->act_ref_count > 0) {
				/* take ref for convert_act_to_port */
				act_reference_locked(thr_act);
				thr_acts[i++] = thr_act;
			}
			thread_unlock_act(thread);
		}
		break;
	}
	}

	/* can unlock processor set now that we have the task/thread refs */
	pset_unlock(pset);

	if (i < actual) {
		actual = i;
		size_needed = actual * sizeof(mach_port_t);
	}
	assert(i == actual);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	} else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			vm_offset_t newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				switch (type) {
				case THING_TASK: {
					task_t *tasks = (task_t *) addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				}

				case THING_THREAD: {
					thread_act_t *acts = (thread_act_t *) addr;

					for (i = 0; i < actual; i++)
						act_deallocate(acts[i]);
					break;
				}
				}
				kfree(addr, size);
				return KERN_RESOURCE_SHORTAGE;
			}

			bcopy((char *) addr, (char *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *) addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {
		case THING_TASK: {
			task_t *tasks = (task_t *) addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(tasks[i]);
			break;
		}

		case THING_THREAD: {
			thread_act_t *thr_acts = (thread_act_t *) addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_act_to_port(thr_acts[i]);
			break;
		}
		}
	}

	return(KERN_SUCCESS);
}


/*
 * processor_set_tasks:
 *
 * List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
	processor_set_t		pset,
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
}

/*
 * processor_set_threads:
 *
 * List all threads in the processor set.
 */
kern_return_t
processor_set_threads(
	processor_set_t		pset,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}

/*
 * processor_set_base:
 *
 * Specify per-policy base priority for a processor set. Set processor
 * set default policy to the given policy. This affects newly created
 * and assigned threads. Optionally change existing ones.
 */
kern_return_t
processor_set_base(
	processor_set_t	pset,
	policy_t	policy,
	policy_base_t	base,
	boolean_t	change)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 * processor_set_limit:
 *
 * Specify per-policy limits for a processor set. This affects
 * newly created and assigned threads. Optionally change existing
 * ones.
 */
kern_return_t
processor_set_limit(
	processor_set_t	pset,
	policy_t	policy,
	policy_limit_t	limit,
	boolean_t	change)
{
	return (KERN_POLICY_LIMIT);
}

/*
 * processor_set_policy_control
 *
 * Controls the scheduling attributes governing the processor set.
 * Allows control of enabled policies, and per-policy base and limit
 * priorities.
 */
kern_return_t
processor_set_policy_control(
	processor_set_t		pset,
	int			flavor,
	processor_set_info_t	policy_info,
	mach_msg_type_number_t	count,
	boolean_t		change)
{
	return (KERN_INVALID_ARGUMENT);
}