/*
 * (extraction artifact neutralized: gitweb page header for
 *  apple/xnu.git blob c9251cc75919c80c060d7aed560b9971aaeee47f,
 *  osfmk/kern/processor.c)
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55
56 /*
57 * processor.c: processor and processor_set manipulation routines.
58 */
59
60 #include <cpus.h>
61
62 #include <mach/boolean.h>
63 #include <mach/policy.h>
64 #include <mach/processor_info.h>
65 #include <mach/vm_param.h>
66 #include <kern/cpu_number.h>
67 #include <kern/host.h>
68 #include <kern/machine.h>
69 #include <kern/misc_protos.h>
70 #include <kern/processor.h>
71 #include <kern/sched.h>
72 #include <kern/task.h>
73 #include <kern/thread.h>
74 #include <kern/ipc_host.h>
75 #include <kern/ipc_tt.h>
76 #include <ipc/ipc_port.h>
77 #include <kern/kalloc.h>
78
79 /*
80 * Exported interface
81 */
82 #include <mach/mach_host_server.h>
83
/*
 *	Exported variables.
 */
struct processor_set	default_pset;		/* the single, statically allocated pset */
struct processor	processor_array[NCPUS];	/* per-slot processor structures */

int	master_cpu = 0;		/* slot number of the master processor */

processor_t	master_processor;
processor_t	processor_ptr[NCPUS];	/* cpu_to_processor(i) maps through this */

/* Forwards */
void	pset_init(
		processor_set_t	pset);

void	processor_init(
		register processor_t	pr,
		int			slot_num);

void	pset_quanta_set(
		processor_set_t	pset);

kern_return_t	processor_set_base(
		processor_set_t	pset,
		policy_t	policy,
		policy_base_t	base,
		boolean_t	change);

kern_return_t	processor_set_limit(
		processor_set_t	pset,
		policy_t	policy,
		policy_limit_t	limit,
		boolean_t	change);

kern_return_t	processor_set_things(
		processor_set_t		pset,
		mach_port_t		**thing_list,
		mach_msg_type_number_t	*count,
		int			type);
124
/*
 *	Bootstrap the processor/pset system so the scheduler can run.
 *
 *	Initializes the default pset and every per-slot processor
 *	structure, then designates the master processor and marks the
 *	default pset active.  Called once, early, before scheduling.
 */
void
pset_sys_bootstrap(void)
{
	register int	i;

	pset_init(&default_pset);
	for (i = 0; i < NCPUS; i++) {
		/*
		 *	Initialize processor data structures.
		 *	Note that cpu_to_processor(i) is processor_ptr[i].
		 */
		processor_ptr[i] = &processor_array[i];
		processor_init(processor_ptr[i], i);
	}
	master_processor = cpu_to_processor(master_cpu);
	master_processor->cpu_data = get_cpu_data();
	/* pset_init() left the set inactive; enable it last */
	default_pset.active = TRUE;
}
146
/*
 *	Initialize the given processor_set structure.
 *
 *	Sets up the pset-level run queue, idle/active processor queues,
 *	task and thread queues, locks, and scheduling statistics.  The
 *	set is left inactive; pset_sys_bootstrap() activates it.
 */

void	pset_init(
	register processor_set_t	pset)
{
	register int	i;

	/* setup run queue */
	simple_lock_init(&pset->runq.lock, ETAP_THREAD_PSET_RUNQ);
	for (i = 0; i < NRQBM; i++)
		pset->runq.bitmap[i] = 0;
	/* the idle-priority level is always marked present in the bitmap */
	setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);
	pset->runq.highq = IDLEPRI;
	pset->runq.urgency = pset->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&pset->runq.queues[i]);

	queue_init(&pset->idle_queue);
	pset->idle_count = 0;
	queue_init(&pset->active_queue);
	simple_lock_init(&pset->sched_lock, ETAP_THREAD_PSET_IDLE);
	pset->run_count = 0;
	pset->mach_factor = pset->load_average = 0;
	pset->sched_load = 0;
	queue_init(&pset->processors);
	pset->processor_count = 0;
	simple_lock_init(&pset->processors_lock, ETAP_THREAD_PSET);
	queue_init(&pset->tasks);
	pset->task_count = 0;
	queue_init(&pset->threads);
	pset->thread_count = 0;
	pset->ref_count = 1;
	pset->active = FALSE;	/* not schedulable until bootstrap finishes */
	mutex_init(&pset->lock, ETAP_THREAD_PSET);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->set_quanta = 1;

	/*
	 * machine_quanta appears to have NCPUS+1 entries, indexed by
	 * runnable-thread count (see pset_quanta_set()); hence <= here.
	 */
	for (i = 0; i <= NCPUS; i++)
		pset->machine_quanta[i] = 1;
}
190
/*
 *	Initialize the given processor structure for the processor in
 *	the slot specified by slot_num.
 *
 *	Sets up the per-processor (local) run queue, quantum timer,
 *	and lock, and leaves the processor off-line and unassigned
 *	to any processor set.
 */
void
processor_init(
	register processor_t	p,
	int			slot_num)
{
	register int	i;

	/* setup run queue */
	simple_lock_init(&p->runq.lock, ETAP_THREAD_PROC_RUNQ);
	for (i = 0; i < NRQBM; i++)
		p->runq.bitmap[i] = 0;
	/* the idle-priority level is always marked present in the bitmap */
	setbit(MAXPRI - IDLEPRI, p->runq.bitmap);
	p->runq.highq = IDLEPRI;
	p->runq.urgency = p->runq.count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&p->runq.queues[i]);

	p->state = PROCESSOR_OFF_LINE;	/* brought up later via processor_start() */
	p->current_pri = MINPRI;
	p->next_thread = THREAD_NULL;
	p->idle_thread = THREAD_NULL;
	/* expires the running thread's quantum; see thread_quantum_expire() */
	timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);
	p->slice_quanta = 0;
	p->processor_set = PROCESSOR_SET_NULL;
	p->processor_set_next = PROCESSOR_SET_NULL;
	simple_lock_init(&p->lock, ETAP_THREAD_PROC);
	p->processor_self = IP_NULL;
	p->slot_num = slot_num;
}
224
225 /*
226 * pset_deallocate:
227 *
228 * Remove one reference to the processor set. Destroy processor_set
229 * if this was the last reference.
230 */
231 void
232 pset_deallocate(
233 processor_set_t pset)
234 {
235 if (pset == PROCESSOR_SET_NULL)
236 return;
237
238 assert(pset == &default_pset);
239 return;
240 }
241
242 /*
243 * pset_reference:
244 *
245 * Add one reference to the processor set.
246 */
247 void
248 pset_reference(
249 processor_set_t pset)
250 {
251 assert(pset == &default_pset);
252 }
253
/* Locked flavor of pset_reference(); same no-op sanity check. */
#define pset_reference_locked(pset)	assert(pset == &default_pset)
255
/*
 *	pset_remove_processor() removes a processor from a processor_set.
 *	It can only be called on the current processor.  Caller must
 *	hold lock on current processor and processor set.
 */
void
pset_remove_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	if (pset != processor->processor_set)
		panic("pset_remove_processor: wrong pset");

	queue_remove(&pset->processors, processor, processor_t, processors);
	processor->processor_set = PROCESSOR_SET_NULL;
	pset->processor_count--;
	/* quanta depend on the processor count; recompute */
	pset_quanta_set(pset);
}
274
/*
 *	pset_add_processor() adds a processor to a processor_set.
 *	It can only be called on the current processor.  Caller must
 *	hold lock on curent processor and on pset.  No reference counting on
 *	processors.  Processor reference to pset is implicit.
 */
void
pset_add_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	queue_enter(&pset->processors, processor, processor_t, processors);
	processor->processor_set = pset;
	pset->processor_count++;
	/* quanta depend on the processor count; recompute */
	pset_quanta_set(pset);
}
291
292 /*
293 * pset_remove_task() removes a task from a processor_set.
294 * Caller must hold locks on pset and task (unless task has
295 * no references left, in which case just the pset lock is
296 * needed). Pset reference count is not decremented;
297 * caller must explicitly pset_deallocate.
298 */
299 void
300 pset_remove_task(
301 processor_set_t pset,
302 task_t task)
303 {
304 if (pset != task->processor_set)
305 return;
306
307 queue_remove(&pset->tasks, task, task_t, pset_tasks);
308 task->processor_set = PROCESSOR_SET_NULL;
309 pset->task_count--;
310 }
311
312 /*
313 * pset_add_task() adds a task to a processor_set.
314 * Caller must hold locks on pset and task. Pset references to
315 * tasks are implicit.
316 */
317 void
318 pset_add_task(
319 processor_set_t pset,
320 task_t task)
321 {
322 queue_enter(&pset->tasks, task, task_t, pset_tasks);
323 task->processor_set = pset;
324 pset->task_count++;
325 pset_reference_locked(pset);
326 }
327
328 /*
329 * pset_remove_thread() removes a thread from a processor_set.
330 * Caller must hold locks on pset and thread (but only if thread
331 * has outstanding references that could be used to lookup the pset).
332 * The pset reference count is not decremented; caller must explicitly
333 * pset_deallocate.
334 */
335 void
336 pset_remove_thread(
337 processor_set_t pset,
338 thread_t thread)
339 {
340 queue_remove(&pset->threads, thread, thread_t, pset_threads);
341 thread->processor_set = PROCESSOR_SET_NULL;
342 pset->thread_count--;
343 }
344
345 /*
346 * pset_add_thread() adds a thread to a processor_set.
347 * Caller must hold locks on pset and thread. Pset references to
348 * threads are implicit.
349 */
350 void
351 pset_add_thread(
352 processor_set_t pset,
353 thread_t thread)
354 {
355 queue_enter(&pset->threads, thread, thread_t, pset_threads);
356 thread->processor_set = pset;
357 pset->thread_count++;
358 pset_reference_locked(pset);
359 }
360
361 /*
362 * thread_change_psets() changes the pset of a thread. Caller must
363 * hold locks on both psets and thread. The old pset must be
364 * explicitly pset_deallocat()'ed by caller.
365 */
366 void
367 thread_change_psets(
368 thread_t thread,
369 processor_set_t old_pset,
370 processor_set_t new_pset)
371 {
372 queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
373 old_pset->thread_count--;
374 queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
375 thread->processor_set = new_pset;
376 new_pset->thread_count++;
377 pset_reference_locked(new_pset);
378 }
379
380
381 kern_return_t
382 processor_info_count(
383 processor_flavor_t flavor,
384 mach_msg_type_number_t *count)
385 {
386 kern_return_t kr;
387
388 switch (flavor) {
389 case PROCESSOR_BASIC_INFO:
390 *count = PROCESSOR_BASIC_INFO_COUNT;
391 return KERN_SUCCESS;
392 case PROCESSOR_CPU_LOAD_INFO:
393 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
394 return KERN_SUCCESS;
395 default:
396 kr = cpu_info_count(flavor, count);
397 return kr;
398 }
399 }
400
401
/*
 *	processor_info:
 *
 *	Return information about a processor.  The fixed flavors
 *	(basic info, cpu-load ticks) are filled in here from the
 *	machine_slot table; all other flavors are passed through to
 *	the machine-dependent cpu_info().  On success, *host is set
 *	to the real host and *count to the size returned.
 */
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	register int	i, slot_num, state;
	register processor_basic_info_t		basic_info;
	register processor_cpu_load_info_t	cpu_load_info;
	kern_return_t	kr;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	slot_num = processor->slot_num;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		/* caller's buffer must be large enough */
		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = machine_slot[slot_num].cpu_type;
		basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
		/* "running" means anything other than off-line */
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = slot_num;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	case PROCESSOR_CPU_LOAD_INFO:
	{
		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		/* copy the accumulated per-state tick counters */
		cpu_load_info = (processor_cpu_load_info_t) info;
		for (i=0;i<CPU_STATE_MAX;i++)
			cpu_load_info->cpu_ticks[i] = machine_slot[slot_num].cpu_ticks[i];

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	default:
	{
		/* machine-dependent flavors */
		kr=cpu_info(flavor, slot_num, info, count);
		if (kr == KERN_SUCCESS)
			*host = &realhost;
		return(kr);
	}
	}
}
467
/*
 *	processor_start:
 *
 *	Bring a processor on line.  The processor must currently be
 *	off-line.  A first thread (running start_cpu_thread) is staged
 *	in next_thread for the processor to pick up, then the
 *	machine-dependent cpu_start() is invoked.
 */
kern_return_t
processor_start(
	processor_t	processor)
{
	int		state;
	spl_t		s;
	kern_return_t	kr;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	/*
	 * Master processor special case: bind the calling thread to it
	 * around the cpu_start() call.  NOTE(review): presumably this
	 * forces cpu_start() to execute on the master -- confirm.
	 */
	if (processor == master_processor) {
		thread_bind(current_thread(), processor);
		thread_block(THREAD_CONTINUE_NULL);
		kr = cpu_start(processor->slot_num);
		thread_bind(current_thread(), PROCESSOR_NULL);

		return(kr);
	}

	s = splsched();
	processor_lock(processor);

	/* only an off-line processor may be started */
	state = processor->state;
	if (state != PROCESSOR_OFF_LINE) {
		processor_unlock(processor);
		splx(s);
		return(KERN_FAILURE);
	}
	processor->state = PROCESSOR_START;
	processor_unlock(processor);
	splx(s);

	/*
	 * Stage the processor's first thread: create a kernel thread
	 * for start_cpu_thread, bind it to this processor, make it
	 * runnable, pull it off the run queue, and park it in
	 * next_thread for the processor to find when it comes up.
	 */
	if (processor->next_thread == THREAD_NULL) {
		thread_t	thread;   
		extern void	start_cpu_thread(void);

		thread = kernel_thread_with_priority(
					kernel_task, MAXPRI_KERNEL,
							start_cpu_thread, TRUE, FALSE);

		s = splsched();
		thread_lock(thread);
		thread_bind_locked(thread, processor);
		thread_go_locked(thread, THREAD_AWAKENED);
		(void)rem_runq(thread);
		processor->next_thread = thread;
		thread_unlock(thread);
		splx(s);
	}

	kr = cpu_start(processor->slot_num);

	/* start failed: return the processor to the off-line state */
	if (kr != KERN_SUCCESS) {
		s = splsched();
		processor_lock(processor);
		processor->state = PROCESSOR_OFF_LINE;
		processor_unlock(processor);
		splx(s);
	}

	return(kr);
}
531
532 kern_return_t
533 processor_exit(
534 processor_t processor)
535 {
536 if (processor == PROCESSOR_NULL)
537 return(KERN_INVALID_ARGUMENT);
538
539 return(processor_shutdown(processor));
540 }
541
542 kern_return_t
543 processor_control(
544 processor_t processor,
545 processor_info_t info,
546 mach_msg_type_number_t count)
547 {
548 if (processor == PROCESSOR_NULL)
549 return(KERN_INVALID_ARGUMENT);
550
551 return(cpu_control(processor->slot_num, info, count));
552 }
553
554 /*
555 * Precalculate the appropriate timesharing quanta based on load. The
556 * index into machine_quanta is the number of threads on the
557 * processor set queue. It is limited to the number of processors in
558 * the set.
559 */
560
561 void
562 pset_quanta_set(
563 processor_set_t pset)
564 {
565 register int i, count = pset->processor_count;
566
567 for (i = 1; i <= count; i++)
568 pset->machine_quanta[i] = (count + (i / 2)) / i;
569
570 pset->machine_quanta[0] = pset->machine_quanta[1];
571
572 pset_quanta_update(pset);
573 }
574
/*
 *	processor_set_create:
 *
 *	Creating new processor sets is not supported; only the static
 *	default_pset exists.  Always fails.
 */
kern_return_t
processor_set_create(
	host_t	host,
	processor_set_t	*new_set,
	processor_set_t	*new_name)
{
#ifdef	lint
	host++; new_set++; new_name++;
#endif	/* lint */
	return(KERN_FAILURE);
}
586
/*
 *	processor_set_destroy:
 *
 *	Destroying processor sets is not supported; the default set is
 *	permanent.  Always fails.
 */
kern_return_t
processor_set_destroy(
	processor_set_t	pset)
{
#ifdef	lint
	pset++;
#endif	/* lint */
	return(KERN_FAILURE);
}
596
597 kern_return_t
598 processor_get_assignment(
599 processor_t processor,
600 processor_set_t *pset)
601 {
602 int state;
603
604 state = processor->state;
605 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
606 return(KERN_FAILURE);
607
608 *pset = processor->processor_set;
609 pset_reference(*pset);
610 return(KERN_SUCCESS);
611 }
612
613 kern_return_t
614 processor_set_info(
615 processor_set_t pset,
616 int flavor,
617 host_t *host,
618 processor_set_info_t info,
619 mach_msg_type_number_t *count)
620 {
621 if (pset == PROCESSOR_SET_NULL)
622 return(KERN_INVALID_ARGUMENT);
623
624 if (flavor == PROCESSOR_SET_BASIC_INFO) {
625 register processor_set_basic_info_t basic_info;
626
627 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
628 return(KERN_FAILURE);
629
630 basic_info = (processor_set_basic_info_t) info;
631 basic_info->processor_count = pset->processor_count;
632 basic_info->default_policy = POLICY_TIMESHARE;
633
634 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
635 *host = &realhost;
636 return(KERN_SUCCESS);
637 }
638 else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
639 register policy_timeshare_base_t ts_base;
640
641 if (*count < POLICY_TIMESHARE_BASE_COUNT)
642 return(KERN_FAILURE);
643
644 ts_base = (policy_timeshare_base_t) info;
645 ts_base->base_priority = BASEPRI_DEFAULT;
646
647 *count = POLICY_TIMESHARE_BASE_COUNT;
648 *host = &realhost;
649 return(KERN_SUCCESS);
650 }
651 else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
652 register policy_fifo_base_t fifo_base;
653
654 if (*count < POLICY_FIFO_BASE_COUNT)
655 return(KERN_FAILURE);
656
657 fifo_base = (policy_fifo_base_t) info;
658 fifo_base->base_priority = BASEPRI_DEFAULT;
659
660 *count = POLICY_FIFO_BASE_COUNT;
661 *host = &realhost;
662 return(KERN_SUCCESS);
663 }
664 else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
665 register policy_rr_base_t rr_base;
666
667 if (*count < POLICY_RR_BASE_COUNT)
668 return(KERN_FAILURE);
669
670 rr_base = (policy_rr_base_t) info;
671 rr_base->base_priority = BASEPRI_DEFAULT;
672 rr_base->quantum = 1;
673
674 *count = POLICY_RR_BASE_COUNT;
675 *host = &realhost;
676 return(KERN_SUCCESS);
677 }
678 else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
679 register policy_timeshare_limit_t ts_limit;
680
681 if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
682 return(KERN_FAILURE);
683
684 ts_limit = (policy_timeshare_limit_t) info;
685 ts_limit->max_priority = MAXPRI_STANDARD;
686
687 *count = POLICY_TIMESHARE_LIMIT_COUNT;
688 *host = &realhost;
689 return(KERN_SUCCESS);
690 }
691 else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
692 register policy_fifo_limit_t fifo_limit;
693
694 if (*count < POLICY_FIFO_LIMIT_COUNT)
695 return(KERN_FAILURE);
696
697 fifo_limit = (policy_fifo_limit_t) info;
698 fifo_limit->max_priority = MAXPRI_STANDARD;
699
700 *count = POLICY_FIFO_LIMIT_COUNT;
701 *host = &realhost;
702 return(KERN_SUCCESS);
703 }
704 else if (flavor == PROCESSOR_SET_RR_LIMITS) {
705 register policy_rr_limit_t rr_limit;
706
707 if (*count < POLICY_RR_LIMIT_COUNT)
708 return(KERN_FAILURE);
709
710 rr_limit = (policy_rr_limit_t) info;
711 rr_limit->max_priority = MAXPRI_STANDARD;
712
713 *count = POLICY_RR_LIMIT_COUNT;
714 *host = &realhost;
715 return(KERN_SUCCESS);
716 }
717 else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
718 register int *enabled;
719
720 if (*count < (sizeof(*enabled)/sizeof(int)))
721 return(KERN_FAILURE);
722
723 enabled = (int *) info;
724 *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
725
726 *count = sizeof(*enabled)/sizeof(int);
727 *host = &realhost;
728 return(KERN_SUCCESS);
729 }
730
731
732 *host = HOST_NULL;
733 return(KERN_INVALID_ARGUMENT);
734 }
735
736 /*
737 * processor_set_statistics
738 *
739 * Returns scheduling statistics for a processor set.
740 */
741 kern_return_t
742 processor_set_statistics(
743 processor_set_t pset,
744 int flavor,
745 processor_set_info_t info,
746 mach_msg_type_number_t *count)
747 {
748 if (pset == PROCESSOR_SET_NULL)
749 return (KERN_INVALID_PROCESSOR_SET);
750
751 if (flavor == PROCESSOR_SET_LOAD_INFO) {
752 register processor_set_load_info_t load_info;
753
754 if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
755 return(KERN_FAILURE);
756
757 load_info = (processor_set_load_info_t) info;
758
759 pset_lock(pset);
760 load_info->task_count = pset->task_count;
761 load_info->thread_count = pset->thread_count;
762 load_info->mach_factor = pset->mach_factor;
763 load_info->load_average = pset->load_average;
764 pset_unlock(pset);
765
766 *count = PROCESSOR_SET_LOAD_INFO_COUNT;
767 return(KERN_SUCCESS);
768 }
769
770 return(KERN_INVALID_ARGUMENT);
771 }
772
/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 *
 *	Not supported in this kernel; always fails.
 */
kern_return_t
processor_set_max_priority(
	processor_set_t	pset,
	int		max_priority,
	boolean_t	change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
788
/*
 *	processor_set_policy_enable:
 *
 *	Allow indicated policy on processor set.
 *
 *	Not supported in this kernel; always fails.
 */

kern_return_t
processor_set_policy_enable(
	processor_set_t	pset,
	int		policy)
{
	return (KERN_INVALID_ARGUMENT);
}
802
/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 *
 *	Not supported in this kernel; always fails.
 */
kern_return_t
processor_set_policy_disable(
	processor_set_t	pset,
	int		policy,
	boolean_t	change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
817
#define THING_TASK	0	/* enumerate pset->tasks */
#define THING_THREAD	1	/* enumerate pset->threads */

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}.
 *
 *	Builds a kalloc'ed array of ports (via convert_task_to_port /
 *	convert_act_to_port) for everything in the set; the array is
 *	returned in *thing_list with its length in *count, and ownership
 *	of the array passes to the caller.
 */
kern_return_t
processor_set_things(
	processor_set_t			pset,
	mach_port_t			**thing_list,
	mach_msg_type_number_t		*count,
	int				type)
{
	unsigned int actual;	/* this many things */
	int i;

	vm_size_t size, size_needed;
	vm_offset_t addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	/*
	 * Allocation must happen with the pset unlocked, but the count
	 * is only valid while it is locked; loop until the buffer in
	 * hand covers the count observed under the lock.
	 */
	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_FAILURE;
		}

		if (type == THING_TASK)
			actual = pset->task_count;
		else
			actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */

	switch (type) {
	case THING_TASK: {
		task_t *tasks = (task_t *) addr;
		task_t task;

		for (i = 0, task = (task_t) queue_first(&pset->tasks);
					!queue_end(&pset->tasks, (queue_entry_t) task);
						task = (task_t) queue_next(&task->pset_tasks)) {

			task_lock(task);
			/* skip tasks whose last reference is already gone */
			if (task->ref_count > 0) {
				/* take ref for convert_task_to_port */
				task_reference_locked(task);
				tasks[i++] = task;
			}
			task_unlock(task);
		}
		break;
	}

	case THING_THREAD: {
		thread_act_t *thr_acts = (thread_act_t *) addr;
		thread_t thread;
		thread_act_t thr_act;

		for (i = 0, thread = (thread_t) queue_first(&pset->threads);
					!queue_end(&pset->threads, (queue_entry_t)thread);
					  thread = (thread_t) queue_next(&thread->pset_threads)) {

			/* only threads with a live activation are returned */
			thr_act = thread_lock_act(thread);
			if (thr_act && thr_act->ref_count > 0) {
				/* take ref for convert_act_to_port */
				act_locked_act_reference(thr_act);
				thr_acts[i++] = thr_act;
			}
			thread_unlock_act(thread);
		}
		break;
	}
	}

	/* can unlock processor set now that we have the task/thread refs */
	pset_unlock(pset);

	/* entries may have been skipped above; shrink to what was taken */
	if (i < actual) {
		actual = i;
		size_needed = actual * sizeof(mach_port_t);
	}
	assert(i == actual);	/* trivially holds after the adjustment */

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	} else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			vm_offset_t newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* drop the references taken above before failing */
				switch (type) {
				case THING_TASK: {
					task_t *tasks = (task_t *) addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				}

				case THING_THREAD: {
					thread_act_t *acts = (thread_act_t *) addr;

					for (i = 0; i < actual; i++)
						act_deallocate(acts[i]);
					break;
				}
				}
				kfree(addr, size);
				return KERN_RESOURCE_SHORTAGE;
			}

			bcopy((char *) addr, (char *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *) addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {
		case THING_TASK: {
			task_t *tasks = (task_t *) addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(tasks[i]);
			break;
		}

		case THING_THREAD: {
			thread_act_t *thr_acts = (thread_act_t *) addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_act_to_port(thr_acts[i]);
			break;
		}
		}
	}

	return(KERN_SUCCESS);
}
995
996
997 /*
998 * processor_set_tasks:
999 *
1000 * List all tasks in the processor set.
1001 */
1002 kern_return_t
1003 processor_set_tasks(
1004 processor_set_t pset,
1005 task_array_t *task_list,
1006 mach_msg_type_number_t *count)
1007 {
1008 return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
1009 }
1010
1011 /*
1012 * processor_set_threads:
1013 *
1014 * List all threads in the processor set.
1015 */
1016 kern_return_t
1017 processor_set_threads(
1018 processor_set_t pset,
1019 thread_array_t *thread_list,
1020 mach_msg_type_number_t *count)
1021 {
1022 return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
1023 }
1024
/*
 *	processor_set_base:
 *
 *	Specify per-policy base priority for a processor set.  Set processor
 *	set default policy to the given policy.  This affects newly created
 *	and assigned threads.  Optionally change existing ones.
 *
 *	Not supported in this kernel; always fails.
 */
kern_return_t
processor_set_base(
	processor_set_t 	pset,
	policy_t		policy,
        policy_base_t		base,
	boolean_t       	change)
{
	return (KERN_INVALID_ARGUMENT);
}
1041
/*
 *	processor_set_limit:
 *
 *	Specify per-policy limits for a processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 *
 *	Not supported in this kernel; always returns KERN_POLICY_LIMIT.
 */
kern_return_t
processor_set_limit(
	processor_set_t 	pset,
	policy_t		policy,
        policy_limit_t		limit,
	boolean_t       	change)
{
	return (KERN_POLICY_LIMIT);
}
1058
/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 *
 *	Not supported in this kernel; always fails.
 */
kern_return_t
processor_set_policy_control(
	processor_set_t		pset,
	int			flavor,
	processor_set_info_t	policy_info,
	mach_msg_type_number_t	count,
	boolean_t		change)
{
	return (KERN_INVALID_ARGUMENT);
}