]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/processor.c
4e00eef381be2a40151e6fb5b1833673d747b4b4
[apple/xnu.git] / osfmk / kern / processor.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52
53 /*
54 * processor.c: processor and processor_set manipulation routines.
55 */
56
57 #include <cpus.h>
58 #include <mach_host.h>
59
60 #include <mach/boolean.h>
61 #include <mach/policy.h>
62 #include <mach/processor_info.h>
63 #include <mach/vm_param.h>
64 #include <kern/cpu_number.h>
65 #include <kern/host.h>
66 #include <kern/machine.h>
67 #include <kern/misc_protos.h>
68 #include <kern/processor.h>
69 #include <kern/sched.h>
70 #include <kern/task.h>
71 #include <kern/thread.h>
72 #include <kern/ipc_host.h>
73 #include <kern/ipc_tt.h>
74 #include <ipc/ipc_port.h>
75 #include <kern/kalloc.h>
76
77 #if MACH_HOST
78 #include <kern/zalloc.h>
79 zone_t pset_zone;
80 #endif /* MACH_HOST */
81
82 #include <kern/sf.h>
83 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
84
85 /*
86 * Exported interface
87 */
88 #include <mach/mach_host_server.h>
89
/*
 * Exported variables.
 */
struct processor_set	default_pset;		/* the one-and-only default processor set */
struct processor	processor_array[NCPUS];	/* per-slot processor storage */

processor_t	master_processor;		/* bootstrap (master) processor */
processor_t	processor_ptr[NCPUS];		/* cpu slot number -> processor_t */

/* Forwards */
void	pset_init(
		processor_set_t	pset);

void	processor_init(
		register processor_t	pr,
		int			slot_num);

void	quantum_set(
		processor_set_t	pset);

kern_return_t	processor_set_base(
		processor_set_t 	pset,
		policy_t             	policy,
	        policy_base_t           base,
		boolean_t       	change);

kern_return_t	processor_set_limit(
		processor_set_t 	pset,
		policy_t		policy,
	        policy_limit_t    	limit,
		boolean_t       	change);

kern_return_t	processor_set_things(
		processor_set_t		pset,
		mach_port_t		**thing_list,
		mach_msg_type_number_t	*count,
		int			type);
128
129 /*
130 * Bootstrap the processor/pset system so the scheduler can run.
131 */
132 void
133 pset_sys_bootstrap(void)
134 {
135 register int i;
136
137 pset_init(&default_pset);
138 for (i = 0; i < NCPUS; i++) {
139 /*
140 * Initialize processor data structures.
141 * Note that cpu_to_processor(i) is processor_ptr[i].
142 */
143 processor_ptr[i] = &processor_array[i];
144 processor_init(processor_ptr[i], i);
145 }
146 master_processor = cpu_to_processor(master_cpu);
147 default_pset.active = TRUE;
148 }
149
/*
 * pset_init:
 *
 * Initialize the given processor_set structure to an empty state:
 * no processors, tasks, or threads; one reference (the creator's);
 * inactive until explicitly marked active.
 */

void pset_init(
	register processor_set_t	pset)
{
	int	i;

	/* setup run-queues */
	simple_lock_init(&pset->runq.lock, ETAP_THREAD_PSET_RUNQ);
	pset->runq.count = 0;
	for (i = 0; i < NRQBM; i++) {
	    pset->runq.bitmap[i] = 0;
	}
	/* the idle priority level is always marked occupied */
	setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);
	pset->runq.highq = IDLEPRI;
	for (i = 0; i < NRQS; i++) {
	    queue_init(&(pset->runq.queues[i]));
	}

	queue_init(&pset->idle_queue);
	pset->idle_count = 0;
	simple_lock_init(&pset->idle_lock, ETAP_THREAD_PSET_IDLE);
	pset->mach_factor = pset->load_average = 0;
	pset->sched_load = 0;
	queue_init(&pset->processors);
	pset->processor_count = 0;
	simple_lock_init(&pset->processors_lock, ETAP_THREAD_PSET);
	queue_init(&pset->tasks);
	pset->task_count = 0;
	queue_init(&pset->threads);
	pset->thread_count = 0;
	pset->ref_count = 1;		/* creator's reference */
	pset->active = FALSE;		/* caller activates when ready */
	mutex_init(&pset->lock, ETAP_THREAD_PSET);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->max_priority = MAXPRI_STANDARD;
	pset->policies = POLICY_TIMESHARE | POLICY_FIFO | POLICY_RR;
	pset->set_quantum = min_quantum;

	pset->quantum_adj_index = 0;
	simple_lock_init(&pset->quantum_adj_lock, ETAP_THREAD_PSET_QUANT);

	/*
	 * NOTE(review): <= is intentional — machine_quantum appears to be
	 * indexed 0..NCPUS inclusive (see quantum_set); confirm the array
	 * is declared with NCPUS+1 entries in processor.h.
	 */
	for (i = 0; i <= NCPUS; i++) {
	    pset->machine_quantum[i] = min_quantum;
	}

	/* per-policy scheduling bases and limits */
	pset->policy_default = POLICY_TIMESHARE;
	pset->policy_limit.ts.max_priority = MAXPRI_STANDARD;
	pset->policy_limit.rr.max_priority = MAXPRI_STANDARD;
	pset->policy_limit.fifo.max_priority = MAXPRI_STANDARD;
	pset->policy_base.ts.base_priority = BASEPRI_DEFAULT;
	pset->policy_base.rr.base_priority = BASEPRI_DEFAULT;
	pset->policy_base.rr.quantum = min_quantum;
	pset->policy_base.fifo.base_priority = BASEPRI_DEFAULT;
}
208
/*
 * processor_init:
 *
 * Initialize the given processor structure for the processor in
 * the slot specified by slot_num.  The processor starts off line
 * and unassigned to any processor set.
 */
void
processor_init(
	register processor_t	pr,
	int			slot_num)
{
	int	i;

	/* setup run-queues (local per-processor queue) */
	simple_lock_init(&pr->runq.lock, ETAP_THREAD_PROC_RUNQ);
	pr->runq.count = 0;
	for (i = 0; i < NRQBM; i++) {
	    pr->runq.bitmap[i] = 0;
	}
	/* the idle priority level is always marked occupied */
	setbit(MAXPRI - IDLEPRI, pr->runq.bitmap);
	pr->runq.highq = IDLEPRI;
	for (i = 0; i < NRQS; i++) {
	    queue_init(&(pr->runq.queues[i]));
	}

	queue_init(&pr->processor_queue);
	pr->state = PROCESSOR_OFF_LINE;		/* not running yet */
	pr->next_thread = THREAD_NULL;
	pr->idle_thread = THREAD_NULL;
	pr->quantum = 0;
	pr->first_quantum = FALSE;
	pr->last_quantum = 0;
	pr->processor_set = PROCESSOR_SET_NULL;	/* assigned later */
	pr->processor_set_next = PROCESSOR_SET_NULL;
	queue_init(&pr->processors);
	simple_lock_init(&pr->lock, ETAP_THREAD_PROC);
	pr->processor_self = IP_NULL;
	pr->slot_num = slot_num;
}
246
/*
 * pset_remove_processor() removes a processor from a processor_set.
 * It can only be called on the current processor.  Caller must
 * hold lock on current processor and processor set.
 */
void
pset_remove_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	if (pset != processor->processor_set)
		panic("pset_remove_processor: wrong pset");

	queue_remove(&pset->processors, processor, processor_t, processors);
	processor->processor_set = PROCESSOR_SET_NULL;
	pset->processor_count--;
	/* recompute quanta now that the set has one fewer processor */
	quantum_set(pset);
}
265
/*
 * pset_add_processor() adds a processor to a processor_set.
 * It can only be called on the current processor.  Caller must
 * hold lock on current processor and on pset.  No reference counting on
 * processors.  Processor reference to pset is implicit.
 */
void
pset_add_processor(
	processor_set_t	pset,
	processor_t	processor)
{
	queue_enter(&pset->processors, processor, processor_t, processors);
	processor->processor_set = pset;
	pset->processor_count++;
	/* recompute quanta now that the set has one more processor */
	quantum_set(pset);
}
282
283 /*
284 * pset_remove_task() removes a task from a processor_set.
285 * Caller must hold locks on pset and task. Pset reference count
286 * is not decremented; caller must explicitly pset_deallocate.
287 */
288 void
289 pset_remove_task(
290 processor_set_t pset,
291 task_t task)
292 {
293 if (pset != task->processor_set)
294 return;
295
296 queue_remove(&pset->tasks, task, task_t, pset_tasks);
297 task->processor_set = PROCESSOR_SET_NULL;
298 pset->task_count--;
299 }
300
301 /*
302 * pset_add_task() adds a task to a processor_set.
303 * Caller must hold locks on pset and task. Pset references to
304 * tasks are implicit.
305 */
306 void
307 pset_add_task(
308 processor_set_t pset,
309 task_t task)
310 {
311 queue_enter(&pset->tasks, task, task_t, pset_tasks);
312 task->processor_set = pset;
313 pset->task_count++;
314 pset->ref_count++;
315 }
316
317 /*
318 * pset_remove_thread() removes a thread from a processor_set.
319 * Caller must hold locks on pset and thread. Pset reference count
320 * is not decremented; caller must explicitly pset_deallocate.
321 */
322 void
323 pset_remove_thread(
324 processor_set_t pset,
325 thread_t thread)
326 {
327 queue_remove(&pset->threads, thread, thread_t, pset_threads);
328 thread->processor_set = PROCESSOR_SET_NULL;
329 pset->thread_count--;
330 }
331
332 /*
333 * pset_add_thread() adds a thread to a processor_set.
334 * Caller must hold locks on pset and thread. Pset references to
335 * threads are implicit.
336 */
337 void
338 pset_add_thread(
339 processor_set_t pset,
340 thread_t thread)
341 {
342 queue_enter(&pset->threads, thread, thread_t, pset_threads);
343 thread->processor_set = pset;
344 pset->thread_count++;
345 pset->ref_count++;
346 }
347
348 /*
349 * thread_change_psets() changes the pset of a thread. Caller must
350 * hold locks on both psets and thread. The old pset must be
351 * explicitly pset_deallocat()'ed by caller.
352 */
353 void
354 thread_change_psets(
355 thread_t thread,
356 processor_set_t old_pset,
357 processor_set_t new_pset)
358 {
359 queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
360 old_pset->thread_count--;
361 queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
362 thread->processor_set = new_pset;
363 new_pset->thread_count++;
364 new_pset->ref_count++;
365 }
366
/*
 * pset_deallocate:
 *
 * Remove one reference to the processor set.  Destroy processor_set
 * if this was the last reference.
 *
 * NOTE: in this configuration only the default pset exists, and it
 * must never lose its last reference, so dropping to zero panics.
 */
void
pset_deallocate(
	processor_set_t	pset)
{
	if (pset == PROCESSOR_SET_NULL)
		return;

	pset_lock(pset);
	if (--pset->ref_count > 0) {
		pset_unlock(pset);
		return;
	}

	/* last reference dropped — should be impossible for default_pset */
	panic("pset_deallocate: default_pset destroyed");
}
388
/*
 * pset_reference:
 *
 * Add one reference to the processor set.  Takes and drops the
 * pset lock around the increment.
 */
void
pset_reference(
	processor_set_t	pset)
{
	pset_lock(pset);
	pset->ref_count++;
	pset_unlock(pset);
}
402
403
404 kern_return_t
405 processor_info_count(
406 processor_flavor_t flavor,
407 mach_msg_type_number_t *count)
408 {
409 kern_return_t kr;
410
411 switch (flavor) {
412 case PROCESSOR_BASIC_INFO:
413 *count = PROCESSOR_BASIC_INFO_COUNT;
414 return KERN_SUCCESS;
415 case PROCESSOR_CPU_LOAD_INFO:
416 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
417 return KERN_SUCCESS;
418 default:
419 kr = cpu_info_count(flavor, count);
420 return kr;
421 }
422 }
423
424
/*
 * processor_info:
 *
 * Return information about the given processor for the requested
 * flavor.  On success *host is set to the real host and *count to
 * the number of units written into info.  Unknown flavors are
 * delegated to the machine-dependent cpu_info().
 */
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	register int	i, slot_num, state;
	register processor_basic_info_t		basic_info;
	register processor_cpu_load_info_t	cpu_load_info;
	kern_return_t	kr;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	slot_num = processor->slot_num;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
	    /* caller's buffer must be large enough */
	    if (*count < PROCESSOR_BASIC_INFO_COUNT)
	      return(KERN_FAILURE);

	    basic_info = (processor_basic_info_t) info;
	    basic_info->cpu_type = machine_slot[slot_num].cpu_type;
	    basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
	    state = processor->state;
	    /* any state other than off-line counts as "running" */
	    if (state == PROCESSOR_OFF_LINE)
	      basic_info->running = FALSE;
	    else
	      basic_info->running = TRUE;
	    basic_info->slot_num = slot_num;
	    if (processor == master_processor) 
	      basic_info->is_master = TRUE;
	    else
	      basic_info->is_master = FALSE;

	    *count = PROCESSOR_BASIC_INFO_COUNT;
	    *host = &realhost;
	    return(KERN_SUCCESS);
	}
	case PROCESSOR_CPU_LOAD_INFO:
	{
	    if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
	      return(KERN_FAILURE);

	    /* copy out the accumulated per-state tick counts */
	    cpu_load_info = (processor_cpu_load_info_t) info;
	    for (i=0;i<CPU_STATE_MAX;i++)
	      cpu_load_info->cpu_ticks[i] = machine_slot[slot_num].cpu_ticks[i];

	    *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
	    *host = &realhost;
	    return(KERN_SUCCESS);
	}
	default:
	{
	    /* machine-dependent flavors */
	    kr=cpu_info(flavor, slot_num, info, count);
	    if (kr == KERN_SUCCESS)
		*host = &realhost;		   
	    return(kr);
	}
	}
}
490
/*
 * processor_start:
 *
 * Bring a processor on line.  For the master processor this is just
 * a machine-dependent cpu_start().  For others, the processor is
 * moved to the START state, given a bound startup thread if it does
 * not already have a next_thread, and then started; on failure the
 * processor is returned to the OFF_LINE state.
 */
kern_return_t
processor_start(
	processor_t	processor)
{
    	int	state;
	spl_t	s;
	kern_return_t	kr;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (processor == master_processor)
		return(cpu_start(processor->slot_num));

	/* state transitions are protected by the processor lock at splsched */
	s = splsched();
	processor_lock(processor);

	state = processor->state;
	if (state != PROCESSOR_OFF_LINE) {
		/* only an off-line processor can be started */
		processor_unlock(processor);
		splx(s);
		return(KERN_FAILURE);
	}
	processor->state = PROCESSOR_START;
	processor_unlock(processor);
	splx(s);

	/*
	 * Create the startup thread and park it as the processor's
	 * next_thread: awaken it, then pull it off the run queue so
	 * the new cpu picks it up directly.
	 */
	if (processor->next_thread == THREAD_NULL) {
		thread_t	thread;   
		extern void	start_cpu_thread(void);

		thread = kernel_thread_with_priority(kernel_task, MAXPRI_KERNBAND,
											start_cpu_thread, FALSE);

		s = splsched();
		thread_lock(thread);
		thread_bind_locked(thread, processor);
		thread_go_locked(thread, THREAD_AWAKENED);
		(void)rem_runq(thread);
		processor->next_thread = thread;
		thread_unlock(thread);
		splx(s);
	}

	kr = cpu_start(processor->slot_num);

	if (kr != KERN_SUCCESS) {
		/* start failed: back to off-line */
		s = splsched();
		processor_lock(processor);
		processor->state = PROCESSOR_OFF_LINE;
		processor_unlock(processor);
		splx(s);
	}

	return(kr);
}
547
548 kern_return_t
549 processor_exit(
550 processor_t processor)
551 {
552 if (processor == PROCESSOR_NULL)
553 return(KERN_INVALID_ARGUMENT);
554
555 return(processor_shutdown(processor));
556 }
557
558 kern_return_t
559 processor_control(
560 processor_t processor,
561 processor_info_t info,
562 mach_msg_type_number_t count)
563 {
564 if (processor == PROCESSOR_NULL)
565 return(KERN_INVALID_ARGUMENT);
566
567 return(cpu_control(processor->slot_num, info, count));
568 }
569
/*
 * Precalculate the appropriate system quanta based on load.  The
 * index into machine_quantum is the number of threads on the
 * processor set queue.  It is limited to the number of processors in
 * the set.
 */

void
quantum_set(
	processor_set_t	pset)
{
#if	NCPUS > 1
	register int	i, ncpus;

	ncpus = pset->processor_count;

	/*
	 * machine_quantum[i] = (min_quantum * ncpus) / i, rounded to
	 * nearest by adding i/2 before the division.
	 */
	for ( i=1 ; i <= ncpus ; i++) {
		pset->machine_quantum[i] =
			((min_quantum * ncpus) + (i/2)) / i ;
	}

	/* an empty queue uses the single-thread quantum */
	pset->machine_quantum[0] =  2 * pset->machine_quantum[1];

	i = ((pset->runq.count > pset->processor_count) ?
				pset->processor_count : pset->runq.count);
	pset->set_quantum = pset->machine_quantum[i];
#else   /* NCPUS > 1 */
	/* uniprocessor: quantum never varies */
	default_pset.set_quantum = min_quantum;
#endif	/* NCPUS > 1 */
}
597
598 kern_return_t
599 processor_set_create(
600 host_t host,
601 processor_set_t *new_set,
602 processor_set_t *new_name)
603 {
604 #ifdef lint
605 host++; new_set++; new_name++;
606 #endif /* lint */
607 return(KERN_FAILURE);
608 }
609
610 kern_return_t
611 processor_set_destroy(
612 processor_set_t pset)
613 {
614 #ifdef lint
615 pset++;
616 #endif /* lint */
617 return(KERN_FAILURE);
618 }
619
620 kern_return_t
621 processor_get_assignment(
622 processor_t processor,
623 processor_set_t *pset)
624 {
625 int state;
626
627 state = processor->state;
628 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
629 return(KERN_FAILURE);
630
631 *pset = processor->processor_set;
632 pset_reference(*pset);
633 return(KERN_SUCCESS);
634 }
635
/*
 * processor_set_info:
 *
 * Return information about the given processor set for the
 * requested flavor (basic info, per-policy base/limit values, or
 * the set of enabled policies).  On success *host is set to the
 * real host and *count to the number of units written into info;
 * on an unknown flavor *host is set to HOST_NULL.
 */
kern_return_t
processor_set_info(
	processor_set_t		pset,
	int			flavor,
	host_t			*host,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		register processor_set_basic_info_t	basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_set_basic_info_t) info;

		pset_lock(pset);
		/* processor_count is protected by its own simple lock */
		simple_lock(&pset->processors_lock);
		basic_info->processor_count = pset->processor_count;
		simple_unlock(&pset->processors_lock);
		basic_info->default_policy = pset->policy_default;
		pset_unlock(pset);

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		register policy_timeshare_base_t	ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;

		/* structure copy under the pset lock */
		pset_lock(pset);
		*ts_base = pset->policy_base.ts;
		pset_unlock(pset);

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		register policy_fifo_base_t		fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return(KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;

		pset_lock(pset);
		*fifo_base = pset->policy_base.fifo;
		pset_unlock(pset);

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		register policy_rr_base_t		rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return(KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;

		pset_lock(pset);
		*rr_base = pset->policy_base.rr;
		pset_unlock(pset);

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		register policy_timeshare_limit_t	ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return(KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;

		pset_lock(pset);
		*ts_limit = pset->policy_limit.ts;
		pset_unlock(pset);

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		register policy_fifo_limit_t		fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return(KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;

		pset_lock(pset);
		*fifo_limit = pset->policy_limit.fifo;
		pset_unlock(pset);

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		register policy_rr_limit_t		rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return(KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;

		pset_lock(pset);
		*rr_limit = pset->policy_limit.rr;
		pset_unlock(pset);

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		register int				*enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return(KERN_FAILURE);

		enabled = (int *) info;

		/* bitmask of POLICY_* values permitted on this set */
		pset_lock(pset);
		*enabled = pset->policies;
		pset_unlock(pset);

		*count = sizeof(*enabled)/sizeof(int);
		*host = &realhost;
		return(KERN_SUCCESS);
	}


	*host = HOST_NULL;
	return(KERN_INVALID_ARGUMENT);
}
783
/*
 *	processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set
 *	(task/thread counts, mach factor, and load average).
 */
kern_return_t 
processor_set_statistics(
	processor_set_t         pset,
	int                     flavor,
	processor_set_info_t    info,
	mach_msg_type_number_t	*count)
{
        if (pset == PROCESSOR_SET_NULL)
                return (KERN_INVALID_PROCESSOR_SET);

        if (flavor == PROCESSOR_SET_LOAD_INFO) {
                register processor_set_load_info_t load_info;

                if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
                        return(KERN_FAILURE);

                load_info = (processor_set_load_info_t) info;

                pset_lock(pset);
                load_info->task_count = pset->task_count;
                load_info->thread_count = pset->thread_count;
		/* mach_factor/load_average are guarded by processors_lock */
                simple_lock(&pset->processors_lock);
                load_info->mach_factor = pset->mach_factor;
                load_info->load_average = pset->load_average;
                simple_unlock(&pset->processors_lock);
                pset_unlock(pset);

                *count = PROCESSOR_SET_LOAD_INFO_COUNT;
                return(KERN_SUCCESS);
        }

        return(KERN_INVALID_ARGUMENT);
}
822
823 /*
824 * processor_set_max_priority:
825 *
826 * Specify max priority permitted on processor set. This affects
827 * newly created and assigned threads. Optionally change existing
828 * ones.
829 */
830 kern_return_t
831 processor_set_max_priority(
832 processor_set_t pset,
833 int max_priority,
834 boolean_t change_threads)
835 {
836 return (KERN_INVALID_ARGUMENT);
837 }
838
839 /*
840 * processor_set_policy_enable:
841 *
842 * Allow indicated policy on processor set.
843 */
844
845 kern_return_t
846 processor_set_policy_enable(
847 processor_set_t pset,
848 int policy)
849 {
850 return (KERN_INVALID_ARGUMENT);
851 }
852
853 /*
854 * processor_set_policy_disable:
855 *
856 * Forbid indicated policy on processor set. Time sharing cannot
857 * be forbidden.
858 */
859 kern_return_t
860 processor_set_policy_disable(
861 processor_set_t pset,
862 int policy,
863 boolean_t change_threads)
864 {
865 return (KERN_INVALID_ARGUMENT);
866 }
867
868 #define THING_TASK 0
869 #define THING_THREAD 1
870
/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}.
 *
 *	Allocates a kalloc'd array of task or thread-activation
 *	pointers (with a reference taken on each), converts them to
 *	send rights, and returns the array via *thing_list / *count.
 *	Uses the classic unlock-allocate-relock retry loop, since
 *	allocation cannot happen with the pset locked.
 */
kern_return_t
processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	int i;

	vm_size_t size, size_needed;
	vm_offset_t addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_FAILURE;
		}

		if (type == THING_TASK)
			actual = pset->task_count;
		else
			actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */

	switch (type) {
	    case THING_TASK: {
		task_t *tasks = (task_t *) addr;
		task_t task;

		/* walk the pset's task list, taking a ref on each */
		for (i = 0, task = (task_t) queue_first(&pset->tasks);
		     i < actual;
		     i++, task = (task_t) queue_next(&task->pset_tasks)) {
			/* take ref for convert_task_to_port */
			task_reference(task);
			tasks[i] = task;
		}
		assert(queue_end(&pset->tasks, (queue_entry_t) task));
		break;
	    }

	    case THING_THREAD: {
		thread_act_t *thr_acts = (thread_act_t *) addr;
		thread_t thread;
		thread_act_t thr_act;
		queue_head_t *list;

		/*
		 * Walk the thread list, collecting only threads whose
		 * activation is still live; dying threads are skipped,
		 * so fewer than 'actual' entries may be filled.
		 */
		list = &pset->threads;
		thread = (thread_t) queue_first(list);
		i = 0;
		while (i < actual && !queue_end(list, (queue_entry_t)thread)) {
			thr_act = thread_lock_act(thread);
			if (thr_act && thr_act->ref_count > 0) {
				/* take ref for convert_act_to_port */
				act_locked_act_reference(thr_act);
				thr_acts[i] = thr_act;
				i++;
			}
			thread_unlock_act(thread);
			thread = (thread_t) queue_next(&thread->pset_threads);
		}
		if (i < actual) {
			actual = i;
			size_needed = actual * sizeof(mach_port_t);
		}
		break;
	    }
	}
	
	/* can unlock processor set now that we have the task/thread refs */
	pset_unlock(pset);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	} else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			vm_offset_t newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* drop the refs taken above before failing */
				switch (type) {
				    case THING_TASK: {
					task_t *tasks = (task_t *) addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				    }

				    case THING_THREAD: {
					thread_t *threads = (thread_t *) addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(threads[i]);
					break;
				    }
				}
				kfree(addr, size);
				return KERN_RESOURCE_SHORTAGE;
			}

			bcopy((char *) addr, (char *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *) addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {
		    case THING_TASK: {
			task_t *tasks = (task_t *) addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(tasks[i]);
			break;
		    }

		    case THING_THREAD: {
			thread_act_t *thr_acts = (thread_act_t *) addr;

			for (i = 0; i < actual; i++)
			  	(*thing_list)[i] = convert_act_to_port(thr_acts[i]);
			break;
		    }
		}
	}

	return(KERN_SUCCESS);
}
1042
1043
/*
 *	processor_set_tasks:
 *
 *	List all tasks in the processor set.  Returns an array of
 *	task send rights via task_list/count; see processor_set_things.
 */
kern_return_t
processor_set_tasks(
	processor_set_t		pset,
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
}
1057
/*
 *	processor_set_threads:
 *
 *	List all threads in the processor set.  Returns an array of
 *	thread send rights via thread_list/count; see processor_set_things.
 */
kern_return_t
processor_set_threads(
	processor_set_t		pset,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}
1071
1072 /*
1073 * processor_set_base:
1074 *
1075 * Specify per-policy base priority for a processor set. Set processor
1076 * set default policy to the given policy. This affects newly created
1077 * and assigned threads. Optionally change existing ones.
1078 */
1079 kern_return_t
1080 processor_set_base(
1081 processor_set_t pset,
1082 policy_t policy,
1083 policy_base_t base,
1084 boolean_t change)
1085 {
1086 return (KERN_INVALID_ARGUMENT);
1087 }
1088
/*
 *	processor_set_limit:
 *
 *	Specify per-policy limits for a processor set.  This affects
 * 	newly created and assigned threads.  Optionally change existing
 * 	ones.
 *
 *	NOTE(review): unlike the other unsupported pset policy routines
 *	(which return KERN_INVALID_ARGUMENT), this one returns
 *	KERN_POLICY_LIMIT — presumably a deliberate distinct error for
 *	callers; confirm before changing.
 */
kern_return_t
processor_set_limit(
	processor_set_t 	pset,
	policy_t		policy,
        policy_limit_t    	limit,
	boolean_t       	change)
{
	return (KERN_POLICY_LIMIT);
}
1105
1106 /*
1107 * processor_set_policy_control
1108 *
1109 * Controls the scheduling attributes governing the processor set.
1110 * Allows control of enabled policies, and per-policy base and limit
1111 * priorities.
1112 */
1113 kern_return_t
1114 processor_set_policy_control(
1115 processor_set_t pset,
1116 int flavor,
1117 processor_set_info_t policy_info,
1118 mach_msg_type_number_t count,
1119 boolean_t change)
1120 {
1121 return (KERN_INVALID_ARGUMENT);
1122 }