X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/0b4e3aa066abc0728aacb4bbeb86f53f9737156e..5eebf7385fedb1517b66b53c28e5aa6bb0a2be50:/osfmk/kern/processor.c?ds=sidebyside

diff --git a/osfmk/kern/processor.c b/osfmk/kern/processor.c
index a37913b15..6a1b9e459 100644
--- a/osfmk/kern/processor.c
+++ b/osfmk/kern/processor.c
@@ -84,6 +84,8 @@
 struct processor_set	default_pset;
 struct processor	processor_array[NCPUS];
 
+int	master_cpu = 0;
+
 processor_t	master_processor;
 processor_t	processor_ptr[NCPUS];
 
@@ -95,7 +97,7 @@ void processor_init(
 	register processor_t	pr,
 	int			slot_num);
 
-void	pset_quanta_set(
+void	pset_quanta_setup(
 	processor_set_t		pset);
 
 kern_return_t	processor_set_base(
@@ -126,6 +128,7 @@ pset_sys_bootstrap(void)
 	register int	i;
 
 	pset_init(&default_pset);
+
 	for (i = 0; i < NCPUS; i++) {
 		/*
 		 *	Initialize processor data structures.
@@ -134,7 +137,9 @@ pset_sys_bootstrap(void)
 		processor_ptr[i] = &processor_array[i];
 		processor_init(processor_ptr[i], i);
 	}
+
 	master_processor = cpu_to_processor(master_cpu);
+
 	default_pset.active = TRUE;
 }
 
@@ -145,28 +150,26 @@ void pset_init(
 	register processor_set_t	pset)
 {
-	int	i;
+	register int	i;
 
-	/* setup run-queues */
-	simple_lock_init(&pset->runq.lock, ETAP_THREAD_PSET_RUNQ);
-	pset->runq.count = 0;
-	for (i = 0; i < NRQBM; i++) {
+	/* setup run queue */
+	pset->runq.highq = IDLEPRI;
+	for (i = 0; i < NRQBM; i++)
 		pset->runq.bitmap[i] = 0;
-	}
 	setbit(MAXPRI - IDLEPRI, pset->runq.bitmap);
-	pset->runq.highq = IDLEPRI;
-	for (i = 0; i < NRQS; i++) {
-		queue_init(&(pset->runq.queues[i]));
-	}
+	pset->runq.urgency = pset->runq.count = 0;
+	for (i = 0; i < NRQS; i++)
+		queue_init(&pset->runq.queues[i]);
 
 	queue_init(&pset->idle_queue);
 	pset->idle_count = 0;
-	simple_lock_init(&pset->idle_lock, ETAP_THREAD_PSET_IDLE);
+	queue_init(&pset->active_queue);
+	simple_lock_init(&pset->sched_lock, ETAP_THREAD_PSET_IDLE);
+	pset->run_count = pset->share_count = 0;
 	pset->mach_factor = pset->load_average = 0;
 	pset->sched_load = 0;
 	queue_init(&pset->processors);
 	pset->processor_count = 0;
-	simple_lock_init(&pset->processors_lock, ETAP_THREAD_PSET);
 	queue_init(&pset->tasks);
 	pset->task_count = 0;
 	queue_init(&pset->threads);
@@ -176,10 +179,10 @@ void pset_init(
 	mutex_init(&pset->lock, ETAP_THREAD_PSET);
 	pset->pset_self = IP_NULL;
 	pset->pset_name_self = IP_NULL;
-	pset->set_quanta = 1;
+	pset->timeshare_quanta = 1;
 
 	for (i = 0; i <= NCPUS; i++)
-		pset->machine_quanta[i] = 1;
+		pset->quantum_factors[i] = 1;
 }
 
 /*
@@ -188,37 +191,63 @@ void pset_init(
  */
 void
 processor_init(
-	register processor_t	pr,
-	int		slot_num)
+	register processor_t	p,
+	int			slot_num)
 {
-	int	i;
+	register int	i;
 
-	/* setup run-queues */
-	simple_lock_init(&pr->runq.lock, ETAP_THREAD_PROC_RUNQ);
-	pr->runq.count = 0;
-	for (i = 0; i < NRQBM; i++) {
-		pr->runq.bitmap[i] = 0;
-	}
-	setbit(MAXPRI - IDLEPRI, pr->runq.bitmap);
-	pr->runq.highq = IDLEPRI;
-	for (i = 0; i < NRQS; i++) {
-		queue_init(&(pr->runq.queues[i]));
-	}
+	/* setup run queue */
+	p->runq.highq = IDLEPRI;
+	for (i = 0; i < NRQBM; i++)
+		p->runq.bitmap[i] = 0;
+	setbit(MAXPRI - IDLEPRI, p->runq.bitmap);
+	p->runq.urgency = p->runq.count = 0;
+	for (i = 0; i < NRQS; i++)
+		queue_init(&p->runq.queues[i]);
+
+	p->state = PROCESSOR_OFF_LINE;
+	p->active_thread = p->next_thread = p->idle_thread = THREAD_NULL;
+	p->processor_set = PROCESSOR_SET_NULL;
+	p->current_pri = MINPRI;
+	timer_call_setup(&p->quantum_timer, thread_quantum_expire, p);
+	p->timeslice = 0;
+	p->deadline = UINT64_MAX;
+	simple_lock_init(&p->lock, ETAP_THREAD_PROC);
+	p->processor_self = IP_NULL;
+	p->slot_num = slot_num;
+}
+
+/*
+ * pset_deallocate:
+ *
+ * Remove one reference to the processor set. Destroy processor_set
+ * if this was the last reference.
+ */
+void
+pset_deallocate(
+	processor_set_t	pset)
+{
+	if (pset == PROCESSOR_SET_NULL)
+		return;
 
-	queue_init(&pr->processor_queue);
-	pr->state = PROCESSOR_OFF_LINE;
-	pr->next_thread = THREAD_NULL;
-	pr->idle_thread = THREAD_NULL;
-	timer_call_setup(&pr->quantum_timer, thread_quantum_expire, pr);
-	pr->slice_quanta = 0;
-	pr->processor_set = PROCESSOR_SET_NULL;
-	pr->processor_set_next = PROCESSOR_SET_NULL;
-	queue_init(&pr->processors);
-	simple_lock_init(&pr->lock, ETAP_THREAD_PROC);
-	pr->processor_self = IP_NULL;
-	pr->slot_num = slot_num;
+	assert(pset == &default_pset);
+	return;
 }
 
+/*
+ * pset_reference:
+ *
+ * Add one reference to the processor set.
+ */
+void
+pset_reference(
+	processor_set_t	pset)
+{
+	assert(pset == &default_pset);
+}
+
+#define pset_reference_locked(pset) assert(pset == &default_pset)
+
 /*
  * pset_remove_processor() removes a processor from a processor_set.
  * It can only be called on the current processor.  Caller must
@@ -235,7 +264,7 @@ pset_remove_processor(
 	queue_remove(&pset->processors, processor, processor_t, processors);
 	processor->processor_set = PROCESSOR_SET_NULL;
 	pset->processor_count--;
-	pset_quanta_set(pset);
+	pset_quanta_setup(pset);
 }
 
 /*
@@ -252,13 +281,15 @@ pset_add_processor(
 	queue_enter(&pset->processors, processor, processor_t, processors);
 	processor->processor_set = pset;
 	pset->processor_count++;
-	pset_quanta_set(pset);
+	pset_quanta_setup(pset);
 }
 
 /*
  * pset_remove_task() removes a task from a processor_set.
- * Caller must hold locks on pset and task.  Pset reference count
- * is not decremented; caller must explicitly pset_deallocate.
+ * Caller must hold locks on pset and task (unless task has
+ * no references left, in which case just the pset lock is
+ * needed).  Pset reference count is not decremented;
+ * caller must explicitly pset_deallocate.
  */
 void
 pset_remove_task(
@@ -286,13 +317,15 @@ pset_add_task(
 	queue_enter(&pset->tasks, task, task_t, pset_tasks);
 	task->processor_set = pset;
 	pset->task_count++;
-	pset->ref_count++;
+	pset_reference_locked(pset);
 }
 
 /*
  * pset_remove_thread() removes a thread from a processor_set.
- * Caller must hold locks on pset and thread.  Pset reference count
- * is not decremented; caller must explicitly pset_deallocate.
+ * Caller must hold locks on pset and thread (but only if thread
+ * has outstanding references that could be used to lookup the pset).
+ * The pset reference count is not decremented; caller must explicitly
+ * pset_deallocate.
  */
 void
 pset_remove_thread(
@@ -317,7 +350,7 @@ pset_add_thread(
 	queue_enter(&pset->threads, thread, thread_t, pset_threads);
 	thread->processor_set = pset;
 	pset->thread_count++;
-	pset->ref_count++;
+	pset_reference_locked(pset);
 }
 
 /*
@@ -336,45 +369,9 @@ thread_change_psets(
 	queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
 	thread->processor_set = new_pset;
 	new_pset->thread_count++;
-	new_pset->ref_count++;
+	pset_reference_locked(new_pset);
 }
 
-/*
- * pset_deallocate:
- *
- * Remove one reference to the processor set. Destroy processor_set
- * if this was the last reference.
- */
-void
-pset_deallocate(
-	processor_set_t	pset)
-{
-	if (pset == PROCESSOR_SET_NULL)
-		return;
-
-	pset_lock(pset);
-	if (--pset->ref_count > 0) {
-		pset_unlock(pset);
-		return;
-	}
-
-	panic("pset_deallocate: default_pset destroyed");
-}
-
-/*
- * pset_reference:
- *
- * Add one reference to the processor set.
- */
-void
-pset_reference(
-	processor_set_t	pset)
-{
-	pset_lock(pset);
-	pset->ref_count++;
-	pset_unlock(pset);
-}
-
 
 kern_return_t
 processor_info_count(
@@ -467,31 +464,34 @@ kern_return_t
 processor_start(
 	processor_t	processor)
 {
-	int		state;
-	spl_t		s;
-	kern_return_t	kr;
+	kern_return_t	result;
+	spl_t			s;
 
 	if (processor == PROCESSOR_NULL)
 		return(KERN_INVALID_ARGUMENT);
 
 	if (processor == master_processor) {
-		thread_bind(current_thread(), processor);
-		thread_block((void (*)(void)) 0);
-		kr = cpu_start(processor->slot_num);
-		thread_bind(current_thread(), PROCESSOR_NULL);
+		processor_t		prev;
 
-		return(kr);
+		prev = thread_bind(current_thread(), processor);
+		thread_block(THREAD_CONTINUE_NULL);
+
+		result = cpu_start(processor->slot_num);
+
+		thread_bind(current_thread(), prev);
+
+		return (result);
 	}
 
 	s = splsched();
 	processor_lock(processor);
-
-	state = processor->state;
-	if (state != PROCESSOR_OFF_LINE) {
+	if (processor->state != PROCESSOR_OFF_LINE) {
 		processor_unlock(processor);
 		splx(s);
-		return(KERN_FAILURE);
+
+		return (KERN_FAILURE);
 	}
+
 	processor->state = PROCESSOR_START;
 	processor_unlock(processor);
 	splx(s);
@@ -500,31 +500,35 @@ processor_start(
 		thread_t		thread;
 		extern void		start_cpu_thread(void);
 
-		thread = kernel_thread_with_priority(
-					kernel_task, MAXPRI_KERNEL,
-					start_cpu_thread, TRUE, FALSE);
+		thread = kernel_thread_create(start_cpu_thread, MAXPRI_KERNEL);
 
 		s = splsched();
 		thread_lock(thread);
-		thread_bind_locked(thread, processor);
-		thread_go_locked(thread, THREAD_AWAKENED);
-		(void)rem_runq(thread);
+		thread->bound_processor = processor;
 		processor->next_thread = thread;
+		thread->state = TH_RUN;
+		pset_run_incr(thread->processor_set);
 		thread_unlock(thread);
 		splx(s);
 	}
 
-	kr = cpu_start(processor->slot_num);
+	if (processor->processor_self == IP_NULL)
+		ipc_processor_init(processor);
 
-	if (kr != KERN_SUCCESS) {
+	result = cpu_start(processor->slot_num);
+	if (result != KERN_SUCCESS) {
 		s = splsched();
 		processor_lock(processor);
 		processor->state = PROCESSOR_OFF_LINE;
 		processor_unlock(processor);
 		splx(s);
+
+		return (result);
 	}
 
-	return(kr);
+	ipc_processor_enable(processor);
+
+	return (KERN_SUCCESS);
 }
 
 kern_return_t
@@ -551,26 +555,23 @@ processor_control(
 
 /*
  * Precalculate the appropriate timesharing quanta based on load.  The
- * index into machine_quanta is the number of threads on the
+ * index into quantum_factors[] is the number of threads on the
  * processor set queue.  It is limited to the number of processors in
  * the set.
  */
 
 void
-pset_quanta_set(
+pset_quanta_setup(
 	processor_set_t		pset)
 {
-	register int	i, ncpus;
-
-	ncpus = pset->processor_count;
+	register int	i, count = pset->processor_count;
 
-	for (i=1; i <= ncpus; i++)
-		pset->machine_quanta[i] = (ncpus + (i / 2)) / i;
+	for (i = 1; i <= count; i++)
+		pset->quantum_factors[i] = (count + (i / 2)) / i;
 
-	pset->machine_quanta[0] = pset->machine_quanta[1];
+	pset->quantum_factors[0] = pset->quantum_factors[1];
 
-	i = (pset->runq.count > ncpus)? ncpus: pset->runq.count;
-	pset->set_quanta = pset->machine_quanta[i];
+	timeshare_quanta_update(pset);
 }
 
 kern_return_t
@@ -760,10 +761,8 @@ processor_set_statistics(
 	pset_lock(pset);
 	load_info->task_count = pset->task_count;
 	load_info->thread_count = pset->thread_count;
-	simple_lock(&pset->processors_lock);
 	load_info->mach_factor = pset->mach_factor;
 	load_info->load_average = pset->load_average;
-	simple_unlock(&pset->processors_lock);
 	pset_unlock(pset);
 
 	*count = PROCESSOR_SET_LOAD_INFO_COUNT;
@@ -884,13 +883,17 @@ processor_set_things(
 		task_t task;
 
 		for (i = 0, task = (task_t) queue_first(&pset->tasks);
-				i < actual;
-				i++, task = (task_t) queue_next(&task->pset_tasks)) {
-			/* take ref for convert_task_to_port */
-			task_reference(task);
-			tasks[i] = task;
+				!queue_end(&pset->tasks, (queue_entry_t) task);
+				task = (task_t) queue_next(&task->pset_tasks)) {
+
+			task_lock(task);
+			if (task->ref_count > 0) {
+				/* take ref for convert_task_to_port */
+				task_reference_locked(task);
+				tasks[i++] = task;
+			}
+			task_unlock(task);
 		}
-		assert(queue_end(&pset->tasks, (queue_entry_t) task));
 		break;
 	    }
 
@@ -898,33 +901,32 @@ processor_set_things(
 		thread_act_t *thr_acts = (thread_act_t *) addr;
 		thread_t thread;
 		thread_act_t thr_act;
-		queue_head_t *list;
 
-		list = &pset->threads;
-		thread = (thread_t) queue_first(list);
-		i = 0;
-		while (i < actual && !queue_end(list, (queue_entry_t)thread)) {
+		for (i = 0, thread = (thread_t) queue_first(&pset->threads);
+				!queue_end(&pset->threads, (queue_entry_t)thread);
+				thread = (thread_t) queue_next(&thread->pset_threads)) {
+
 			thr_act = thread_lock_act(thread);
-			if (thr_act && thr_act->ref_count > 0) {
+			if (thr_act && thr_act->act_ref_count > 0) {
 				/* take ref for convert_act_to_port */
-				act_locked_act_reference(thr_act);
-				thr_acts[i] = thr_act;
-				i++;
+				act_reference_locked(thr_act);
+				thr_acts[i++] = thr_act;
 			}
 			thread_unlock_act(thread);
-			thread = (thread_t) queue_next(&thread->pset_threads);
-		}
-		if (i < actual) {
-			actual = i;
-			size_needed = actual * sizeof(mach_port_t);
 		}
 		break;
-	}
+	    }
 	}
-	
+
 	/* can unlock processor set now that we have the task/thread refs */
 	pset_unlock(pset);
 
+	if (i < actual) {
+		actual = i;
+		size_needed = actual * sizeof(mach_port_t);
+	}
+	assert(i == actual);
+
 	if (actual == 0) {
 		/* no things, so return null pointer and deallocate memory */
 		*thing_list = 0;
@@ -950,10 +952,10 @@ processor_set_things(
 		}
 
 	    case THING_THREAD: {
-		thread_t *threads = (thread_t *) addr;
+		thread_act_t *acts = (thread_act_t *) addr;
 
 		for (i = 0; i < actual; i++)
-			thread_deallocate(threads[i]);
+			act_deallocate(acts[i]);
 		break;
 	    }
 	}
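
The arithmetic that pset_quanta_setup() (and the old pset_quanta_set()) performs can be exercised in isolation. The sketch below is not kernel code and is only a reading aid: it mirrors the factor formula (count + (i / 2)) / i visible in the hunk above, and the index clamp described in the comment and spelled out in the removed line "i = (pset->runq.count > ncpus)? ncpus: pset->runq.count;". In the new code that clamp moves into timeshare_quanta_update(), whose body is not part of this diff, so treating it as the same clamp is an assumption. The names quanta_setup, quantum_for_load, and NCPUS_EXAMPLE are made up for illustration.

#include <stdio.h>

#define NCPUS_EXAMPLE	4	/* hypothetical processor count for the demo */

/* Mirrors the loop in pset_quanta_setup(): factors[i] is the rounded
 * ratio of processors to runnable timeshare threads, never below 1,
 * with factors[0] aliased to factors[1]. */
static void
quanta_setup(int count, int factors[])
{
	int i;

	for (i = 1; i <= count; i++)
		factors[i] = (count + (i / 2)) / i;
	factors[0] = factors[1];
}

/* Mirrors the indexing described in the comment: the run-queue depth
 * selects the factor, limited to the number of processors in the set. */
static int
quantum_for_load(int runq_count, int count, const int factors[])
{
	int i = (runq_count > count) ? count : runq_count;

	return factors[i];
}

int
main(void)
{
	int factors[NCPUS_EXAMPLE + 1];
	int load;

	quanta_setup(NCPUS_EXAMPLE, factors);
	for (load = 0; load <= 6; load++)
		printf("runq depth %d -> quantum factor %d\n",
		    load, quantum_for_load(load, NCPUS_EXAMPLE, factors));
	return 0;
}

With four processors the table comes out as 4, 4, 2, 1, 1 for run-queue depths 0 through 4 (and stays at 1 beyond that, because of the clamp), which is the load-based timesharing behaviour the comment describes: the fewer runnable timeshare threads there are relative to processors, the more consecutive quanta each thread receives.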