X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/de355530ae67247cbd0da700edb3a2a1dae884c2..2d21ac55c334faf3a56e5634905ed6987fc787d4:/osfmk/kern/machine.c

diff --git a/osfmk/kern/machine.c b/osfmk/kern/machine.c
index 0ef1b7e90..1e7d3e96f 100644
--- a/osfmk/kern/machine.c
+++ b/osfmk/kern/machine.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -57,129 +63,75 @@
  * Support for machine independent machine abstraction.
  */
 
-#include 
-
 #include 
+
+#include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
+#include 
+#include 
+
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
-#include 
-#include 
-#include 
+#if HIBERNATION
+#include 
+#endif
+#include 
 
 /*
  * Exported variables:
  */
 
 struct machine_info machine_info;
-struct machine_slot machine_slot[NCPUS];
-
-static queue_head_t processor_action_queue;
-static boolean_t processor_action_active;
-static thread_call_t processor_action_call;
-static thread_call_data_t processor_action_call_data;
-decl_simple_lock_data(static,processor_action_lock)
-
-thread_t machine_wake_thread;
 
 /* Forwards */
-processor_set_t processor_request_action(
-    processor_t processor,
-    processor_set_t new_pset);
-
-void processor_doaction(
-    processor_t processor);
-
 void processor_doshutdown(
     processor_t processor);
 
 /*
- * cpu_up:
+ * processor_up:
  *
- * Flag specified cpu as up and running. Called when a processor comes
- * online.
+ * Flag processor as up and running, and available
+ * for scheduling.
  */
 void
-cpu_up(
-    int cpu)
+processor_up(
+    processor_t processor)
 {
-    processor_t processor = cpu_to_processor(cpu);
-    processor_set_t pset = &default_pset;
-    struct machine_slot *ms;
-    spl_t s;
-
-    /*
-     * Just twiddle our thumbs; we've got nothing better to do
-     * yet, anyway.
-     */
-    while (!simple_lock_try(&pset->processors_lock))
-        continue;
+    processor_set_t pset;
+    spl_t s;
 
     s = splsched();
-    processor_lock(processor);
     init_ast_check(processor);
-    ms = &machine_slot[cpu];
-    ms->running = TRUE;
-    machine_info.avail_cpus++;
-    pset_add_processor(pset, processor);
-    simple_lock(&pset->sched_lock);
-    enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
+    pset = processor->processor_set;
+    pset_lock(pset);
+    pset->processor_count++;
+    enqueue_head(&pset->active_queue, (queue_entry_t)processor);
     processor->state = PROCESSOR_RUNNING;
-    simple_unlock(&pset->sched_lock);
-    processor_unlock(processor);
-    splx(s);
-
-    simple_unlock(&pset->processors_lock);
-}
-
-/*
- * cpu_down:
- *
- * Flag specified cpu as down. Called when a processor is about to
- * go offline.
- */
-void
-cpu_down(
-    int cpu)
-{
-    processor_t processor;
-    struct machine_slot *ms;
-    spl_t s;
-
-    processor = cpu_to_processor(cpu);
-
-    s = splsched();
-    processor_lock(processor);
-    ms = &machine_slot[cpu];
-    ms->running = FALSE;
-    machine_info.avail_cpus--;
-    /*
-     * processor has already been removed from pset.
-     */
-    processor->processor_set_next = PROCESSOR_SET_NULL;
-    processor->state = PROCESSOR_OFF_LINE;
-    processor_unlock(processor);
+    (void)hw_atomic_add(&processor_avail_count, 1);
+    pset_unlock(pset);
+    ml_cpu_up();
     splx(s);
 }
 
 kern_return_t
 host_reboot(
-    host_priv_t host_priv,
+    host_priv_t host_priv,
     int options)
 {
     if (host_priv == HOST_PRIV_NULL)
@@ -189,286 +141,191 @@ host_reboot(
     if (options & HOST_REBOOT_DEBUGGER) {
         Debugger("Debugger");
+        return (KERN_SUCCESS);
     }
-    else
-        halt_all_cpus(!(options & HOST_REBOOT_HALT));
-
-    return (KERN_SUCCESS);
-}
-
-/*
- * processor_request_action:
- *
- * Common internals of processor_assign and processor_shutdown.
- * If new_pset is null, this is a shutdown, else it's an assign
- * and caller must donate a reference.
- * For assign operations, it returns an old pset that must be deallocated
- * if it's not NULL.
- * For shutdown operations, it always returns PROCESSOR_SET_NULL.
- */
-processor_set_t
-processor_request_action(
-    processor_t processor,
-    processor_set_t new_pset)
-{
-    processor_set_t pset, old_pset;
-
-    /*
-     * Processor must be in a processor set. Must lock its idle lock to
-     * get at processor state.
-     */
-    pset = processor->processor_set;
-    simple_lock(&pset->sched_lock);
-
-    /*
-     * If the processor is dispatching, let it finish - it will set its
-     * state to running very soon.
-     */
-    while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING) {
-        simple_unlock(&pset->sched_lock);
-
-        simple_lock(&pset->sched_lock);
-    }
-
-    assert( processor->state == PROCESSOR_IDLE ||
-            processor->state == PROCESSOR_RUNNING ||
-            processor->state == PROCESSOR_ASSIGN );
-
-    /*
-     * Now lock the action queue and do the dirty work.
-     */
-    simple_lock(&processor_action_lock);
-
-    if (processor->state == PROCESSOR_IDLE) {
-        remqueue(&pset->idle_queue, (queue_entry_t)processor);
-        pset->idle_count--;
-    }
-    else
-    if (processor->state == PROCESSOR_RUNNING)
-        remqueue(&pset->active_queue, (queue_entry_t)processor);
-
-    if (processor->state != PROCESSOR_ASSIGN)
-        enqueue_tail(&processor_action_queue, (queue_entry_t)processor);
-
-    /*
-     * And ask the action_thread to do the work.
-     */
-    if (new_pset != PROCESSOR_SET_NULL) {
-        processor->state = PROCESSOR_ASSIGN;
-        old_pset = processor->processor_set_next;
-        processor->processor_set_next = new_pset;
-    }
-    else {
-        processor->state = PROCESSOR_SHUTDOWN;
-        old_pset = PROCESSOR_SET_NULL;
-    }
-
-    simple_unlock(&pset->sched_lock);
-
-    if (processor_action_active) {
-        simple_unlock(&processor_action_lock);
-
-        return (old_pset);
-    }
-
-    processor_action_active = TRUE;
-    simple_unlock(&processor_action_lock);
-    processor_unlock(processor);
+    if (options & HOST_REBOOT_UPSDELAY) {
+        // UPS power cutoff path
+        PEHaltRestart( kPEUPSDelayHaltCPU );
+    } else {
+        halt_all_cpus(!(options & HOST_REBOOT_HALT));
+    }
 
-    thread_call_enter(processor_action_call);
-    processor_lock(processor);
-
-    return (old_pset);
+    return (KERN_SUCCESS);
 }
 
 kern_return_t
 processor_assign(
-    processor_t processor,
-    processor_set_t new_pset,
-    boolean_t wait)
+    __unused processor_t processor,
+    __unused processor_set_t new_pset,
+    __unused boolean_t wait)
 {
-#ifdef lint
-    processor++; new_pset++; wait++;
-#endif /* lint */
     return (KERN_FAILURE);
 }
 
-/*
- * processor_shutdown() queues a processor up for shutdown.
- * Any assignment in progress is overriden.
- */
 kern_return_t
 processor_shutdown(
-    processor_t processor)
+    processor_t processor)
 {
-    spl_t s;
+    processor_set_t pset;
+    spl_t s;
 
     s = splsched();
-    processor_lock(processor);
-    if ( processor->state == PROCESSOR_OFF_LINE ||
-         processor->state == PROCESSOR_SHUTDOWN ) {
+    pset = processor->processor_set;
+    pset_lock(pset);
+    if (processor->state == PROCESSOR_OFF_LINE) {
         /*
-         * Already shutdown or being shutdown -- nothing to do.
+         * Success if already shutdown.
          */
-        processor_unlock(processor);
+        pset_unlock(pset);
         splx(s);
 
         return (KERN_SUCCESS);
     }
 
-    processor_request_action(processor, PROCESSOR_SET_NULL);
-
-    assert_wait((event_t)processor, THREAD_UNINT);
-
-    processor_unlock(processor);
-    splx(s);
-
-    thread_block(THREAD_CONTINUE_NULL);
-
-    return (KERN_SUCCESS);
-}
+    if (processor->state == PROCESSOR_START) {
+        /*
+         * Failure if currently being started.
+         */
+        pset_unlock(pset);
+        splx(s);
 
-/*
- * processor_action() shuts down processors or changes their assignment.
- */
-static void
-_processor_action(
-    thread_call_param_t p0,
-    thread_call_param_t p1)
-{
-    register processor_t processor;
-    spl_t s;
+        return (KERN_FAILURE);
+    }
 
-    s = splsched();
-    simple_lock(&processor_action_lock);
+    /*
+     * If the processor is dispatching, let it finish.
+     */
+    while (processor->state == PROCESSOR_DISPATCHING) {
+        pset_unlock(pset);
+        delay(1);
+        pset_lock(pset);
+    }
 
-    while (!queue_empty(&processor_action_queue)) {
-        processor = (processor_t)dequeue_head(&processor_action_queue);
-        simple_unlock(&processor_action_lock);
+    /*
+     * Success if already being shutdown.
+     */
+    if (processor->state == PROCESSOR_SHUTDOWN) {
+        pset_unlock(pset);
         splx(s);
 
-        processor_doaction(processor);
+        return (KERN_SUCCESS);
+    }
 
-        s = splsched();
-        simple_lock(&processor_action_lock);
+    if (processor->state == PROCESSOR_IDLE) {
+        remqueue(&pset->idle_queue, (queue_entry_t)processor);
+        pset->idle_count--;
     }
+    else
+    if (processor->state == PROCESSOR_RUNNING)
+        remqueue(&pset->active_queue, (queue_entry_t)processor);
+    else
+        panic("processor_shutdown");
+
+    processor->state = PROCESSOR_SHUTDOWN;
 
-    processor_action_active = FALSE;
-    simple_unlock(&processor_action_lock);
+    pset_unlock(pset);
+
+    processor_doshutdown(processor);
     splx(s);
-}
 
-void
-processor_action(void)
-{
-    queue_init(&processor_action_queue);
-    simple_lock_init(&processor_action_lock, ETAP_THREAD_ACTION);
-    processor_action_active = FALSE;
+    cpu_exit_wait(PROCESSOR_DATA(processor, slot_num));
 
-    thread_call_setup(&processor_action_call_data, _processor_action, NULL);
-    processor_action_call = &processor_action_call_data;
+    return (KERN_SUCCESS);
 }
 
 /*
- * processor_doaction actually does the shutdown. The trick here
- * is to schedule ourselves onto a cpu and then save our
- * context back into the runqs before taking out the cpu.
+ * Called at splsched.
  */
 void
-processor_doaction(
-    processor_t processor)
+processor_doshutdown(
+    processor_t processor)
 {
-    thread_t self = current_thread();
-    processor_set_t pset;
-    thread_t old_thread;
-    spl_t s;
+    thread_t old_thread, self = current_thread();
+    processor_t prev;
 
     /*
     * Get onto the processor to shutdown
     */
-    thread_bind(self, processor);
+    prev = thread_bind(processor);
     thread_block(THREAD_CONTINUE_NULL);
 
-    pset = processor->processor_set;
-    simple_lock(&pset->processors_lock);
+#if HIBERNATION
+    if (processor_avail_count < 2)
+        hibernate_vm_lock();
+#endif
 
-    if (pset->processor_count == 1) {
-        thread_t thread;
-        extern void start_cpu_thread(void);
+    assert(processor->state == PROCESSOR_SHUTDOWN);
 
-        simple_unlock(&pset->processors_lock);
-
-        /*
-         * Create the thread, and point it at the routine.
-         */
-        thread = kernel_thread_with_priority(
-                     kernel_task, MAXPRI_KERNEL,
-                     start_cpu_thread, TRUE, FALSE);
-
-        disable_preemption();
-
-        s = splsched();
-        thread_lock(thread);
-        machine_wake_thread = thread;
-        thread_go_locked(thread, THREAD_AWAKENED);
-        (void)rem_runq(thread);
-        thread_unlock(thread);
-        splx(s);
-
-        simple_lock(&pset->processors_lock);
-        enable_preemption();
-    }
-
-    s = splsched();
-    processor_lock(processor);
+#if HIBERNATION
+    if (processor_avail_count < 2)
+        hibernate_vm_unlock();
+#endif
 
     /*
-     * Do shutdown, make sure we live when processor dies.
+     * Continue processor shutdown in shutdown context.
     */
-    if (processor->state != PROCESSOR_SHUTDOWN) {
-        panic("action_thread -- bad processor state");
-    }
+    thread_bind(prev);
+    old_thread = machine_processor_shutdown(self, processor_offline, processor);
 
-    pset_remove_processor(pset, processor);
-    processor_unlock(processor);
-    simple_unlock(&pset->processors_lock);
+    thread_dispatch(old_thread, self);
 
     /*
-     * Clean up.
+     * If we just shutdown another processor, move any
+     * threads and timer call outs to the current processor.
     */
-    thread_bind(self, PROCESSOR_NULL);
-    self->continuation = 0;
-    old_thread = switch_to_shutdown_context(self,
-                     processor_doshutdown, processor);
-    if (processor != current_processor())
-        timer_call_shutdown(processor);
-    thread_dispatch(old_thread);
-    thread_wakeup((event_t)processor);
-    splx(s);
+    if (processor != current_processor()) {
+        processor_set_t pset = processor->processor_set;
+
+        pset_lock(pset);
+
+        if (processor->state == PROCESSOR_OFF_LINE || processor->state == PROCESSOR_SHUTDOWN) {
+            timer_call_shutdown(processor);
+            processor_queue_shutdown(processor);
+            return;
+        }
+
+        pset_unlock(pset);
+    }
 }
 
 /*
- * Actually do the processor shutdown. This is called at splsched,
- * running on the processor's shutdown stack.
+ * Complete the shutdown and place the processor offline.
+ *
+ * Called at splsched in the shutdown context.
  */
-
 void
-processor_doshutdown(
-    processor_t processor)
+processor_offline(
+    processor_t processor)
 {
-    register int cpu = processor->slot_num;
+    thread_t new_thread, old_thread = processor->active_thread;
+    processor_set_t pset;
 
-    timer_call_cancel(&processor->quantum_timer);
-    thread_dispatch(current_thread());
-    timer_switch(&kernel_timer[cpu]);
+    new_thread = processor->idle_thread;
+    processor->active_thread = new_thread;
+    processor->current_pri = IDLEPRI;
+    processor->deadline = UINT64_MAX;
+    new_thread->last_processor = processor;
+
+    processor->last_dispatch = mach_absolute_time();
+    timer_stop(PROCESSOR_DATA(processor, thread_timer), processor->last_dispatch);
+
+    machine_set_current_thread(new_thread);
+
+    thread_dispatch(old_thread, new_thread);
+
+    PMAP_DEACTIVATE_KERNEL(PROCESSOR_DATA(processor, slot_num));
+
+    pset = processor->processor_set;
+    pset_lock(pset);
+    pset->processor_count--;
+    processor->state = PROCESSOR_OFF_LINE;
+    if (processor == pset->low_hint)
+        pset->low_hint = PROCESSOR_NULL;
+    (void)hw_atomic_sub(&processor_avail_count, 1);
+    pset_unlock(pset);
+    ml_cpu_down();
 
-    /*
-     * OK, now exit this cpu.
-     */
-    PMAP_DEACTIVATE_KERNEL(cpu);
-    thread_machine_set_current(processor->idle_thread);
-    cpu_down(cpu);
     cpu_sleep();
     panic("zombie processor");
     /*NOTREACHED*/
@@ -479,11 +336,7 @@ host_get_boot_info(
     host_priv_t host_priv,
     kernel_boot_info_t boot_info)
 {
-    char *src = "";
-    extern char *machine_boot_info(
-                kernel_boot_info_t boot_info,
-                vm_size_t buf_len);
-
+    const char *src = "";
     if (host_priv == HOST_PRIV_NULL)
         return (KERN_INVALID_HOST);