X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d7e50217d7adf6e52786a38bcaa4cd698cb9a79e..4d15aeb193b2c68f1d38666c317f8d3734f5f083:/osfmk/kern/machine.c

diff --git a/osfmk/kern/machine.c b/osfmk/kern/machine.c
index bb76bcf96..30d213539 100644
--- a/osfmk/kern/machine.c
+++ b/osfmk/kern/machine.c
@@ -1,16 +1,19 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -20,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -60,129 +63,87 @@
  * Support for machine independent machine abstraction.
  */

-#include <cpus.h>
-
 #include <string.h>
+
+#include <mach/mach_types.h>
 #include <mach/boolean.h>
 #include <mach/kern_return.h>
-#include <mach/mach_types.h>
 #include <mach/machine.h>
 #include <mach/host_info.h>
 #include <mach/host_reboot.h>
+#include <mach/vm_param.h>
+#include <mach/processor_info.h>
+
+#include <kern/kern_types.h>
 #include <kern/counters.h>
 #include <kern/cpu_data.h>
 #include <kern/ipc_host.h>
 #include <kern/host.h>
-#include <kern/lock.h>
 #include <kern/machine.h>
+#include <kern/misc_protos.h>
 #include <kern/processor.h>
 #include <kern/queue.h>
 #include <kern/sched.h>
 #include <kern/task.h>
 #include <kern/thread.h>
-#include <kern/thread_swap.h>
-#include <kern/misc_protos.h>
-#include <kern/mk_sp.h>
+#include <machine/commpage.h>
+
+#if HIBERNATION
+#include <IOKit/IOHibernatePrivate.h>
+#endif
+#include <IOKit/IOPlatformExpert.h>
+
+#if CONFIG_DTRACE
+extern void (*dtrace_cpu_state_changed_hook)(int, boolean_t);
+#endif

 /*
  * Exported variables:
  */

 struct machine_info machine_info;
-struct machine_slot machine_slot[NCPUS];
-
-static queue_head_t processor_action_queue;
-static boolean_t processor_action_active;
-static thread_call_t processor_action_call;
-static thread_call_data_t processor_action_call_data;
-decl_simple_lock_data(static,processor_action_lock)
-
-thread_t machine_wake_thread;

 /* Forwards */
-processor_set_t processor_request_action(
-    processor_t processor,
-    processor_set_t new_pset);
-
-void processor_doaction(
-    processor_t processor);
-
 void processor_doshutdown(
     processor_t processor);

 /*
- * cpu_up:
+ * processor_up:
  *
- * Flag specified cpu as up and running. Called when a processor comes
- * online.
+ * Flag processor as up and running, and available
+ * for scheduling.
  */
 void
-cpu_up(
-    int cpu)
+processor_up(
+    processor_t processor)
 {
-    processor_t processor = cpu_to_processor(cpu);
-    processor_set_t pset = &default_pset;
-    struct machine_slot *ms;
-    spl_t s;
-
-    /*
-     * Just twiddle our thumbs; we've got nothing better to do
-     * yet, anyway.
-     */
-    while (!simple_lock_try(&pset->processors_lock))
-        continue;
+    processor_set_t pset;
+    spl_t s;

     s = splsched();
-    processor_lock(processor);
     init_ast_check(processor);
-    ms = &machine_slot[cpu];
-    ms->running = TRUE;
-    machine_info.avail_cpus++;
-    pset_add_processor(pset, processor);
-    simple_lock(&pset->sched_lock);
+    pset = processor->processor_set;
+    pset_lock(pset);
+    ++pset->online_processor_count;
     enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
     processor->state = PROCESSOR_RUNNING;
-    simple_unlock(&pset->sched_lock);
-    processor_unlock(processor);
+    (void)hw_atomic_add(&processor_avail_count, 1);
+    commpage_update_active_cpus();
+    pset_unlock(pset);
+    ml_cpu_up();
     splx(s);

-    simple_unlock(&pset->processors_lock);
-}
-
-/*
- * cpu_down:
- *
- * Flag specified cpu as down. Called when a processor is about to
- * go offline.
- */
-void
-cpu_down(
-    int cpu)
-{
-    processor_t processor;
-    struct machine_slot *ms;
-    spl_t s;
-
-    processor = cpu_to_processor(cpu);
-
-    s = splsched();
-    processor_lock(processor);
-    ms = &machine_slot[cpu];
-    ms->running = FALSE;
-    machine_info.avail_cpus--;
-    /*
-     * processor has already been removed from pset.
-     */
-    processor->processor_set_next = PROCESSOR_SET_NULL;
-    processor->state = PROCESSOR_OFF_LINE;
-    processor_unlock(processor);
-    splx(s);
+#if CONFIG_DTRACE
+    if (dtrace_cpu_state_changed_hook)
+        (*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE);
+#endif
 }

+#include <atm/atm_internal.h>
+
 kern_return_t
 host_reboot(
-    host_priv_t host_priv,
+    host_priv_t host_priv,
     int options)
 {
     if (host_priv == HOST_PRIV_NULL)
@@ -190,289 +151,211 @@ host_reboot(

     assert(host_priv == &realhost);

+#if DEVELOPMENT || DEBUG
     if (options & HOST_REBOOT_DEBUGGER) {
         Debugger("Debugger");
+        return (KERN_SUCCESS);
     }
-    else
-        halt_all_cpus(!(options & HOST_REBOOT_HALT));
-
-    return (KERN_SUCCESS);
-}
-
-/*
- * processor_request_action:
- *
- * Common internals of processor_assign and processor_shutdown.
- * If new_pset is null, this is a shutdown, else it's an assign
- * and caller must donate a reference.
- * For assign operations, it returns an old pset that must be deallocated
- * if it's not NULL.
- * For shutdown operations, it always returns PROCESSOR_SET_NULL.
- */
-processor_set_t
-processor_request_action(
-    processor_t processor,
-    processor_set_t new_pset)
-{
-    processor_set_t pset, old_pset;
-
-    /*
-     * Processor must be in a processor set. Must lock its idle lock to
-     * get at processor state.
-     */
-    pset = processor->processor_set;
-    simple_lock(&pset->sched_lock);
-
-    /*
-     * If the processor is dispatching, let it finish - it will set its
-     * state to running very soon.
-     */
-    while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING) {
-        simple_unlock(&pset->sched_lock);
-
-        simple_lock(&pset->sched_lock);
-    }
-
-    assert( processor->state == PROCESSOR_IDLE    ||
-        processor->state == PROCESSOR_RUNNING     ||
-        processor->state == PROCESSOR_ASSIGN      );
-
-    /*
-     * Now lock the action queue and do the dirty work.
-     */
-    simple_lock(&processor_action_lock);
-
-    if (processor->state == PROCESSOR_IDLE) {
-        remqueue(&pset->idle_queue, (queue_entry_t)processor);
-        pset->idle_count--;
-    }
-    else
-    if (processor->state == PROCESSOR_RUNNING)
-        remqueue(&pset->active_queue, (queue_entry_t)processor);
-
-    if (processor->state != PROCESSOR_ASSIGN)
-        enqueue_tail(&processor_action_queue, (queue_entry_t)processor);
-
-    /*
-     * And ask the action_thread to do the work.
-     */
-    if (new_pset != PROCESSOR_SET_NULL) {
-        processor->state = PROCESSOR_ASSIGN;
-        old_pset = processor->processor_set_next;
-        processor->processor_set_next = new_pset;
-    }
-    else {
-        processor->state = PROCESSOR_SHUTDOWN;
-        old_pset = PROCESSOR_SET_NULL;
-    }
-
-    simple_unlock(&pset->sched_lock);
-
-    if (processor_action_active) {
-        simple_unlock(&processor_action_lock);
-
-        return (old_pset);
-    }
-
-    processor_action_active = TRUE;
-    simple_unlock(&processor_action_lock);
+#endif

-    processor_unlock(processor);
+    if (options & HOST_REBOOT_UPSDELAY) {
+        // UPS power cutoff path
+        PEHaltRestart( kPEUPSDelayHaltCPU );
+    } else {
+        halt_all_cpus(!(options & HOST_REBOOT_HALT));
+    }

-    thread_call_enter(processor_action_call);
-    processor_lock(processor);
-
-    return (old_pset);
+    return (KERN_SUCCESS);
 }

 kern_return_t
 processor_assign(
-    processor_t processor,
-    processor_set_t new_pset,
-    boolean_t wait)
+    __unused processor_t processor,
+    __unused processor_set_t new_pset,
+    __unused boolean_t wait)
 {
-#ifdef lint
-    processor++; new_pset++; wait++;
-#endif /* lint */
     return (KERN_FAILURE);
 }

-/*
- * processor_shutdown() queues a processor up for shutdown.
- * Any assignment in progress is overriden.
- */
 kern_return_t
 processor_shutdown(
-    processor_t processor)
+    processor_t processor)
 {
-    spl_t s;
+    processor_set_t pset;
+    spl_t s;

     s = splsched();
-    processor_lock(processor);
-    if (    processor->state == PROCESSOR_OFF_LINE    ||
-        processor->state == PROCESSOR_SHUTDOWN    ) {
+    pset = processor->processor_set;
+    pset_lock(pset);
+    if (processor->state == PROCESSOR_OFF_LINE) {
         /*
-         * Already shutdown or being shutdown -- nothing to do.
+         * Success if already shutdown.
         */
-        processor_unlock(processor);
+        pset_unlock(pset);
         splx(s);

         return (KERN_SUCCESS);
     }

-    processor_request_action(processor, PROCESSOR_SET_NULL);
-
-    assert_wait((event_t)processor, THREAD_UNINT);
-
-    processor_unlock(processor);
-    splx(s);
+    if (processor->state == PROCESSOR_START) {
+        /*
+         * Failure if currently being started.
+         */
+        pset_unlock(pset);
+        splx(s);

-    thread_block(THREAD_CONTINUE_NULL);
+        return (KERN_FAILURE);
+    }

-    return (KERN_SUCCESS);
-}
+    /*
+     * If the processor is dispatching, let it finish.
+     */
+    while (processor->state == PROCESSOR_DISPATCHING) {
+        pset_unlock(pset);
+        splx(s);
+        delay(1);
+        s = splsched();
+        pset_lock(pset);
+    }

-/*
- * processor_action() shuts down processors or changes their assignment.
- */
-static void
-_processor_action(
-    thread_call_param_t p0,
-    thread_call_param_t p1)
-{
-    register processor_t processor;
-    spl_t s;
+    /*
+     * Success if already being shutdown.
+     */
+    if (processor->state == PROCESSOR_SHUTDOWN) {
+        pset_unlock(pset);
+        splx(s);

-    s = splsched();
-    simple_lock(&processor_action_lock);
+        return (KERN_SUCCESS);
+    }

-    while (!queue_empty(&processor_action_queue)) {
-        processor = (processor_t)dequeue_head(&processor_action_queue);
-        simple_unlock(&processor_action_lock);
-        splx(s);
+    if (processor->state == PROCESSOR_IDLE)
+        remqueue((queue_entry_t)processor);
+    else
+    if (processor->state == PROCESSOR_RUNNING)
+        remqueue((queue_entry_t)processor);

-        processor_doaction(processor);
+    processor->state = PROCESSOR_SHUTDOWN;

-        s = splsched();
-        simple_lock(&processor_action_lock);
-    }
+    pset_unlock(pset);

-    processor_action_active = FALSE;
-    simple_unlock(&processor_action_lock);
+    processor_doshutdown(processor);
     splx(s);
-}

-void
-processor_action(void)
-{
-    queue_init(&processor_action_queue);
-    simple_lock_init(&processor_action_lock, ETAP_THREAD_ACTION);
-    processor_action_active = FALSE;
+    cpu_exit_wait(processor->cpu_id);

-    thread_call_setup(&processor_action_call_data, _processor_action, NULL);
-    processor_action_call = &processor_action_call_data;
+    return (KERN_SUCCESS);
 }

 /*
- * processor_doaction actually does the shutdown. The trick here
- * is to schedule ourselves onto a cpu and then save our
- * context back into the runqs before taking out the cpu.
+ * Called with interrupts disabled.
  */
 void
-processor_doaction(
-    processor_t processor)
+processor_doshutdown(
+    processor_t processor)
 {
-    thread_t self = current_thread();
-    processor_set_t pset;
-    thread_t old_thread;
-    spl_t s;
+    thread_t old_thread, self = current_thread();
+    processor_t prev;
+    processor_set_t pset;

     /*
      * Get onto the processor to shutdown
      */
-    thread_bind(self, processor);
+    prev = thread_bind(processor);
     thread_block(THREAD_CONTINUE_NULL);

-    pset = processor->processor_set;
-    simple_lock(&pset->processors_lock);
-
-    if (pset->processor_count == 1) {
-        thread_t thread;
-        extern void start_cpu_thread(void);
-
-        simple_unlock(&pset->processors_lock);
-
-        /*
-         * Create the thread, and point it at the routine.
-         */
-        thread = kernel_thread_with_priority(
-                    kernel_task, MAXPRI_KERNEL,
-                    start_cpu_thread, TRUE, FALSE);
+    assert(processor->state == PROCESSOR_SHUTDOWN);

-        disable_preemption();
+#if CONFIG_DTRACE
+    if (dtrace_cpu_state_changed_hook)
+        (*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
+#endif

-        s = splsched();
-        thread_lock(thread);
-        machine_wake_thread = thread;
-        thread_go_locked(thread, THREAD_AWAKENED);
-        (void)rem_runq(thread);
-        thread_unlock(thread);
-        splx(s);
+    ml_cpu_down();

-        simple_lock(&pset->processors_lock);
-        enable_preemption();
+#if HIBERNATION
+    if (processor_avail_count < 2) {
+        hibernate_vm_lock();
+        hibernate_vm_unlock();
     }
+#endif

-    s = splsched();
-    processor_lock(processor);
+    pset = processor->processor_set;
+    pset_lock(pset);
+    processor->state = PROCESSOR_OFF_LINE;
+    --pset->online_processor_count;
+    (void)hw_atomic_sub(&processor_avail_count, 1);
+    commpage_update_active_cpus();
+    SCHED(processor_queue_shutdown)(processor);
+    /* pset lock dropped */

     /*
-     * Do shutdown, make sure we live when processor dies.
+     * Continue processor shutdown in shutdown context.
+     *
+     * We save the current context in machine_processor_shutdown in such a way
+     * that when this thread is next invoked it will return from here instead of
+     * from the machine_switch_context() in thread_invoke like a normal context switch.
+     *
+     * As such, 'old_thread' is neither the idle thread nor the current thread - it's whatever
+     * thread invoked back to this one. (Usually, it's another processor's idle thread.)
+     *
+     * TODO: Make this a real thread_run of the idle_thread, so we don't have to keep this in sync
+     * with thread_invoke.
      */
-    if (processor->state != PROCESSOR_SHUTDOWN) {
-        panic("action_thread -- bad processor state");
-    }
+    thread_bind(prev);
+    old_thread = machine_processor_shutdown(self, processor_offline, processor);

-    pset_remove_processor(pset, processor);
-    processor_unlock(processor);
-    simple_unlock(&pset->processors_lock);
-
-    /*
-     * Clean up.
-     */
-    thread_bind(self, PROCESSOR_NULL);
-    self->continuation = 0;
-    old_thread = switch_to_shutdown_context(self,
-                    processor_doshutdown, processor);
-    if (processor != current_processor())
-        timer_call_shutdown(processor);
-    thread_dispatch(old_thread);
-    thread_wakeup((event_t)processor);
-    splx(s);
+    thread_dispatch(old_thread, self);
 }

 /*
- * Actually do the processor shutdown. This is called at splsched,
- * running on the processor's shutdown stack.
+ * Complete the shutdown and place the processor offline.
+ *
+ * Called at splsched in the shutdown context.
+ * This performs a minimal thread_invoke() to the idle thread,
+ * so it needs to be kept in sync with what thread_invoke() does.
+ *
+ * The onlining half of this is done in load_context().
  */
-
 void
-processor_doshutdown(
-    processor_t processor)
+processor_offline(
+    processor_t processor)
 {
-    register thread_t old_thread = current_thread();
-    register int cpu = processor->slot_num;
+    assert(processor == current_processor());
+    assert(processor->active_thread == current_thread());

-    timer_call_cancel(&processor->quantum_timer);
-    timer_switch(&kernel_timer[cpu]);
-    thread_machine_set_current(processor->idle_thread);
-    thread_dispatch(old_thread);
+    thread_t old_thread = processor->active_thread;
+    thread_t new_thread = processor->idle_thread;
+
+    processor->active_thread = new_thread;
+    processor->current_pri = IDLEPRI;
+    processor->current_thmode = TH_MODE_NONE;
+    processor->starting_pri = IDLEPRI;
+    processor->current_sfi_class = SFI_CLASS_KERNEL;
+    processor->deadline = UINT64_MAX;
+    new_thread->last_processor = processor;
+
+    uint64_t ctime = mach_absolute_time();
+
+    processor->last_dispatch = ctime;
+    old_thread->last_run_time = ctime;
+
+    /* Update processor->thread_timer and ->kernel_timer to point to the new thread */
+    thread_timer_event(ctime, &new_thread->system_timer);
+    PROCESSOR_DATA(processor, kernel_timer) = &new_thread->system_timer;
+
+    timer_stop(PROCESSOR_DATA(processor, current_state), ctime);
+
+    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+        MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
+        old_thread->reason, (uintptr_t)thread_tid(new_thread),
+        old_thread->sched_pri, new_thread->sched_pri, 0);
+
+    machine_set_current_thread(new_thread);
+
+    thread_dispatch(old_thread, new_thread);
+
+    PMAP_DEACTIVATE_KERNEL(processor->cpu_id);

-    /*
-     * OK, now exit this cpu.
-     */
-    PMAP_DEACTIVATE_KERNEL(cpu);
-    cpu_down(cpu);
     cpu_sleep();
     panic("zombie processor");
     /*NOTREACHED*/
@@ -483,11 +366,7 @@ host_get_boot_info(
     host_priv_t host_priv,
     kernel_boot_info_t boot_info)
 {
-    char *src = "";
-    extern char *machine_boot_info(
-            kernel_boot_info_t boot_info,
-            vm_size_t buf_len);
-
+    const char *src = "";
     if (host_priv == HOST_PRIV_NULL)
         return (KERN_INVALID_HOST);

@@ -503,3 +382,80 @@ host_get_boot_info(

     return (KERN_SUCCESS);
 }
+
+#if CONFIG_DTRACE
+#include <mach/sdt.h>
+#endif
+
+unsigned long long ml_io_read(uintptr_t vaddr, int size) {
+    unsigned long long result = 0;
+    unsigned char s1;
+    unsigned short s2;
+
+#if defined(__x86_64__)
+    uint64_t sabs, eabs;
+    boolean_t istate, timeread = FALSE;
+#if DEVELOPMENT || DEBUG
+    pmap_verify_noncacheable(vaddr);
+#endif /* x86_64 DEVELOPMENT || DEBUG */
+    if (__improbable(reportphyreaddelayabs != 0)) {
+        istate = ml_set_interrupts_enabled(FALSE);
+        sabs = mach_absolute_time();
+        timeread = TRUE;
+    }
+#endif /* x86_64 */
+
+    switch (size) {
+    case 1:
+        s1 = *(volatile unsigned char *)vaddr;
+        result = s1;
+        break;
+    case 2:
+        s2 = *(volatile unsigned short *)vaddr;
+        result = s2;
+        break;
+    case 4:
+        result = *(volatile unsigned int *)vaddr;
+        break;
+    case 8:
+        result = *(volatile unsigned long long *)vaddr;
+        break;
+    default:
+        panic("Invalid size %d for ml_io_read(%p)\n", size, (void *)vaddr);
+        break;
+    }
+
+#if defined(__x86_64__)
+    if (__improbable(timeread == TRUE)) {
+        eabs = mach_absolute_time();
+        (void)ml_set_interrupts_enabled(istate);
+
+        if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
+            if (phyreadpanic) {
+                panic("Read from IO virtual addr 0x%lx took %llu ns, result: 0x%llx (start: %llu, end: %llu), ceiling: %llu", vaddr, (eabs - sabs), result, sabs, eabs, reportphyreaddelayabs);
+            }
+#if CONFIG_DTRACE
+            DTRACE_PHYSLAT3(physread, uint64_t, (eabs - sabs),
+                uint64_t, vaddr, uint32_t, size);
+#endif /* CONFIG_DTRACE */
+        }
+    }
+#endif /* x86_64 */
+    return result;
+}
+
+unsigned int ml_io_read8(uintptr_t vaddr) {
+    return (unsigned) ml_io_read(vaddr, 1);
+}
+
+unsigned int ml_io_read16(uintptr_t vaddr) {
+    return (unsigned) ml_io_read(vaddr, 2);
+}
+
+unsigned int ml_io_read32(uintptr_t vaddr) {
+    return (unsigned) ml_io_read(vaddr, 4);
+}
+
+unsigned long long ml_io_read64(uintptr_t vaddr) {
+    return ml_io_read(vaddr, 8);
+}
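
The PROCESSOR_DISPATCHING loop in the new processor_shutdown() above shows a common kernel pattern: a transient state is waited out by dropping the lock (so the dispatching CPU can make progress), delaying briefly, then retaking the lock and rechecking. A minimal pthread sketch of the same shape, not kernel code; the names (set_lock, shutdown_when_settled, STATE_*) are hypothetical stand-ins, and usleep(1) plays the role of delay(1) executed with interrupts re-enabled between splx(s) and splsched():

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Hypothetical stand-ins for the PROCESSOR_* states in the diff. */
    enum { STATE_RUNNING, STATE_DISPATCHING, STATE_SHUTDOWN };

    static pthread_mutex_t set_lock = PTHREAD_MUTEX_INITIALIZER;
    static int state = STATE_DISPATCHING;

    /* The other side: finishes "dispatching" and settles into RUNNING. */
    static void *dispatcher(void *arg) {
        (void)arg;
        usleep(1000);
        pthread_mutex_lock(&set_lock);
        state = STATE_RUNNING;
        pthread_mutex_unlock(&set_lock);
        return NULL;
    }

    /* Wait out the transient state: drop the lock so the other side can
     * make progress, pause briefly, retake it, and recheck -- the shape
     * of the PROCESSOR_DISPATCHING loop in processor_shutdown(). */
    static void shutdown_when_settled(void) {
        pthread_mutex_lock(&set_lock);
        while (state == STATE_DISPATCHING) {
            pthread_mutex_unlock(&set_lock);
            usleep(1);          /* cf. delay(1) between splx(s)/splsched() */
            pthread_mutex_lock(&set_lock);
        }
        state = STATE_SHUTDOWN; /* safe: lock held and state has settled */
        pthread_mutex_unlock(&set_lock);
    }

    int main(void) {
        pthread_t t;
        pthread_create(&t, NULL, dispatcher, NULL);
        shutdown_when_settled();
        pthread_join(t, NULL);
        printf("state=%d (STATE_SHUTDOWN=%d)\n", state, STATE_SHUTDOWN);
        return 0;
    }

The point of dropping the lock inside the loop is liveness: holding it while polling would prevent the dispatching side from ever reaching the store that ends the transient state.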
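
For context on the timed-read guard that ml_io_read() introduces at the end of the diff: it brackets a volatile load with mach_absolute_time() samples (interrupts masked so the interval is not inflated) and reports via panic() or a DTrace probe when the read exceeds the reportphyreaddelayabs ceiling. A minimal userspace restatement of that guard, assuming hypothetical names (timed_read32, report_delay_ns) with clock_gettime standing in for mach_absolute_time; the interrupt masking and pmap_verify_noncacheable() check have no userspace equivalent:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical tunable playing the role of reportphyreaddelayabs. */
    static uint64_t report_delay_ns = 1000;

    static uint64_t now_ns(void) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
    }

    /* Timed 32-bit read: the same guard shape as ml_io_read(vaddr, 4). */
    static uint32_t timed_read32(volatile uint32_t *reg) {
        uint64_t sabs = now_ns();
        uint32_t result = *reg;     /* the access being measured */
        uint64_t eabs = now_ns();

        if (eabs - sabs > report_delay_ns)
            fprintf(stderr, "read of %p took %llu ns (ceiling %llu)\n",
                    (void *)reg, (unsigned long long)(eabs - sabs),
                    (unsigned long long)report_delay_ns);
        return result;
    }

    int main(void) {
        static volatile uint32_t fake_reg = 0xdeadbeef; /* stand-in for MMIO */
        printf("read 0x%x\n", timed_read32(&fake_reg));
        return 0;
    }

In the kernel version the slow path is diagnostic only: the read's result is always returned, and the ceiling merely decides whether to panic (phyreadpanic) or emit the physread DTrace probe, which is why the timing setup is guarded by __improbable().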