X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/21362eb3e66fd2c787aee132bce100a44d71a99c..d9a64523371fa019c4575bb400cbbc3a50ac9903:/osfmk/i386/mp.h

diff --git a/osfmk/i386/mp.h b/osfmk/i386/mp.h
index 30a6d941b..705f41c18 100644
--- a/osfmk/i386/mp.h
+++ b/osfmk/i386/mp.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -58,82 +58,36 @@
  */
 
 #ifdef KERNEL_PRIVATE
 
-#ifndef _I386AT_MP_H_
-#define _I386AT_MP_H_
+#ifndef _I386_MP_H_
+#define _I386_MP_H_
 
-#ifndef DEBUG
-#include 
-#endif
 //#define MP_DEBUG 1
 
 #include 
 #include 
 
-#define LAPIC_ID_MAX	(LAPIC_ID_MASK)
-
-#define MAX_CPUS	(LAPIC_ID_MAX + 1)
+#define MAX_CPUS	64	/* 8 * sizeof(cpumask_t) */
 
 #ifndef ASSEMBLER
+#include 
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 
 
 __BEGIN_DECLS
 
 extern kern_return_t intel_startCPU(int slot_num);
+extern kern_return_t intel_startCPU_fast(int slot_num);
 extern void i386_init_slave(void);
+extern void i386_init_slave_fast(void);
 extern void smp_init(void);
 
 extern void cpu_interrupt(int cpu);
-
-extern void lapic_init(void);
-extern void lapic_shutdown(void);
-extern void lapic_smm_restore(void);
-extern boolean_t lapic_probe(void);
-extern void lapic_dump(void);
-extern int lapic_interrupt(int interrupt, void *state);
-extern void lapic_end_of_interrupt(void);
-extern int lapic_to_cpu[];
-extern int cpu_to_lapic[];
-extern int lapic_interrupt_base;
-extern void lapic_cpu_map(int lapic, int cpu_num);
-
-extern void lapic_set_timer(
-	boolean_t		interrupt,
-	lapic_timer_mode_t	mode,
-	lapic_timer_divide_t	divisor,
-	lapic_timer_count_t	initial_count);
-
-extern void lapic_get_timer(
-	lapic_timer_mode_t	*mode,
-	lapic_timer_divide_t	*divisor,
-	lapic_timer_count_t	*initial_count,
-	lapic_timer_count_t	*current_count);
-
-typedef void (*i386_intr_func_t)(void *);
-extern void lapic_set_timer_func(i386_intr_func_t func);
-extern void lapic_set_pmi_func(i386_intr_func_t func);
-
 __END_DECLS
 
-#endif /* ASSEMBLER */
-
-#define CPU_NUMBER(r)				\
-	movl	%gs:CPU_NUMBER_GS,r
-
-#define CPU_NUMBER_FROM_LAPIC(r)		\
-	movl	EXT(lapic_id),r;		\
-	movl	0(r),r;				\
-	shrl	$(LAPIC_ID_SHIFT),r;		\
-	andl	$(LAPIC_ID_MASK),r;		\
-	movl	EXT(lapic_to_cpu)(,r,4),r
-
-
-/* word describing the reason for the interrupt, one per cpu */
-
-#ifndef ASSEMBLER
-#include 
-
 extern unsigned int real_ncpus;		/* real number of cpus */
 extern unsigned int max_ncpus;		/* max number of cpus */
 
 decl_simple_lock_data(extern,kdb_lock)	/* kdb lock */
@@ -146,20 +100,114 @@ extern void console_cpu_free(void *console_buf);
 
 extern int kdb_cpu;		/* current cpu running kdb */
 extern int kdb_debug;
-extern int kdb_is_slave[];
 extern int kdb_active[];
 
 extern volatile boolean_t mp_kdp_trap;
-extern void mp_kdp_enter(void);
-extern void mp_kdp_exit(void);
+extern volatile boolean_t mp_kdp_is_NMI;
+extern volatile boolean_t force_immediate_debugger_NMI;
+extern volatile boolean_t pmap_tlb_flush_timeout;
+extern volatile usimple_lock_t spinlock_timed_out;
+extern volatile uint32_t spinlock_owner_cpu;
+extern uint32_t spinlock_timeout_NMI(uintptr_t thread_addr);
+
+extern uint64_t LastDebuggerEntryAllowance;
+
+extern void mp_kdp_enter(boolean_t proceed_on_failure);
+extern void mp_kdp_exit(void);
+extern boolean_t mp_kdp_all_cpus_halted(void);
+
+extern boolean_t mp_recent_debugger_activity(void);
+extern void kernel_spin(uint64_t spin_ns);
 
 /*
  * All cpu rendezvous:
  */
-extern void mp_rendezvous(void (*setup_func)(void *),
-			  void (*action_func)(void *),
-			  void (*teardown_func)(void *),
-			  void *arg);
+extern void mp_rendezvous(
+		void (*setup_func)(void *),
+		void (*action_func)(void *),
+		void (*teardown_func)(void *),
+		void *arg);
+extern void mp_rendezvous_no_intrs(
+		void (*action_func)(void *),
+		void *arg);
+extern void mp_rendezvous_break_lock(void);
+extern void mp_rendezvous_lock(void);
+extern void mp_rendezvous_unlock(void);
+
+/*
+ * All cpu broadcast.
+ * Called from thread context, this blocks until all active cpus have
+ * run action_func:
+ */
+extern void mp_broadcast(
+		void (*action_func)(void *),
+		void *arg);
+#if MACH_KDP
+typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu);
+
+extern long kdp_x86_xcpu_invoke(const uint16_t lcpu,
+		kdp_x86_xcpu_func_t func,
+		void *arg0, void *arg1);
+typedef enum {KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t;
+#endif
+
+typedef uint32_t cpu_t;
+typedef volatile uint64_t cpumask_t;
+static inline cpumask_t
+cpu_to_cpumask(cpu_t cpu)
+{
+	return (cpu < MAX_CPUS) ? (1ULL << cpu) : 0;
+}
+#define CPUMASK_ALL	0xffffffffffffffffULL
+#define CPUMASK_SELF	cpu_to_cpumask(cpu_number())
+#define CPUMASK_OTHERS	(CPUMASK_ALL & ~CPUMASK_SELF)
+
+/* Initialation routing called at processor registration */
+extern void mp_cpus_call_cpu_init(int cpu);
+
+/*
+ * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
+ * The mask may include the local cpu.
+ * If the mode is:
+ *	- ASYNC:  other cpus make their calls in parallel
+ *	- SYNC:   the calls are performed serially in logical cpu order
+ *	- NOSYNC: the calls are queued
+ * Unless the mode is NOSYNC, mp_cpus_call() returns when the function has been
+ * called on all specified cpus.
+ * The return value is the number of cpus where the call was made or queued.
+ * The action function is called with interrupts disabled.
+ */
+extern cpu_t mp_cpus_call(
+		cpumask_t	cpus,
+		mp_sync_t	mode,
+		void		(*action_func)(void *),
+		void		*arg);
+extern cpu_t mp_cpus_call1(
+		cpumask_t	cpus,
+		mp_sync_t	mode,
+		void		(*action_func)(void *, void*),
+		void		*arg0,
+		void		*arg1,
+		cpumask_t	*cpus_calledp);
+
+typedef enum {
+	NONE = 0,
+	SPINLOCK_TIMEOUT,
+	TLB_FLUSH_TIMEOUT,
+	CROSSCALL_TIMEOUT,
+	INTERRUPT_WATCHDOG
+} NMI_reason_t;
+extern void NMIPI_panic(cpumask_t cpus, NMI_reason_t reason);
+
+/* Interrupt a set of cpus, forcing an exit out of non-root mode */
+extern void mp_cpus_kick(cpumask_t cpus);
+/*
+ * Power-management-specific SPI to:
+ *	- register a callout function, and
+ *	- request the callout (if registered) on a given cpu.
+ */
+extern void PM_interrupt_register(void (*fn)(void));
+extern void cpu_PM_interrupt(int cpu);
 
 __END_DECLS
 
@@ -214,7 +262,7 @@ extern cpu_signal_event_log_t *cpu_handle[];
 			    (vm_offset_t *) hdl_logpp,			\
 			    sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
 		panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
-	bzero(*sig_logpp, sizeof(cpu_signal_event_log_t));		\
+	bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t));		\
 }
 #else	/* MP_DEBUG */
 #define DBGLOG(log,_cpu,_event)
@@ -223,118 +271,18 @@ extern cpu_signal_event_log_t *cpu_handle[];
 
 #endif	/* ASSEMBLER */
 
-#define i_bit(bit, word)	((long)(*(word)) & ((long)1 << (bit)))
-
-
-/*
- * Device driver synchronization.
- *
- * at386_io_lock(op) and at386_io_unlock() are called
- * by device drivers when accessing H/W. The underlying
- * Processing is machine dependant. But the op argument
- * to the at386_io_lock is generic
- */
-
-#define MP_DEV_OP_MAX	4
-#define MP_DEV_WAIT	MP_DEV_OP_MAX	/* Wait for the lock */
+#ifdef	ASSEMBLER
+#define i_bit(bit, word)	((long)(*(word)) & (1L << (bit)))
+#else
+__attribute__((always_inline)) static inline long
+i_bit_impl(long word, long bit) {
+	long bitmask = 1L << bit;
+	return word & bitmask;
+}
+#define i_bit(bit, word)	i_bit_impl((long)(*(word)), bit)
+#endif
 
-/*
- * If the caller specifies an op value different than MP_DEV_WAIT, the
- * at386_io_lock function must return true if lock was successful else
- * false
- */
-
-#define MP_DEV_OP_START	0	/* If lock busy, register a pending start op */
-#define MP_DEV_OP_INTR	1	/* If lock busy, register a pending intr */
-#define MP_DEV_OP_TIMEO	2	/* If lock busy, register a pending timeout */
-#define MP_DEV_OP_CALLB	3	/* If lock busy, register a pending callback */
-
-#if MACH_RT
-#define _DISABLE_PREEMPTION			\
-	incl	%gs:CPU_PREEMPTION_LEVEL
-
-#define _ENABLE_PREEMPTION			\
-	decl	%gs:CPU_PREEMPTION_LEVEL ;	\
-	jne	9f ;				\
-	pushl	%eax ;				\
-	pushl	%ecx ;				\
-	pushl	%edx ;				\
-	call	EXT(kernel_preempt_check) ;	\
-	popl	%edx ;				\
-	popl	%ecx ;				\
-	popl	%eax ;				\
-9:
-
-#define _ENABLE_PREEMPTION_NO_CHECK		\
-	decl	%gs:CPU_PREEMPTION_LEVEL
-
-#if MACH_ASSERT
-#define DISABLE_PREEMPTION			\
-	pushl	%eax;				\
-	pushl	%ecx;				\
-	pushl	%edx;				\
-	call	EXT(_disable_preemption);	\
-	popl	%edx;				\
-	popl	%ecx;				\
-	popl	%eax
-#define ENABLE_PREEMPTION			\
-	pushl	%eax;				\
-	pushl	%ecx;				\
-	pushl	%edx;				\
-	call	EXT(_enable_preemption);	\
-	popl	%edx;				\
-	popl	%ecx;				\
-	popl	%eax
-#define ENABLE_PREEMPTION_NO_CHECK		\
-	pushl	%eax;				\
-	pushl	%ecx;				\
-	pushl	%edx;				\
-	call	EXT(_enable_preemption_no_check);	\
-	popl	%edx;				\
-	popl	%ecx;				\
-	popl	%eax
-#define MP_DISABLE_PREEMPTION			\
-	pushl	%eax;				\
-	pushl	%ecx;				\
-	pushl	%edx;				\
-	call	EXT(_mp_disable_preemption);	\
-	popl	%edx;				\
-	popl	%ecx;				\
-	popl	%eax
-#define MP_ENABLE_PREEMPTION			\
-	pushl	%eax;				\
-	pushl	%ecx;				\
-	pushl	%edx;				\
-	call	EXT(_mp_enable_preemption);	\
-	popl	%edx;				\
-	popl	%ecx;				\
-	popl	%eax
-#define MP_ENABLE_PREEMPTION_NO_CHECK		\
-	pushl	%eax;				\
-	pushl	%ecx;				\
-	pushl	%edx;				\
-	call	EXT(_mp_enable_preemption_no_check);	\
-	popl	%edx;				\
-	popl	%ecx;				\
-	popl	%eax
-#else	/* MACH_ASSERT */
-#define DISABLE_PREEMPTION		_DISABLE_PREEMPTION
-#define ENABLE_PREEMPTION		_ENABLE_PREEMPTION
-#define ENABLE_PREEMPTION_NO_CHECK	_ENABLE_PREEMPTION_NO_CHECK
-#define MP_DISABLE_PREEMPTION		_DISABLE_PREEMPTION
-#define MP_ENABLE_PREEMPTION		_ENABLE_PREEMPTION
-#define MP_ENABLE_PREEMPTION_NO_CHECK	_ENABLE_PREEMPTION_NO_CHECK
-#endif	/* MACH_ASSERT */
-
-#else	/* MACH_RT */
-#define DISABLE_PREEMPTION
-#define ENABLE_PREEMPTION
-#define ENABLE_PREEMPTION_NO_CHECK
-#define MP_DISABLE_PREEMPTION
-#define MP_ENABLE_PREEMPTION
-#define MP_ENABLE_PREEMPTION_NO_CHECK
-#endif	/* MACH_RT */
-
-#endif /* _I386AT_MP_H_ */
+#endif /* _I386_MP_H_ */
 
 #endif /* KERNEL_PRIVATE */
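
A minimal usage sketch of the mp_cpus_call() interface added by this diff; it is not part of the diff itself. The helper names and the counter are illustrative, and the SYNC enumerator of mp_sync_t is assumed to be provided by i386/mp_events.h, which this header includes.

#include <i386/mp.h>	/* mp_cpus_call(), cpumask_t, CPUMASK_OTHERS */

/* Hypothetical per-call argument; incremented once per target cpu. */
static volatile long example_hits;

/* Runs on each selected cpu with interrupts disabled. */
static void
example_action(void *arg)
{
	(void)__sync_fetch_and_add((volatile long *)arg, 1);
}

static void
example_call_other_cpus(void)
{
	cpu_t ncalled;

	/*
	 * SYNC mode: the calls are made serially in logical cpu order and
	 * mp_cpus_call() returns only after every cpu in the mask has run
	 * example_action().
	 */
	ncalled = mp_cpus_call(CPUMASK_OTHERS, SYNC,
	                       example_action, (void *)&example_hits);
	(void)ncalled;	/* number of cpus where the call was made */
}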
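
Similarly, a sketch of mp_rendezvous_no_intrs(), which runs an action on every active cpu with interrupts disabled; again not part of the diff. The MSR number is purely illustrative, and wrmsr64() is assumed to come from i386/proc_reg.h.

#include <i386/mp.h>		/* mp_rendezvous_no_intrs() */
#include <i386/proc_reg.h>	/* wrmsr64(), assumed */

#define EXAMPLE_MSR	0x1a0	/* hypothetical MSR, for illustration only */

/* Executed on every active cpu while interrupts are disabled. */
static void
example_write_msr(void *arg)
{
	wrmsr64(EXAMPLE_MSR, *(uint64_t *)arg);
}

static void
example_set_msr_on_all_cpus(uint64_t value)
{
	/* Returns after all active cpus have run example_write_msr(). */
	mp_rendezvous_no_intrs(example_write_msr, &value);
}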