/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
*/
#ifdef KERNEL_PRIVATE
-#ifndef _I386AT_MP_H_
-#define _I386AT_MP_H_
+#ifndef _I386_MP_H_
+#define _I386_MP_H_
-#ifndef DEBUG
-#include <debug.h>
-#endif
//#define MP_DEBUG 1
#include <i386/apic.h>
#include <i386/mp_events.h>
-#define LAPIC_ID_MAX (LAPIC_ID_MASK)
-
-#define MAX_CPUS (LAPIC_ID_MAX + 1)
+#define MAX_CPUS 32 /* limited to the 32 mask bits used in cpumask_t below */
#ifndef ASSEMBLER
+#include <stdint.h>
#include <sys/cdefs.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/i386/thread_status.h>
+#include <mach/vm_types.h>
+#include <kern/simple_lock.h>
__BEGIN_DECLS
extern kern_return_t intel_startCPU(int slot_num);
+extern kern_return_t intel_startCPU_fast(int slot_num);
extern void i386_init_slave(void);
+extern void i386_init_slave_fast(void);
extern void smp_init(void);
extern void cpu_interrupt(int cpu);
-
-extern void lapic_init(void);
-extern void lapic_shutdown(void);
-extern void lapic_smm_restore(void);
-extern boolean_t lapic_probe(void);
-extern void lapic_dump(void);
-extern int lapic_interrupt(int interrupt, x86_saved_state_t *state);
-extern void lapic_end_of_interrupt(void);
-extern int lapic_to_cpu[];
-extern int cpu_to_lapic[];
-extern int lapic_interrupt_base;
-extern void lapic_cpu_map(int lapic, int cpu_num);
-extern uint32_t ml_get_apicid(uint32_t cpu);
-
-extern void lapic_set_timer(
- boolean_t interrupt,
- lapic_timer_mode_t mode,
- lapic_timer_divide_t divisor,
- lapic_timer_count_t initial_count);
-
-extern void lapic_get_timer(
- lapic_timer_mode_t *mode,
- lapic_timer_divide_t *divisor,
- lapic_timer_count_t *initial_count,
- lapic_timer_count_t *current_count);
-
-typedef void (*i386_intr_func_t)(void *);
-extern void lapic_set_timer_func(i386_intr_func_t func);
-extern void lapic_set_pmi_func(i386_intr_func_t func);
-extern void lapic_set_thermal_func(i386_intr_func_t func);
-
__END_DECLS
-/*
- * By default, use high vectors to leave vector space for systems
- * with multiple I/O APIC's. However some systems that boot with
- * local APIC disabled will hang in SMM when vectors greater than
- * 0x5F are used. Those systems are not expected to have I/O APIC
- * so 16 (0x50 - 0x40) vectors for legacy PIC support is perfect.
- */
-#define LAPIC_DEFAULT_INTERRUPT_BASE 0xD0
-#define LAPIC_REDUCED_INTERRUPT_BASE 0x50
-/*
- * Specific lapic interrupts are relative to this base
- * in priority order from high to low:
- */
-
-#define LAPIC_PERFCNT_INTERRUPT 0xF
-#define LAPIC_TIMER_INTERRUPT 0xE
-#define LAPIC_INTERPROCESSOR_INTERRUPT 0xD
-#define LAPIC_THERMAL_INTERRUPT 0xC
-#define LAPIC_ERROR_INTERRUPT 0xB
-#define LAPIC_SPURIOUS_INTERRUPT 0xA
-/* The vector field is ignored for NMI interrupts via the LAPIC
- * or otherwise, so this is not an offset from the interrupt
- * base.
- */
-#define LAPIC_NMI_INTERRUPT 0x2
-
-#define LAPIC_REG(reg) \
- (*((volatile uint32_t *)(lapic_start + LAPIC_##reg)))
-#define LAPIC_REG_OFFSET(reg,off) \
- (*((volatile uint32_t *)(lapic_start + LAPIC_##reg + (off))))
-
-#define LAPIC_VECTOR(src) \
- (lapic_interrupt_base + LAPIC_##src##_INTERRUPT)
-
-#define LAPIC_ISR_IS_SET(base,src) \
- (LAPIC_REG_OFFSET(ISR_BASE,((base+LAPIC_##src##_INTERRUPT)/32)*0x10) & \
- (1 <<((base + LAPIC_##src##_INTERRUPT)%32)))
-
-extern vm_offset_t lapic_start;
-
-#endif /* ASSEMBLER */
-
-#define CPU_NUMBER(r) \
- movl %gs:CPU_NUMBER_GS,r
-
-#define CPU_NUMBER_FROM_LAPIC(r) \
- movl EXT(lapic_id),r; \
- movl 0(r),r; \
- shrl $(LAPIC_ID_SHIFT),r; \
- andl $(LAPIC_ID_MASK),r; \
- movl EXT(lapic_to_cpu)(,r,4),r
-
-
-/* word describing the reason for the interrupt, one per cpu */
-
-#ifndef ASSEMBLER
-#include <kern/lock.h>
-
extern unsigned int real_ncpus; /* real number of cpus */
extern unsigned int max_ncpus; /* max number of cpus */
decl_simple_lock_data(extern,kdb_lock) /* kdb lock */
extern int kdb_active[];
extern volatile boolean_t mp_kdp_trap;
+extern volatile boolean_t force_immediate_debugger_NMI;
+extern volatile boolean_t pmap_tlb_flush_timeout;
+extern volatile usimple_lock_t spinlock_timed_out;
+extern volatile uint32_t spinlock_owner_cpu;
+extern uint32_t spinlock_timeout_NMI(uintptr_t thread_addr);
+
+extern uint64_t LastDebuggerEntryAllowance;
+
extern void mp_kdp_enter(void);
extern void mp_kdp_exit(void);
-#if MACH_KDB
-extern void mp_kdb_exit(void);
-#endif
+extern boolean_t mp_recent_debugger_activity(void);
/*
 * All cpu rendezvous:
 *	- CPUs are signalled,
 *	- all execute the setup function (if specified),
 *	- rendezvous (i.e. all cpus reach a barrier),
 *	- all execute the action function (if specified),
 *	- rendezvous again,
 *	- execute the teardown function (if specified), and then
 *	- resume.
 *
 * Called from thread context.
 */
extern void mp_rendezvous(
		void (*setup_func)(void *),
		void (*action_func)(void *),
		void (*teardown_func)(void *),
		void *arg);
extern void mp_rendezvous_no_intrs(
		void (*action_func)(void *),
		void *arg);
extern void mp_rendezvous_break_lock(void);
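+
+/*
+ * Example (an illustrative sketch, not part of this interface;
+ * wbinvd_action is a hypothetical helper): flush every cpu's caches by
+ * rendezvousing with interrupts disabled on all cpus.
+ *
+ *	static void
+ *	wbinvd_action(__unused void *arg)
+ *	{
+ *		__asm__ volatile("wbinvd");	// flush this cpu's caches
+ *	}
+ *
+ *	mp_rendezvous_no_intrs(wbinvd_action, NULL);
+ */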
+/*
+ * All cpu broadcast.
+ * Called from thread context, this blocks until all active cpus have
+ * run action_func:
+ */
+extern void mp_broadcast(
+ void (*action_func)(void *),
+ void *arg);
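+
+/*
+ * Example (illustrative; count_me is hypothetical and hw_atomic_add()
+ * is assumed available to kernel code): count the responding cpus.
+ *
+ *	static void
+ *	count_me(void *arg)
+ *	{
+ *		(void) hw_atomic_add((volatile uint32_t *)arg, 1);
+ *	}
+ *
+ *	uint32_t count = 0;
+ *	mp_broadcast(count_me, &count);	// blocks until every active cpu ran it
+ *	assert(count == real_ncpus);
+ */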
+#if MACH_KDP
+typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu);
+
+extern long kdp_x86_xcpu_invoke(const uint16_t lcpu,
+ kdp_x86_xcpu_func_t func,
+ void *arg0, void *arg1);
+typedef enum {KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t;
+#endif
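+
+/*
+ * Example (illustrative; xcpu_probe is hypothetical): from the debugger's
+ * active cpu, run a function on logical cpu 1 and collect its return value.
+ *
+ *	static long
+ *	xcpu_probe(__unused void *arg0, __unused void *arg1, uint16_t lcpu)
+ *	{
+ *		return (long) lcpu;	// report which cpu actually ran us
+ *	}
+ *
+ *	long who = kdp_x86_xcpu_invoke(1, xcpu_probe, NULL, NULL);
+ */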
+
+typedef uint32_t cpu_t;
+typedef volatile long cpumask_t;
+static inline cpumask_t
+cpu_to_cpumask(cpu_t cpu)
+{
+ return (cpu < 32) ? (1L << cpu) : 0;
+}
+#define CPUMASK_ALL 0xffffffff
+#define CPUMASK_SELF cpu_to_cpumask(cpu_number())
+#define CPUMASK_OTHERS (CPUMASK_ALL & ~CPUMASK_SELF)
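+
+/*
+ * Example (illustrative): masks compose with ordinary bitwise operators.
+ *
+ *	cpumask_t pair   = cpu_to_cpumask(0) | cpu_to_cpumask(2);
+ *	cpumask_t others = CPUMASK_OTHERS;	// every active cpu but the caller
+ */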
+
+/* Initialization routine called at processor registration */
+extern void mp_cpus_call_cpu_init(int cpu);
+
+/*
+ * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
+ * The mask may include the local cpu.
+ * If the mode is:
+ * - ASYNC: other cpus make their calls in parallel
+ * - SYNC: the calls are performed serially in logical cpu order
+ * - NOSYNC: the calls are queued
+ * Unless the mode is NOSYNC, mp_cpus_call() returns when the function has been
+ * called on all specified cpus.
+ * The return value is the number of cpus where the call was made or queued.
+ * The action function is called with interrupts disabled.
+ */
+extern cpu_t mp_cpus_call(
+ cpumask_t cpus,
+ mp_sync_t mode,
+ void (*action_func)(void *),
+ void *arg);
+extern cpu_t mp_cpus_call1(
+ cpumask_t cpus,
+ mp_sync_t mode,
+ void (*action_func)(void *, void *),
+ void *arg0,
+ void *arg1,
+ cpumask_t *cpus_calledp,
+ cpumask_t *cpus_notcalledp);
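+
+/*
+ * Example (illustrative; probe_action is hypothetical, and SYNC is
+ * assumed to be one of the mp_sync_t enumerators from <i386/mp_events.h>):
+ *
+ *	static void
+ *	probe_action(void *arg)
+ *	{
+ *		// interrupts are disabled while this runs
+ *		(void) hw_atomic_add((volatile uint32_t *)arg, 1);
+ *	}
+ *
+ *	uint32_t count = 0;
+ *	cpu_t n = mp_cpus_call(CPUMASK_OTHERS, SYNC, probe_action, &count);
+ *	assert(n == (cpu_t) count);
+ */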
+
+extern void mp_cpus_NMIPI(cpumask_t cpus);
+
+/* Interrupt a set of cpus, forcing an exit out of non-root mode */
+extern void mp_cpus_kick(cpumask_t cpus);
+/*
+ * Power-management-specific SPI to:
+ * - register a callout function, and
+ * - request the callout (if registered) on a given cpu.
+ */
+extern void PM_interrupt_register(void (*fn)(void));
+extern void cpu_PM_interrupt(int cpu);
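+
+/*
+ * Example (illustrative; pm_callout is hypothetical): register one callout,
+ * then request it on cpu 2.
+ *
+ *	static void
+ *	pm_callout(void)
+ *	{
+ *		// runs on the target cpu when the PM interrupt is delivered
+ *	}
+ *
+ *	PM_interrupt_register(pm_callout);
+ *	cpu_PM_interrupt(2);
+ */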
+
__END_DECLS
#if MP_DEBUG
(vm_offset_t *) hdl_logpp, \
sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
- bzero(*sig_logpp, sizeof(cpu_signal_event_log_t)); \
+ bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t)); \
}
#else /* MP_DEBUG */
#define DBGLOG(log,_cpu,_event)
#define DBGLOG_CPU_INIT(cpu)
#endif /* MP_DEBUG */

#endif /* ASSEMBLER */
-#define i_bit(bit, word) ((long)(*(word)) & ((long)1 << (bit)))
-
-
-/*
- * Device driver synchronization.
- *
- * at386_io_lock(op) and at386_io_unlock() are called
- * by device drivers when accessing H/W. The underlying
- * Processing is machine dependant. But the op argument
- * to the at386_io_lock is generic
- */
-
-#define MP_DEV_OP_MAX 4
-#define MP_DEV_WAIT MP_DEV_OP_MAX /* Wait for the lock */
+#ifdef ASSEMBLER
+#define i_bit(bit, word) ((long)(*(word)) & (1L << (bit)))
+#else
+__attribute__((always_inline)) static inline long
+i_bit_impl(long word, long bit) {
+ long bitmask = 1L << bit;
+ return word & bitmask;
+}
+#define i_bit(bit, word) i_bit_impl((long)(*(word)), bit)
+#endif
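+
+/*
+ * Example (illustrative; MP_TLB_FLUSH is assumed from <i386/mp_events.h>
+ * and handle_tlb_flush is hypothetical): test a bit in a signal word
+ * through a pointer.
+ *
+ *	extern volatile long cpu_signal_word;	// hypothetical signal word
+ *	if (i_bit(MP_TLB_FLUSH, &cpu_signal_word))
+ *		handle_tlb_flush();
+ */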
-/*
- * If the caller specifies an op value different than MP_DEV_WAIT, the
- * at386_io_lock function must return true if lock was successful else
- * false
- */
+#if MACH_RT
-#define MP_DEV_OP_START 0 /* If lock busy, register a pending start op */
-#define MP_DEV_OP_INTR 1 /* If lock busy, register a pending intr */
-#define MP_DEV_OP_TIMEO 2 /* If lock busy, register a pending timeout */
-#define MP_DEV_OP_CALLB 3 /* If lock busy, register a pending callback */
+#if defined(__x86_64__)
-#if MACH_RT
#define _DISABLE_PREEMPTION \
incl %gs:CPU_PREEMPTION_LEVEL
#define _ENABLE_PREEMPTION \
decl %gs:CPU_PREEMPTION_LEVEL ; \
jne 9f ; \
- pushl %eax ; \
- pushl %ecx ; \
- pushl %edx ; \
call EXT(kernel_preempt_check) ; \
- popl %edx ; \
- popl %ecx ; \
- popl %eax ; \
9:
#define _ENABLE_PREEMPTION_NO_CHECK \
decl %gs:CPU_PREEMPTION_LEVEL
-#if MACH_ASSERT
-#define DISABLE_PREEMPTION \
- pushl %eax; \
- pushl %ecx; \
- pushl %edx; \
- call EXT(_disable_preemption); \
- popl %edx; \
- popl %ecx; \
- popl %eax
-#define ENABLE_PREEMPTION \
- pushl %eax; \
- pushl %ecx; \
- pushl %edx; \
- call EXT(_enable_preemption); \
- popl %edx; \
- popl %ecx; \
- popl %eax
-#define ENABLE_PREEMPTION_NO_CHECK \
- pushl %eax; \
- pushl %ecx; \
- pushl %edx; \
- call EXT(_enable_preemption_no_check); \
- popl %edx; \
- popl %ecx; \
- popl %eax
-#define MP_DISABLE_PREEMPTION \
- pushl %eax; \
- pushl %ecx; \
- pushl %edx; \
- call EXT(_mp_disable_preemption); \
- popl %edx; \
- popl %ecx; \
- popl %eax
-#define MP_ENABLE_PREEMPTION \
- pushl %eax; \
- pushl %ecx; \
- pushl %edx; \
- call EXT(_mp_enable_preemption); \
- popl %edx; \
- popl %ecx; \
- popl %eax
-#define MP_ENABLE_PREEMPTION_NO_CHECK \
- pushl %eax; \
- pushl %ecx; \
- pushl %edx; \
- call EXT(_mp_enable_preemption_no_check); \
- popl %edx; \
- popl %ecx; \
- popl %eax
-#else /* MACH_ASSERT */
+#else
+#error Unsupported architecture
+#endif
+
+/* On x86_64 these all map directly to the underscore-prefixed macros above */
#define DISABLE_PREEMPTION _DISABLE_PREEMPTION
#define ENABLE_PREEMPTION _ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK _ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION _DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION _ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK _ENABLE_PREEMPTION_NO_CHECK
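+
+/*
+ * Example (illustrative assembly; CPU_ACTIVE_THREAD is assumed to be an
+ * assym.s offset): bracket a per-cpu access so the thread cannot migrate.
+ *
+ *	DISABLE_PREEMPTION
+ *	movq	%gs:CPU_ACTIVE_THREAD,%rax	// per-cpu access, migration-safe
+ *	ENABLE_PREEMPTION			// may call kernel_preempt_check()
+ */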
-#endif /* MACH_ASSERT */
#else /* MACH_RT */
#define DISABLE_PREEMPTION
#define ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK
#endif /* MACH_RT */
-#endif /* _I386AT_MP_H_ */
+#endif /* _I386_MP_H_ */
#endif /* KERNEL_PRIVATE */