#ifndef _I386_MP_H_
#define _I386_MP_H_
-#ifndef DEBUG
-#include <debug.h>
-#endif
//#define MP_DEBUG 1
#include <i386/apic.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/i386/thread_status.h>
+#include <mach/vm_types.h>
#include <kern/lock.h>
__BEGIN_DECLS
extern int kdb_active[];
extern volatile boolean_t mp_kdp_trap;
extern volatile boolean_t force_immediate_debugger_NMI;
extern volatile boolean_t pmap_tlb_flush_timeout;
+extern volatile usimple_lock_t spinlock_timed_out;
+extern volatile uint32_t spinlock_owner_cpu;
+
+extern uint64_t LastDebuggerEntryAllowance;
extern void mp_kdp_enter(void);
extern void mp_kdp_exit(void);
+extern boolean_t mp_recent_debugger_activity(void);
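+/*
+ * Illustrative pairing (a sketch, not taken from this header's sources):
+ * the debugger quiesces the other cpus on entry and releases them on exit.
+ *
+ *	mp_kdp_enter();
+ *	... examine state while the other cpus are held ...
+ *	mp_kdp_exit();
+ */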
#if MACH_KDB
extern void mp_kdb_exit(void);
#endif
extern void mp_broadcast(
void (*action_func)(void *),
void *arg);
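/*
 * Example (illustrative; "count_up" and "counter" are hypothetical, and the
 * use of hw_atomic_add is an assumption): run an action once on every cpu
 * and wait for all of them to complete.
 *
 *	static void count_up(void *arg) {
 *		(void) hw_atomic_add((volatile uint32_t *) arg, 1);
 *	}
 *	uint32_t counter = 0;
 *	mp_broadcast(count_up, &counter);
 */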
+#if MACH_KDP
+typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu);
+
+extern long kdp_x86_xcpu_invoke(const uint16_t lcpu,
+ kdp_x86_xcpu_func_t func,
+ void *arg0, void *arg1);
+typedef enum {KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t;
+#endif
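+/*
+ * Sketch ("read_reg_cb" and "target_lcpu" are hypothetical, not part of
+ * this interface): have the debugger run a function on another logical cpu
+ * and collect its result.
+ *
+ *	static long read_reg_cb(void *arg0, void *arg1, uint16_t lcpu)
+ *	{
+ *		return (long) lcpu;	// e.g. sample per-cpu state here
+ *	}
+ *	long result = kdp_x86_xcpu_invoke(target_lcpu, read_reg_cb, NULL, NULL);
+ */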
typedef uint32_t cpu_t;
typedef uint32_t cpumask_t;
/*
 * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
* The mask may include the local cpu.
* If the mode is:
- * - ASYNC: other cpus make their calls in parallel.
- * - SYNC: the calls are performed serially in logical cpu order.
- * This call returns when the function has been run on all specified cpus.
- * The return value is the number of cpus on which the call was made.
+ * - ASYNC: other cpus make their calls in parallel
+ * - SYNC: the calls are performed serially in logical cpu order
+ * - NOSYNC: the calls are queued
+ * Unless the mode is NOSYNC, mp_cpus_call() returns when the function has been
+ * called on all specified cpus.
+ * The return value is the number of cpus where the call was made or queued.
* The action function is called with interrupts disabled.
*/
extern cpu_t mp_cpus_call(
cpumask_t cpus,
mp_sync_t mode,
void (*action_func)(void *),
void *arg);
+extern cpu_t mp_cpus_call1(
+ cpumask_t cpus,
+ mp_sync_t mode,
+ void (*action_func)(void *, void*),
+ void *arg0,
+ void *arg1,
+ cpumask_t *cpus_calledp,
+ cpumask_t *cpus_notcalledp);
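+/*
+ * Example (a sketch, assuming the mp_sync_t modes and CPUMASK_* masks
+ * defined elsewhere in this header; "my_action" is hypothetical):
+ *
+ *	static void my_action(void *arg0, void *arg1) { ... }
+ *
+ *	cpumask_t called, uncalled;
+ *	cpu_t n = mp_cpus_call1(CPUMASK_OTHERS, SYNC, my_action,
+ *				NULL, NULL, &called, &uncalled);
+ *
+ * On return, n is the number of cpus where my_action ran (or was queued),
+ * and the two masks record exactly which cpus were and were not reached.
+ */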
/*
 * Power-management-specific SPI to:
 * - register a callout function, and
 * - request the callout (if registered) on a given cpu.
 */
extern void PM_interrupt_register(void (*fn)(void));
extern void cpu_PM_interrupt(int cpu);
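/*
 * Usage sketch ("pm_handler" is hypothetical): a power-management driver
 * registers its callout once at init, after which the callout can be
 * requested on any cpu:
 *
 *	static void pm_handler(void) { ... }
 *	PM_interrupt_register(pm_handler);
 *	...
 *	cpu_PM_interrupt(cpu);
 */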
-
__END_DECLS
#if MP_DEBUG
(vm_offset_t *) hdl_logpp, \
sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
- bzero(*sig_logpp, sizeof(cpu_signal_event_log_t)); \
+ bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t)); \
}
#else /* MP_DEBUG */
#define DBGLOG(log,_cpu,_event)
#endif /* MP_DEBUG */

#endif /* ASSEMBLER */
-#define i_bit(bit, word) ((long)(*(word)) & ((long)1 << (bit)))
-
-
-/*
- * Device driver synchronization.
- *
- * at386_io_lock(op) and at386_io_unlock() are called
- * by device drivers when accessing H/W. The underlying
- * Processing is machine dependant. But the op argument
- * to the at386_io_lock is generic
- */
-
-#define MP_DEV_OP_MAX 4
-#define MP_DEV_WAIT MP_DEV_OP_MAX /* Wait for the lock */
+#ifdef ASSEMBLER
+#define i_bit(bit, word) ((long)(*(word)) & (1L << (bit)))
+#else
+__attribute__((always_inline)) static inline long
+i_bit_impl(long word, long bit) {
+ long bitmask = 1L << bit;
+ return word & bitmask;
+}
+#define i_bit(bit, word) i_bit_impl((long)(*(word)), bit)
+#endif
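+/*
+ * Example (illustrative): i_bit() dereferences its word argument, so the
+ * caller passes a pointer; the result is nonzero iff the bit is set.
+ *
+ *	long signals = 0x05;
+ *	if (i_bit(2, &signals))
+ *		... bit 2 is set ...
+ */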
-/*
- * If the caller specifies an op value different than MP_DEV_WAIT, the
- * at386_io_lock function must return true if lock was successful else
- * false
- */
+#if MACH_RT
-#define MP_DEV_OP_START 0 /* If lock busy, register a pending start op */
-#define MP_DEV_OP_INTR 1 /* If lock busy, register a pending intr */
-#define MP_DEV_OP_TIMEO 2 /* If lock busy, register a pending timeout */
-#define MP_DEV_OP_CALLB 3 /* If lock busy, register a pending callback */
+#if defined(__i386__)
-#if MACH_RT
#define _DISABLE_PREEMPTION \
incl %gs:CPU_PREEMPTION_LEVEL
#define _ENABLE_PREEMPTION_NO_CHECK \
decl %gs:CPU_PREEMPTION_LEVEL
-#if MACH_ASSERT
+#elif defined(__x86_64__)
+
+#define _DISABLE_PREEMPTION \
+ incl %gs:CPU_PREEMPTION_LEVEL
+
+#define _ENABLE_PREEMPTION \
+ decl %gs:CPU_PREEMPTION_LEVEL ; \
+ jne 9f ; \
+ call EXT(kernel_preempt_check) ; \
+9:
+
+#define _ENABLE_PREEMPTION_NO_CHECK \
+ decl %gs:CPU_PREEMPTION_LEVEL
+
+#else
+#error Unsupported architecture
+#endif
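+/*
+ * Illustrative usage (sketch): CPU_PREEMPTION_LEVEL is a per-cpu nesting
+ * count, so the decl in _ENABLE_PREEMPTION reaches kernel_preempt_check
+ * only when the level drops back to zero:
+ *
+ *	_DISABLE_PREEMPTION		level 0 -> 1: preemption blocked
+ *	...critical section...
+ *	_ENABLE_PREEMPTION		level 1 -> 0: kernel_preempt_check runs
+ */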
+
+/* x86_64 just calls through to the other macro directly */
+#if MACH_ASSERT && defined(__i386__)
#define DISABLE_PREEMPTION \
pushl %eax; \
pushl %ecx; \