/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
+#ifdef KERNEL_PRIVATE
-#ifndef _I386AT_MP_H_
-#define _I386AT_MP_H_
-
-#if !defined(NCPUS)
-#include <cpus.h>
-#endif /* !defined(NCPUS) */
+#ifndef _I386_MP_H_
+#define _I386_MP_H_
-#if NCPUS > 1
-
-#ifndef DEBUG
-#include <debug.h>
-#endif
-#if DEBUG
-#define MP_DEBUG 1
-#endif
+//#define MP_DEBUG 1
#include <i386/apic.h>
#include <i386/mp_events.h>
-#define SPURIOUS_INTERRUPT 0xDD
-#define INTERPROCESS_INTERRUPT 0xDE
-#define APIC_ERROR_INTERRUPT 0xDF
-
-#define LAPIC_ID_MAX (LAPIC_ID_MASK)
+#define MAX_CPUS 32 /* (8*sizeof(long)) */
#ifndef ASSEMBLER
-extern void lapic_dump(void);
-extern void lapic_interrupt(int interrupt, void *state);
-extern int lapic_to_cpu[];
-extern int cpu_to_lapic[];
-extern void lapic_cpu_map(int lapic, int cpu_num);
-#endif /* ASSEMBLER */
-
-#define CPU_NUMBER(r) \
- movl EXT(lapic_id),r; \
- movl 0(r),r; \
- shrl $(LAPIC_ID_SHIFT),r; \
- andl $(LAPIC_ID_MASK),r; \
- movl EXT(lapic_to_cpu)(,r,4),r
+#include <stdint.h>
+#include <sys/cdefs.h>
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/i386/thread_status.h>
+#include <mach/vm_types.h>
+#include <kern/lock.h>
+__BEGIN_DECLS
-#define MP_IPL SPL6 /* software interrupt level */
+extern kern_return_t intel_startCPU(int slot_num);
+extern kern_return_t intel_startCPU_fast(int slot_num);
+extern void i386_init_slave(void);
+extern void i386_init_slave_fast(void);
+extern void smp_init(void);
-/* word describing the reason for the interrupt, one per cpu */
+extern void cpu_interrupt(int cpu);
+__END_DECLS
-#ifndef ASSEMBLER
-#include <kern/lock.h>
-extern int real_ncpus; /* real number of cpus */
-extern int wncpu; /* wanted number of cpus */
+extern unsigned int real_ncpus; /* real number of cpus */
+extern unsigned int max_ncpus; /* max number of cpus */
decl_simple_lock_data(extern,kdb_lock) /* kdb lock */
-decl_simple_lock_data(extern,mp_putc_lock)
+
+__BEGIN_DECLS
+
+extern void console_init(void);
+extern void *console_cpu_alloc(boolean_t boot_cpu);
+extern void console_cpu_free(void *console_buf);
extern int kdb_cpu; /* current cpu running kdb */
extern int kdb_debug;
-extern int kdb_is_slave[];
extern int kdb_active[];
extern volatile boolean_t mp_kdp_trap;
-extern void mp_trap_enter();
-extern void mp_trap_exit();
+extern volatile boolean_t force_immediate_debugger_NMI;
+extern volatile boolean_t pmap_tlb_flush_timeout;
+extern volatile usimple_lock_t spinlock_timed_out;
+extern volatile uint32_t spinlock_owner_cpu;
+
+extern uint64_t LastDebuggerEntryAllowance;
+
+extern void mp_kdp_enter(void);
+extern void mp_kdp_exit(void);
+
+extern boolean_t mp_recent_debugger_activity(void);
+#if MACH_KDB
+extern void mp_kdb_exit(void);
+#endif
/*
* All cpu rendezvous:
*/
-extern void mp_rendezvous(void (*setup_func)(void *),
- void (*action_func)(void *),
- void (*teardown_func)(void *),
- void *arg);
+extern void mp_rendezvous(
+ void (*setup_func)(void *),
+ void (*action_func)(void *),
+ void (*teardown_func)(void *),
+ void *arg);
+extern void mp_rendezvous_no_intrs(
+ void (*action_func)(void *),
+ void *arg);
+extern void mp_rendezvous_break_lock(void);
+
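+/*
+ * Illustrative sketch (an assumption, not part of this interface): a
+ * caller hands mp_rendezvous_no_intrs() an action to run on every cpu
+ * with interrupts disabled; mtrr_update_action() is a hypothetical name.
+ *
+ *	static void
+ *	mtrr_update_action(void *arg)
+ *	{
+ *		... per-cpu work, runs with interrupts disabled ...
+ *	}
+ *
+ *	mp_rendezvous_no_intrs(mtrr_update_action, NULL);
+ */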
+/*
+ * All cpu broadcast.
+ * Called from thread context, this blocks until all active cpus have
+ * run action_func:
+ */
+extern void mp_broadcast(
+ void (*action_func)(void *),
+ void *arg);
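+/*
+ * Illustrative sketch: since mp_broadcast() blocks in thread context
+ * until every active cpu has run the action, a caller can aggregate a
+ * per-cpu result; count_me() here is a hypothetical action function.
+ *
+ *	static void
+ *	count_me(void *arg)
+ *	{
+ *		(void) hw_atomic_add((volatile uint32_t *) arg, 1);
+ *	}
+ *
+ *	uint32_t ran = 0;
+ *	mp_broadcast(count_me, &ran);
+ */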
+#if MACH_KDP
+typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu);
+
+extern long kdp_x86_xcpu_invoke(const uint16_t lcpu,
+ kdp_x86_xcpu_func_t func,
+ void *arg0, void *arg1);
+typedef enum {KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t;
+#endif
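+/*
+ * Illustrative sketch: from debugger context, kdp can run a function on
+ * a chosen logical cpu and collect its long return value; target_lcpu
+ * and read_cpu_state() are hypothetical.
+ *
+ *	long val = kdp_x86_xcpu_invoke(target_lcpu, read_cpu_state,
+ *				       arg0, arg1);
+ */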
+
+typedef uint32_t cpu_t;
+typedef uint32_t cpumask_t;
+static inline cpumask_t
+cpu_to_cpumask(cpu_t cpu)
+{
+ return (cpu < 32) ? (1 << cpu) : 0;
+}
+#define CPUMASK_ALL 0xffffffff
+#define CPUMASK_SELF cpu_to_cpumask(cpu_number())
+#define CPUMASK_OTHERS (CPUMASK_ALL & ~CPUMASK_SELF)
+
+/*
+ * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
+ * The mask may include the local cpu.
+ * If the mode is:
+ * - ASYNC: other cpus make their calls in parallel.
+ * - SYNC: the calls are performed serially in logical cpu order.
+ * This call returns when the function has been run on all specified cpus.
+ * The return value is the number of cpus on which the call was made.
+ * The action function is called with interrupts disabled.
+ */
+extern cpu_t mp_cpus_call(
+ cpumask_t cpus,
+ mp_sync_t mode,
+ void (*action_func)(void *),
+ void *arg);
+
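+/*
+ * Illustrative sketch: run a hypothetical cache_flush_action() on every
+ * cpu except the caller, serially in logical cpu order using the SYNC
+ * mode described above:
+ *
+ *	cpu_t n = mp_cpus_call(CPUMASK_OTHERS, SYNC,
+ *			       cache_flush_action, NULL);
+ *
+ * If n is smaller than the number of bits set in the mask, some cpus in
+ * the mask did not make the call (e.g. they were not active).
+ */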
+/*
+ * Power-management-specific SPI to:
+ * - register a callout function, and
+ * - request the callout (if registered) on a given cpu.
+ */
+extern void PM_interrupt_register(void (*fn)(void));
+extern void cpu_PM_interrupt(int cpu);
+
+
+__END_DECLS
#if MP_DEBUG
typedef struct {
	uint64_t	time;
	int		cpu;
	mp_event_t	event;
} cpu_signal_event_t;

#define LOG_NENTRIES	100

typedef struct {
	uint64_t		count[MP_LAST];
	int			next_entry;
	cpu_signal_event_t	entry[LOG_NENTRIES];
} cpu_signal_event_log_t;
-extern cpu_signal_event_log_t cpu_signal[NCPUS];
-extern cpu_signal_event_log_t cpu_handle[NCPUS];
+extern cpu_signal_event_log_t *cpu_signal[];
+extern cpu_signal_event_log_t *cpu_handle[];
#define DBGLOG(log,_cpu,_event) { \
- cpu_signal_event_log_t *logp = &log[cpu_number()]; \
+ boolean_t spl = ml_set_interrupts_enabled(FALSE); \
+ cpu_signal_event_log_t *logp = log[cpu_number()]; \
int next = logp->next_entry; \
cpu_signal_event_t *eventp = &logp->entry[next]; \
- boolean_t spl = ml_set_interrupts_enabled(FALSE); \
\
	logp->count[_event]++; \
	\
	eventp->time = rdtsc64(); \
	eventp->cpu = _cpu; \
	eventp->event = _event; \
	if (next == (LOG_NENTRIES - 1)) \
		logp->next_entry = 0; \
	else \
		logp->next_entry++; \
	\
(void) ml_set_interrupts_enabled(spl); \
}
+
+#define DBGLOG_CPU_INIT(cpu) { \
+ cpu_signal_event_log_t **sig_logpp = &cpu_signal[cpu]; \
+ cpu_signal_event_log_t **hdl_logpp = &cpu_handle[cpu]; \
+ \
+ if (*sig_logpp == NULL && \
+ kmem_alloc(kernel_map, \
+ (vm_offset_t *) sig_logpp, \
+ sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
+ panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n");\
+ bzero(*sig_logpp, sizeof(cpu_signal_event_log_t)); \
+ if (*hdl_logpp == NULL && \
+ kmem_alloc(kernel_map, \
+ (vm_offset_t *) hdl_logpp, \
+ sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
+ panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
+ bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t)); \
+}
#else /* MP_DEBUG */
#define DBGLOG(log,_cpu,_event)
+#define DBGLOG_CPU_INIT(cpu)
#endif /* MP_DEBUG */
#endif /* ASSEMBLER */
-#define i_bit(bit, word) ((long)(*(word)) & ((long)1 << (bit)))
+#ifdef ASSEMBLER
+#define i_bit(bit, word) ((long)(*(word)) & (1L << (bit)))
+#else
+// Workaround for 6640051
+static inline long
+i_bit_impl(long word, long bit) {
+ return word & 1L << bit;
+}
+#define i_bit(bit, word) i_bit_impl((long)(*(word)), bit)
+#endif
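+/*
+ * i_bit(bit, word) is nonzero when the given bit is set in the long
+ * pointed to by word; the C path goes through i_bit_impl() to work
+ * around 6640051.  Illustrative usage, with MP_TLB_FLUSH from
+ * <i386/mp_events.h> and a hypothetical signals word:
+ *
+ *	volatile long signals;
+ *	if (i_bit(MP_TLB_FLUSH, &signals))
+ *		... a TLB flush has been requested ...
+ */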
#define MP_DEV_OP_TIMEO 2 /* If lock busy, register a pending timeout */
#define MP_DEV_OP_CALLB 3 /* If lock busy, register a pending callback */
-#else /* NCPUS > 1 */
-#define at386_io_lock_state()
-#define at386_io_lock(op) (TRUE)
-#define at386_io_unlock()
-#define mp_trap_enter()
-#define mp_trap_exit()
-#include <i386/apic.h>
-#endif /* NCPUS > 1 */
-
#if MACH_RT
-#define _DISABLE_PREEMPTION(r) \
- movl $ CPD_PREEMPTION_LEVEL,r ; \
- incl %gs:(r)
-#define _ENABLE_PREEMPTION(r) \
- movl $ CPD_PREEMPTION_LEVEL,r ; \
- decl %gs:(r) ; \
+#if defined(__i386__)
+
+#define _DISABLE_PREEMPTION \
+ incl %gs:CPU_PREEMPTION_LEVEL
+
+#define _ENABLE_PREEMPTION \
+ decl %gs:CPU_PREEMPTION_LEVEL ; \
jne 9f ; \
	pushl %eax ; \
	pushl %ecx ; \
	pushl %edx ; \
	call EXT(kernel_preempt_check) ; \
	popl %edx ; \
	popl %ecx ; \
	popl %eax ; \
9:
-#define _ENABLE_PREEMPTION_NO_CHECK(r) \
- movl $ CPD_PREEMPTION_LEVEL,r ; \
- decl %gs:(r)
+#define _ENABLE_PREEMPTION_NO_CHECK \
+ decl %gs:CPU_PREEMPTION_LEVEL
+
+#elif defined(__x86_64__)
+
+#define _DISABLE_PREEMPTION \
+ incl %gs:CPU_PREEMPTION_LEVEL
+
+#define _ENABLE_PREEMPTION \
+ decl %gs:CPU_PREEMPTION_LEVEL ; \
+ jne 9f ; \
+ call EXT(kernel_preempt_check) ; \
+9:
+
+#define _ENABLE_PREEMPTION_NO_CHECK \
+ decl %gs:CPU_PREEMPTION_LEVEL
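+/*
+ * Note: in both variants, decl sets the zero flag only when
+ * CPU_PREEMPTION_LEVEL reaches zero, so the jne bypasses the
+ * kernel_preempt_check() call while an outer disable is still in
+ * effect; the check for pending preemption runs only when the
+ * outermost enable drops the level to zero.
+ */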
-#if MACH_ASSERT
-#define DISABLE_PREEMPTION(r) \
+#else
+#error Unsupported architecture
+#endif
+
+/* x86_64 just calls through to the other macro directly */
+#if MACH_ASSERT && defined(__i386__)
+#define DISABLE_PREEMPTION \
pushl %eax; \
pushl %ecx; \
	pushl %edx; \
	call EXT(_disable_preemption); \
	popl %edx; \
popl %ecx; \
popl %eax
-#define ENABLE_PREEMPTION(r) \
+#define ENABLE_PREEMPTION \
pushl %eax; \
pushl %ecx; \
	pushl %edx; \
	call EXT(_enable_preemption); \
	popl %edx; \
popl %ecx; \
popl %eax
-#define ENABLE_PREEMPTION_NO_CHECK(r) \
+#define ENABLE_PREEMPTION_NO_CHECK \
pushl %eax; \
pushl %ecx; \
	pushl %edx; \
	call EXT(_enable_preemption_no_check); \
	popl %edx; \
popl %ecx; \
popl %eax
-#if NCPUS > 1
-#define MP_DISABLE_PREEMPTION(r) \
+#define MP_DISABLE_PREEMPTION \
pushl %eax; \
pushl %ecx; \
	pushl %edx; \
	call EXT(_mp_disable_preemption); \
	popl %edx; \
popl %ecx; \
popl %eax
-#define MP_ENABLE_PREEMPTION(r) \
+#define MP_ENABLE_PREEMPTION \
pushl %eax; \
pushl %ecx; \
	pushl %edx; \
	call EXT(_mp_enable_preemption); \
	popl %edx; \
popl %ecx; \
popl %eax
-#define MP_ENABLE_PREEMPTION_NO_CHECK(r) \
+#define MP_ENABLE_PREEMPTION_NO_CHECK \
pushl %eax; \
pushl %ecx; \
	pushl %edx; \
	call EXT(_mp_enable_preemption_no_check); \
	popl %edx; \
popl %ecx; \
popl %eax
-#else /* NCPUS > 1 */
-#define MP_DISABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION_NO_CHECK(r)
-#endif /* NCPUS > 1 */
#else /* MACH_ASSERT */
-#define DISABLE_PREEMPTION(r) _DISABLE_PREEMPTION(r)
-#define ENABLE_PREEMPTION(r) _ENABLE_PREEMPTION(r)
-#define ENABLE_PREEMPTION_NO_CHECK(r) _ENABLE_PREEMPTION_NO_CHECK(r)
-#if NCPUS > 1
-#define MP_DISABLE_PREEMPTION(r) _DISABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION(r) _ENABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION_NO_CHECK(r) _ENABLE_PREEMPTION_NO_CHECK(r)
-#else /* NCPUS > 1 */
-#define MP_DISABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION_NO_CHECK(r)
-#endif /* NCPUS > 1 */
+#define DISABLE_PREEMPTION _DISABLE_PREEMPTION
+#define ENABLE_PREEMPTION _ENABLE_PREEMPTION
+#define ENABLE_PREEMPTION_NO_CHECK _ENABLE_PREEMPTION_NO_CHECK
+#define MP_DISABLE_PREEMPTION _DISABLE_PREEMPTION
+#define MP_ENABLE_PREEMPTION _ENABLE_PREEMPTION
+#define MP_ENABLE_PREEMPTION_NO_CHECK _ENABLE_PREEMPTION_NO_CHECK
#endif /* MACH_ASSERT */
#else /* MACH_RT */
-#define DISABLE_PREEMPTION(r)
-#define ENABLE_PREEMPTION(r)
-#define ENABLE_PREEMPTION_NO_CHECK(r)
-#define MP_DISABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION_NO_CHECK(r)
+#define DISABLE_PREEMPTION
+#define ENABLE_PREEMPTION
+#define ENABLE_PREEMPTION_NO_CHECK
+#define MP_DISABLE_PREEMPTION
+#define MP_ENABLE_PREEMPTION
+#define MP_ENABLE_PREEMPTION_NO_CHECK
#endif /* MACH_RT */
-#endif /* _I386AT_MP_H_ */
+#endif /* _I386_MP_H_ */
+
+#endif /* KERNEL_PRIVATE */