/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
 * @OSF_COPYRIGHT@
 */
+#ifdef KERNEL_PRIVATE
#ifndef _I386AT_MP_H_
#define _I386AT_MP_H_
-#if !defined(NCPUS)
-#include <cpus.h>
-#endif /* !defined(NCPUS) */
-
-#if NCPUS > 1
-
#ifndef DEBUG
#include <debug.h>
#endif
-#if DEBUG
-#define MP_DEBUG 1
-#endif
+//#define MP_DEBUG 1
#include <i386/apic.h>
#include <i386/mp_events.h>
-#define SPURIOUS_INTERRUPT 0xDD
-#define INTERPROCESS_INTERRUPT 0xDE
-#define APIC_ERROR_INTERRUPT 0xDF
+#define LAPIC_ID_MAX (LAPIC_ID_MASK)
-#define LAPIC_ID_MAX (LAPIC_ID_MASK)
+#define MAX_CPUS (LAPIC_ID_MAX + 1)
#ifndef ASSEMBLER
+#include <sys/cdefs.h>
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/i386/thread_status.h>
+
+__BEGIN_DECLS
+
+extern kern_return_t intel_startCPU(int slot_num);
+extern void i386_init_slave(void);
+extern void smp_init(void);
+
+extern void cpu_interrupt(int cpu);
+
+extern void lapic_init(void);
+extern void lapic_shutdown(void);
+extern void lapic_smm_restore(void);
+extern boolean_t lapic_probe(void);
extern void lapic_dump(void);
-extern void lapic_interrupt(int interrupt, void *state);
+extern int lapic_interrupt(int interrupt, x86_saved_state_t *state);
+extern void lapic_end_of_interrupt(void);
extern int lapic_to_cpu[];
extern int cpu_to_lapic[];
+extern int lapic_interrupt_base;
extern void lapic_cpu_map(int lapic, int cpu_num);
+extern uint32_t ml_get_apicid(uint32_t cpu);
+
+extern void lapic_set_timer(
+ boolean_t interrupt,
+ lapic_timer_mode_t mode,
+ lapic_timer_divide_t divisor,
+ lapic_timer_count_t initial_count);
+
+extern void lapic_get_timer(
+ lapic_timer_mode_t *mode,
+ lapic_timer_divide_t *divisor,
+ lapic_timer_count_t *initial_count,
+ lapic_timer_count_t *current_count);
+
+typedef void (*i386_intr_func_t)(void *);
+extern void lapic_set_timer_func(i386_intr_func_t func);
+extern void lapic_set_pmi_func(i386_intr_func_t func);
+extern void lapic_set_thermal_func(i386_intr_func_t func);
+
+__END_DECLS
+
+/*
+ * By default, use high vectors to leave vector space for systems
+ * with multiple I/O APIC's. However some systems that boot with
+ * local APIC disabled will hang in SMM when vectors greater than
+ * 0x5F are used. Those systems are not expected to have I/O APIC
+ * so 16 (0x50 - 0x40) vectors for legacy PIC support is perfect.
+ */
+#define LAPIC_DEFAULT_INTERRUPT_BASE 0xD0
+#define LAPIC_REDUCED_INTERRUPT_BASE 0x50
+/*
+ * Specific lapic interrupts are relative to this base
+ * in priority order from high to low:
+ */
+
+#define LAPIC_PERFCNT_INTERRUPT 0xF
+#define LAPIC_TIMER_INTERRUPT 0xE
+#define LAPIC_INTERPROCESSOR_INTERRUPT 0xD
+#define LAPIC_THERMAL_INTERRUPT 0xC
+#define LAPIC_ERROR_INTERRUPT 0xB
+#define LAPIC_SPURIOUS_INTERRUPT 0xA
+/* The vector field is ignored for NMI interrupts via the LAPIC
+ * or otherwise, so this is not an offset from the interrupt
+ * base.
+ */
+#define LAPIC_NMI_INTERRUPT 0x2
+
+#define LAPIC_REG(reg) \
+ (*((volatile uint32_t *)(lapic_start + LAPIC_##reg)))
+#define LAPIC_REG_OFFSET(reg,off) \
+ (*((volatile uint32_t *)(lapic_start + LAPIC_##reg + (off))))
+
+#define LAPIC_VECTOR(src) \
+ (lapic_interrupt_base + LAPIC_##src##_INTERRUPT)
+
+#define LAPIC_ISR_IS_SET(base,src) \
+ (LAPIC_REG_OFFSET(ISR_BASE,((base+LAPIC_##src##_INTERRUPT)/32)*0x10) & \
+ (1 <<((base + LAPIC_##src##_INTERRUPT)%32)))
+
+extern vm_offset_t lapic_start;
+
#endif /* ASSEMBLER */
#define CPU_NUMBER(r) \
+ movl %gs:CPU_NUMBER_GS,r
+
+#define CPU_NUMBER_FROM_LAPIC(r) \
movl EXT(lapic_id),r; \
movl 0(r),r; \
shrl $(LAPIC_ID_SHIFT),r; \
movl EXT(lapic_to_cpu)(,r,4),r
-#define MP_IPL SPL6 /* software interrupt level */
-
/* word describing the reason for the interrupt, one per cpu */
#ifndef ASSEMBLER
#include <kern/lock.h>
-extern int real_ncpus; /* real number of cpus */
-extern int wncpu; /* wanted number of cpus */
+
+extern unsigned int real_ncpus; /* real number of cpus */
+extern unsigned int max_ncpus; /* max number of cpus */
decl_simple_lock_data(extern,kdb_lock) /* kdb lock */
-decl_simple_lock_data(extern,mp_putc_lock)
+
+__BEGIN_DECLS
+
+extern void console_init(void);
+extern void *console_cpu_alloc(boolean_t boot_cpu);
+extern void console_cpu_free(void *console_buf);
extern int kdb_cpu; /* current cpu running kdb */
extern int kdb_debug;
-extern int kdb_is_slave[];
extern int kdb_active[];
extern volatile boolean_t mp_kdp_trap;
-extern void mp_trap_enter();
-extern void mp_trap_exit();
+extern void mp_kdp_enter(void);
+extern void mp_kdp_exit(void);
+
+#if MACH_KDB
+extern void mp_kdb_exit(void);
+#endif
/*
* All cpu rendezvous:
*/
-extern void mp_rendezvous(void (*setup_func)(void *),
- void (*action_func)(void *),
- void (*teardown_func)(void *),
- void *arg);
+extern void mp_rendezvous(
+ void (*setup_func)(void *),
+ void (*action_func)(void *),
+ void (*teardown_func)(void *),
+ void *arg);
+extern void mp_rendezvous_no_intrs(
+ void (*action_func)(void *),
+ void *arg);
+extern void mp_rendezvous_break_lock(void);
+
+__END_DECLS
#if MP_DEBUG
typedef struct {
cpu_signal_event_t entry[LOG_NENTRIES];
} cpu_signal_event_log_t;
-extern cpu_signal_event_log_t cpu_signal[NCPUS];
-extern cpu_signal_event_log_t cpu_handle[NCPUS];
+extern cpu_signal_event_log_t *cpu_signal[];
+extern cpu_signal_event_log_t *cpu_handle[];
#define DBGLOG(log,_cpu,_event) { \
- cpu_signal_event_log_t *logp = &log[cpu_number()]; \
+ boolean_t spl = ml_set_interrupts_enabled(FALSE); \
+ cpu_signal_event_log_t *logp = log[cpu_number()]; \
int next = logp->next_entry; \
cpu_signal_event_t *eventp = &logp->entry[next]; \
- boolean_t spl = ml_set_interrupts_enabled(FALSE); \
\
logp->count[_event]++; \
\
\
(void) ml_set_interrupts_enabled(spl); \
}
+
+#define DBGLOG_CPU_INIT(cpu) { \
+ cpu_signal_event_log_t **sig_logpp = &cpu_signal[cpu]; \
+ cpu_signal_event_log_t **hdl_logpp = &cpu_handle[cpu]; \
+ \
+ if (*sig_logpp == NULL && \
+ kmem_alloc(kernel_map, \
+ (vm_offset_t *) sig_logpp, \
+ sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
+ panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n");\
+ bzero(*sig_logpp, sizeof(cpu_signal_event_log_t)); \
+ if (*hdl_logpp == NULL && \
+ kmem_alloc(kernel_map, \
+ (vm_offset_t *) hdl_logpp, \
+ sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
+ panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
+ bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t)); \
+}
#else /* MP_DEBUG */
#define DBGLOG(log,_cpu,_event)
+#define DBGLOG_CPU_INIT(cpu)
#endif /* MP_DEBUG */
#endif /* ASSEMBLER */
#define MP_DEV_OP_TIMEO 2 /* If lock busy, register a pending timeout */
#define MP_DEV_OP_CALLB 3 /* If lock busy, register a pending callback */
-#else /* NCPUS > 1 */
-#define at386_io_lock_state()
-#define at386_io_lock(op) (TRUE)
-#define at386_io_unlock()
-#define mp_trap_enter()
-#define mp_trap_exit()
-#include <i386/apic.h>
-#endif /* NCPUS > 1 */
-
#if MACH_RT
-#define _DISABLE_PREEMPTION(r) \
- movl $ CPD_PREEMPTION_LEVEL,r ; \
- incl %gs:(r)
+#define _DISABLE_PREEMPTION \
+ incl %gs:CPU_PREEMPTION_LEVEL
-#define _ENABLE_PREEMPTION(r) \
- movl $ CPD_PREEMPTION_LEVEL,r ; \
- decl %gs:(r) ; \
+#define _ENABLE_PREEMPTION \
+ decl %gs:CPU_PREEMPTION_LEVEL ; \
jne 9f ; \
pushl %eax ; \
pushl %ecx ; \
popl %eax ; \
9:
-#define _ENABLE_PREEMPTION_NO_CHECK(r) \
- movl $ CPD_PREEMPTION_LEVEL,r ; \
- decl %gs:(r)
+#define _ENABLE_PREEMPTION_NO_CHECK \
+ decl %gs:CPU_PREEMPTION_LEVEL
#if MACH_ASSERT
-#define DISABLE_PREEMPTION(r) \
+#define DISABLE_PREEMPTION \
pushl %eax; \
pushl %ecx; \
pushl %edx; \
popl %edx; \
popl %ecx; \
popl %eax
-#define ENABLE_PREEMPTION(r) \
+#define ENABLE_PREEMPTION \
pushl %eax; \
pushl %ecx; \
pushl %edx; \
popl %edx; \
popl %ecx; \
popl %eax
-#define ENABLE_PREEMPTION_NO_CHECK(r) \
+#define ENABLE_PREEMPTION_NO_CHECK \
pushl %eax; \
pushl %ecx; \
pushl %edx; \
popl %edx; \
popl %ecx; \
popl %eax
-#if NCPUS > 1
-#define MP_DISABLE_PREEMPTION(r) \
+#define MP_DISABLE_PREEMPTION \
pushl %eax; \
pushl %ecx; \
pushl %edx; \
popl %edx; \
popl %ecx; \
popl %eax
-#define MP_ENABLE_PREEMPTION(r) \
+#define MP_ENABLE_PREEMPTION \
pushl %eax; \
pushl %ecx; \
pushl %edx; \
popl %edx; \
popl %ecx; \
popl %eax
-#define MP_ENABLE_PREEMPTION_NO_CHECK(r) \
+#define MP_ENABLE_PREEMPTION_NO_CHECK \
pushl %eax; \
pushl %ecx; \
pushl %edx; \
popl %edx; \
popl %ecx; \
popl %eax
-#else /* NCPUS > 1 */
-#define MP_DISABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION_NO_CHECK(r)
-#endif /* NCPUS > 1 */
#else /* MACH_ASSERT */
-#define DISABLE_PREEMPTION(r) _DISABLE_PREEMPTION(r)
-#define ENABLE_PREEMPTION(r) _ENABLE_PREEMPTION(r)
-#define ENABLE_PREEMPTION_NO_CHECK(r) _ENABLE_PREEMPTION_NO_CHECK(r)
-#if NCPUS > 1
-#define MP_DISABLE_PREEMPTION(r) _DISABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION(r) _ENABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION_NO_CHECK(r) _ENABLE_PREEMPTION_NO_CHECK(r)
-#else /* NCPUS > 1 */
-#define MP_DISABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION_NO_CHECK(r)
-#endif /* NCPUS > 1 */
+#define DISABLE_PREEMPTION _DISABLE_PREEMPTION
+#define ENABLE_PREEMPTION _ENABLE_PREEMPTION
+#define ENABLE_PREEMPTION_NO_CHECK _ENABLE_PREEMPTION_NO_CHECK
+#define MP_DISABLE_PREEMPTION _DISABLE_PREEMPTION
+#define MP_ENABLE_PREEMPTION _ENABLE_PREEMPTION
+#define MP_ENABLE_PREEMPTION_NO_CHECK _ENABLE_PREEMPTION_NO_CHECK
#endif /* MACH_ASSERT */
#else /* MACH_RT */
-#define DISABLE_PREEMPTION(r)
-#define ENABLE_PREEMPTION(r)
-#define ENABLE_PREEMPTION_NO_CHECK(r)
-#define MP_DISABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION(r)
-#define MP_ENABLE_PREEMPTION_NO_CHECK(r)
+#define DISABLE_PREEMPTION
+#define ENABLE_PREEMPTION
+#define ENABLE_PREEMPTION_NO_CHECK
+#define MP_DISABLE_PREEMPTION
+#define MP_ENABLE_PREEMPTION
+#define MP_ENABLE_PREEMPTION_NO_CHECK
#endif /* MACH_RT */
#endif /* _I386AT_MP_H_ */
+
+#endif /* KERNEL_PRIVATE */