/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */
#ifdef KERNEL_PRIVATE

#ifndef _I386AT_MP_H_
#define _I386AT_MP_H_

#ifndef DEBUG
#include <debug.h>
#endif
//#define MP_DEBUG	1

#include <i386/apic.h>
#include <i386/mp_events.h>

#define LAPIC_ID_MAX		(LAPIC_ID_MASK)

#define MAX_CPUS		(LAPIC_ID_MAX + 1)

#ifndef ASSEMBLER
#include <sys/cdefs.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/i386/thread_status.h>

__BEGIN_DECLS

extern kern_return_t intel_startCPU(int slot_num);
extern void i386_init_slave(void);
extern void smp_init(void);

extern void cpu_interrupt(int cpu);

extern void lapic_init(void);
extern void lapic_shutdown(void);
extern void lapic_smm_restore(void);
extern boolean_t lapic_probe(void);
extern void lapic_dump(void);
extern int lapic_interrupt(int interrupt, x86_saved_state_t *state);
extern void lapic_end_of_interrupt(void);
extern int lapic_to_cpu[];
extern int cpu_to_lapic[];
extern int lapic_interrupt_base;
extern void lapic_cpu_map(int lapic, int cpu_num);
extern uint32_t ml_get_apicid(uint32_t cpu);

extern void lapic_set_timer(
	boolean_t		interrupt,
	lapic_timer_mode_t	mode,
	lapic_timer_divide_t	divisor,
	lapic_timer_count_t	initial_count);

extern void lapic_get_timer(
	lapic_timer_mode_t	*mode,
	lapic_timer_divide_t	*divisor,
	lapic_timer_count_t	*initial_count,
	lapic_timer_count_t	*current_count);
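
/*
 * Illustrative usage sketch: the timer mode and divide constants are
 * defined in <i386/apic.h>; "mode" and "divide" below stand in for
 * whichever values that header provides.
 *
 *	lapic_timer_mode_t	mode;
 *	lapic_timer_divide_t	divide;
 *	lapic_timer_count_t	count0, now;
 *
 *	lapic_set_timer(TRUE, mode, divide, count0);	// arm; interrupt on expiry
 *	...
 *	lapic_get_timer(&mode, &divide, &count0, &now);	// read timer state back
 */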

typedef	void (*i386_intr_func_t)(void *);
extern void	lapic_set_timer_func(i386_intr_func_t func);
extern void	lapic_set_pmi_func(i386_intr_func_t func);
extern void	lapic_set_thermal_func(i386_intr_func_t func);

__END_DECLS

/*
 * By default, use high vectors to leave vector space for systems
 * with multiple I/O APICs.  However, some systems that boot with the
 * local APIC disabled will hang in SMM when vectors greater than
 * 0x5F are used.  Those systems are not expected to have an I/O APIC,
 * so 16 (0x50 - 0x40) vectors for legacy PIC support are sufficient.
 */
#define LAPIC_DEFAULT_INTERRUPT_BASE	0xD0
#define LAPIC_REDUCED_INTERRUPT_BASE	0x50
/*
 * Specific lapic interrupts are relative to this base
 * in priority order from high to low:
 */

#define LAPIC_PERFCNT_INTERRUPT		0xF
#define LAPIC_TIMER_INTERRUPT		0xE
#define LAPIC_INTERPROCESSOR_INTERRUPT	0xD
#define LAPIC_THERMAL_INTERRUPT		0xC
#define LAPIC_ERROR_INTERRUPT		0xB
#define LAPIC_SPURIOUS_INTERRUPT	0xA
/*
 * The vector field is ignored for NMI interrupts, whether delivered via
 * the LAPIC or otherwise, so this is not an offset from the interrupt base.
 */
#define LAPIC_NMI_INTERRUPT		0x2

#define LAPIC_REG(reg) \
	(*((volatile uint32_t *)(lapic_start + LAPIC_##reg)))
#define LAPIC_REG_OFFSET(reg,off) \
	(*((volatile uint32_t *)(lapic_start + LAPIC_##reg + (off))))

#define LAPIC_VECTOR(src) \
	(lapic_interrupt_base + LAPIC_##src##_INTERRUPT)

#define LAPIC_ISR_IS_SET(base,src) \
	(LAPIC_REG_OFFSET(ISR_BASE,((base+LAPIC_##src##_INTERRUPT)/32)*0x10) & \
	 (1 << ((base + LAPIC_##src##_INTERRUPT)%32)))
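
/*
 * Worked example: with the default interrupt base of 0xD0,
 * LAPIC_VECTOR(TIMER) expands to lapic_interrupt_base +
 * LAPIC_TIMER_INTERRUPT = 0xD0 + 0xE = 0xDE, and LAPIC_VECTOR(SPURIOUS)
 * to 0xD0 + 0xA = 0xDA.  With the reduced base of 0x50 the same offsets
 * yield 0x5E and 0x5A, keeping every vector at or below the 0x5F limit
 * noted above.
 */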

extern vm_offset_t lapic_start;

#endif /* ASSEMBLER */

#define CPU_NUMBER(r)				\
	movl	%gs:CPU_NUMBER_GS,r

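/*
 * CPU_NUMBER_FROM_LAPIC derives the cpu number from the hardware rather
 * than from per-cpu data: it loads the lapic_id pointer, dereferences it
 * to read the local APIC ID register, isolates the ID field with
 * LAPIC_ID_SHIFT and LAPIC_ID_MASK, and uses the result to index
 * lapic_to_cpu[].
 */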
#define CPU_NUMBER_FROM_LAPIC(r)		\
	movl	EXT(lapic_id),r;		\
	movl	0(r),r;				\
	shrl	$(LAPIC_ID_SHIFT),r;		\
	andl	$(LAPIC_ID_MASK),r;		\
	movl	EXT(lapic_to_cpu)(,r,4),r

176
1c79356b
A
177/* word describing the reason for the interrupt, one per cpu */
178
179#ifndef ASSEMBLER
180#include <kern/lock.h>
91447636
A
181
182extern unsigned int real_ncpus; /* real number of cpus */
183extern unsigned int max_ncpus; /* max number of cpus */
1c79356b 184decl_simple_lock_data(extern,kdb_lock) /* kdb lock */
91447636
A
185
__BEGIN_DECLS

extern void	console_init(void);
extern void	*console_cpu_alloc(boolean_t boot_cpu);
extern void	console_cpu_free(void *console_buf);

extern int	kdb_cpu;	/* current cpu running kdb */
extern int	kdb_debug;
extern int	kdb_active[];

extern volatile boolean_t mp_kdp_trap;
extern void	mp_kdp_enter(void);
extern void	mp_kdp_exit(void);

#if MACH_KDB
extern void mp_kdb_exit(void);
#endif

/*
 * All cpu rendezvous:
 */
extern void mp_rendezvous(
		void (*setup_func)(void *),
		void (*action_func)(void *),
		void (*teardown_func)(void *),
		void *arg);
extern void mp_rendezvous_no_intrs(
		void (*action_func)(void *),
		void *arg);
extern void mp_rendezvous_break_lock(void);
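
/*
 * Usage sketch (illustrative; tlb_flush_action() and its NULL argument
 * are hypothetical):
 *
 *	static void
 *	tlb_flush_action(void *arg)
 *	{
 *		// runs on every cpu while all cpus are held in the
 *		// rendezvous; keep this short and non-blocking
 *	}
 *
 *	mp_rendezvous_no_intrs(tlb_flush_action, NULL);
 *
 * mp_rendezvous() additionally takes setup and teardown functions that
 * bracket the action.
 */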

__END_DECLS

#if	MP_DEBUG
typedef struct {
	uint64_t	time;
	int		cpu;
	mp_event_t	event;
} cpu_signal_event_t;

#define	LOG_NENTRIES	100
typedef struct {
	uint64_t		count[MP_LAST];
	int			next_entry;
	cpu_signal_event_t	entry[LOG_NENTRIES];
} cpu_signal_event_log_t;

extern cpu_signal_event_log_t	*cpu_signal[];
extern cpu_signal_event_log_t	*cpu_handle[];

#define DBGLOG(log,_cpu,_event) {					\
	boolean_t		spl = ml_set_interrupts_enabled(FALSE);	\
	cpu_signal_event_log_t	*logp = log[cpu_number()];		\
	int			next = logp->next_entry;		\
	cpu_signal_event_t	*eventp = &logp->entry[next];		\
									\
	logp->count[_event]++;						\
									\
	eventp->time = rdtsc64();					\
	eventp->cpu = _cpu;						\
	eventp->event = _event;						\
	if (next == (LOG_NENTRIES - 1))					\
		logp->next_entry = 0;					\
	else								\
		logp->next_entry++;					\
									\
	(void) ml_set_interrupts_enabled(spl);				\
}
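
/*
 * DBGLOG records one timestamped entry in the calling cpu's ring of
 * LOG_NENTRIES events, e.g. DBGLOG(cpu_signal, target_cpu, event) on the
 * sending side and DBGLOG(cpu_handle, source_cpu, event) on the handling
 * side (as the array names suggest).  Interrupts are disabled around the
 * update so an interrupt on the same cpu cannot corrupt the entry.
 */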

#define DBGLOG_CPU_INIT(cpu)	{					\
	cpu_signal_event_log_t	**sig_logpp = &cpu_signal[cpu];		\
	cpu_signal_event_log_t	**hdl_logpp = &cpu_handle[cpu];		\
									\
	if (*sig_logpp == NULL &&					\
		kmem_alloc(kernel_map,					\
			(vm_offset_t *) sig_logpp,			\
			sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
		panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n");\
	bzero(*sig_logpp, sizeof(cpu_signal_event_log_t));		\
	if (*hdl_logpp == NULL &&					\
		kmem_alloc(kernel_map,					\
			(vm_offset_t *) hdl_logpp,			\
			sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
		panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
	bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t));		\
}
#else	/* MP_DEBUG */
#define DBGLOG(log,_cpu,_event)
#define DBGLOG_CPU_INIT(cpu)
#endif	/* MP_DEBUG */

#endif /* ASSEMBLER */

#define i_bit(bit, word)	((long)(*(word)) & ((long)1 << (bit)))


/*
 * Device driver synchronization.
 *
 * at386_io_lock(op) and at386_io_unlock() are called
 * by device drivers when accessing H/W.  The underlying
 * processing is machine-dependent, but the op argument
 * to at386_io_lock is generic.
 */

#define MP_DEV_OP_MAX	4
#define MP_DEV_WAIT	MP_DEV_OP_MAX	/* Wait for the lock */

/*
 * If the caller specifies an op value other than MP_DEV_WAIT,
 * at386_io_lock must return TRUE if the lock was acquired and
 * FALSE otherwise.
 */

#define MP_DEV_OP_START	0	/* If lock busy, register a pending start op */
#define MP_DEV_OP_INTR	1	/* If lock busy, register a pending intr */
#define MP_DEV_OP_TIMEO	2	/* If lock busy, register a pending timeout */
#define MP_DEV_OP_CALLB	3	/* If lock busy, register a pending callback */

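/*
 * Usage sketch (illustrative only; at386_io_lock()/at386_io_unlock() are
 * not declared in this header, and the calls below merely show the
 * intended pattern described above):
 *
 *	if (at386_io_lock(MP_DEV_OP_INTR)) {	// got the lock immediately
 *		... access the hardware ...
 *		at386_io_unlock();
 *	}					// else the intr op was queued
 *
 *	at386_io_lock(MP_DEV_WAIT);		// or: block until the lock is held
 *	... access the hardware ...
 *	at386_io_unlock();
 */
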
#if	MACH_RT
#define _DISABLE_PREEMPTION					\
	incl	%gs:CPU_PREEMPTION_LEVEL

#define _ENABLE_PREEMPTION					\
	decl	%gs:CPU_PREEMPTION_LEVEL		;	\
	jne	9f					;	\
	pushl	%eax					;	\
	pushl	%ecx					;	\
	pushl	%edx					;	\
	call	EXT(kernel_preempt_check)		;	\
	popl	%edx					;	\
	popl	%ecx					;	\
	popl	%eax					;	\
9:
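
/*
 * The decl above sets the zero flag only when the preemption level drops
 * to zero, so the jne skips to label 9: while outer disable/enable pairs
 * are still outstanding; kernel_preempt_check() runs only when preemption
 * has actually been re-enabled.
 */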

#define _ENABLE_PREEMPTION_NO_CHECK				\
	decl	%gs:CPU_PREEMPTION_LEVEL

#if	MACH_ASSERT
#define DISABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_disable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define ENABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_enable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define ENABLE_PREEMPTION_NO_CHECK				\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_enable_preemption_no_check);		\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_DISABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_disable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_ENABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_enable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_ENABLE_PREEMPTION_NO_CHECK				\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_enable_preemption_no_check);		\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#else	/* MACH_ASSERT */
#define DISABLE_PREEMPTION		_DISABLE_PREEMPTION
#define ENABLE_PREEMPTION		_ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK	_ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION		_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION		_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_ASSERT */

#else	/* MACH_RT */
#define DISABLE_PREEMPTION
#define ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */

#endif /* _I386AT_MP_H_ */

#endif /* KERNEL_PRIVATE */