]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/mp.h
xnu-1228.0.2.tar.gz
[apple/xnu.git] / osfmk / i386 / mp.h
CommitLineData
1c79356b 1/*
2d21ac55 2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57/*
58 */
91447636 59#ifdef KERNEL_PRIVATE
1c79356b
A
60
61#ifndef _I386AT_MP_H_
62#define _I386AT_MP_H_
63
55e303ae
A
64#ifndef DEBUG
65#include <debug.h>
66#endif
91447636 67//#define MP_DEBUG 1
55e303ae 68
1c79356b 69#include <i386/apic.h>
55e303ae
A
70#include <i386/mp_events.h>
71
91447636 72#define LAPIC_ID_MAX (LAPIC_ID_MASK)
55e303ae 73
91447636 74#define MAX_CPUS (LAPIC_ID_MAX + 1)
55e303ae
A
75
76#ifndef ASSEMBLER
91447636
A
77#include <sys/cdefs.h>
78#include <mach/boolean.h>
79#include <mach/kern_return.h>
0c530ab8 80#include <mach/i386/thread_status.h>
91447636
A
81
82__BEGIN_DECLS
83
84extern kern_return_t intel_startCPU(int slot_num);
85extern void i386_init_slave(void);
86extern void smp_init(void);
87
88extern void cpu_interrupt(int cpu);
89
90extern void lapic_init(void);
91extern void lapic_shutdown(void);
92extern void lapic_smm_restore(void);
93extern boolean_t lapic_probe(void);
55e303ae 94extern void lapic_dump(void);
0c530ab8 95extern int lapic_interrupt(int interrupt, x86_saved_state_t *state);
91447636 96extern void lapic_end_of_interrupt(void);
55e303ae
A
97extern int lapic_to_cpu[];
98extern int cpu_to_lapic[];
91447636 99extern int lapic_interrupt_base;
55e303ae 100extern void lapic_cpu_map(int lapic, int cpu_num);
0c530ab8 101extern uint32_t ml_get_apicid(uint32_t cpu);
91447636
A
102
103extern void lapic_set_timer(
104 boolean_t interrupt,
105 lapic_timer_mode_t mode,
106 lapic_timer_divide_t divisor,
107 lapic_timer_count_t initial_count);
108
109extern void lapic_get_timer(
110 lapic_timer_mode_t *mode,
111 lapic_timer_divide_t *divisor,
112 lapic_timer_count_t *initial_count,
113 lapic_timer_count_t *current_count);
114
115typedef void (*i386_intr_func_t)(void *);
116extern void lapic_set_timer_func(i386_intr_func_t func);
117extern void lapic_set_pmi_func(i386_intr_func_t func);
0c530ab8 118extern void lapic_set_thermal_func(i386_intr_func_t func);
91447636
A
119
120__END_DECLS
121
0c530ab8
A
122/*
123 * By default, use high vectors to leave vector space for systems
124 * with multiple I/O APIC's. However some systems that boot with
125 * local APIC disabled will hang in SMM when vectors greater than
126 * 0x5F are used. Those systems are not expected to have I/O APIC
127 * so 16 (0x50 - 0x40) vectors for legacy PIC support is perfect.
128 */
129#define LAPIC_DEFAULT_INTERRUPT_BASE 0xD0
130#define LAPIC_REDUCED_INTERRUPT_BASE 0x50
131/*
132 * Specific lapic interrupts are relative to this base
133 * in priority order from high to low:
134 */
135
136#define LAPIC_PERFCNT_INTERRUPT 0xF
137#define LAPIC_TIMER_INTERRUPT 0xE
138#define LAPIC_INTERPROCESSOR_INTERRUPT 0xD
139#define LAPIC_THERMAL_INTERRUPT 0xC
140#define LAPIC_ERROR_INTERRUPT 0xB
141#define LAPIC_SPURIOUS_INTERRUPT 0xA
142/* The vector field is ignored for NMI interrupts via the LAPIC
143 * or otherwise, so this is not an offset from the interrupt
144 * base.
145 */
146#define LAPIC_NMI_INTERRUPT 0x2
147
148#define LAPIC_REG(reg) \
149 (*((volatile uint32_t *)(lapic_start + LAPIC_##reg)))
150#define LAPIC_REG_OFFSET(reg,off) \
151 (*((volatile uint32_t *)(lapic_start + LAPIC_##reg + (off))))
152
153#define LAPIC_VECTOR(src) \
154 (lapic_interrupt_base + LAPIC_##src##_INTERRUPT)
155
156#define LAPIC_ISR_IS_SET(base,src) \
157 (LAPIC_REG_OFFSET(ISR_BASE,((base+LAPIC_##src##_INTERRUPT)/32)*0x10) & \
158 (1 <<((base + LAPIC_##src##_INTERRUPT)%32)))
159
160extern vm_offset_t lapic_start;
161
55e303ae 162#endif /* ASSEMBLER */
1c79356b 163
55e303ae 164#define CPU_NUMBER(r) \
91447636
A
165 movl %gs:CPU_NUMBER_GS,r
166
167#define CPU_NUMBER_FROM_LAPIC(r) \
55e303ae
A
168 movl EXT(lapic_id),r; \
169 movl 0(r),r; \
170 shrl $(LAPIC_ID_SHIFT),r; \
171 andl $(LAPIC_ID_MASK),r; \
172 movl EXT(lapic_to_cpu)(,r,4),r
1c79356b
A
173
174
1c79356b
A
175/* word describing the reason for the interrupt, one per cpu */
176
177#ifndef ASSEMBLER
178#include <kern/lock.h>
91447636
A
179
180extern unsigned int real_ncpus; /* real number of cpus */
181extern unsigned int max_ncpus; /* max number of cpus */
1c79356b 182decl_simple_lock_data(extern,kdb_lock) /* kdb lock */
91447636
A
183
184__BEGIN_DECLS
185
186extern void console_init(void);
187extern void *console_cpu_alloc(boolean_t boot_cpu);
188extern void console_cpu_free(void *console_buf);
1c79356b
A
189
190extern int kdb_cpu; /* current cpu running kdb */
191extern int kdb_debug;
1c79356b 192extern int kdb_active[];
55e303ae
A
193
194extern volatile boolean_t mp_kdp_trap;
2d21ac55
A
195extern boolean_t force_immediate_debugger_NMI;
196
91447636
A
197extern void mp_kdp_enter(void);
198extern void mp_kdp_exit(void);
55e303ae 199
0c530ab8
A
200#if MACH_KDB
201extern void mp_kdb_exit(void);
202#endif
203
55e303ae
A
204/*
205 * All cpu rendezvous:
206 */
0c530ab8
A
207extern void mp_rendezvous(
208 void (*setup_func)(void *),
209 void (*action_func)(void *),
210 void (*teardown_func)(void *),
211 void *arg);
212extern void mp_rendezvous_no_intrs(
213 void (*action_func)(void *),
214 void *arg);
215extern void mp_rendezvous_break_lock(void);
55e303ae 216
2d21ac55
A
217/*
218 * All cpu broadcast.
219 * Called from thread context, this blocks until all active cpus have
220 * run action_func:
221 */
222extern void mp_broadcast(
223 void (*action_func)(void *),
224 void *arg);
225
typedef uint32_t cpu_t;
typedef uint32_t cpumask_t;

/*
 * Convert a cpu number into a single-bit cpu mask.
 * Returns an empty mask for cpu numbers that do not fit in the
 * 32-bit mask (cpu >= 32).
 *
 * Fix: shift an unsigned (cpumask_t) 1 rather than the signed int
 * literal 1 -- "1 << 31" left-shifts into the sign bit of a signed
 * int, which is undefined behavior in C.  The produced value is
 * unchanged on supported targets.
 */
static inline cpumask_t
cpu_to_cpumask(cpu_t cpu)
{
	return (cpu < 32) ? ((cpumask_t) 1 << cpu) : 0;
}
233#define CPUMASK_ALL 0xffffffff
234#define CPUMASK_SELF cpu_to_cpumask(cpu_number())
235#define CPUMASK_OTHERS (CPUMASK_ALL & ~CPUMASK_SELF)
236
237/*
238 * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
239 * The mask may include the local cpu.
240 * If the mode is:
241 * - ASYNC: other cpus make their calls in parallel.
242 * - SYNC: the calls are performed serially in logical cpu order.
243 * This call returns when the function has been run on all specified cpus.
244 * The return value is the number of cpus on which the call was made.
245 * The action function is called with interrupts disabled.
246 */
247extern cpu_t mp_cpus_call(
248 cpumask_t cpus,
249 mp_sync_t mode,
250 void (*action_func)(void *),
251 void *arg);
252
91447636
A
253__END_DECLS
254
55e303ae
A
255#if MP_DEBUG
256typedef struct {
257 uint64_t time;
258 int cpu;
259 mp_event_t event;
260} cpu_signal_event_t;
261
262#define LOG_NENTRIES 100
263typedef struct {
264 uint64_t count[MP_LAST];
265 int next_entry;
266 cpu_signal_event_t entry[LOG_NENTRIES];
267} cpu_signal_event_log_t;
268
91447636
A
269extern cpu_signal_event_log_t *cpu_signal[];
270extern cpu_signal_event_log_t *cpu_handle[];
55e303ae
A
271
/*
 * Record a cpu-signal event in the current cpu's ring-buffer log.
 * Interrupts are disabled for the duration of the update so the
 * entry and the ring index stay consistent.  The log is indexed by
 * the cpu doing the logging; the entry records the target cpu.
 */
#define DBGLOG(log,_cpu,_event)	{					\
	boolean_t	intr_state = ml_set_interrupts_enabled(FALSE);	\
	cpu_signal_event_log_t	*mylog = log[cpu_number()];		\
	int		slot = mylog->next_entry;			\
	cpu_signal_event_t	*ep = &mylog->entry[slot];		\
									\
	mylog->count[_event]++;						\
									\
	ep->time = rdtsc64();						\
	ep->cpu = _cpu;							\
	ep->event = _event;						\
	mylog->next_entry =						\
		(slot == (LOG_NENTRIES - 1)) ? 0 : slot + 1;		\
									\
	(void) ml_set_interrupts_enabled(intr_state);			\
}
91447636
A
290
/*
 * Lazily allocate and zero the per-cpu signal and handler event logs.
 * Called once per cpu; panics if kernel memory cannot be allocated.
 *
 * Bug fix: the second bzero() previously re-cleared *sig_logpp
 * instead of the freshly allocated *hdl_logpp, leaving the handler
 * log's contents uninitialized.
 */
#define DBGLOG_CPU_INIT(cpu)	{				\
	cpu_signal_event_log_t	**sig_logpp = &cpu_signal[cpu];	\
	cpu_signal_event_log_t	**hdl_logpp = &cpu_handle[cpu];	\
								\
	if (*sig_logpp == NULL &&				\
		kmem_alloc(kernel_map,				\
			(vm_offset_t *) sig_logpp,		\
			sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
		panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n");\
	bzero(*sig_logpp, sizeof(cpu_signal_event_log_t));	\
	if (*hdl_logpp == NULL &&				\
		kmem_alloc(kernel_map,				\
			(vm_offset_t *) hdl_logpp,		\
			sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
		panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
	bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t));	\
}
55e303ae
A
308#else /* MP_DEBUG */
309#define DBGLOG(log,_cpu,_event)
91447636 310#define DBGLOG_CPU_INIT(cpu)
55e303ae
A
311#endif /* MP_DEBUG */
312
1c79356b
A
313#endif /* ASSEMBLER */
314
315#define i_bit(bit, word) ((long)(*(word)) & ((long)1 << (bit)))
316
317
318/*
319 * Device driver synchronization.
320 *
321 * at386_io_lock(op) and at386_io_unlock() are called
322 * by device drivers when accessing H/W. The underlying
323 * Processing is machine dependant. But the op argument
324 * to the at386_io_lock is generic
325 */
326
327#define MP_DEV_OP_MAX 4
328#define MP_DEV_WAIT MP_DEV_OP_MAX /* Wait for the lock */
329
330/*
331 * If the caller specifies an op value different than MP_DEV_WAIT, the
332 * at386_io_lock function must return true if lock was successful else
333 * false
334 */
335
336#define MP_DEV_OP_START 0 /* If lock busy, register a pending start op */
337#define MP_DEV_OP_INTR 1 /* If lock busy, register a pending intr */
338#define MP_DEV_OP_TIMEO 2 /* If lock busy, register a pending timeout */
339#define MP_DEV_OP_CALLB 3 /* If lock busy, register a pending callback */
340
1c79356b 341#if MACH_RT
91447636
A
342#define _DISABLE_PREEMPTION \
343 incl %gs:CPU_PREEMPTION_LEVEL
1c79356b 344
91447636
A
345#define _ENABLE_PREEMPTION \
346 decl %gs:CPU_PREEMPTION_LEVEL ; \
1c79356b
A
347 jne 9f ; \
348 pushl %eax ; \
349 pushl %ecx ; \
350 pushl %edx ; \
351 call EXT(kernel_preempt_check) ; \
352 popl %edx ; \
353 popl %ecx ; \
354 popl %eax ; \
3559:
356
91447636
A
357#define _ENABLE_PREEMPTION_NO_CHECK \
358 decl %gs:CPU_PREEMPTION_LEVEL
1c79356b
A
359
360#if MACH_ASSERT
91447636 361#define DISABLE_PREEMPTION \
1c79356b
A
362 pushl %eax; \
363 pushl %ecx; \
364 pushl %edx; \
365 call EXT(_disable_preemption); \
366 popl %edx; \
367 popl %ecx; \
368 popl %eax
91447636 369#define ENABLE_PREEMPTION \
1c79356b
A
370 pushl %eax; \
371 pushl %ecx; \
372 pushl %edx; \
373 call EXT(_enable_preemption); \
374 popl %edx; \
375 popl %ecx; \
376 popl %eax
91447636 377#define ENABLE_PREEMPTION_NO_CHECK \
1c79356b
A
378 pushl %eax; \
379 pushl %ecx; \
380 pushl %edx; \
381 call EXT(_enable_preemption_no_check); \
382 popl %edx; \
383 popl %ecx; \
384 popl %eax
91447636 385#define MP_DISABLE_PREEMPTION \
1c79356b
A
386 pushl %eax; \
387 pushl %ecx; \
388 pushl %edx; \
389 call EXT(_mp_disable_preemption); \
390 popl %edx; \
391 popl %ecx; \
392 popl %eax
91447636 393#define MP_ENABLE_PREEMPTION \
1c79356b
A
394 pushl %eax; \
395 pushl %ecx; \
396 pushl %edx; \
397 call EXT(_mp_enable_preemption); \
398 popl %edx; \
399 popl %ecx; \
400 popl %eax
91447636 401#define MP_ENABLE_PREEMPTION_NO_CHECK \
1c79356b
A
402 pushl %eax; \
403 pushl %ecx; \
404 pushl %edx; \
405 call EXT(_mp_enable_preemption_no_check); \
406 popl %edx; \
407 popl %ecx; \
408 popl %eax
1c79356b 409#else /* MACH_ASSERT */
91447636
A
410#define DISABLE_PREEMPTION _DISABLE_PREEMPTION
411#define ENABLE_PREEMPTION _ENABLE_PREEMPTION
412#define ENABLE_PREEMPTION_NO_CHECK _ENABLE_PREEMPTION_NO_CHECK
413#define MP_DISABLE_PREEMPTION _DISABLE_PREEMPTION
414#define MP_ENABLE_PREEMPTION _ENABLE_PREEMPTION
415#define MP_ENABLE_PREEMPTION_NO_CHECK _ENABLE_PREEMPTION_NO_CHECK
1c79356b
A
416#endif /* MACH_ASSERT */
417
418#else /* MACH_RT */
91447636
A
419#define DISABLE_PREEMPTION
420#define ENABLE_PREEMPTION
421#define ENABLE_PREEMPTION_NO_CHECK
422#define MP_DISABLE_PREEMPTION
423#define MP_ENABLE_PREEMPTION
424#define MP_ENABLE_PREEMPTION_NO_CHECK
1c79356b
A
425#endif /* MACH_RT */
426
427#endif /* _I386AT_MP_H_ */
91447636
A
428
429#endif /* KERNEL_PRIVATE */