/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */
#ifdef KERNEL_PRIVATE

#ifndef _I386_MP_H_
#define _I386_MP_H_

//#define MP_DEBUG 1

#include <i386/apic.h>
#include <i386/mp_events.h>

#define MAX_CPUS	32		/* (8*sizeof(long)) */

#ifndef ASSEMBLER
#include <stdint.h>
#include <sys/cdefs.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/i386/thread_status.h>
#include <mach/vm_types.h>
#include <kern/lock.h>

__BEGIN_DECLS

extern kern_return_t intel_startCPU(int slot_num);
extern kern_return_t intel_startCPU_fast(int slot_num);
extern void i386_init_slave(void);
extern void i386_init_slave_fast(void);
extern void smp_init(void);

extern void cpu_interrupt(int cpu);
__END_DECLS

extern unsigned int real_ncpus;		/* real number of cpus */
extern unsigned int max_ncpus;		/* max number of cpus */
decl_simple_lock_data(extern,kdb_lock)	/* kdb lock */

__BEGIN_DECLS

extern void console_init(void);
extern void *console_cpu_alloc(boolean_t boot_cpu);
extern void console_cpu_free(void *console_buf);

extern int kdb_cpu;		/* current cpu running kdb */
extern int kdb_debug;
extern int kdb_active[];

extern volatile boolean_t mp_kdp_trap;
extern volatile boolean_t force_immediate_debugger_NMI;
extern volatile boolean_t pmap_tlb_flush_timeout;
extern uint64_t LastDebuggerEntryAllowance;

extern void mp_kdp_enter(void);
extern void mp_kdp_exit(void);

extern boolean_t mp_recent_debugger_activity(void);
#if MACH_KDB
extern void mp_kdb_exit(void);
#endif
/*
 * All cpu rendezvous:
 */
extern void mp_rendezvous(
		void (*setup_func)(void *),
		void (*action_func)(void *),
		void (*teardown_func)(void *),
		void *arg);
extern void mp_rendezvous_no_intrs(
		void (*action_func)(void *),
		void *arg);
extern void mp_rendezvous_break_lock(void);
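
/*
 * A minimal usage sketch (hypothetical; the action, counter, and use of
 * hw_atomic_add() here are illustrative, not part of this interface):
 * run a short action once on every cpu with interrupts disabled.
 *
 *	static volatile uint32_t rendezvous_visits;
 *
 *	static void
 *	count_visit(void *arg)
 *	{
 *		(void) hw_atomic_add((volatile uint32_t *) arg, 1);	// once per cpu
 *	}
 *
 *	mp_rendezvous_no_intrs(count_visit, (void *) &rendezvous_visits);
 */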

/*
 * All cpu broadcast.
 * Called from thread context, this blocks until all active cpus have
 * run action_func:
 */
extern void mp_broadcast(
		void (*action_func)(void *),
		void *arg);
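
/*
 * A minimal usage sketch (hypothetical names): from thread context, sample
 * the TSC on all active cpus and block until every cpu has done so.
 *
 *	static uint64_t tsc_sample[MAX_CPUS];
 *
 *	static void
 *	sample_tsc(void *arg)
 *	{
 *		((uint64_t *) arg)[cpu_number()] = rdtsc64();
 *	}
 *
 *	mp_broadcast(sample_tsc, (void *) tsc_sample);
 */
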
#if MACH_KDP
typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu);

extern long kdp_x86_xcpu_invoke(const uint16_t lcpu,
				kdp_x86_xcpu_func_t func,
				void *arg0, void *arg1);
typedef enum {KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t;
#endif
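
/*
 * A usage sketch (hypothetical; debugger context only): invoke a function
 * on logical cpu 1 and collect its return value.
 *
 *	static long
 *	read_remote_cr3(void *arg0, void *arg1, uint16_t lcpu)
 *	{
 *		return (long) get_cr3();	// runs on the target cpu
 *	}
 *
 *	long cr3 = kdp_x86_xcpu_invoke(1, read_remote_cr3, NULL, NULL);
 */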

typedef uint32_t cpu_t;
typedef uint32_t cpumask_t;
static inline cpumask_t
cpu_to_cpumask(cpu_t cpu)
{
	return (cpu < 32) ? (1 << cpu) : 0;
}
#define CPUMASK_ALL	0xffffffff
#define CPUMASK_SELF	cpu_to_cpumask(cpu_number())
#define CPUMASK_OTHERS	(CPUMASK_ALL & ~CPUMASK_SELF)
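
/*
 * For example, a mask naming cpus 0 and 2, or every cpu but the caller's:
 *
 *	cpumask_t pair   = cpu_to_cpumask(0) | cpu_to_cpumask(2);
 *	cpumask_t others = CPUMASK_OTHERS;
 */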

/*
 * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
 * The mask may include the local cpu.
 * If the mode is:
 *	- ASYNC: other cpus make their calls in parallel.
 *	- SYNC:  the calls are performed serially in logical cpu order.
 * This call returns when the function has been run on all specified cpus.
 * The return value is the number of cpus on which the call was made.
 * The action function is called with interrupts disabled.
 */
extern cpu_t mp_cpus_call(
		cpumask_t	cpus,
		mp_sync_t	mode,
		void		(*action_func)(void *),
		void		*arg);
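
/*
 * A minimal usage sketch (hypothetical names): synchronously run an action
 * on every other cpu and note how many responded. The action runs with
 * interrupts disabled, so it must be short and must not block.
 *
 *	static volatile boolean_t ran[MAX_CPUS];
 *
 *	static void
 *	mark_ran(void *arg)
 *	{
 *		((volatile boolean_t *) arg)[cpu_number()] = TRUE;
 *	}
 *
 *	cpu_t n = mp_cpus_call(CPUMASK_OTHERS, SYNC, mark_ran, (void *) ran);
 */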

/*
 * Power-management-specific SPI to:
 * - register a callout function, and
 * - request the callout (if registered) on a given cpu.
 */
extern void PM_interrupt_register(void (*fn)(void));
extern void cpu_PM_interrupt(int cpu);
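
/*
 * A usage sketch (hypothetical callout): power management registers its
 * callout once, then directs it at a particular cpu as needed.
 *
 *	static void
 *	pm_callout(void)
 *	{
 *		// per-cpu power-state bookkeeping would go here
 *	}
 *
 *	PM_interrupt_register(pm_callout);
 *	cpu_PM_interrupt(1);		// request the callout on cpu 1
 */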


__END_DECLS

#if MP_DEBUG
typedef struct {
	uint64_t	time;
	int		cpu;
	mp_event_t	event;
} cpu_signal_event_t;

#define	LOG_NENTRIES	100
typedef struct {
	uint64_t		count[MP_LAST];
	int			next_entry;
	cpu_signal_event_t	entry[LOG_NENTRIES];
} cpu_signal_event_log_t;

extern cpu_signal_event_log_t	*cpu_signal[];
extern cpu_signal_event_log_t	*cpu_handle[];

/*
 * Append one event to the current cpu's ring buffer in the given log,
 * with interrupts briefly disabled to keep the entry consistent.
 */
#define DBGLOG(log,_cpu,_event) {					\
	boolean_t		spl = ml_set_interrupts_enabled(FALSE);	\
	cpu_signal_event_log_t	*logp = log[cpu_number()];		\
	int			next = logp->next_entry;		\
	cpu_signal_event_t	*eventp = &logp->entry[next];		\
									\
	logp->count[_event]++;						\
									\
	eventp->time = rdtsc64();					\
	eventp->cpu = _cpu;						\
	eventp->event = _event;						\
	if (next == (LOG_NENTRIES - 1))					\
		logp->next_entry = 0;					\
	else								\
		logp->next_entry++;					\
									\
	(void) ml_set_interrupts_enabled(spl);				\
}

/* Allocate and zero a cpu's signal and handler logs on first use */
#define DBGLOG_CPU_INIT(cpu)	{					\
	cpu_signal_event_log_t	**sig_logpp = &cpu_signal[cpu];		\
	cpu_signal_event_log_t	**hdl_logpp = &cpu_handle[cpu];		\
									\
	if (*sig_logpp == NULL &&					\
	    kmem_alloc(kernel_map,					\
		       (vm_offset_t *) sig_logpp,			\
		       sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)	\
		panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n");\
	bzero(*sig_logpp, sizeof(cpu_signal_event_log_t));		\
	if (*hdl_logpp == NULL &&					\
	    kmem_alloc(kernel_map,					\
		       (vm_offset_t *) hdl_logpp,			\
		       sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)	\
		panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
	bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t));		\
}
#else	/* MP_DEBUG */
#define DBGLOG(log,_cpu,_event)
#define DBGLOG_CPU_INIT(cpu)
#endif	/* MP_DEBUG */
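
/*
 * A usage sketch (hypothetical; MP_DEBUG builds only, and target_cpu is
 * illustrative): the signalling path records each event it sends, and the
 * handler records each one it fields.
 *
 *	DBGLOG_CPU_INIT(cpu_number());			// once per cpu at startup
 *	DBGLOG(cpu_signal, target_cpu, MP_TLB_FLUSH);	// sender side
 *	DBGLOG(cpu_handle, cpu_number(), MP_TLB_FLUSH);	// handler side
 */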

#endif	/* ASSEMBLER */

#ifdef ASSEMBLER
#define i_bit(bit, word)	((long)(*(word)) & (1L << (bit)))
#else
// Workaround for 6640051
static inline long
i_bit_impl(long word, long bit) {
	return word & 1L << bit;
}
#define i_bit(bit, word)	i_bit_impl((long)(*(word)), bit)
#endif
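
/*
 * For example (illustrative variable only), testing whether an event bit
 * is set in a signal word:
 *
 *	volatile long signals = (1L << MP_KDP);
 *	if (i_bit(MP_KDP, &signals)) {
 *		// the MP_KDP event is pending
 *	}
 */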


/*
 * Device driver synchronization.
 *
 * at386_io_lock(op) and at386_io_unlock() are called by device drivers
 * when accessing H/W.  The underlying processing is machine-dependent,
 * but the op argument to at386_io_lock is generic:
 */

#define MP_DEV_OP_MAX	  4
#define MP_DEV_WAIT	  MP_DEV_OP_MAX	/* Wait for the lock */

/*
 * If the caller specifies an op value other than MP_DEV_WAIT,
 * at386_io_lock must return TRUE if the lock was obtained, and
 * FALSE otherwise.
 */

#define MP_DEV_OP_START	0	/* If lock busy, register a pending start op */
#define MP_DEV_OP_INTR	1	/* If lock busy, register a pending intr */
#define MP_DEV_OP_TIMEO	2	/* If lock busy, register a pending timeout */
#define MP_DEV_OP_CALLB	3	/* If lock busy, register a pending callback */
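
/*
 * A calling-convention sketch (hypothetical; at386_io_lock() and
 * at386_io_unlock() are declared elsewhere):
 *
 *	if (at386_io_lock(MP_DEV_OP_START)) {	// TRUE: lock obtained
 *		// ... touch the hardware ...
 *		at386_io_unlock();
 *	}
 *	// FALSE: a start op was registered to run when the lock frees
 */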

#if	MACH_RT

#if defined(__i386__)

#define _DISABLE_PREEMPTION					\
	incl	%gs:CPU_PREEMPTION_LEVEL

#define _ENABLE_PREEMPTION					\
	decl	%gs:CPU_PREEMPTION_LEVEL		;	\
	jne	9f					;	\
	pushl	%eax					;	\
	pushl	%ecx					;	\
	pushl	%edx					;	\
	call	EXT(kernel_preempt_check)		;	\
	popl	%edx					;	\
	popl	%ecx					;	\
	popl	%eax					;	\
9:

#define _ENABLE_PREEMPTION_NO_CHECK				\
	decl	%gs:CPU_PREEMPTION_LEVEL

#elif defined(__x86_64__)

#define _DISABLE_PREEMPTION					\
	incl	%gs:CPU_PREEMPTION_LEVEL

#define _ENABLE_PREEMPTION					\
	decl	%gs:CPU_PREEMPTION_LEVEL		;	\
	jne	9f					;	\
	call	EXT(kernel_preempt_check)		;	\
9:

#define _ENABLE_PREEMPTION_NO_CHECK				\
	decl	%gs:CPU_PREEMPTION_LEVEL

#else
#error Unsupported architecture
#endif

/* x86_64 just calls through to the other macro directly */
#if	MACH_ASSERT && defined(__i386__)
#define DISABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_disable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define ENABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_enable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define ENABLE_PREEMPTION_NO_CHECK				\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_enable_preemption_no_check);		\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_DISABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_disable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_ENABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_enable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_ENABLE_PREEMPTION_NO_CHECK				\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_enable_preemption_no_check);		\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#else	/* MACH_ASSERT */
#define DISABLE_PREEMPTION		_DISABLE_PREEMPTION
#define ENABLE_PREEMPTION		_ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK	_ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION		_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION		_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_ASSERT */

#else	/* MACH_RT */
#define DISABLE_PREEMPTION
#define ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */

#endif	/* _I386_MP_H_ */

#endif	/* KERNEL_PRIVATE */