]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/mp.h
xnu-1699.32.7.tar.gz
[apple/xnu.git] / osfmk / i386 / mp.h
CommitLineData
1c79356b 1/*
593a1d5f 2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57/*
58 */
91447636 59#ifdef KERNEL_PRIVATE
1c79356b 60
593a1d5f
A
61#ifndef _I386_MP_H_
62#define _I386_MP_H_
1c79356b 63
91447636 64//#define MP_DEBUG 1
55e303ae 65
1c79356b 66#include <i386/apic.h>
55e303ae
A
67#include <i386/mp_events.h>
68
593a1d5f 69#define MAX_CPUS 32 /* (8*sizeof(long)) */
55e303ae
A
70
71#ifndef ASSEMBLER
593a1d5f 72#include <stdint.h>
91447636
A
73#include <sys/cdefs.h>
74#include <mach/boolean.h>
75#include <mach/kern_return.h>
0c530ab8 76#include <mach/i386/thread_status.h>
b0d623f7 77#include <mach/vm_types.h>
593a1d5f 78#include <kern/lock.h>
91447636
A
79
80__BEGIN_DECLS
81
82extern kern_return_t intel_startCPU(int slot_num);
593a1d5f 83extern kern_return_t intel_startCPU_fast(int slot_num);
91447636 84extern void i386_init_slave(void);
593a1d5f 85extern void i386_init_slave_fast(void);
91447636
A
86extern void smp_init(void);
87
88extern void cpu_interrupt(int cpu);
91447636
A
89__END_DECLS
90
91447636
A
91extern unsigned int real_ncpus; /* real number of cpus */
92extern unsigned int max_ncpus; /* max number of cpus */
1c79356b 93decl_simple_lock_data(extern,kdb_lock) /* kdb lock */
91447636
A
94
95__BEGIN_DECLS
96
97extern void console_init(void);
98extern void *console_cpu_alloc(boolean_t boot_cpu);
99extern void console_cpu_free(void *console_buf);
1c79356b
A
100
101extern int kdb_cpu; /* current cpu running kdb */
102extern int kdb_debug;
1c79356b 103extern int kdb_active[];
55e303ae
A
104
105extern volatile boolean_t mp_kdp_trap;
b0d623f7 106extern volatile boolean_t force_immediate_debugger_NMI;
935ed37a 107extern volatile boolean_t pmap_tlb_flush_timeout;
060df5ea 108extern volatile usimple_lock_t spinlock_timed_out;
6d2010ae 109extern volatile uint32_t spinlock_owner_cpu;
060df5ea 110
b0d623f7 111extern uint64_t LastDebuggerEntryAllowance;
2d21ac55 112
91447636
A
113extern void mp_kdp_enter(void);
114extern void mp_kdp_exit(void);
55e303ae 115
b0d623f7 116extern boolean_t mp_recent_debugger_activity(void);
0c530ab8
A
117#if MACH_KDB
118extern void mp_kdb_exit(void);
119#endif
120
55e303ae
A
121/*
122 * All cpu rendezvous:
123 */
0c530ab8
A
124extern void mp_rendezvous(
125 void (*setup_func)(void *),
126 void (*action_func)(void *),
127 void (*teardown_func)(void *),
128 void *arg);
129extern void mp_rendezvous_no_intrs(
130 void (*action_func)(void *),
131 void *arg);
132extern void mp_rendezvous_break_lock(void);
55e303ae 133
2d21ac55
A
134/*
135 * All cpu broadcast.
136 * Called from thread context, this blocks until all active cpus have
137 * run action_func:
138 */
139extern void mp_broadcast(
140 void (*action_func)(void *),
141 void *arg);
b0d623f7
A
142#if MACH_KDP
143typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu);
144
145extern long kdp_x86_xcpu_invoke(const uint16_t lcpu,
146 kdp_x86_xcpu_func_t func,
147 void *arg0, void *arg1);
148typedef enum {KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t;
149#endif
2d21ac55
A
150
typedef uint32_t cpu_t;
typedef uint32_t cpumask_t;

/*
 * Convert a logical cpu number into a single-bit cpu mask.
 * Returns 0 for cpu numbers outside the 32-bit mask range
 * (MAX_CPUS is 32, so only cpus 0..31 are representable).
 */
static inline cpumask_t
cpu_to_cpumask(cpu_t cpu)
{
	/*
	 * Shift an unsigned cpumask_t 1 rather than a plain int literal:
	 * (1 << 31) overflows signed int, which is undefined behavior.
	 */
	return (cpu < 32) ? ((cpumask_t) 1 << cpu) : 0;
}
158#define CPUMASK_ALL 0xffffffff
159#define CPUMASK_SELF cpu_to_cpumask(cpu_number())
160#define CPUMASK_OTHERS (CPUMASK_ALL & ~CPUMASK_SELF)
161
162/*
163 * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
164 * The mask may include the local cpu.
165 * If the mode is:
6d2010ae
A
166 * - ASYNC: other cpus make their calls in parallel
167 * - SYNC: the calls are performed serially in logical cpu order
168 * - NOSYNC: the calls are queued
169 * Unless the mode is NOSYNC, mp_cpus_call() returns when the function has been
170 * called on all specified cpus.
171 * The return value is the number of cpus where the call was made or queued.
2d21ac55
A
172 * The action function is called with interrupts disabled.
173 */
174extern cpu_t mp_cpus_call(
175 cpumask_t cpus,
176 mp_sync_t mode,
177 void (*action_func)(void *),
178 void *arg);
6d2010ae
A
179extern cpu_t mp_cpus_call1(
180 cpumask_t cpus,
181 mp_sync_t mode,
182 void (*action_func)(void *, void*),
183 void *arg0,
184 void *arg1,
185 cpumask_t *cpus_calledp,
186 cpumask_t *cpus_notcalledp);
2d21ac55 187
c910b4d9
A
188/*
189 * Power-management-specific SPI to:
190 * - register a callout function, and
191 * - request the callout (if registered) on a given cpu.
192 */
193extern void PM_interrupt_register(void (*fn)(void));
194extern void cpu_PM_interrupt(int cpu);
195
91447636
A
196__END_DECLS
197
55e303ae
A
#if	MP_DEBUG
/* Record of a single cross-cpu signal: TSC timestamp, cpu number, event. */
typedef struct {
	uint64_t	time;
	int		cpu;
	mp_event_t	event;
} cpu_signal_event_t;

/*
 * Fixed-size circular log of signal events, with per-event-type
 * counters (one per mp_event_t, MP_LAST of them).
 */
#define	LOG_NENTRIES	100
typedef struct {
	uint64_t		count[MP_LAST];
	int			next_entry;	/* index of next slot to fill; wraps */
	cpu_signal_event_t	entry[LOG_NENTRIES];
} cpu_signal_event_log_t;

/* Per-cpu log pointers, allocated lazily by DBGLOG_CPU_INIT(). */
extern cpu_signal_event_log_t	*cpu_signal[];
extern cpu_signal_event_log_t	*cpu_handle[];

/*
 * Append an event to the calling cpu's log (the log array is indexed
 * by cpu_number(), not by _cpu) with interrupts disabled for the
 * duration.  _cpu is the cpu number recorded inside the event entry --
 * presumably the peer cpu being signalled/handled; confirm at call
 * sites.  Also bumps the per-event-type counter.  No inline comments
 * below: a // comment on a backslash-continued line would swallow the
 * spliced continuation.
 */
#define DBGLOG(log,_cpu,_event) {					\
	boolean_t	spl = ml_set_interrupts_enabled(FALSE);		\
	cpu_signal_event_log_t	*logp = log[cpu_number()];		\
	int		next = logp->next_entry;			\
	cpu_signal_event_t	*eventp = &logp->entry[next];		\
									\
	logp->count[_event]++;						\
									\
	eventp->time = rdtsc64();					\
	eventp->cpu	= _cpu;						\
	eventp->event	= _event;					\
	if (next == (LOG_NENTRIES - 1))					\
		logp->next_entry = 0;					\
	else								\
		logp->next_entry++;					\
									\
	(void) ml_set_interrupts_enabled(spl);				\
}

/*
 * Allocate (if not already present) and zero the signal and handler
 * event logs for 'cpu'.  Panics if kmem_alloc() fails.  The NULL
 * check makes re-initialization of a cpu reuse its existing buffers.
 */
#define DBGLOG_CPU_INIT(cpu)	{					\
	cpu_signal_event_log_t	**sig_logpp = &cpu_signal[cpu];		\
	cpu_signal_event_log_t	**hdl_logpp = &cpu_handle[cpu];		\
									\
	if (*sig_logpp == NULL &&					\
		kmem_alloc(kernel_map,					\
			(vm_offset_t *) sig_logpp,			\
			sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
		panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n");\
	bzero(*sig_logpp, sizeof(cpu_signal_event_log_t));		\
	if (*hdl_logpp == NULL &&					\
		kmem_alloc(kernel_map,					\
			(vm_offset_t *) hdl_logpp,			\
			sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
		panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
	bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t));		\
}
#else	/* MP_DEBUG */
/* Debug logging compiled out: the macros expand to nothing. */
#define DBGLOG(log,_cpu,_event)
#define DBGLOG_CPU_INIT(cpu)
#endif	/* MP_DEBUG */
255
1c79356b
A
256#endif /* ASSEMBLER */
257
b0d623f7
A
#ifdef	ASSEMBLER
#define i_bit(bit, word)	((long)(*(word)) & (1L << (bit)))
#else
/*
 * Test bit 'bit' of the long pointed to by 'word'.
 * Result is non-zero (the isolated bit value) iff the bit is set.
 */
__attribute__((always_inline)) static inline long
i_bit_impl(long word, long bit)
{
	return word & (1L << bit);
}
#define i_bit(bit, word)	i_bit_impl((long)(*(word)), bit)
#endif
1c79356b 268
#if	MACH_RT

#if defined(__i386__)

/*
 * Raw preemption-level primitives.  These expand to assembly text
 * (incl/decl on the per-cpu CPU_PREEMPTION_LEVEL reached via %gs),
 * so they are intended for use from assembly sources that include
 * this header.
 */
#define _DISABLE_PREEMPTION 					\
	incl	%gs:CPU_PREEMPTION_LEVEL

/*
 * Drop one preemption level; when the count reaches zero (ZF clear
 * path skipped via jne), call kernel_preempt_check() to take any
 * pending preemption.  i386: caller-saved registers are preserved
 * around the call.  Uses local label 9 as the skip target.
 */
#define _ENABLE_PREEMPTION 					\
	decl	%gs:CPU_PREEMPTION_LEVEL		;	\
	jne	9f					;	\
	pushl	%eax					;	\
	pushl	%ecx					;	\
	pushl	%edx					;	\
	call	EXT(kernel_preempt_check)		;	\
	popl	%edx					;	\
	popl	%ecx					;	\
	popl	%eax					;	\
9:

/* Drop one preemption level without checking for pending preemption. */
#define _ENABLE_PREEMPTION_NO_CHECK				\
	decl	%gs:CPU_PREEMPTION_LEVEL

#elif defined(__x86_64__)

#define _DISABLE_PREEMPTION 					\
	incl	%gs:CPU_PREEMPTION_LEVEL

/*
 * x86_64 variant: no manual register save/restore around the call
 * (presumably kernel_preempt_check preserves what its asm callers
 * need here -- confirm against the callee).
 */
#define _ENABLE_PREEMPTION 					\
	decl	%gs:CPU_PREEMPTION_LEVEL		;	\
	jne	9f					;	\
	call	EXT(kernel_preempt_check)		;	\
9:

#define _ENABLE_PREEMPTION_NO_CHECK				\
	decl	%gs:CPU_PREEMPTION_LEVEL

#else
#error Unsupported architecture
#endif

/* x86_64 just calls through to the other macro directly */
/*
 * With MACH_ASSERT on i386, route through the C functions
 * (_disable_preemption etc.) so assertion checks run; registers are
 * saved/restored around each call.  Otherwise the macros alias the
 * raw primitives above.
 */
#if	MACH_ASSERT && defined(__i386__)
#define DISABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_disable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define ENABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_enable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define ENABLE_PREEMPTION_NO_CHECK				\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_enable_preemption_no_check);		\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_DISABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_disable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_ENABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_enable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_ENABLE_PREEMPTION_NO_CHECK				\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_enable_preemption_no_check);		\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#else	/* MACH_ASSERT */
#define DISABLE_PREEMPTION		_DISABLE_PREEMPTION
#define ENABLE_PREEMPTION		_ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK	_ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION		_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION		_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_ASSERT */

#else	/* MACH_RT */
/* Non-MACH_RT builds: preemption control macros compile to nothing. */
#define DISABLE_PREEMPTION
#define ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
376
593a1d5f 377#endif /* _I386_MP_H_ */
91447636
A
378
379#endif /* KERNEL_PRIVATE */