/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */
#ifdef KERNEL_PRIVATE

#ifndef _I386_MP_H_
#define _I386_MP_H_

//#define MP_DEBUG 1

#include <i386/apic.h>
#include <i386/mp_events.h>

#define MAX_CPUS        32              /* (8*sizeof(long)) */

#ifndef ASSEMBLER
#include <stdint.h>
#include <sys/cdefs.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/i386/thread_status.h>
#include <mach/vm_types.h>
#include <kern/lock.h>

__BEGIN_DECLS

extern kern_return_t intel_startCPU(int slot_num);
extern kern_return_t intel_startCPU_fast(int slot_num);
extern void i386_init_slave(void);
extern void i386_init_slave_fast(void);
extern void smp_init(void);

extern void cpu_interrupt(int cpu);
__END_DECLS

extern unsigned int real_ncpus;         /* real number of cpus */
extern unsigned int max_ncpus;          /* max number of cpus */
decl_simple_lock_data(extern,kdb_lock)  /* kdb lock */

__BEGIN_DECLS

extern void console_init(void);
extern void *console_cpu_alloc(boolean_t boot_cpu);
extern void console_cpu_free(void *console_buf);

extern int kdb_cpu;                     /* current cpu running kdb */
extern int kdb_debug;
extern int kdb_active[];

extern volatile boolean_t mp_kdp_trap;
extern volatile boolean_t force_immediate_debugger_NMI;
extern volatile boolean_t pmap_tlb_flush_timeout;
extern volatile usimple_lock_t spinlock_timed_out;
extern volatile uint32_t spinlock_owner_cpu;

extern uint64_t LastDebuggerEntryAllowance;

extern void mp_kdp_enter(void);
extern void mp_kdp_exit(void);

extern boolean_t mp_recent_debugger_activity(void);

/*
 * All cpu rendezvous:
 */
extern void mp_rendezvous(
                void (*setup_func)(void *),
                void (*action_func)(void *),
                void (*teardown_func)(void *),
                void *arg);
extern void mp_rendezvous_no_intrs(
                void (*action_func)(void *),
                void *arg);
extern void mp_rendezvous_break_lock(void);
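
/*
 * Usage sketch (illustrative only; 'flush_caches_action' is a hypothetical
 * caller-supplied function, not part of this interface).
 * mp_rendezvous_no_intrs() brings all cpus to a rendezvous and runs
 * action_func on each of them with interrupts disabled:
 *
 *      static void
 *      flush_caches_action(void *arg __unused)
 *      {
 *              __asm__ volatile("wbinvd");     // per-cpu work while rendezvoused
 *      }
 *
 *      mp_rendezvous_no_intrs(flush_caches_action, NULL);
 */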

/*
 * All cpu broadcast.
 * Called from thread context, this blocks until all active cpus have
 * run action_func:
 */
extern void mp_broadcast(
                void (*action_func)(void *),
                void *arg);
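
/*
 * Usage sketch (illustrative only; 'count_in_action' and 'cpus_done' are
 * hypothetical).  Per the comment above, the caller blocks in thread
 * context until every active cpu has run the action:
 *
 *      static volatile uint32_t cpus_done;
 *
 *      static void
 *      count_in_action(void *arg __unused)
 *      {
 *              (void) __sync_fetch_and_add(&cpus_done, 1);     // atomic: runs concurrently
 *      }
 *
 *      cpus_done = 0;
 *      mp_broadcast(count_in_action, NULL);
 *      // on return, cpus_done equals the number of active cpus
 */
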
#if MACH_KDP
typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu);

extern long kdp_x86_xcpu_invoke(const uint16_t lcpu,
                                kdp_x86_xcpu_func_t func,
                                void *arg0, void *arg1);
typedef enum {KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t;
#endif
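
/*
 * Debugger-context sketch (illustrative only; 'ping_func' is a hypothetical
 * function).  kdp_x86_xcpu_invoke() invokes func(arg0, arg1, lcpu) on the
 * named logical cpu and returns its long result to the KDP caller:
 *
 *      static long
 *      ping_func(void *arg0 __unused, void *arg1 __unused, uint16_t lcpu)
 *      {
 *              return (long) lcpu;             // echo the cpu that ran us
 *      }
 *
 *      long answer = kdp_x86_xcpu_invoke(2, ping_func, NULL, NULL);
 */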

typedef uint32_t cpu_t;
typedef uint32_t cpumask_t;
static inline cpumask_t
cpu_to_cpumask(cpu_t cpu)
{
        return (cpu < 32) ? (1 << cpu) : 0;
}
#define CPUMASK_ALL     0xffffffff
#define CPUMASK_SELF    cpu_to_cpumask(cpu_number())
#define CPUMASK_OTHERS  (CPUMASK_ALL & ~CPUMASK_SELF)
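
/*
 * A cpumask_t holds one bit per logical cpu (bit n for cpu n), consistent
 * with MAX_CPUS of 32 above; cpu_to_cpumask() yields 0 for out-of-range
 * cpus.  Illustrative mask construction (the cpu numbers are hypothetical):
 *
 *      cpumask_t mask = cpu_to_cpumask(0) | cpu_to_cpumask(2);   // cpus 0 and 2
 *      cpumask_t rest = CPUMASK_ALL & ~mask;                     // everyone else
 */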

/*
 * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
 * The mask may include the local cpu.
 * If the mode is:
 *   - ASYNC:  other cpus make their calls in parallel
 *   - SYNC:   the calls are performed serially in logical cpu order
 *   - NOSYNC: the calls are queued
 * Unless the mode is NOSYNC, mp_cpus_call() returns when the function has been
 * called on all specified cpus.
 * The return value is the number of cpus where the call was made or queued.
 * The action function is called with interrupts disabled.
 */
extern cpu_t mp_cpus_call(
                cpumask_t       cpus,
                mp_sync_t       mode,
                void            (*action_func)(void *),
                void            *arg);
extern cpu_t mp_cpus_call1(
                cpumask_t       cpus,
                mp_sync_t       mode,
                void            (*action_func)(void *, void *),
                void            *arg0,
                void            *arg1,
                cpumask_t       *cpus_calledp,
                cpumask_t       *cpus_notcalledp);
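
/*
 * Usage sketch (illustrative only; 'serialize_action' is a hypothetical
 * caller-supplied function).  Per the comment above, the action runs with
 * interrupts disabled and the return value counts the cpus called or queued:
 *
 *      static void
 *      serialize_action(void *arg __unused)
 *      {
 *              __asm__ volatile("mfence" ::: "memory");    // per-cpu work
 *      }
 *
 *      cpu_t n = mp_cpus_call(CPUMASK_OTHERS, SYNC, serialize_action, NULL);
 *      // n is the number of cpus on which the call was made
 */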

/*
 * Power-management-specific SPI to:
 *  - register a callout function, and
 *  - request the callout (if registered) on a given cpu.
 */
extern void PM_interrupt_register(void (*fn)(void));
extern void cpu_PM_interrupt(int cpu);
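
/*
 * Usage sketch (illustrative only; 'my_pm_callout' is a hypothetical
 * power-management handler).  The callout is registered once and later
 * requested on a particular cpu:
 *
 *      static void
 *      my_pm_callout(void)
 *      {
 *              // per-cpu power-management handling goes here
 *      }
 *
 *      PM_interrupt_register(my_pm_callout);
 *      cpu_PM_interrupt(1);            // request the callout on cpu 1
 */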

__END_DECLS

#if MP_DEBUG
typedef struct {
        uint64_t                time;
        int                     cpu;
        mp_event_t              event;
} cpu_signal_event_t;

#define LOG_NENTRIES    100
typedef struct {
        uint64_t                count[MP_LAST];
        int                     next_entry;
        cpu_signal_event_t      entry[LOG_NENTRIES];
} cpu_signal_event_log_t;

extern cpu_signal_event_log_t   *cpu_signal[];
extern cpu_signal_event_log_t   *cpu_handle[];

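/*
 * DBGLOG() appends one entry to the per-cpu event log selected by 'log'
 * (one of cpu_signal[] or cpu_handle[] above): with interrupts disabled it
 * bumps the per-event count and records the TSC timestamp, cpu and event
 * in a LOG_NENTRIES-deep circular buffer.  DBGLOG_CPU_INIT() lazily
 * allocates and zeroes both logs for a cpu.
 */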
#define DBGLOG(log,_cpu,_event) {                                       \
        boolean_t               spl = ml_set_interrupts_enabled(FALSE); \
        cpu_signal_event_log_t  *logp = log[cpu_number()];              \
        int                     next = logp->next_entry;                \
        cpu_signal_event_t      *eventp = &logp->entry[next];           \
                                                                        \
        logp->count[_event]++;                                          \
                                                                        \
        eventp->time = rdtsc64();                                       \
        eventp->cpu = _cpu;                                             \
        eventp->event = _event;                                         \
        if (next == (LOG_NENTRIES - 1))                                 \
                logp->next_entry = 0;                                   \
        else                                                            \
                logp->next_entry++;                                     \
                                                                        \
        (void) ml_set_interrupts_enabled(spl);                          \
}

#define DBGLOG_CPU_INIT(cpu) {                                          \
        cpu_signal_event_log_t  **sig_logpp = &cpu_signal[cpu];         \
        cpu_signal_event_log_t  **hdl_logpp = &cpu_handle[cpu];         \
                                                                        \
        if (*sig_logpp == NULL &&                                       \
            kmem_alloc(kernel_map,                                      \
                       (vm_offset_t *) sig_logpp,                       \
                       sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS) \
                panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n");\
        bzero(*sig_logpp, sizeof(cpu_signal_event_log_t));              \
        if (*hdl_logpp == NULL &&                                       \
            kmem_alloc(kernel_map,                                      \
                       (vm_offset_t *) hdl_logpp,                       \
                       sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS) \
                panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
        bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t));              \
}
#else   /* MP_DEBUG */
#define DBGLOG(log,_cpu,_event)
#define DBGLOG_CPU_INIT(cpu)
#endif  /* MP_DEBUG */

#endif  /* ASSEMBLER */

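/*
 * i_bit(bit, word) tests bit 'bit' of the value pointed to by 'word',
 * returning non-zero if it is set.  The assembler and C definitions below
 * are equivalent; the C version routes through an always-inlined helper.
 */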
#ifdef ASSEMBLER
#define i_bit(bit, word)        ((long)(*(word)) & (1L << (bit)))
#else
__attribute__((always_inline)) static inline long
i_bit_impl(long word, long bit) {
        long bitmask = 1L << bit;
        return word & bitmask;
}
#define i_bit(bit, word)        i_bit_impl((long)(*(word)), bit)
#endif

#if MACH_RT

#if defined(__x86_64__)

#define _DISABLE_PREEMPTION                                     \
        incl    %gs:CPU_PREEMPTION_LEVEL

#define _ENABLE_PREEMPTION                                      \
        decl    %gs:CPU_PREEMPTION_LEVEL                ;       \
        jne     9f                                      ;       \
        call    EXT(kernel_preempt_check)               ;       \
9:

#define _ENABLE_PREEMPTION_NO_CHECK                             \
        decl    %gs:CPU_PREEMPTION_LEVEL

#else
#error Unsupported architecture
#endif

/* x86_64 just calls through to the other macro directly */
#define DISABLE_PREEMPTION              _DISABLE_PREEMPTION
#define ENABLE_PREEMPTION               _ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK      _ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION           _DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION            _ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK   _ENABLE_PREEMPTION_NO_CHECK

#else   /* MACH_RT */
#define DISABLE_PREEMPTION
#define ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK
#endif  /* MACH_RT */

#endif /* _I386_MP_H_ */

#endif /* KERNEL_PRIVATE */