/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */
#ifdef KERNEL_PRIVATE

#ifndef _I386_MP_H_
#define _I386_MP_H_

//#define MP_DEBUG 1

#include <i386/apic.h>
#include <i386/mp_events.h>

#define MAX_CPUS	32		/* (8*sizeof(long)) */

#ifndef ASSEMBLER
#include <stdint.h>
#include <sys/cdefs.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/i386/thread_status.h>
#include <mach/vm_types.h>
#include <kern/lock.h>

__BEGIN_DECLS

extern kern_return_t intel_startCPU(int slot_num);
extern kern_return_t intel_startCPU_fast(int slot_num);
extern void i386_init_slave(void);
extern void i386_init_slave_fast(void);
extern void smp_init(void);

extern void cpu_interrupt(int cpu);
__END_DECLS

extern unsigned int real_ncpus;		/* real number of cpus */
extern unsigned int max_ncpus;		/* max number of cpus */
decl_simple_lock_data(extern,kdb_lock)	/* kdb lock */

__BEGIN_DECLS

extern void console_init(void);
extern void *console_cpu_alloc(boolean_t boot_cpu);
extern void console_cpu_free(void *console_buf);

extern int kdb_cpu;			/* current cpu running kdb */
extern int kdb_debug;
extern int kdb_active[];

extern volatile boolean_t mp_kdp_trap;
extern volatile boolean_t force_immediate_debugger_NMI;
extern volatile boolean_t pmap_tlb_flush_timeout;
extern uint64_t LastDebuggerEntryAllowance;

extern void mp_kdp_enter(void);
extern void mp_kdp_exit(void);

extern boolean_t mp_recent_debugger_activity(void);
#if MACH_KDB
extern void mp_kdb_exit(void);
#endif

/*
 * All cpu rendezvous:
 */
extern void mp_rendezvous(
		void (*setup_func)(void *),
		void (*action_func)(void *),
		void (*teardown_func)(void *),
		void *arg);
extern void mp_rendezvous_no_intrs(
		void (*action_func)(void *),
		void *arg);
extern void mp_rendezvous_break_lock(void);

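/*
 * Usage sketch (illustrative only; the handler name is hypothetical).
 * The action function runs on every cpu with interrupts disabled:
 *
 *	static void
 *	my_action(void *arg)
 *	{
 *		...			// per-cpu work
 *	}
 *
 *	mp_rendezvous_no_intrs(my_action, NULL);
 */
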
/*
 * All cpu broadcast.
 * Called from thread context, this blocks until all active cpus have
 * run action_func:
 */
extern void mp_broadcast(
		void (*action_func)(void *),
		void *arg);
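
/*
 * Usage sketch (illustrative; my_action and my_arg are hypothetical):
 *
 *	mp_broadcast(my_action, &my_arg);	// returns only after every
 *						// active cpu has run my_action
 */
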
#if MACH_KDP
typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu);

extern long kdp_x86_xcpu_invoke(const uint16_t lcpu,
                                kdp_x86_xcpu_func_t func,
                                void *arg0, void *arg1);
typedef enum {KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t;
#endif

typedef uint32_t cpu_t;
typedef uint32_t cpumask_t;
static inline cpumask_t
cpu_to_cpumask(cpu_t cpu)
{
	return (cpu < 32) ? (1 << cpu) : 0;
}
#define CPUMASK_ALL	0xffffffff
#define CPUMASK_SELF	cpu_to_cpumask(cpu_number())
#define CPUMASK_OTHERS	(CPUMASK_ALL & ~CPUMASK_SELF)

/*
 * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
 * The mask may include the local cpu.
 * If the mode is:
 *	- ASYNC:  other cpus make their calls in parallel.
 *	- SYNC:   the calls are performed serially in logical cpu order.
 * This call returns when the function has been run on all specified cpus.
 * The return value is the number of cpus on which the call was made.
 * The action function is called with interrupts disabled.
 */
extern cpu_t mp_cpus_call(
		cpumask_t	cpus,
		mp_sync_t	mode,
		void		(*action_func)(void *),
		void		*arg);

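/*
 * Usage sketch (illustrative; the handler name is hypothetical):
 *
 *	cpu_t	n;
 *
 *	// Run my_action serially on every cpu except the caller's;
 *	// n is the number of cpus on which the call was made.
 *	n = mp_cpus_call(CPUMASK_OTHERS, SYNC, my_action, NULL);
 */
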
/*
 * Power-management-specific SPI to:
 * - register a callout function, and
 * - request the callout (if registered) on a given cpu.
 */
extern void PM_interrupt_register(void (*fn)(void));
extern void cpu_PM_interrupt(int cpu);

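/*
 * Usage sketch (illustrative; my_pm_callout is hypothetical):
 *
 *	PM_interrupt_register(my_pm_callout);
 *	...
 *	cpu_PM_interrupt(cpu);	// request my_pm_callout on the given cpu
 */
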
__END_DECLS

#if MP_DEBUG
typedef struct {
	uint64_t	time;
	int		cpu;
	mp_event_t	event;
} cpu_signal_event_t;

#define LOG_NENTRIES	100
typedef struct {
	uint64_t		count[MP_LAST];
	int			next_entry;
	cpu_signal_event_t	entry[LOG_NENTRIES];
} cpu_signal_event_log_t;

extern cpu_signal_event_log_t	*cpu_signal[];
extern cpu_signal_event_log_t	*cpu_handle[];

#define DBGLOG(log,_cpu,_event) {					\
	boolean_t		spl = ml_set_interrupts_enabled(FALSE);	\
	cpu_signal_event_log_t	*logp = log[cpu_number()];		\
	int			next = logp->next_entry;		\
	cpu_signal_event_t	*eventp = &logp->entry[next];		\
									\
	logp->count[_event]++;						\
									\
	eventp->time = rdtsc64();					\
	eventp->cpu = _cpu;						\
	eventp->event = _event;						\
	if (next == (LOG_NENTRIES - 1))					\
		logp->next_entry = 0;					\
	else								\
		logp->next_entry++;					\
									\
	(void) ml_set_interrupts_enabled(spl);				\
}

#define DBGLOG_CPU_INIT(cpu) {						\
	cpu_signal_event_log_t	**sig_logpp = &cpu_signal[cpu];		\
	cpu_signal_event_log_t	**hdl_logpp = &cpu_handle[cpu];		\
									\
	if (*sig_logpp == NULL &&					\
		kmem_alloc(kernel_map,					\
			(vm_offset_t *) sig_logpp,			\
			sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
		panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n");\
	bzero(*sig_logpp, sizeof(cpu_signal_event_log_t));		\
	if (*hdl_logpp == NULL &&					\
		kmem_alloc(kernel_map,					\
			(vm_offset_t *) hdl_logpp,			\
			sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
		panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
	bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t));		\
}
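
/*
 * Usage sketch (illustrative): DBGLOG_CPU_INIT() is intended to run once
 * per cpu (e.g. at bring-up); DBGLOG() then records events into the
 * per-cpu logs:
 *
 *	DBGLOG(cpu_signal, target_cpu, event);	// when sending an event
 *	DBGLOG(cpu_handle, this_cpu, event);	// when handling an event
 */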
#else	/* MP_DEBUG */
#define DBGLOG(log,_cpu,_event)
#define DBGLOG_CPU_INIT(cpu)
#endif	/* MP_DEBUG */

#endif	/* ASSEMBLER */

#ifdef ASSEMBLER
#define i_bit(bit, word)	((long)(*(word)) & (1L << (bit)))
#else
// Workaround for 6640051
static inline long
i_bit_impl(long word, long bit) {
	return word & 1L << bit;
}
#define i_bit(bit, word)	i_bit_impl((long)(*(word)), bit)
#endif
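
/*
 * For example, i_bit(3, &word) is non-zero exactly when bit 3 of the
 * long value that word points to is set.
 */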


/*
 *	Device driver synchronization.
 *
 *	at386_io_lock(op) and at386_io_unlock() are called
 *	by device drivers when accessing H/W. The underlying
 *	processing is machine-dependent, but the op argument
 *	to at386_io_lock is generic.
 */

#define MP_DEV_OP_MAX	4
#define MP_DEV_WAIT	MP_DEV_OP_MAX	/* Wait for the lock */

/*
 * If the caller specifies an op value different from MP_DEV_WAIT,
 * at386_io_lock must return true if the lock was acquired, else false.
 */

#define MP_DEV_OP_START	0	/* If lock busy, register a pending start op */
#define MP_DEV_OP_INTR	1	/* If lock busy, register a pending intr */
#define MP_DEV_OP_TIMEO	2	/* If lock busy, register a pending timeout */
#define MP_DEV_OP_CALLB	3	/* If lock busy, register a pending callback */

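/*
 * Usage sketch (illustrative; at386_io_lock()/at386_io_unlock() are
 * machine-dependent and declared elsewhere):
 *
 *	at386_io_lock(MP_DEV_WAIT);		// block until the lock is held
 *	...					// access the hardware
 *	at386_io_unlock();
 *
 *	if (at386_io_lock(MP_DEV_OP_INTR)) {	// non-blocking: true only if
 *		...				// the lock was acquired
 *		at386_io_unlock();
 *	}					// else a pending intr is registered
 */
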
#if	MACH_RT

#if defined(__i386__)

#define _DISABLE_PREEMPTION					\
	incl	%gs:CPU_PREEMPTION_LEVEL

#define _ENABLE_PREEMPTION					\
	decl	%gs:CPU_PREEMPTION_LEVEL	;		\
	jne	9f				;		\
	pushl	%eax				;		\
	pushl	%ecx				;		\
	pushl	%edx				;		\
	call	EXT(kernel_preempt_check)	;		\
	popl	%edx				;		\
	popl	%ecx				;		\
	popl	%eax				;		\
9:

#define _ENABLE_PREEMPTION_NO_CHECK				\
	decl	%gs:CPU_PREEMPTION_LEVEL

#elif defined(__x86_64__)

#define _DISABLE_PREEMPTION					\
	incl	%gs:CPU_PREEMPTION_LEVEL

#define _ENABLE_PREEMPTION					\
	decl	%gs:CPU_PREEMPTION_LEVEL	;		\
	jne	9f				;		\
	call	EXT(kernel_preempt_check)	;		\
9:

#define _ENABLE_PREEMPTION_NO_CHECK				\
	decl	%gs:CPU_PREEMPTION_LEVEL

#else
#error Unsupported architecture
#endif

/* x86_64 just calls through to the other macro directly */
#if	MACH_ASSERT && defined(__i386__)
#define DISABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_disable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define ENABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_enable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define ENABLE_PREEMPTION_NO_CHECK				\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_enable_preemption_no_check);		\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_DISABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_disable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_ENABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_enable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_ENABLE_PREEMPTION_NO_CHECK				\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_enable_preemption_no_check);		\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#else	/* MACH_ASSERT */
#define DISABLE_PREEMPTION		_DISABLE_PREEMPTION
#define ENABLE_PREEMPTION		_ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK	_ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION		_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION		_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_ASSERT */

#else	/* MACH_RT */
#define DISABLE_PREEMPTION
#define ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */

#endif	/* _I386_MP_H_ */

#endif	/* KERNEL_PRIVATE */