/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */
#ifdef KERNEL_PRIVATE

#ifndef _I386_MP_H_
#define _I386_MP_H_

//#define MP_DEBUG 1

#include <i386/apic.h>
#include <i386/mp_events.h>
#include <machine/limits.h>

#define MAX_CPUS        64      /* 8 * sizeof(cpumask_t) */

#ifndef ASSEMBLER
#include <stdint.h>
#include <sys/cdefs.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/i386/thread_status.h>
#include <mach/vm_types.h>
#include <kern/simple_lock.h>
#include <kern/assert.h>

__BEGIN_DECLS

extern kern_return_t intel_startCPU(int slot_num);
extern kern_return_t intel_startCPU_fast(int slot_num);
extern void i386_init_slave(void) __dead2;
extern void i386_init_slave_fast(void) __dead2;
extern void smp_init(void);

extern void cpu_interrupt(int cpu);
__END_DECLS

extern unsigned int real_ncpus;                 /* real number of cpus */
extern unsigned int max_ncpus;                  /* max number of cpus */
extern unsigned int max_cpus_from_firmware;     /* actual max cpus, from firmware (ACPI) */
decl_simple_lock_data(extern, kdb_lock);        /* kdb lock */

__BEGIN_DECLS

extern void console_init(void);
extern void *console_cpu_alloc(boolean_t boot_cpu);
extern void console_cpu_free(void *console_buf);

extern int kdb_cpu;             /* current cpu running kdb */
extern int kdb_debug;
extern int kdb_active[];

extern volatile boolean_t mp_kdp_trap;
extern volatile boolean_t mp_kdp_is_NMI;
extern volatile boolean_t force_immediate_debugger_NMI;
extern volatile boolean_t pmap_tlb_flush_timeout;
extern volatile usimple_lock_t spinlock_timed_out;
extern volatile uint32_t spinlock_owner_cpu;
extern uint32_t spinlock_timeout_NMI(uintptr_t thread_addr);

extern uint64_t LastDebuggerEntryAllowance;

extern void mp_kdp_enter(boolean_t proceed_on_failure);
extern void mp_kdp_exit(void);
extern boolean_t mp_kdp_all_cpus_halted(void);

extern boolean_t mp_recent_debugger_activity(void);
extern void kernel_spin(uint64_t spin_ns);

/*
 * All cpu rendezvous:
 */
extern void mp_rendezvous(
	void            (*setup_func)(void *),
	void            (*action_func)(void *),
	void            (*teardown_func)(void *),
	void            *arg);
extern void mp_rendezvous_no_intrs(
	void            (*action_func)(void *),
	void            *arg);
extern void mp_rendezvous_break_lock(void);
extern void mp_rendezvous_lock(void);
extern void mp_rendezvous_unlock(void);
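
/*
 * Illustrative usage sketch (not part of this interface): a short action can
 * be run on every CPU with interrupts disabled via mp_rendezvous_no_intrs().
 * The callback and counter names below are hypothetical.
 *
 *	static volatile long rendezvous_hits;
 *
 *	static void
 *	count_cpu(void *arg)
 *	{
 *		(void) arg;
 *		__sync_fetch_and_add(&rendezvous_hits, 1);	// once per CPU
 *	}
 *
 *	// Blocks until every active CPU has executed count_cpu().
 *	mp_rendezvous_no_intrs(count_cpu, NULL);
 */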

/*
 * All cpu broadcast.
 * Called from thread context, this blocks until all active cpus have
 * run action_func:
 */
extern void mp_broadcast(
	void            (*action_func)(void *),
	void            *arg);
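
/*
 * Illustrative usage sketch: mp_broadcast() is called from thread context and
 * blocks until all active cpus have run the action function.  The callback
 * name below is hypothetical.
 *
 *	static void
 *	touch_per_cpu_state(void *arg)
 *	{
 *		(void) arg;	// per-cpu work goes here
 *	}
 *
 *	mp_broadcast(touch_per_cpu_state, NULL);
 */
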
#if MACH_KDP
typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu);

extern long kdp_x86_xcpu_invoke(const uint16_t lcpu,
    kdp_x86_xcpu_func_t func,
    void *arg0, void *arg1);
typedef enum {KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t;
#endif

typedef uint32_t cpu_t;
typedef volatile uint64_t cpumask_t;

static_assert(sizeof(cpumask_t) * CHAR_BIT >= MAX_CPUS, "cpumask_t bitvector is too small for current MAX_CPUS value");

static inline cpumask_t
cpu_to_cpumask(cpu_t cpu)
{
	return (cpu < MAX_CPUS) ? (1ULL << cpu) : 0;
}
#define CPUMASK_ALL     0xffffffffffffffffULL
#define CPUMASK_SELF    cpu_to_cpumask(cpu_number())
#define CPUMASK_OTHERS  (CPUMASK_ALL & ~CPUMASK_SELF)
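
/*
 * Illustrative sketch: a cpumask_t is built by OR-ing per-cpu bits, for
 * example to target cpus 0 and 2, or every cpu except the caller (the macros
 * above assume cpu_number() is valid in the calling context).
 *
 *	cpumask_t targets = cpu_to_cpumask(0) | cpu_to_cpumask(2);
 *	cpumask_t others  = CPUMASK_OTHERS;	// CPUMASK_ALL minus CPUMASK_SELF
 */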

/* Initialization routine called at processor registration */
extern void mp_cpus_call_cpu_init(int cpu);

/*
 * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
 * The mask may include the local cpu.
 * If the mode is:
 *	- ASYNC:  other cpus make their calls in parallel
 *	- SYNC:   the calls are performed serially in logical cpu order
 *	- NOSYNC: the calls are queued
 * Unless the mode is NOSYNC, mp_cpus_call() returns when the function has been
 * called on all specified cpus.
 * The return value is the number of cpus where the call was made or queued.
 * The action function is called with interrupts disabled.
 */
extern cpu_t mp_cpus_call(
	cpumask_t       cpus,
	mp_sync_t       mode,
	void            (*action_func)(void *),
	void            *arg);
extern cpu_t mp_cpus_call1(
	cpumask_t       cpus,
	mp_sync_t       mode,
	void            (*action_func)(void *, void *),
	void            *arg0,
	void            *arg1,
	cpumask_t       *cpus_calledp);
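
/*
 * Illustrative usage sketch: run a function on every cpu but the caller,
 * using one of the modes described above (mp_sync_t comes from
 * <i386/mp_events.h>).  The callback name below is hypothetical.
 *
 *	static void
 *	poke_cpu(void *arg)
 *	{
 *		(void) arg;	// called with interrupts disabled
 *	}
 *
 *	// Returns the number of cpus on which the call was made.
 *	cpu_t n = mp_cpus_call(CPUMASK_OTHERS, SYNC, poke_cpu, NULL);
 */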

typedef enum {
	NONE = 0,
	SPINLOCK_TIMEOUT,
	TLB_FLUSH_TIMEOUT,
	CROSSCALL_TIMEOUT,
	INTERRUPT_WATCHDOG
} NMI_reason_t;
extern void NMIPI_panic(cpumask_t cpus, NMI_reason_t reason);

/* Interrupt a set of cpus, forcing an exit out of non-root mode */
extern void mp_cpus_kick(cpumask_t cpus);
/*
 * Power-management-specific SPI to:
 * - register a callout function, and
 * - request the callout (if registered) on a given cpu.
 */
extern void PM_interrupt_register(void (*fn)(void));
extern void cpu_PM_interrupt(int cpu);
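
/*
 * Illustrative sketch: power management registers its callout once and later
 * requests it on a specific cpu.  The callout name below is hypothetical.
 *
 *	static void
 *	pm_callout(void)
 *	{
 *		// power-management work for the interrupted cpu
 *	}
 *
 *	PM_interrupt_register(pm_callout);
 *	cpu_PM_interrupt(2);	// request the callout on cpu 2
 */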

__END_DECLS

#if MP_DEBUG
typedef struct {
	uint64_t        time;
	int             cpu;
	mp_event_t      event;
} cpu_signal_event_t;

#define LOG_NENTRIES 100
typedef struct {
	uint64_t                count[MP_LAST];
	int                     next_entry;
	cpu_signal_event_t      entry[LOG_NENTRIES];
} cpu_signal_event_log_t;

extern cpu_signal_event_log_t *cpu_signal[];
extern cpu_signal_event_log_t *cpu_handle[];

#define DBGLOG(log, _cpu, _event) {                                     \
	boolean_t               spl = ml_set_interrupts_enabled(FALSE); \
	cpu_signal_event_log_t  *logp = log[cpu_number()];              \
	int                     next = logp->next_entry;                \
	cpu_signal_event_t      *eventp = &logp->entry[next];           \
	                                                                \
	logp->count[_event]++;                                          \
	                                                                \
	eventp->time = rdtsc64();                                       \
	eventp->cpu = _cpu;                                             \
	eventp->event = _event;                                         \
	if (next == (LOG_NENTRIES - 1))                                 \
	        logp->next_entry = 0;                                   \
	else                                                            \
	        logp->next_entry++;                                     \
	                                                                \
	(void) ml_set_interrupts_enabled(spl);                          \
}

#define DBGLOG_CPU_INIT(cpu) {                                           \
	cpu_signal_event_log_t  **sig_logpp = &cpu_signal[cpu];          \
	cpu_signal_event_log_t  **hdl_logpp = &cpu_handle[cpu];          \
	                                                                 \
	if (*sig_logpp == NULL &&                                        \
	    kmem_alloc(kernel_map,                                       \
	        (vm_offset_t *) sig_logpp,                               \
	        sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)         \
	        panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n"); \
	bzero(*sig_logpp, sizeof(cpu_signal_event_log_t));               \
	if (*hdl_logpp == NULL &&                                        \
	    kmem_alloc(kernel_map,                                       \
	        (vm_offset_t *) hdl_logpp,                               \
	        sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)         \
	        panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n"); \
	bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t));               \
}
#else   /* MP_DEBUG */
#define DBGLOG(log, _cpu, _event)
#define DBGLOG_CPU_INIT(cpu)
#endif  /* MP_DEBUG */

#endif  /* ASSEMBLER */

#ifdef ASSEMBLER
#define i_bit(bit, word)        ((long)(*(word)) & (1L << (bit)))
#else
__attribute__((always_inline)) static inline long
i_bit_impl(long word, long bit)
{
	long bitmask = 1L << bit;
	return word & bitmask;
}
#define i_bit(bit, word)        i_bit_impl((long)(*(word)), bit)
#endif
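
/*
 * Illustrative sketch: i_bit() tests a bit in the word that its second
 * argument points to, returning non-zero when the bit is set.  The variable
 * below is hypothetical.
 *
 *	long flags = 0x5;
 *	if (i_bit(2, &flags)) {
 *		// bit 2 (value 0x4) is set
 *	}
 */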


#endif /* _I386_MP_H_ */

#endif /* KERNEL_PRIVATE */