]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | /* | |
29 | * @OSF_COPYRIGHT@ | |
30 | */ | |
31 | ||
32 | #include <mach_rt.h> | |
33 | #include <mach_kdb.h> | |
34 | #include <mach_kdp.h> | |
35 | #include <mach_ldebug.h> | |
36 | #include <gprof.h> | |
37 | ||
38 | #include <mach/mach_types.h> | |
39 | #include <mach/kern_return.h> | |
40 | ||
41 | #include <kern/kern_types.h> | |
42 | #include <kern/startup.h> | |
43 | #include <kern/processor.h> | |
44 | #include <kern/cpu_number.h> | |
45 | #include <kern/cpu_data.h> | |
46 | #include <kern/assert.h> | |
47 | #include <kern/machine.h> | |
48 | ||
49 | #include <vm/vm_map.h> | |
50 | #include <vm/vm_kern.h> | |
51 | ||
52 | #include <profiling/profile-mk.h> | |
53 | ||
54 | #include <i386/mp.h> | |
55 | #include <i386/mp_events.h> | |
56 | #include <i386/mp_slave_boot.h> | |
57 | #include <i386/apic.h> | |
58 | #include <i386/ipl.h> | |
59 | #include <i386/fpu.h> | |
60 | #include <i386/pio.h> | |
61 | #include <i386/cpuid.h> | |
62 | #include <i386/proc_reg.h> | |
63 | #include <i386/machine_cpu.h> | |
64 | #include <i386/misc_protos.h> | |
65 | #include <i386/mtrr.h> | |
66 | #include <i386/postcode.h> | |
67 | #include <i386/perfmon.h> | |
68 | #include <i386/cpu_threads.h> | |
69 | #include <i386/mp_desc.h> | |
70 | ||
#if	MP_DEBUG
#define PAUSE		delay(1000000)
#define DBG(x...)	kprintf(x)
#else
#define DBG(x...)
#define PAUSE
#endif	/* MP_DEBUG */

/*
 * By default, use high vectors to leave vector space for systems
 * with multiple I/O APIC's. However some systems that boot with
 * local APIC disabled will hang in SMM when vectors greater than
 * 0x5F are used. Those systems are not expected to have I/O APIC
 * so 16 (0x50 - 0x40) vectors for legacy PIC support is perfect.
 */
#define LAPIC_DEFAULT_INTERRUPT_BASE	0xD0
#define LAPIC_REDUCED_INTERRUPT_BASE	0x50
/*
 * Specific lapic interrupts are relative to this base
 * (i.e. the actual vector is lapic_interrupt_base + offset below):
 */
#define LAPIC_PERFCNT_INTERRUPT		0xB
#define LAPIC_TIMER_INTERRUPT		0xC
#define LAPIC_SPURIOUS_INTERRUPT	0xD
#define LAPIC_INTERPROCESSOR_INTERRUPT	0xE
#define LAPIC_ERROR_INTERRUPT		0xF

/* Initialize lapic_id so cpu_number() works on non SMP systems */
unsigned long	lapic_id_initdata = 0;
unsigned long	lapic_id = (unsigned long)&lapic_id_initdata;
vm_offset_t	lapic_start;			/* kernel VA of the mapped local APIC page */

/* Registered handlers for the timer and performance-monitor interrupts */
static i386_intr_func_t	lapic_timer_func;
static i386_intr_func_t	lapic_pmi_func;

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

/* Base vector for local APIC interrupt sources */
int lapic_interrupt_base = LAPIC_DEFAULT_INTERRUPT_BASE;

void 		slave_boot_init(void);

static void	mp_kdp_wait(void);
static void	mp_rendezvous_action(void);

boolean_t 	smp_initialized = FALSE;

decl_simple_lock_data(,mp_kdp_lock);

decl_mutex_data(static, mp_cpu_boot_lock);

/* Variables needed for MP rendezvous. */
static void		(*mp_rv_setup_func)(void *arg);
static void		(*mp_rv_action_func)(void *arg);
static void		(*mp_rv_teardown_func)(void *arg);
static void		*mp_rv_func_arg;
static int		mp_rv_ncpus;
static long		mp_rv_waiters[2];	/* [0]: entry barrier count, [1]: exit barrier count */
decl_simple_lock_data(,mp_rv_lock);

/* Bidirectional lapic id <-> cpu number translation tables (-1 == unassigned) */
int		lapic_to_cpu[MAX_CPUS];
int		cpu_to_lapic[MAX_CPUS];
133 | ||
134 | static void | |
135 | lapic_cpu_map_init(void) | |
136 | { | |
137 | int i; | |
138 | ||
139 | for (i = 0; i < MAX_CPUS; i++) { | |
140 | lapic_to_cpu[i] = -1; | |
141 | cpu_to_lapic[i] = -1; | |
142 | } | |
143 | } | |
144 | ||
145 | void | |
146 | lapic_cpu_map(int apic_id, int cpu) | |
147 | { | |
148 | cpu_to_lapic[cpu] = apic_id; | |
149 | lapic_to_cpu[apic_id] = cpu; | |
150 | } | |
151 | ||
152 | #ifdef MP_DEBUG | |
153 | static void | |
154 | lapic_cpu_map_dump(void) | |
155 | { | |
156 | int i; | |
157 | ||
158 | for (i = 0; i < MAX_CPUS; i++) { | |
159 | if (cpu_to_lapic[i] == -1) | |
160 | continue; | |
161 | kprintf("cpu_to_lapic[%d]: %d\n", | |
162 | i, cpu_to_lapic[i]); | |
163 | } | |
164 | for (i = 0; i < MAX_CPUS; i++) { | |
165 | if (lapic_to_cpu[i] == -1) | |
166 | continue; | |
167 | kprintf("lapic_to_cpu[%d]: %d\n", | |
168 | i, lapic_to_cpu[i]); | |
169 | } | |
170 | } | |
171 | #define LAPIC_CPU_MAP_DUMP() lapic_cpu_map_dump() | |
172 | #define LAPIC_DUMP() lapic_dump() | |
173 | #else | |
174 | #define LAPIC_CPU_MAP_DUMP() | |
175 | #define LAPIC_DUMP() | |
176 | #endif /* MP_DEBUG */ | |
177 | ||
/* Access a 32-bit local APIC register through the mapped window. */
#define LAPIC_REG(reg) \
	(*((volatile int *)(lapic_start + LAPIC_##reg)))
/* Same, with an additional byte offset (for ISR/TMR/IRR register arrays). */
#define LAPIC_REG_OFFSET(reg,off) \
	(*((volatile int *)(lapic_start + LAPIC_##reg + (off))))

/* Actual interrupt vector for a given local APIC source. */
#define LAPIC_VECTOR(src) \
	(lapic_interrupt_base + LAPIC_##src##_INTERRUPT)

/* Test the In-Service Register bit for a given source relative to base. */
#define LAPIC_ISR_IS_SET(base,src) \
	(LAPIC_REG_OFFSET(ISR_BASE,((base+LAPIC_##src##_INTERRUPT)/32)*0x10) & \
	 (1 <<((base + LAPIC_##src##_INTERRUPT)%32)))

#if GPROF
/*
 * Initialize dummy structs for profiling. These aren't used but
 * allows hertz_tick() to be built with GPROF defined.
 */
struct profile_vars _profile_vars;
struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars };
#define GPROF_INIT()							\
{									\
	int	i;							\
									\
	/* Hack to initialize pointers to unused profiling structs */	\
	for (i = 1; i < MAX_CPUS; i++)					\
		_profile_vars_cpus[i] = &_profile_vars;			\
}
#else
#define GPROF_INIT()
#endif /* GPROF */

extern void	master_up(void);
210 | ||
/*
 * Bring up SMP support on the boot processor:
 * initialize the MP locks, probe for a local APIC, map its registers
 * into the kernel map, build the lapic<->cpu map, initialize the APIC,
 * and prepare the real-mode slave boot vector.
 * Returns quietly (leaving smp_initialized FALSE) if no local APIC
 * can be found or enabled.
 */
void
smp_init(void)
{
	int		result;
	vm_map_entry_t	entry;
	uint32_t	lo;
	uint32_t	hi;
	boolean_t	is_boot_processor;
	boolean_t	is_lapic_enabled;
	vm_offset_t	lapic_base;

	simple_lock_init(&mp_kdp_lock, 0);
	simple_lock_init(&mp_rv_lock, 0);
	mutex_init(&mp_cpu_boot_lock, 0);
	console_init();

	/* Local APIC? */
	if (!lapic_probe())
		return;

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	lapic_base = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE 0x%x %s %s\n", lapic_base,
		is_lapic_enabled ? "enabled" : "disabled",
		is_boot_processor ? "BSP" : "AP");
	/* This must run on the BSP with its local APIC enabled */
	if (!is_boot_processor || !is_lapic_enabled)
		panic("Unexpected local APIC state\n");

	/* Establish a map to the local apic */
	lapic_start = vm_map_min(kernel_map);
	result = vm_map_find_space(kernel_map, &lapic_start,
				   round_page(LAPIC_SIZE), 0, &entry);
	if (result != KERN_SUCCESS) {
		panic("smp_init: vm_map_find_entry FAILED (err=%d)", result);
	}
	/* vm_map_find_space() returns with the map locked */
	vm_map_unlock(kernel_map);
	pmap_enter(pmap_kernel(),
		   lapic_start,
		   (ppnum_t) i386_btop(lapic_base),
		   VM_PROT_READ|VM_PROT_WRITE,
		   VM_WIMG_USE_DEFAULT,
		   TRUE);
	/* Point lapic_id at the live ID register so cpu_number() works */
	lapic_id = (unsigned long)(lapic_start + LAPIC_ID);

	if ((LAPIC_REG(VERSION)&LAPIC_VERSION_MASK) != 0x14) {
		printf("Local APIC version not 0x14 as expected\n");
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);

	lapic_init();

	cpu_thread_init();

	if (pmc_init() != KERN_SUCCESS)
		printf("Performance counters not available\n");

	GPROF_INIT();
	DBGLOG_CPU_INIT(master_cpu);

	slave_boot_init();
	master_up();

	smp_initialized = TRUE;

	return;
}
283 | ||
284 | ||
/*
 * Read the local APIC Error Status Register.
 * ESR is a write/read register: the write latches the accumulated
 * error flags, which the subsequent read then returns.
 */
static int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_REG(ERROR_STATUS) = 0;
	return LAPIC_REG(ERROR_STATUS);
}
292 | ||
/*
 * Clear the local APIC Error Status Register.
 * Two back-to-back writes: the first latches (and thereby discards)
 * pending error flags, the second clears the latched value.
 */
static void
lapic_esr_clear(void)
{
	LAPIC_REG(ERROR_STATUS) = 0;
	LAPIC_REG(ERROR_STATUS) = 0;
}
299 | ||
/* Names for the 3-bit LVT delivery-mode field, indexed by field value
 * (used by lapic_dump() only). */
static const char *DM[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"};
309 | ||
/*
 * Dump the state of this cpu's local APIC to the kernel log:
 * id/version, priority registers, LVT entries, timer counts and the
 * TMR/IRR/ISR bit arrays. Diagnostic aid only; reads registers but
 * modifies nothing except ESR (which lapic_esr_read() write-latches).
 */
void
lapic_dump(void)
{
	int	i;

/* '!' prefix marks a condition that is false */
#define BOOL(a) ((a)?' ':'!')

	kprintf("LAPIC %d at 0x%x version 0x%x\n",
		(LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
		lapic_start,
		LAPIC_REG(VERSION)&LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
		LAPIC_REG(TPR)&LAPIC_TPR_MASK,
		LAPIC_REG(APR)&LAPIC_APR_MASK,
		LAPIC_REG(PPR)&LAPIC_PPR_MASK);
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
		LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT,
		LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
		BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE),
		BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)),
		LAPIC_REG(SVR) & LAPIC_SVR_MASK);
	kprintf("LVT_TIMER:   Vector 0x%02x %s %cmasked %s\n",
		LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED),
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot");
	kprintf("  Initial Count: 0x%08x \n", LAPIC_REG(TIMER_INITIAL_COUNT));
	kprintf("  Current Count: 0x%08x \n", LAPIC_REG(TIMER_CURRENT_COUNT));
	kprintf("  Divide Config: 0x%08x \n", LAPIC_REG(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
		LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT0:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT1:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED));
	kprintf("LVT_ERROR:   Vector 0x%02x %s %cmasked\n",
		LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED));
	kprintf("ESR: %08x \n", lapic_esr_read());
	/* Column header (repeated hex digits) for the bit arrays below */
	kprintf("       ");
	for(i=0xf; i>=0; i--)
		kprintf("%x%x%x%x",i,i,i,i);
	kprintf("\n");
	/* Each array is 8 x 32-bit registers spaced 0x10 apart, MSB first */
	kprintf("TMR: 0x");
	for(i=7; i>=0; i--)
		kprintf("%08x",LAPIC_REG_OFFSET(TMR_BASE, i*0x10));
	kprintf("\n");
	kprintf("IRR: 0x");
	for(i=7; i>=0; i--)
		kprintf("%08x",LAPIC_REG_OFFSET(IRR_BASE, i*0x10));
	kprintf("\n");
	kprintf("ISR: 0x");
	for(i=7; i >= 0; i--)
		kprintf("%08x",LAPIC_REG_OFFSET(ISR_BASE, i*0x10));
	kprintf("\n");
}
381 | ||
382 | boolean_t | |
383 | lapic_probe(void) | |
384 | { | |
385 | uint32_t lo; | |
386 | uint32_t hi; | |
387 | ||
388 | if (cpuid_features() & CPUID_FEATURE_APIC) | |
389 | return TRUE; | |
390 | ||
391 | if (cpuid_family() == 6 || cpuid_family() == 15) { | |
392 | /* | |
393 | * Mobile Pentiums: | |
394 | * There may be a local APIC which wasn't enabled by BIOS. | |
395 | * So we try to enable it explicitly. | |
396 | */ | |
397 | rdmsr(MSR_IA32_APIC_BASE, lo, hi); | |
398 | lo &= ~MSR_IA32_APIC_BASE_BASE; | |
399 | lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START; | |
400 | lo |= MSR_IA32_APIC_BASE_ENABLE; | |
401 | wrmsr(MSR_IA32_APIC_BASE, lo, hi); | |
402 | ||
403 | /* | |
404 | * Re-initialize cpu features info and re-check. | |
405 | */ | |
406 | set_cpu_model(); | |
407 | if (cpuid_features() & CPUID_FEATURE_APIC) { | |
408 | printf("Local APIC discovered and enabled\n"); | |
409 | lapic_os_enabled = TRUE; | |
410 | lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE; | |
411 | return TRUE; | |
412 | } | |
413 | } | |
414 | ||
415 | return FALSE; | |
416 | } | |
417 | ||
/*
 * Disable the local APIC, but only if it was the OS (lapic_probe())
 * that enabled it; a BIOS-enabled APIC is left alone.
 * Masks all LVT sources, software-disables the APIC via SVR, then
 * hardware-disables it via MSR_IA32_APIC_BASE and refreshes the
 * cached cpu feature info.
 */
void
lapic_shutdown(void)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE)
		return;

	mp_disable_preemption();

	/* ExtINT: masked (only the master cpu has ExtINT unmasked) */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_REG(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_REG(LVT_LINT0) = value;
	}

	/* Timer: masked */
	LAPIC_REG(LVT_TIMER) |= LAPIC_LVT_MASKED;

	/* Perfmon: masked */
	LAPIC_REG(LVT_PERFCNT) |= LAPIC_LVT_MASKED;

	/* Error: masked */
	LAPIC_REG(LVT_ERROR) |= LAPIC_LVT_MASKED;

	/* APIC software disabled */
	LAPIC_REG(SVR) &= ~LAPIC_SVR_ENABLE;

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	set_cpu_model();

	mp_enable_preemption();
}
458 | ||
/*
 * Program this cpu's local APIC for use by the OS:
 * flat logical destination mode, TPR accepting all priorities,
 * software-enable via SVR, ExtINT on the master cpu's LINT0,
 * and unmasked timer/perfmon/error LVT entries using the
 * LAPIC_VECTOR() vector assignments.
 */
void
lapic_init(void)
{
	int	value;

	/* Set flat delivery model, logical processor id */
	LAPIC_REG(DFR) = LAPIC_DFR_FLAT;
	LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT;

	/* Accept all */
	LAPIC_REG(TPR) =  0;

	/* Software-enable with the spurious vector */
	LAPIC_REG(SVR) = LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE;

	/* ExtINT: unmask on the master cpu only */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_REG(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_REG(LVT_LINT0) = value;
	}

	/* Timer: unmasked, one-shot */
	LAPIC_REG(LVT_TIMER) = LAPIC_VECTOR(TIMER);

	/* Perfmon: unmasked */
	LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);

	/* Clear any stale error state before unmasking the error LVT */
	lapic_esr_clear();

	LAPIC_REG(LVT_ERROR) = LAPIC_VECTOR(ERROR);

}
492 | ||
/* Register the handler to be called on local APIC timer interrupts. */
void
lapic_set_timer_func(i386_intr_func_t func)
{
	lapic_timer_func = func;
}
498 | ||
499 | void | |
500 | lapic_set_timer( | |
501 | boolean_t interrupt, | |
502 | lapic_timer_mode_t mode, | |
503 | lapic_timer_divide_t divisor, | |
504 | lapic_timer_count_t initial_count) | |
505 | { | |
506 | boolean_t state; | |
507 | uint32_t timer_vector; | |
508 | ||
509 | state = ml_set_interrupts_enabled(FALSE); | |
510 | timer_vector = LAPIC_REG(LVT_TIMER); | |
511 | timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);; | |
512 | timer_vector |= interrupt ? 0 : LAPIC_LVT_MASKED; | |
513 | timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0; | |
514 | LAPIC_REG(LVT_TIMER) = timer_vector; | |
515 | LAPIC_REG(TIMER_DIVIDE_CONFIG) = divisor; | |
516 | LAPIC_REG(TIMER_INITIAL_COUNT) = initial_count; | |
517 | ml_set_interrupts_enabled(state); | |
518 | } | |
519 | ||
520 | void | |
521 | lapic_get_timer( | |
522 | lapic_timer_mode_t *mode, | |
523 | lapic_timer_divide_t *divisor, | |
524 | lapic_timer_count_t *initial_count, | |
525 | lapic_timer_count_t *current_count) | |
526 | { | |
527 | boolean_t state; | |
528 | ||
529 | state = ml_set_interrupts_enabled(FALSE); | |
530 | if (mode) | |
531 | *mode = (LAPIC_REG(LVT_TIMER) & LAPIC_LVT_PERIODIC) ? | |
532 | periodic : one_shot; | |
533 | if (divisor) | |
534 | *divisor = LAPIC_REG(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK; | |
535 | if (initial_count) | |
536 | *initial_count = LAPIC_REG(TIMER_INITIAL_COUNT); | |
537 | if (current_count) | |
538 | *current_count = LAPIC_REG(TIMER_CURRENT_COUNT); | |
539 | ml_set_interrupts_enabled(state); | |
540 | } | |
541 | ||
/* Register the handler to be called on performance-monitor interrupts. */
void
lapic_set_pmi_func(i386_intr_func_t func)
{
	lapic_pmi_func = func;
}
547 | ||
/* Signal end-of-interrupt to the local APIC (any write to EOI suffices). */
static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_REG(EOI) = 0;
}
553 | ||
/* Externally-visible wrapper for the inline EOI helper. */
void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}
559 | ||
560 | int | |
561 | lapic_interrupt(int interrupt, void *state) | |
562 | { | |
563 | interrupt -= lapic_interrupt_base; | |
564 | if (interrupt < 0) | |
565 | return 0; | |
566 | ||
567 | switch(interrupt) { | |
568 | case LAPIC_PERFCNT_INTERRUPT: | |
569 | if (lapic_pmi_func != NULL) | |
570 | (*lapic_pmi_func)( | |
571 | (struct i386_interrupt_state *) state); | |
572 | /* Clear interrupt masked */ | |
573 | LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT); | |
574 | _lapic_end_of_interrupt(); | |
575 | return 1; | |
576 | case LAPIC_TIMER_INTERRUPT: | |
577 | _lapic_end_of_interrupt(); | |
578 | if (lapic_timer_func != NULL) | |
579 | (*lapic_timer_func)( | |
580 | (struct i386_interrupt_state *) state); | |
581 | return 1; | |
582 | case LAPIC_ERROR_INTERRUPT: | |
583 | lapic_dump(); | |
584 | panic("Local APIC error\n"); | |
585 | _lapic_end_of_interrupt(); | |
586 | return 1; | |
587 | case LAPIC_SPURIOUS_INTERRUPT: | |
588 | kprintf("SPIV\n"); | |
589 | /* No EOI required here */ | |
590 | return 1; | |
591 | case LAPIC_INTERPROCESSOR_INTERRUPT: | |
592 | cpu_signal_handler((struct i386_interrupt_state *) state); | |
593 | _lapic_end_of_interrupt(); | |
594 | return 1; | |
595 | } | |
596 | return 0; | |
597 | } | |
598 | ||
/*
 * Recover local APIC state after returning from SMM.
 * Only needed when the OS itself enabled the APIC (lapic_os_enabled):
 * a BIOS SMI handler unaware of the APIC may leave a timer interrupt
 * in-service with no EOI ever issued, wedging timer delivery.
 */
void
lapic_smm_restore(void)
{
	boolean_t state;

	if (lapic_os_enabled == FALSE)
		return;

	state = ml_set_interrupts_enabled(FALSE);

	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
		/*
		 * Bogus SMI handler enables interrupts but does not know about
		 * local APIC interrupt sources. When APIC timer counts down to
		 * zero while in SMM, local APIC will end up waiting for an EOI
		 * but no interrupt was delivered to the OS.
		 */
		_lapic_end_of_interrupt();

		/*
		 * timer is one-shot, trigger another quick countdown to trigger
		 * another timer interrupt.
		 */
		if (LAPIC_REG(TIMER_CURRENT_COUNT) == 0) {
			LAPIC_REG(TIMER_INITIAL_COUNT) = 1;
		}

		kprintf("lapic_smm_restore\n");
	}

	ml_set_interrupts_enabled(state);
}
631 | ||
/*
 * Start the application processor in the given slot using the
 * INIT-SIPI-SIPI sequence, then poll for it to mark itself running.
 * The slave boot stack is serialized by mp_cpu_boot_lock.
 * Returns KERN_SUCCESS in all cases, including the failure path
 * (which calls cpu_shutdown() to reboot, so the return is nominal).
 * NOTE(review): returning KERN_SUCCESS after a failed start looks
 * intentional only because cpu_shutdown() reboots — confirm.
 */
kern_return_t
intel_startCPU(
	int	slot_num)
{

	int	i = 1000;
	int	lapic = cpu_to_lapic[slot_num];

	assert(lapic != -1);

	DBGLOG_CPU_INIT(slot_num);

	DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
	DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) IdlePTD);

	/* Initialize (or re-initialize) the descriptor tables for this cpu. */
	mp_desc_init(cpu_datap(slot_num), FALSE);

	/* Serialize use of the slave boot stack. */
	mutex_lock(&mp_cpu_boot_lock);

	mp_disable_preemption();
	/* Starting ourselves is a no-op */
	if (slot_num == get_cpu_number()) {
		mp_enable_preemption();
		mutex_unlock(&mp_cpu_boot_lock);
		return KERN_SUCCESS;
	}

	/* INIT IPI, then two STARTUP IPIs with the boot vector (MP_BOOT page) */
	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) = LAPIC_ICR_DM_INIT;
	delay(10000);

	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
	delay(200);

	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
	delay(200);

#ifdef	POSTCODE_DELAY
	/* Wait much longer if postcodes are displayed for a delay period. */
	i *= 10000;
#endif
	/* Poll for the slave to flag itself running */
	while(i-- > 0) {
		if (cpu_datap(slot_num)->cpu_running)
			break;
		delay(10000);
	}

	mp_enable_preemption();
	mutex_unlock(&mp_cpu_boot_lock);

	if (!cpu_datap(slot_num)->cpu_running) {
		DBG("Failed to start CPU %02d\n", slot_num);
		printf("Failed to start CPU %02d, rebooting...\n", slot_num);
		delay(1000000);
		cpu_shutdown();
		return KERN_SUCCESS;
	} else {
		DBG("Started CPU %02d\n", slot_num);
		printf("Started CPU %02d\n", slot_num);
		return KERN_SUCCESS;
	}
}
697 | ||
698 | extern char slave_boot_base[]; | |
699 | extern char slave_boot_end[]; | |
700 | extern void pstart(void); | |
701 | ||
/*
 * Install the real-mode slave boot trampoline at physical page MP_BOOT
 * (reserved at startup): copy the boot code there, zero a stack area
 * above it, and plant the physical address of pstart() at the base of
 * the stack so the trampoline can jump to the common startup path.
 */
void
slave_boot_init(void)
{
	DBG("V(slave_boot_base)=%p P(slave_boot_base)=%p MP_BOOT=%p sz=0x%x\n",
		slave_boot_base,
		kvtophys((vm_offset_t) slave_boot_base),
		MP_BOOT,
		slave_boot_end-slave_boot_base);

	/*
	 * Copy the boot entry code to the real-mode vector area MP_BOOT.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys((addr64_t) kvtophys((vm_offset_t) slave_boot_base),
		   (addr64_t) MP_BOOT,
		   slave_boot_end-slave_boot_base);

	/*
	 * Zero a stack area above the boot code.
	 */
	DBG("bzero_phys 0x%x sz 0x%x\n",MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
	bzero_phys((addr64_t)MP_BOOTSTACK+MP_BOOT-0x400, 0x400);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	DBG("writing 0x%x at phys 0x%x\n",
		kvtophys((vm_offset_t) &pstart), MP_MACH_START+MP_BOOT);
	ml_phys_write_word(MP_MACH_START+MP_BOOT,
			   kvtophys((vm_offset_t) &pstart));

	/* Flush caches so the slave sees the trampoline through memory */
	__asm__("wbinvd");
}
740 | ||
741 | #if MP_DEBUG | |
742 | cpu_signal_event_log_t *cpu_signal[MAX_CPUS]; | |
743 | cpu_signal_event_log_t *cpu_handle[MAX_CPUS]; | |
744 | ||
745 | MP_EVENT_NAME_DECL(); | |
746 | ||
747 | #endif /* MP_DEBUG */ | |
748 | ||
/*
 * Handler for interprocessor interrupts: drain and act on every event
 * bit posted in this cpu's cpu_signals word, looping until the word is
 * clear. Runs with preemption disabled. The KDP case is checked first
 * so a debugger request parks this cpu before servicing anything else.
 */
void
cpu_signal_handler(__unused struct i386_interrupt_state *regs)
{
	int		my_cpu;
	volatile int	*my_word;
#if	MACH_KDB && MACH_ASSERT
	int		i=100;
#endif	/* MACH_KDB && MACH_ASSERT */

	mp_disable_preemption();

	my_cpu = cpu_number();
	my_word = &current_cpu_datap()->cpu_signals;

	do {
#if	MACH_KDB && MACH_ASSERT
		/* Guard against an event bit that never clears */
		if (i-- <= 0)
			Debugger("cpu_signal_handler");
#endif	/* MACH_KDB && MACH_ASSERT */
#if	MACH_KDP
		if (i_bit(MP_KDP, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_KDP);
			i_bit_clear(MP_KDP, my_word);
			/* Spin here until the debugger releases us */
			mp_kdp_wait();
		} else
#endif	/* MACH_KDP */
		if (i_bit(MP_TLB_FLUSH, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
			i_bit_clear(MP_TLB_FLUSH, my_word);
			pmap_update_interrupt();
		} else if (i_bit(MP_AST, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_AST);
			i_bit_clear(MP_AST, my_word);
			ast_check(cpu_to_processor(my_cpu));
#if	MACH_KDB
		} else if (i_bit(MP_KDB, my_word)) {
			extern kdb_is_slave[];

			i_bit_clear(MP_KDB, my_word);
			kdb_is_slave[my_cpu]++;
			kdb_kintr();
#endif	/* MACH_KDB */
		} else if (i_bit(MP_RENDEZVOUS, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
			i_bit_clear(MP_RENDEZVOUS, my_word);
			mp_rendezvous_action();
		}
	} while (*my_word);

	mp_enable_preemption();

}
801 | ||
#ifdef	MP_DEBUG
extern int	max_lock_loops;
#endif	/* MP_DEBUG */
/*
 * Send an interprocessor interrupt to the given cpu.
 * Waits for any previously-sent IPI to be accepted (ICR delivery-status
 * pending bit clear) before writing the destination and command
 * registers; the two ICR writes are done with interrupts disabled so
 * they are not interleaved with another sender on this cpu.
 * No-op before smp_initialized.
 * Note: the while-loop's opening brace appears in both arms of the
 * #ifdef so the shared body below compiles either way.
 */
void
cpu_interrupt(int cpu)
{
	boolean_t	state;

	if (smp_initialized) {

		/* Wait for previous interrupt to be delivered... */
#ifdef	MP_DEBUG
		int	pending_busy_count = 0;
		while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
			if (++pending_busy_count > max_lock_loops)
				panic("cpus_interrupt() deadlock\n");
#else
		while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
#endif	/* MP_DEBUG */
			cpu_pause();
		}

		state = ml_set_interrupts_enabled(FALSE);
		LAPIC_REG(ICRD) =
			cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
		LAPIC_REG(ICR)  =
			LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_FIXED;
		(void) ml_set_interrupts_enabled(state);
	}

}
833 | ||
834 | void | |
835 | i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode) | |
836 | { | |
837 | volatile int *signals = &cpu_datap(cpu)->cpu_signals; | |
838 | uint64_t tsc_timeout; | |
839 | ||
840 | ||
841 | if (!cpu_datap(cpu)->cpu_running) | |
842 | return; | |
843 | ||
844 | DBGLOG(cpu_signal, cpu, event); | |
845 | ||
846 | i_bit_set(event, signals); | |
847 | cpu_interrupt(cpu); | |
848 | if (mode == SYNC) { | |
849 | again: | |
850 | tsc_timeout = rdtsc64() + (1000*1000*1000); | |
851 | while (i_bit(event, signals) && rdtsc64() < tsc_timeout) { | |
852 | cpu_pause(); | |
853 | } | |
854 | if (i_bit(event, signals)) { | |
855 | DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n", | |
856 | cpu, event); | |
857 | goto again; | |
858 | } | |
859 | } | |
860 | } | |
861 | ||
862 | void | |
863 | i386_signal_cpus(mp_event_t event, mp_sync_t mode) | |
864 | { | |
865 | unsigned int cpu; | |
866 | unsigned int my_cpu = cpu_number(); | |
867 | ||
868 | for (cpu = 0; cpu < real_ncpus; cpu++) { | |
869 | if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running) | |
870 | continue; | |
871 | i386_signal_cpu(cpu, event, mode); | |
872 | } | |
873 | } | |
874 | ||
875 | int | |
876 | i386_active_cpus(void) | |
877 | { | |
878 | unsigned int cpu; | |
879 | unsigned int ncpus = 0; | |
880 | ||
881 | for (cpu = 0; cpu < real_ncpus; cpu++) { | |
882 | if (cpu_datap(cpu)->cpu_running) | |
883 | ncpus++; | |
884 | } | |
885 | return(ncpus); | |
886 | } | |
887 | ||
888 | /* | |
889 | * All-CPU rendezvous: | |
890 | * - CPUs are signalled, | |
891 | * - all execute the setup function (if specified), | |
892 | * - rendezvous (i.e. all cpus reach a barrier), | |
893 | * - all execute the action function (if specified), | |
894 | * - rendezvous again, | |
895 | * - execute the teardown function (if specified), and then | |
896 | * - resume. | |
897 | * | |
898 | * Note that the supplied external functions _must_ be reentrant and aware | |
899 | * that they are running in parallel and in an unknown lock context. | |
900 | */ | |
901 | ||
/*
 * Per-cpu body of an all-cpu rendezvous (run on every participating
 * cpu, the initiator included): optional setup, entry barrier, action,
 * exit barrier, optional teardown. The mp_rv_* globals are owned by
 * the initiator holding mp_rv_lock in mp_rendezvous().
 */
static void
mp_rendezvous_action(void)
{

	/* setup function */
	if (mp_rv_setup_func != NULL)
		mp_rv_setup_func(mp_rv_func_arg);
	/* spin on entry rendezvous */
	atomic_incl(&mp_rv_waiters[0], 1);
	while (*((volatile long *) &mp_rv_waiters[0]) < mp_rv_ncpus)
		cpu_pause();
	/* action function */
	if (mp_rv_action_func != NULL)
		mp_rv_action_func(mp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_incl(&mp_rv_waiters[1], 1);
	while (*((volatile long *) &mp_rv_waiters[1]) < mp_rv_ncpus)
		cpu_pause();
	/* teardown function */
	if (mp_rv_teardown_func != NULL)
		mp_rv_teardown_func(mp_rv_func_arg);
}
924 | ||
/*
 * Initiate an all-cpu rendezvous: publish the callbacks and argument
 * under mp_rv_lock, signal the other running cpus (which run
 * mp_rendezvous_action() from their IPI handler with interrupts
 * disabled), and participate ourselves. Before smp_initialized the
 * callbacks simply run locally in sequence.
 * Callbacks must be reentrant; see the block comment above.
 */
void
mp_rendezvous(void (*setup_func)(void *),
	      void (*action_func)(void *),
	      void (*teardown_func)(void *),
	      void *arg)
{

	if (!smp_initialized) {
		/* Uniprocessor (so far): no coordination required */
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	simple_lock(&mp_rv_lock);

	/* set static function pointers */
	mp_rv_setup_func = setup_func;
	mp_rv_action_func = action_func;
	mp_rv_teardown_func = teardown_func;
	mp_rv_func_arg = arg;

	mp_rv_waiters[0] = 0;		/* entry rendezvous count */
	mp_rv_waiters[1] = 0;		/* exit  rendezvous count */
	mp_rv_ncpus = i386_active_cpus();

	/*
	 * signal other processors, which will call mp_rendezvous_action()
	 * with interrupts disabled
	 */
	i386_signal_cpus(MP_RENDEZVOUS, ASYNC);

	/* call executor function on this cpu */
	mp_rendezvous_action();

	/* release lock */
	simple_unlock(&mp_rv_lock);
}
967 | ||
#if MACH_KDP
volatile boolean_t mp_kdp_trap = FALSE;	/* TRUE while some CPU owns the debugger */
long		mp_kdp_ncpus;		/* CPUs checked in (owner + spinners in mp_kdp_wait()) */
boolean_t	mp_kdp_state;		/* interrupt state saved by mp_kdp_enter(), restored by mp_kdp_exit() */
972 | ||
973 | ||
void
mp_kdp_enter(void)
{
	unsigned int	cpu;
	unsigned int	ncpus;
	unsigned int	my_cpu = cpu_number();
	uint64_t	tsc_timeout;

	DBG("mp_kdp_enter()\n");

	/*
	 * Here to enter the debugger.
	 * In case of races, only one cpu is allowed to enter kdp after
	 * stopping others.
	 */
	mp_kdp_state = ml_set_interrupts_enabled(FALSE);
	simple_lock(&mp_kdp_lock);
	/*
	 * If another CPU already owns the debugger, park in mp_kdp_wait()
	 * (dropping the lock first) until it exits, then re-check.
	 */
	while (mp_kdp_trap) {
		simple_unlock(&mp_kdp_lock);
		DBG("mp_kdp_enter() race lost\n");
		mp_kdp_wait();
		simple_lock(&mp_kdp_lock);
	}
	mp_kdp_ncpus = 1;	/* self */
	mp_kdp_trap = TRUE;	/* we now own the debugger */
	simple_unlock(&mp_kdp_lock);

	/* Deliver a nudge to other cpus, counting how many */
	DBG("mp_kdp_enter() signaling other processors\n");
	for (ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
			continue;
		ncpus++;
		i386_signal_cpu(cpu, MP_KDP, ASYNC);
	}

	/*
	 * Wait for the other processors to check in via mp_kdp_wait(),
	 * but give up after ~10^9 TSC ticks rather than hang forever.
	 * NOTE(review): the DBG() below prints the long mp_kdp_ncpus with
	 * %d -- benign on ILP32 i386, wrong on LP64; confirm if ported.
	 */
	DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
	tsc_timeout = rdtsc64() + (1000*1000*1000);
	while (*((volatile unsigned int *) &mp_kdp_ncpus) != ncpus
		&& rdtsc64() < tsc_timeout) {
		cpu_pause();
	}
	DBG("mp_kdp_enter() %d processors done %s\n",
		mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
	postcode(MP_KDP_ENTER);
}
1021 | ||
/*
 * Spin here while another CPU owns the debugger.  Check in by bumping
 * mp_kdp_ncpus (mp_kdp_enter() waits on this), spin until the owner
 * clears mp_kdp_trap in mp_kdp_exit(), then check back out.
 * NOTE(review): interrupts are deliberately re-enabled while spinning
 * -- presumably so this CPU remains responsive to further signals;
 * confirm before changing.
 */
static void
mp_kdp_wait(void)
{
	boolean_t	state;

	state = ml_set_interrupts_enabled(TRUE);
	DBG("mp_kdp_wait()\n");
	atomic_incl(&mp_kdp_ncpus, 1);
	while (mp_kdp_trap) {
		cpu_pause();
	}
	atomic_decl(&mp_kdp_ncpus, 1);
	DBG("mp_kdp_wait() done\n");
	(void) ml_set_interrupts_enabled(state);
}
1037 | ||
/*
 * Leave the debugger: remove our own entry from the check-in count,
 * release the spinning CPUs by clearing mp_kdp_trap, wait for them to
 * drain out of mp_kdp_wait(), then restore the interrupt state saved
 * by mp_kdp_enter().
 */
void
mp_kdp_exit(void)
{
	DBG("mp_kdp_exit()\n");
	atomic_decl(&mp_kdp_ncpus, 1);
	mp_kdp_trap = FALSE;	/* releases CPUs spinning in mp_kdp_wait() */

	/* Wait other processors to stop spinning. XXX needs timeout */
	DBG("mp_kdp_exit() waiting for processors to resume\n");
	while (*((volatile long *) &mp_kdp_ncpus) > 0) {
		cpu_pause();
	}
	DBG("mp_kdp_exit() done\n");
	(void) ml_set_interrupts_enabled(mp_kdp_state);
	postcode(0);
}
1054 | #endif /* MACH_KDP */ | |
1055 | ||
/*ARGSUSED*/
/*
 * Per-processor AST-check initialization hook; nothing to do on i386.
 */
void
init_ast_check(
	__unused processor_t	processor)
{
}
1062 | ||
1063 | void | |
1064 | cause_ast_check( | |
1065 | processor_t processor) | |
1066 | { | |
1067 | int cpu = PROCESSOR_DATA(processor, slot_num); | |
1068 | ||
1069 | if (cpu != cpu_number()) { | |
1070 | i386_signal_cpu(cpu, MP_AST, ASYNC); | |
1071 | } | |
1072 | } | |
1073 | ||
1074 | /* | |
1075 | * invoke kdb on slave processors | |
1076 | */ | |
1077 | ||
1078 | void | |
1079 | remote_kdb(void) | |
1080 | { | |
1081 | unsigned int my_cpu = cpu_number(); | |
1082 | unsigned int cpu; | |
1083 | ||
1084 | mp_disable_preemption(); | |
1085 | for (cpu = 0; cpu < real_ncpus; cpu++) { | |
1086 | if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running) | |
1087 | continue; | |
1088 | i386_signal_cpu(cpu, MP_KDB, SYNC); | |
1089 | } | |
1090 | mp_enable_preemption(); | |
1091 | } | |
1092 | ||
1093 | /* | |
1094 | * Clear kdb interrupt | |
1095 | */ | |
1096 | ||
1097 | void | |
1098 | clear_kdb_intr(void) | |
1099 | { | |
1100 | mp_disable_preemption(); | |
1101 | i_bit_clear(MP_KDB, ¤t_cpu_datap()->cpu_signals); | |
1102 | mp_enable_preemption(); | |
1103 | } | |
1104 | ||
1105 | /* | |
1106 | * i386_init_slave() is called from pstart. | |
1107 | * We're in the cpu's interrupt stack with interrupts disabled. | |
1108 | */ | |
/*
 * i386_init_slave() is called from pstart.
 * We're in the cpu's interrupt stack with interrupts disabled.
 * Brings this slave CPU through low-level hardware init and hands it
 * to slave_main(), which must not return.
 */
void
i386_init_slave(void)
{
	postcode(I386_INIT_SLAVE);

	/* Ensure that caching and write-through are enabled */
	set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));

	DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
		get_cpu_number(), get_cpu_phys_number());

	/* Local APIC initialization for this CPU */
	lapic_init();

	LAPIC_DUMP();
	LAPIC_CPU_MAP_DUMP();

	/* MTRR and PAT setup (NOTE(review): presumably syncing this CPU
	 * to the boot processor's settings -- confirm in mtrr.c) */
	mtrr_update_cpu();

	pat_init();

	cpu_init();

	/* Enter the slave's main loop; does not return (see panic below) */
	slave_main();

	panic("i386_init_slave() returned from slave_main()");
}
1135 | ||
/*
 * Second-stage (thread-context) initialization for a slave CPU,
 * performed after i386_init_slave()'s interrupt-stack phase.
 */
void
slave_machine_init(void)
{
	/*
	 * Here in process context.
	 */
	DBG("slave_machine_init() CPU%d\n", get_cpu_number());

	/* Floating-point unit setup for this CPU */
	init_fpu();

	cpu_thread_init();

	/* Performance-monitoring counters */
	pmc_init();

	cpu_machine_init();

	clock_init();
}
1154 | ||
/*
 * Out-of-line cpu_number() for callers that need a real function
 * rather than the macro form, which is removed here first.
 *
 * Fix: #undef takes a bare macro identifier (C99 6.10.3.5); the old
 * "#undef cpu_number()" form carried ill-formed trailing tokens.
 */
#undef cpu_number
int cpu_number(void)
{
	return get_cpu_number();
}
1160 | ||
1161 | #if MACH_KDB | |
1162 | #include <ddb/db_output.h> | |
1163 | ||
#define TRAP_DEBUG 0	/* Must match interrupt.s and spl.s */


#if TRAP_DEBUG
#define MTRAPS 100	/* capacity of the trap/SPL history buffer */
/*
 * One record per logged event; type 1 is an SPL transition, type 2 an
 * interrupt (see db_trap_hist() and the field-layout comment below).
 * cur_trap_hist is the current write cursor into trap_hist[].
 */
struct mp_trap_hist_struct {
	unsigned char	type;		/* 1 == SPL, 2 == INT */
	unsigned char	data[5];	/* event-specific bytes, per layout comment below */
} trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
    *max_trap_hist = &trap_hist[MTRAPS];
1174 | ||
1175 | void db_trap_hist(void); | |
1176 | ||
1177 | /* | |
1178 | * SPL: | |
1179 | * 1: new spl | |
1180 | * 2: old spl | |
1181 | * 3: new tpr | |
1182 | * 4: old tpr | |
1183 | * INT: | |
1184 | * 1: int vec | |
1185 | * 2: old spl | |
1186 | * 3: new spl | |
1187 | * 4: post eoi tpr | |
1188 | * 5: exit tpr | |
1189 | */ | |
1190 | ||
1191 | void | |
1192 | db_trap_hist(void) | |
1193 | { | |
1194 | int i,j; | |
1195 | for(i=0;i<MTRAPS;i++) | |
1196 | if (trap_hist[i].type == 1 || trap_hist[i].type == 2) { | |
1197 | db_printf("%s%s", | |
1198 | (&trap_hist[i]>=cur_trap_hist)?"*":" ", | |
1199 | (trap_hist[i].type == 1)?"SPL":"INT"); | |
1200 | for(j=0;j<5;j++) | |
1201 | db_printf(" %02x", trap_hist[i].data[j]); | |
1202 | db_printf("\n"); | |
1203 | } | |
1204 | ||
1205 | } | |
1206 | #endif /* TRAP_DEBUG */ | |
1207 | ||
/* Forward declarations for the kdb support routines defined below. */
void db_lapic(int cpu);
unsigned int db_remote_read(int cpu, int reg);
void db_ioapic(unsigned int);
void kdb_console(void);
1212 | ||
/*
 * kdb console hook; intentionally a no-op in this configuration.
 */
void
kdb_console(void)
{
}
1217 | ||
/* '!'-prefix helper for printing boolean flags (no uses visible in this
 * part of the file). */
#define BOOLP(a) ((a)?' ':'!')

/*
 * Delivery-mode names indexed by a 3-bit field value.
 * NOTE(review): presumably the APIC interrupt delivery-mode encoding;
 * the consumers (db_lapic/db_ioapic) are stubbed out below, so confirm
 * against the Intel SDM before relying on individual labels.
 */
static char *DM[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"};
1229 | ||
/*
 * Stub: reading a remote CPU's registers from kdb is not implemented;
 * always returns (unsigned int)-1 (all bits set).
 *
 * Fix: explicitly void the unused parameters so the stub compiles
 * cleanly under -Wunused-parameter.
 */
unsigned int
db_remote_read(int cpu, int reg)
{
	(void)cpu;	/* unused in this stub */
	(void)reg;	/* unused in this stub */
	return -1;
}
1235 | ||
/*
 * Stub: per-CPU local APIC dump for kdb is not implemented here.
 *
 * Fix: explicitly void the unused parameter so the stub compiles
 * cleanly under -Wunused-parameter.
 */
void
db_lapic(int cpu)
{
	(void)cpu;	/* unused in this stub */
}
1240 | ||
/*
 * Stub: I/O APIC dump for kdb is not implemented here.
 *
 * Fix: explicitly void the unused parameter so the stub compiles
 * cleanly under -Wunused-parameter.
 */
void
db_ioapic(unsigned int ind)
{
	(void)ind;	/* unused in this stub */
}
1245 | ||
1246 | #endif /* MACH_KDB */ | |
1247 |