[apple/xnu.git] / osfmk / i386 / mp.c (xnu-792.25.20)
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25
26 #include <mach_rt.h>
27 #include <mach_kdb.h>
28 #include <mach_kdp.h>
29 #include <mach_ldebug.h>
30 #include <gprof.h>
31
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
34
35 #include <kern/kern_types.h>
36 #include <kern/startup.h>
37 #include <kern/processor.h>
38 #include <kern/cpu_number.h>
39 #include <kern/cpu_data.h>
40 #include <kern/assert.h>
41 #include <kern/machine.h>
42 #include <kern/pms.h>
43
44 #include <vm/vm_map.h>
45 #include <vm/vm_kern.h>
46
47 #include <profiling/profile-mk.h>
48
49 #include <i386/mp.h>
50 #include <i386/mp_events.h>
51 #include <i386/mp_slave_boot.h>
52 #include <i386/apic.h>
53 #include <i386/ipl.h>
54 #include <i386/fpu.h>
55 #include <i386/cpuid.h>
56 #include <i386/proc_reg.h>
57 #include <i386/machine_cpu.h>
58 #include <i386/misc_protos.h>
59 #include <i386/mtrr.h>
60 #include <i386/postcode.h>
61 #include <i386/perfmon.h>
62 #include <i386/cpu_threads.h>
63 #include <i386/mp_desc.h>
64 #include <i386/trap.h>
65 #include <i386/machine_routines.h>
66 #include <i386/pmCPU.h>
67 #include <i386/hpet.h>
68
69 #include <chud/chud_xnu.h>
70 #include <chud/chud_xnu_private.h>
71
72 #include <sys/kdebug.h>
73 #if MACH_KDB
74 #include <i386/db_machdep.h>
75 #include <ddb/db_aout.h>
76 #include <ddb/db_access.h>
77 #include <ddb/db_sym.h>
78 #include <ddb/db_variables.h>
79 #include <ddb/db_command.h>
80 #include <ddb/db_output.h>
81 #include <ddb/db_expr.h>
82 #endif
83
84 #if MP_DEBUG
85 #define PAUSE delay(1000000)
86 #define DBG(x...) kprintf(x)
87 #else
88 #define DBG(x...)
89 #define PAUSE
90 #endif /* MP_DEBUG */
91
92 /* Initialize lapic_id so cpu_number() works on non-SMP systems */
93 unsigned long lapic_id_initdata = 0;
94 unsigned long lapic_id = (unsigned long)&lapic_id_initdata;
95 vm_offset_t lapic_start;
96
97 static i386_intr_func_t lapic_timer_func;
98 static i386_intr_func_t lapic_pmi_func;
99 static i386_intr_func_t lapic_thermal_func;
100
101 /* TRUE if the local APIC was enabled by the OS, not by the BIOS */
102 static boolean_t lapic_os_enabled = FALSE;
103
104 /* Base vector for local APIC interrupt sources */
105 int lapic_interrupt_base = LAPIC_DEFAULT_INTERRUPT_BASE;
106
107 void slave_boot_init(void);
108
109 #if MACH_KDB
110 static void mp_kdb_wait(void);
111 volatile boolean_t mp_kdb_trap = FALSE;
112 volatile long mp_kdb_ncpus = 0;
113 #endif
114
115 static void mp_kdp_wait(void);
116 static void mp_rendezvous_action(void);
117
118 static int NMIInterruptHandler(void *regs);
119 static boolean_t cpu_signal_pending(int cpu, mp_event_t event);
120 static void cpu_NMI_interrupt(int cpu);
121
122 boolean_t smp_initialized = FALSE;
123
124 decl_simple_lock_data(,mp_kdp_lock);
125
126 decl_mutex_data(static, mp_cpu_boot_lock);
127
128 /* Variables needed for MP rendezvous. */
129 decl_simple_lock_data(,mp_rv_lock);
130 static void (*mp_rv_setup_func)(void *arg);
131 static void (*mp_rv_action_func)(void *arg);
132 static void (*mp_rv_teardown_func)(void *arg);
133 static void *mp_rv_func_arg;
134 static int mp_rv_ncpus;
135 /* Cache-aligned barriers: */
136 static volatile long mp_rv_entry __attribute__((aligned(64)));
137 static volatile long mp_rv_exit __attribute__((aligned(64)));
138 static volatile long mp_rv_complete __attribute__((aligned(64)));
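/*
 * The 64-byte alignment above keeps each barrier counter on its own cache
 * line, so a cpu spinning on one counter does not false-share the line
 * being written by cpus updating another.
 */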
139
140 int lapic_to_cpu[MAX_CPUS];
141 int cpu_to_lapic[MAX_CPUS];
142
143 static void
144 lapic_cpu_map_init(void)
145 {
146 int i;
147
148 for (i = 0; i < MAX_CPUS; i++) {
149 lapic_to_cpu[i] = -1;
150 cpu_to_lapic[i] = -1;
151 }
152 }
153
154 void
155 lapic_cpu_map(int apic_id, int cpu)
156 {
157 cpu_to_lapic[cpu] = apic_id;
158 lapic_to_cpu[apic_id] = cpu;
159 }
160
161 /*
162  * Retrieve the local apic ID of a cpu.
163  *
164  * Returns the local apic ID for the given processor.
165  * If the processor does not exist or the apic is not configured, returns -1.
166 */
167
168 uint32_t
169 ml_get_apicid(uint32_t cpu)
170 {
171 if(cpu >= (uint32_t)MAX_CPUS)
172 return 0xFFFFFFFF; /* Return -1 if cpu too big */
173
174 /* Return the apic ID (or -1 if not configured) */
175 return (uint32_t)cpu_to_lapic[cpu];
176
177 }
178
179 #ifdef MP_DEBUG
180 static void
181 lapic_cpu_map_dump(void)
182 {
183 int i;
184
185 for (i = 0; i < MAX_CPUS; i++) {
186 if (cpu_to_lapic[i] == -1)
187 continue;
188 kprintf("cpu_to_lapic[%d]: %d\n",
189 i, cpu_to_lapic[i]);
190 }
191 for (i = 0; i < MAX_CPUS; i++) {
192 if (lapic_to_cpu[i] == -1)
193 continue;
194 kprintf("lapic_to_cpu[%d]: %d\n",
195 i, lapic_to_cpu[i]);
196 }
197 }
198 #define LAPIC_CPU_MAP_DUMP() lapic_cpu_map_dump()
199 #define LAPIC_DUMP() lapic_dump()
200 #else
201 #define LAPIC_CPU_MAP_DUMP()
202 #define LAPIC_DUMP()
203 #endif /* MP_DEBUG */
204
205 #if GPROF
206 /*
207  * Initialize dummy structs for profiling. These aren't used but
208  * allow hertz_tick() to be built with GPROF defined.
209 */
210 struct profile_vars _profile_vars;
211 struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars };
212 #define GPROF_INIT() \
213 { \
214 int i; \
215 \
216 /* Hack to initialize pointers to unused profiling structs */ \
217 for (i = 1; i < MAX_CPUS; i++) \
218 _profile_vars_cpus[i] = &_profile_vars; \
219 }
220 #else
221 #define GPROF_INIT()
222 #endif /* GPROF */
223
224 void
225 smp_init(void)
226 {
227 int result;
228 vm_map_entry_t entry;
229 uint32_t lo;
230 uint32_t hi;
231 boolean_t is_boot_processor;
232 boolean_t is_lapic_enabled;
233 vm_offset_t lapic_base;
234
235 simple_lock_init(&mp_kdp_lock, 0);
236 simple_lock_init(&mp_rv_lock, 0);
237 mutex_init(&mp_cpu_boot_lock, 0);
238 console_init();
239
240 /* Local APIC? */
241 if (!lapic_probe())
242 return;
243
244 /* Examine the local APIC state */
245 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
246 is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
247 is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
248 lapic_base = (lo & MSR_IA32_APIC_BASE_BASE);
249 kprintf("MSR_IA32_APIC_BASE 0x%x %s %s\n", lapic_base,
250 is_lapic_enabled ? "enabled" : "disabled",
251 is_boot_processor ? "BSP" : "AP");
252 if (!is_boot_processor || !is_lapic_enabled)
253 panic("Unexpected local APIC state\n");
254
255 /* Establish a map to the local apic */
256 lapic_start = vm_map_min(kernel_map);
257 result = vm_map_find_space(kernel_map,
258 (vm_map_address_t *) &lapic_start,
259 round_page(LAPIC_SIZE), 0,
260 VM_MAKE_TAG(VM_MEMORY_IOKIT), &entry);
261 if (result != KERN_SUCCESS) {
262 panic("smp_init: vm_map_find_space FAILED (err=%d)", result);
263 }
264 vm_map_unlock(kernel_map);
265 /* Map in the local APIC non-cacheable, as recommended by Intel
266 * in section 8.4.1 of the "System Programming Guide".
267 */
268 pmap_enter(pmap_kernel(),
269 lapic_start,
270 (ppnum_t) i386_btop(lapic_base),
271 VM_PROT_READ|VM_PROT_WRITE,
272 VM_WIMG_IO,
273 TRUE);
274 lapic_id = (unsigned long)(lapic_start + LAPIC_ID);
275
276 if ((LAPIC_REG(VERSION)&LAPIC_VERSION_MASK) != 0x14) {
277 printf("Local APIC version not 0x14 as expected\n");
278 }
279
280 /* Set up the lapic_id <-> cpu_number map and add this boot processor */
281 lapic_cpu_map_init();
282 lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
283 kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
284
285 lapic_init();
286
287 cpu_thread_init();
288
289 GPROF_INIT();
290 DBGLOG_CPU_INIT(master_cpu);
291
292 slave_boot_init();
293
294 smp_initialized = TRUE;
295
296 return;
297 }
298
299
300 static int
301 lapic_esr_read(void)
302 {
303 /* write-read register */
304 LAPIC_REG(ERROR_STATUS) = 0;
305 return LAPIC_REG(ERROR_STATUS);
306 }
307
308 static void
309 lapic_esr_clear(void)
310 {
311 LAPIC_REG(ERROR_STATUS) = 0;
312 LAPIC_REG(ERROR_STATUS) = 0;
313 }
314
315 static const char *DM[8] = {
316 "Fixed",
317 "Lowest Priority",
318 "Invalid",
319 "Invalid",
320 "NMI",
321 "Reset",
322 "Invalid",
323 "ExtINT"};
324
325 void
326 lapic_dump(void)
327 {
328 int i;
329
330 #define BOOL(a) ((a)?' ':'!')
331
332 kprintf("LAPIC %d at 0x%x version 0x%x\n",
333 (LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
334 lapic_start,
335 LAPIC_REG(VERSION)&LAPIC_VERSION_MASK);
336 kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
337 LAPIC_REG(TPR)&LAPIC_TPR_MASK,
338 LAPIC_REG(APR)&LAPIC_APR_MASK,
339 LAPIC_REG(PPR)&LAPIC_PPR_MASK);
340 kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
341 LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT,
342 LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT);
343 kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
344 BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE),
345 BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)),
346 LAPIC_REG(SVR) & LAPIC_SVR_MASK);
347 kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
348 LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK,
349 (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
350 BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED),
351 (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot");
352 kprintf(" Initial Count: 0x%08x \n", LAPIC_REG(TIMER_INITIAL_COUNT));
353 kprintf(" Current Count: 0x%08x \n", LAPIC_REG(TIMER_CURRENT_COUNT));
354 kprintf(" Divide Config: 0x%08x \n", LAPIC_REG(TIMER_DIVIDE_CONFIG));
355 kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
356 LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK,
357 DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
358 (LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
359 BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED));
360 kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
361 LAPIC_REG(LVT_THERMAL)&LAPIC_LVT_VECTOR_MASK,
362 DM[(LAPIC_REG(LVT_THERMAL)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
363 (LAPIC_REG(LVT_THERMAL)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
364 BOOL(LAPIC_REG(LVT_THERMAL)&LAPIC_LVT_MASKED));
365 kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
366 LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK,
367 DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
368 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
369 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
370 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
371 BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED));
372 kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
373 LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK,
374 DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
375 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
376 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
377 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
378 BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED));
379 kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
380 LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK,
381 (LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
382 BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED));
383 kprintf("ESR: %08x \n", lapic_esr_read());
384 kprintf(" ");
385 for(i=0xf; i>=0; i--)
386 kprintf("%x%x%x%x",i,i,i,i);
387 kprintf("\n");
388 kprintf("TMR: 0x");
389 for(i=7; i>=0; i--)
390 kprintf("%08x",LAPIC_REG_OFFSET(TMR_BASE, i*0x10));
391 kprintf("\n");
392 kprintf("IRR: 0x");
393 for(i=7; i>=0; i--)
394 kprintf("%08x",LAPIC_REG_OFFSET(IRR_BASE, i*0x10));
395 kprintf("\n");
396 kprintf("ISR: 0x");
397 for(i=7; i >= 0; i--)
398 kprintf("%08x",LAPIC_REG_OFFSET(ISR_BASE, i*0x10));
399 kprintf("\n");
400 }
401
402 #if MACH_KDB
403 /*
404 * Displays apic junk
405 *
406 * da
407 */
408 void
409 db_apic(__unused db_expr_t addr,
410 __unused int have_addr,
411 __unused db_expr_t count,
412 __unused char *modif)
413 {
414
415 lapic_dump();
416
417 return;
418 }
419
420 #endif
421
422 boolean_t
423 lapic_probe(void)
424 {
425 uint32_t lo;
426 uint32_t hi;
427
428 if (cpuid_features() & CPUID_FEATURE_APIC)
429 return TRUE;
430
431 if (cpuid_family() == 6 || cpuid_family() == 15) {
432 /*
433 * Mobile Pentiums:
434 * There may be a local APIC which wasn't enabled by BIOS.
435 * So we try to enable it explicitly.
436 */
437 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
438 lo &= ~MSR_IA32_APIC_BASE_BASE;
439 lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
441 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
442
443 /*
444 * Re-initialize cpu features info and re-check.
445 */
446 cpuid_set_info();
447 if (cpuid_features() & CPUID_FEATURE_APIC) {
448 printf("Local APIC discovered and enabled\n");
449 lapic_os_enabled = TRUE;
450 lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
451 return TRUE;
452 }
453 }
454
455 return FALSE;
456 }
457
458 void
459 lapic_shutdown(void)
460 {
461 uint32_t lo;
462 uint32_t hi;
463 uint32_t value;
464
465 /* Shutdown if local APIC was enabled by OS */
466 if (lapic_os_enabled == FALSE)
467 return;
468
469 mp_disable_preemption();
470
471 /* ExtINT: masked */
472 if (get_cpu_number() == master_cpu) {
473 value = LAPIC_REG(LVT_LINT0);
474 value |= LAPIC_LVT_MASKED;
475 LAPIC_REG(LVT_LINT0) = value;
476 }
477
478 /* Timer: masked */
479 LAPIC_REG(LVT_TIMER) |= LAPIC_LVT_MASKED;
480
481 /* Perfmon: masked */
482 LAPIC_REG(LVT_PERFCNT) |= LAPIC_LVT_MASKED;
483
484 /* Error: masked */
485 LAPIC_REG(LVT_ERROR) |= LAPIC_LVT_MASKED;
486
487 /* APIC software disabled */
488 LAPIC_REG(SVR) &= ~LAPIC_SVR_ENABLE;
489
490 /* Bypass the APIC completely and update cpu features */
491 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
492 lo &= ~MSR_IA32_APIC_BASE_ENABLE;
493 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
494 cpuid_set_info();
495
496 mp_enable_preemption();
497 }
498
499 void
500 lapic_init(void)
501 {
502 int value;
503
504 /* Set flat delivery model, logical processor id */
505 LAPIC_REG(DFR) = LAPIC_DFR_FLAT;
506 LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT;
507
508 /* Accept all */
509 LAPIC_REG(TPR) = 0;
510
511 LAPIC_REG(SVR) = LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE;
512
513 /* ExtINT */
514 if (get_cpu_number() == master_cpu) {
515 value = LAPIC_REG(LVT_LINT0);
516 value &= ~LAPIC_LVT_MASKED;
517 value |= LAPIC_LVT_DM_EXTINT;
518 LAPIC_REG(LVT_LINT0) = value;
519 }
520
521 /* Timer: unmasked, one-shot */
522 LAPIC_REG(LVT_TIMER) = LAPIC_VECTOR(TIMER);
523
524 /* Perfmon: unmasked */
525 LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);
526
527 /* Thermal: unmasked */
528 LAPIC_REG(LVT_THERMAL) = LAPIC_VECTOR(THERMAL);
529
530 lapic_esr_clear();
531
532 LAPIC_REG(LVT_ERROR) = LAPIC_VECTOR(ERROR);
533 }
534
535 void
536 lapic_set_timer_func(i386_intr_func_t func)
537 {
538 lapic_timer_func = func;
539 }
540
541 void
542 lapic_set_timer(
543 boolean_t interrupt,
544 lapic_timer_mode_t mode,
545 lapic_timer_divide_t divisor,
546 lapic_timer_count_t initial_count)
547 {
548 boolean_t state;
549 uint32_t timer_vector;
550
551 state = ml_set_interrupts_enabled(FALSE);
552 timer_vector = LAPIC_REG(LVT_TIMER);
553 timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);
554 timer_vector |= interrupt ? 0 : LAPIC_LVT_MASKED;
555 timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
556 LAPIC_REG(LVT_TIMER) = timer_vector;
557 LAPIC_REG(TIMER_DIVIDE_CONFIG) = divisor;
558 LAPIC_REG(TIMER_INITIAL_COUNT) = initial_count;
559 ml_set_interrupts_enabled(state);
560 }
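/*
 * Illustrative usage (a sketch, not code from this file): re-arm the timer
 * one-shot with whatever divisor is currently programmed, leaving the LVT
 * entry unmasked so an interrupt is delivered:
 *
 *	lapic_timer_divide_t	divisor;
 *	lapic_timer_count_t	initial;
 *
 *	lapic_get_timer(NULL, &divisor, &initial, NULL);
 *	lapic_set_timer(TRUE, one_shot, divisor, initial);
 *
 * The resulting interrupt reaches the handler registered via
 * lapic_set_timer_func() through lapic_interrupt() below.
 */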
561
562 void
563 lapic_get_timer(
564 lapic_timer_mode_t *mode,
565 lapic_timer_divide_t *divisor,
566 lapic_timer_count_t *initial_count,
567 lapic_timer_count_t *current_count)
568 {
569 boolean_t state;
570
571 state = ml_set_interrupts_enabled(FALSE);
572 if (mode)
573 *mode = (LAPIC_REG(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
574 periodic : one_shot;
575 if (divisor)
576 *divisor = LAPIC_REG(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
577 if (initial_count)
578 *initial_count = LAPIC_REG(TIMER_INITIAL_COUNT);
579 if (current_count)
580 *current_count = LAPIC_REG(TIMER_CURRENT_COUNT);
581 ml_set_interrupts_enabled(state);
582 }
583
584 void
585 lapic_set_pmi_func(i386_intr_func_t func)
586 {
587 lapic_pmi_func = func;
588 }
589
590 void
591 lapic_set_thermal_func(i386_intr_func_t func)
592 {
593 lapic_thermal_func = func;
594 }
595
596 static inline void
597 _lapic_end_of_interrupt(void)
598 {
599 LAPIC_REG(EOI) = 0;
600 }
601
602 void
603 lapic_end_of_interrupt(void)
604 {
605 _lapic_end_of_interrupt();
606 }
607
608 int
609 lapic_interrupt(int interrupt, x86_saved_state_t *state)
610 {
611 int retval = 0;
612
613 /* Did we just field an interrupt for the HPET comparator? */
614 if(current_cpu_datap()->cpu_pmHpetVec == ((uint32_t)interrupt - 0x40)) {
615 /* Yes, go handle it... */
616 retval = HPETInterrupt();
617 /* Was it really handled? */
618 if(retval) {
619 /* If so, EOI the interrupt */
620 _lapic_end_of_interrupt();
621 /*
622 * and then leave,
623 * indicating that this has been handled
624 */
625 return 1;
626 }
627 }
628
629 interrupt -= lapic_interrupt_base;
630 if (interrupt < 0) {
631 if (interrupt == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base)) {
632 retval = NMIInterruptHandler(state);
633 _lapic_end_of_interrupt();
634 return retval;
635 }
636 else
637 return 0;
638 }
639
640 switch(interrupt) {
641 case LAPIC_PERFCNT_INTERRUPT:
642 if (lapic_pmi_func != NULL)
643 (*lapic_pmi_func)(NULL);
644 /* Clear interrupt masked */
645 LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);
646 _lapic_end_of_interrupt();
647 retval = 1;
648 break;
649 case LAPIC_TIMER_INTERRUPT:
650 _lapic_end_of_interrupt();
651 if (lapic_timer_func != NULL)
652 (*lapic_timer_func)(state);
653 retval = 1;
654 break;
655 case LAPIC_THERMAL_INTERRUPT:
656 if (lapic_thermal_func != NULL)
657 (*lapic_thermal_func)(NULL);
658 _lapic_end_of_interrupt();
659 retval = 1;
660 break;
661 case LAPIC_ERROR_INTERRUPT:
662 lapic_dump();
663 panic("Local APIC error\n");
664 _lapic_end_of_interrupt();
665 retval = 1;
666 break;
667 case LAPIC_SPURIOUS_INTERRUPT:
668 kprintf("SPIV\n");
669 /* No EOI required here */
670 retval = 1;
671 break;
672 case LAPIC_INTERPROCESSOR_INTERRUPT:
673 _lapic_end_of_interrupt();
674 cpu_signal_handler(state);
675 retval = 1;
676 break;
677 }
678
679 return retval;
680 }
681
682 void
683 lapic_smm_restore(void)
684 {
685 boolean_t state;
686
687 if (lapic_os_enabled == FALSE)
688 return;
689
690 state = ml_set_interrupts_enabled(FALSE);
691
692 if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
693 /*
694  * A bogus SMI handler enables interrupts but does not know about
695  * local APIC interrupt sources. When the APIC timer counts down to
696  * zero while in SMM, the local APIC ends up waiting for an EOI,
697  * but no interrupt was delivered to the OS.
698 */
699 _lapic_end_of_interrupt();
700
701 /*
702  * The timer is one-shot; start another quick countdown to generate
703  * another timer interrupt.
704 */
705 if (LAPIC_REG(TIMER_CURRENT_COUNT) == 0) {
706 LAPIC_REG(TIMER_INITIAL_COUNT) = 1;
707 }
708
709 kprintf("lapic_smm_restore\n");
710 }
711
712 ml_set_interrupts_enabled(state);
713 }
714
715 kern_return_t
716 intel_startCPU(
717 int slot_num)
718 {
719
720 int i = 1000;
721 int lapic = cpu_to_lapic[slot_num];
722
723 assert(lapic != -1);
724
725 DBGLOG_CPU_INIT(slot_num);
726
727 DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
728 DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) IdlePTD);
729
730 /*
731 * Initialize (or re-initialize) the descriptor tables for this cpu.
732 * Propagate processor mode to slave.
733 */
734 if (cpu_mode_is64bit())
735 cpu_desc_init64(cpu_datap(slot_num), FALSE);
736 else
737 cpu_desc_init(cpu_datap(slot_num), FALSE);
738
739 /* Serialize use of the slave boot stack. */
740 mutex_lock(&mp_cpu_boot_lock);
741
742 mp_disable_preemption();
743 if (slot_num == get_cpu_number()) {
744 mp_enable_preemption();
745 mutex_unlock(&mp_cpu_boot_lock);
746 return KERN_SUCCESS;
747 }
748
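/*
 * Standard local APIC startup handshake: an INIT IPI resets the target
 * cpu, then two STARTUP IPIs are sent whose vector field encodes the
 * real-mode start page (MP_BOOT >> 12), i.e. the page to which
 * slave_boot_init() copied the boot code. The delays give the target
 * time to act on each IPI.
 */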
749 LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
750 LAPIC_REG(ICR) = LAPIC_ICR_DM_INIT;
751 delay(10000);
752
753 LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
754 LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
755 delay(200);
756
757 LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
758 LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
759 delay(200);
760
761 #ifdef POSTCODE_DELAY
762 /* Wait much longer if postcodes are displayed for a delay period. */
763 i *= 10000;
764 #endif
765 while(i-- > 0) {
766 if (cpu_datap(slot_num)->cpu_running)
767 break;
768 delay(10000);
769 }
770
771 mp_enable_preemption();
772 mutex_unlock(&mp_cpu_boot_lock);
773
774 if (!cpu_datap(slot_num)->cpu_running) {
775 kprintf("Failed to start CPU %02d\n", slot_num);
776 printf("Failed to start CPU %02d, rebooting...\n", slot_num);
777 delay(1000000);
778 cpu_shutdown();
779 return KERN_SUCCESS;
780 } else {
781 kprintf("Started cpu %d (lapic id 0x%x)\n", slot_num, lapic);
782 printf("Started CPU %02d\n", slot_num);
783 return KERN_SUCCESS;
784 }
785 }
786
787 extern char slave_boot_base[];
788 extern char slave_boot_end[];
789 extern void slave_pstart(void);
790
791 void
792 slave_boot_init(void)
793 {
794 DBG("V(slave_boot_base)=%p P(slave_boot_base)=%p MP_BOOT=%p sz=0x%x\n",
795 slave_boot_base,
796 kvtophys((vm_offset_t) slave_boot_base),
797 MP_BOOT,
798 slave_boot_end-slave_boot_base);
799
800 /*
801 * Copy the boot entry code to the real-mode vector area MP_BOOT.
802 * This is in page 1 which has been reserved for this purpose by
803 * machine_startup() from the boot processor.
804 * The slave boot code is responsible for switching to protected
805 * mode and then jumping to the common startup, _start().
806 */
807 bcopy_phys(kvtophys((vm_offset_t) slave_boot_base),
808 (addr64_t) MP_BOOT,
809 slave_boot_end-slave_boot_base);
810
811 /*
812 * Zero a stack area above the boot code.
813 */
814 DBG("bzero_phys 0x%x sz 0x%x\n",MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
815 bzero_phys((addr64_t)MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
816
817 /*
818 * Set the location at the base of the stack to point to the
819 * common startup entry.
820 */
821 DBG("writing 0x%x at phys 0x%x\n",
822 kvtophys((vm_offset_t) &slave_pstart), MP_MACH_START+MP_BOOT);
823 ml_phys_write_word(MP_MACH_START+MP_BOOT,
824 (unsigned int)kvtophys((vm_offset_t) &slave_pstart));
825
826 /* Flush caches */
827 __asm__("wbinvd");
828 }
829
830 #if MP_DEBUG
831 cpu_signal_event_log_t *cpu_signal[MAX_CPUS];
832 cpu_signal_event_log_t *cpu_handle[MAX_CPUS];
833
834 MP_EVENT_NAME_DECL();
835
836 #endif /* MP_DEBUG */
837
838 void
839 cpu_signal_handler(x86_saved_state_t *regs)
840 {
841 int my_cpu;
842 volatile int *my_word;
843 #if MACH_KDB && MACH_ASSERT
844 int i=100;
845 #endif /* MACH_KDB && MACH_ASSERT */
846
847 mp_disable_preemption();
848
849 my_cpu = cpu_number();
850 my_word = &current_cpu_datap()->cpu_signals;
851
852 do {
853 #if MACH_KDB && MACH_ASSERT
854 if (i-- <= 0)
855 Debugger("cpu_signal_handler: signals did not clear");
856 #endif /* MACH_KDB && MACH_ASSERT */
857 #if MACH_KDP
858 if (i_bit(MP_KDP, my_word)) {
859 DBGLOG(cpu_handle,my_cpu,MP_KDP);
860 i_bit_clear(MP_KDP, my_word);
861 /* Ensure that the i386_kernel_state at the base of the
862 * current thread's stack (if any) is synchronized with the
863 * context at the moment of the interrupt, to facilitate
864 * access through the debugger.
865 * XXX 64-bit state?
866 */
867 sync_iss_to_iks(saved_state32(regs));
868 mp_kdp_wait();
869 } else
870 #endif /* MACH_KDP */
871 if (i_bit(MP_TLB_FLUSH, my_word)) {
872 DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
873 i_bit_clear(MP_TLB_FLUSH, my_word);
874 pmap_update_interrupt();
875 } else if (i_bit(MP_AST, my_word)) {
876 DBGLOG(cpu_handle,my_cpu,MP_AST);
877 i_bit_clear(MP_AST, my_word);
878 ast_check(cpu_to_processor(my_cpu));
879 #if MACH_KDB
880 } else if (i_bit(MP_KDB, my_word)) {
881
882 i_bit_clear(MP_KDB, my_word);
883 current_cpu_datap()->cpu_kdb_is_slave++;
884 mp_kdb_wait();
885 current_cpu_datap()->cpu_kdb_is_slave--;
886 #endif /* MACH_KDB */
887 } else if (i_bit(MP_RENDEZVOUS, my_word)) {
888 DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
889 i_bit_clear(MP_RENDEZVOUS, my_word);
890 mp_rendezvous_action();
891 } else if (i_bit(MP_CHUD, my_word)) {
892 DBGLOG(cpu_handle,my_cpu,MP_CHUD);
893 i_bit_clear(MP_CHUD, my_word);
894 chudxnu_cpu_signal_handler();
895 }
896 } while (*my_word);
897
898 mp_enable_preemption();
899
900 }
901
902
903 /* We want this to show up in backtraces, so mark it noinline
904 */
905 static int __attribute__((noinline))
906 NMIInterruptHandler(void *regs)
907 {
908 boolean_t state = ml_set_interrupts_enabled(FALSE);
909 sync_iss_to_iks_unconditionally(regs);
910 mp_kdp_wait();
911 (void) ml_set_interrupts_enabled(state);
912 return 1;
913 }
914
915 #ifdef MP_DEBUG
916 extern int max_lock_loops;
917 #endif /* MP_DEBUG */
918
919 int trappedalready = 0; /* (BRINGUP) */
920
921 void
922 cpu_interrupt(int cpu)
923 {
924 boolean_t state;
925
926 if(cpu_datap(cpu)->cpu_signals & 6) { /* (BRINGUP) */
927 kprintf("cpu_interrupt: sending enter debugger signal (%08X) to cpu %d\n", cpu_datap(cpu)->cpu_signals, cpu);
928 }
929
930 if (smp_initialized) {
931
932 #if MACH_KDB
933 // if(!trappedalready && (cpu_datap(cpu)->cpu_signals & 6)) { /* (BRINGUP) */
934 // if(kdb_cpu != cpu_number()) {
935 // trappedalready = 1;
936 // panic("cpu_interrupt: sending enter debugger signal (%08X) to cpu %d and I do not own debugger, owner = %08X\n",
937 // cpu_datap(cpu)->cpu_signals, cpu, kdb_cpu);
938 // }
939 // }
940 #endif
941
942 /* Wait for previous interrupt to be delivered... */
943 #ifdef MP_DEBUG
944 int pending_busy_count = 0;
945 while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
946 if (++pending_busy_count > max_lock_loops)
947 panic("cpu_interrupt() deadlock\n");
948 #else
949 while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
950 #endif /* MP_DEBUG */
951 cpu_pause();
952 }
953
954 state = ml_set_interrupts_enabled(FALSE);
955 LAPIC_REG(ICRD) =
956 cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
957 LAPIC_REG(ICR) =
958 LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_FIXED;
959 (void) ml_set_interrupts_enabled(state);
960 }
961
962 }
963
964 /*
965 * Send a true NMI via the local APIC to the specified CPU.
966 */
967 static void
968 cpu_NMI_interrupt(int cpu)
969 {
970 boolean_t state;
971
972 if (smp_initialized) {
973 state = ml_set_interrupts_enabled(FALSE);
974 LAPIC_REG(ICRD) =
975 cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
976 /* The vector is ignored in this case; the other CPU will come in on the
977 * NMI vector.
978 */
979 LAPIC_REG(ICR) =
980 LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_NMI;
981 (void) ml_set_interrupts_enabled(state);
982 }
983
984 }
985
986 void
987 i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
988 {
989 volatile int *signals = &cpu_datap(cpu)->cpu_signals;
990 uint64_t tsc_timeout;
991
992
993 if (!cpu_datap(cpu)->cpu_running)
994 return;
995
996 if (event == MP_TLB_FLUSH)
997 KERNEL_DEBUG(0xef800020 | DBG_FUNC_START, cpu, 0, 0, 0, 0);
998
999 DBGLOG(cpu_signal, cpu, event);
1000
1001 i_bit_set(event, signals);
1002 cpu_interrupt(cpu);
1003 if (mode == SYNC) {
1004 again:
1005 tsc_timeout = rdtsc64() + (1000*1000*1000);
1006 while (i_bit(event, signals) && rdtsc64() < tsc_timeout) {
1007 cpu_pause();
1008 }
1009 if (i_bit(event, signals)) {
1010 DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
1011 cpu, event);
1012 goto again;
1013 }
1014 }
1015 if (event == MP_TLB_FLUSH)
1016 KERNEL_DEBUG(0xef800020 | DBG_FUNC_END, cpu, 0, 0, 0, 0);
1017 }
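/*
 * Illustrative usage (a sketch, not code from this file): ask another cpu
 * to service its pending TLB-flush work and wait for the signal to clear:
 *
 *	i386_signal_cpu(target_cpu, MP_TLB_FLUSH, SYNC);
 *
 * target_cpu is a placeholder. ASYNC callers just get the
 * i_bit_set()/cpu_interrupt() pair above; SYNC callers also spin on the
 * signal bit with a TSC-based timeout before retrying.
 */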
1018
1019 void
1020 i386_signal_cpus(mp_event_t event, mp_sync_t mode)
1021 {
1022 unsigned int cpu;
1023 unsigned int my_cpu = cpu_number();
1024
1025 for (cpu = 0; cpu < real_ncpus; cpu++) {
1026 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1027 continue;
1028 i386_signal_cpu(cpu, event, mode);
1029 }
1030 }
1031
1032 int
1033 i386_active_cpus(void)
1034 {
1035 unsigned int cpu;
1036 unsigned int ncpus = 0;
1037
1038 for (cpu = 0; cpu < real_ncpus; cpu++) {
1039 if (cpu_datap(cpu)->cpu_running)
1040 ncpus++;
1041 }
1042 return(ncpus);
1043 }
1044
1045 /*
1046 * All-CPU rendezvous:
1047 * - CPUs are signalled,
1048 * - all execute the setup function (if specified),
1049 * - rendezvous (i.e. all cpus reach a barrier),
1050 * - all execute the action function (if specified),
1051 * - rendezvous again,
1052 * - execute the teardown function (if specified), and then
1053 * - resume.
1054 *
1055 * Note that the supplied external functions _must_ be reentrant and aware
1056 * that they are running in parallel and in an unknown lock context.
1057 */
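/*
 * Illustrative usage (a sketch, not code from this file): run an action on
 * all cpus at once, e.g. to change a piece of per-cpu state that must be
 * updated everywhere before anyone proceeds:
 *
 *	static void
 *	update_action(void *arg)
 *	{
 *		uint32_t *valuep = (uint32_t *) arg;
 *		... apply *valuep on this cpu ...
 *	}
 *
 *	mp_rendezvous(NULL, update_action, NULL, &new_value);
 *
 * update_action and new_value are placeholder names. A NULL setup or
 * teardown function is simply skipped, as the checks in
 * mp_rendezvous_action() show.
 */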
1058
1059 static void
1060 mp_rendezvous_action(void)
1061 {
1062
1063 /* setup function */
1064 if (mp_rv_setup_func != NULL)
1065 mp_rv_setup_func(mp_rv_func_arg);
1066 /* spin on entry rendezvous */
1067 atomic_incl(&mp_rv_entry, 1);
1068 while (mp_rv_entry < mp_rv_ncpus) {
1069 boolean_t intr = ml_set_interrupts_enabled(FALSE);
1070 /* poll for pesky tlb flushes */
1071 handle_pending_TLB_flushes();
1072 ml_set_interrupts_enabled(intr);
1073 cpu_pause();
1074 }
1075 /* action function */
1076 if (mp_rv_action_func != NULL)
1077 mp_rv_action_func(mp_rv_func_arg);
1078 /* spin on exit rendezvous */
1079 atomic_incl(&mp_rv_exit, 1);
1080 while (mp_rv_exit < mp_rv_ncpus)
1081 cpu_pause();
1082
1083 /* teardown function */
1084 if (mp_rv_teardown_func != NULL)
1085 mp_rv_teardown_func(mp_rv_func_arg);
1086
1087 /* Bump completion count */
1088 atomic_incl(&mp_rv_complete, 1);
1089 }
1090
1091 void
1092 mp_rendezvous(void (*setup_func)(void *),
1093 void (*action_func)(void *),
1094 void (*teardown_func)(void *),
1095 void *arg)
1096 {
1097
1098 if (!smp_initialized) {
1099 if (setup_func != NULL)
1100 setup_func(arg);
1101 if (action_func != NULL)
1102 action_func(arg);
1103 if (teardown_func != NULL)
1104 teardown_func(arg);
1105 return;
1106 }
1107
1108 /* obtain rendezvous lock */
1109 simple_lock(&mp_rv_lock);
1110
1111 /* set static function pointers */
1112 mp_rv_setup_func = setup_func;
1113 mp_rv_action_func = action_func;
1114 mp_rv_teardown_func = teardown_func;
1115 mp_rv_func_arg = arg;
1116
1117 mp_rv_entry = 0;
1118 mp_rv_exit = 0;
1119 mp_rv_complete = 0;
1120
1121 /*
1122 * signal other processors, which will call mp_rendezvous_action()
1123 */
1124 mp_rv_ncpus = i386_active_cpus();
1125 i386_signal_cpus(MP_RENDEZVOUS, ASYNC);
1126
1127 /* call executor function on this cpu */
1128 mp_rendezvous_action();
1129
1130 /*
1131 * Spin for everyone to complete.
1132 * This is necessary to ensure that all processors have proceeded
1133 * from the exit barrier before we release the rendezvous structure.
1134 */
1135 while (mp_rv_complete < mp_rv_ncpus) {
1136 cpu_pause();
1137 }
1138
1139 /* release lock */
1140 simple_unlock(&mp_rv_lock);
1141 }
1142
1143 void
1144 mp_rendezvous_break_lock(void)
1145 {
1146 simple_lock_init(&mp_rv_lock, 0);
1147 }
1148
1149 static void
1150 setup_disable_intrs(__unused void * param_not_used)
1151 {
1152 /* disable interrupts before the first barrier */
1153 boolean_t intr = ml_set_interrupts_enabled(FALSE);
1154
1155 current_cpu_datap()->cpu_iflag = intr;
1156 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
1157 }
1158
1159 static void
1160 teardown_restore_intrs(__unused void * param_not_used)
1161 {
1162 /* restore interrupt flag following MTRR changes */
1163 ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
1164 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
1165 }
1166
1167 /*
1168 * A wrapper to mp_rendezvous() to call action_func() with interrupts disabled.
1169 * This is exported for use by kexts.
1170 */
1171 void
1172 mp_rendezvous_no_intrs(
1173 void (*action_func)(void *),
1174 void *arg)
1175 {
1176 mp_rendezvous(setup_disable_intrs,
1177 action_func,
1178 teardown_restore_intrs,
1179 arg);
1180 }
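/*
 * Illustrative usage (a sketch): this wrapper suits callers like the MTRR
 * update code referred to in teardown_restore_intrs() above; the action
 * runs on every cpu with interrupts disabled, and each cpu's prior
 * interrupt state is restored afterwards:
 *
 *	mp_rendezvous_no_intrs(mtrr_update_action, &mtrr_state);
 *
 * mtrr_update_action and mtrr_state are placeholder names, not symbols
 * defined in this file.
 */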
1181
1182 void
1183 handle_pending_TLB_flushes(void)
1184 {
1185 volatile int *my_word = &current_cpu_datap()->cpu_signals;
1186
1187 if (i_bit(MP_TLB_FLUSH, my_word)) {
1188 DBGLOG(cpu_handle, cpu_number(), MP_TLB_FLUSH);
1189 i_bit_clear(MP_TLB_FLUSH, my_word);
1190 pmap_update_interrupt();
1191 }
1192 }
1193
1194 #if MACH_KDP
1195 volatile boolean_t mp_kdp_trap = FALSE;
1196 volatile long mp_kdp_ncpus;
1197 boolean_t mp_kdp_state;
1198
1199
1200 void
1201 mp_kdp_enter(void)
1202 {
1203 unsigned int cpu;
1204 unsigned int ncpus;
1205 unsigned int my_cpu = cpu_number();
1206 uint64_t tsc_timeout;
1207
1208 DBG("mp_kdp_enter()\n");
1209
1210 /*
1211 * Here to enter the debugger.
1212 * In case of races, only one cpu is allowed to enter kdp after
1213 * stopping others.
1214 */
1215 mp_kdp_state = ml_set_interrupts_enabled(FALSE);
1216 simple_lock(&mp_kdp_lock);
1217
1218 while (mp_kdp_trap) {
1219 simple_unlock(&mp_kdp_lock);
1220 DBG("mp_kdp_enter() race lost\n");
1221 mp_kdp_wait();
1222 simple_lock(&mp_kdp_lock);
1223 }
1224 mp_kdp_ncpus = 1; /* self */
1225 mp_kdp_trap = TRUE;
1226 simple_unlock(&mp_kdp_lock);
1227
1228 /*
1229 * Deliver a nudge to other cpus, counting how many
1230 */
1231 DBG("mp_kdp_enter() signaling other processors\n");
1232 for (ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
1233 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1234 continue;
1235 ncpus++;
1236 i386_signal_cpu(cpu, MP_KDP, ASYNC);
1237 }
1238 /*
1239  * Wait for other processors to synchronize.
1240 */
1241 DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
1242
1243 tsc_timeout = rdtsc64() + (ncpus * 100 * 1000 * 1000);
1244
1245 while (mp_kdp_ncpus != ncpus && rdtsc64() < tsc_timeout) {
1246 /*
1247 * A TLB shootdown request may be pending... this would
1248 * result in the requesting processor waiting in
1249 * PMAP_UPDATE_TLBS() until this processor deals with it.
1250 * Process it, so it can now enter mp_kdp_wait()
1251 */
1252 handle_pending_TLB_flushes();
1253 cpu_pause();
1254 }
1255 /* If we've timed out, and some processor(s) are still unresponsive,
1256 * interrupt them with an NMI via the local APIC.
1257 */
1258 if (mp_kdp_ncpus != ncpus) {
1259 for (cpu = 0; cpu < real_ncpus; cpu++) {
1260 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1261 continue;
1262 if (cpu_signal_pending(cpu, MP_KDP))
1263 cpu_NMI_interrupt(cpu);
1264 }
1265 }
1266
1267 DBG("mp_kdp_enter() %d processors done %s\n",
1268 mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
1269
1270 postcode(MP_KDP_ENTER);
1271 }
1272
1273 static boolean_t
1274 cpu_signal_pending(int cpu, mp_event_t event)
1275 {
1276 volatile int *signals = &cpu_datap(cpu)->cpu_signals;
1277 boolean_t retval = FALSE;
1278
1279 if (i_bit(event, signals))
1280 retval = TRUE;
1281 return retval;
1282 }
1283
1284 static void
1285 mp_kdp_wait(void)
1286 {
1287 DBG("mp_kdp_wait()\n");
1288
1289 panic_io_port_read();
1290
1291 atomic_incl(&mp_kdp_ncpus, 1);
1292 while (mp_kdp_trap) {
1293 /*
1294 * a TLB shootdown request may be pending... this would result in the requesting
1295 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1296 * Process it, so it can now enter mp_kdp_wait()
1297 */
1298 handle_pending_TLB_flushes();
1299
1300 cpu_pause();
1301 }
1302 atomic_decl((volatile long *)&mp_kdp_ncpus, 1);
1303 DBG("mp_kdp_wait() done\n");
1304 }
1305
1306 void
1307 mp_kdp_exit(void)
1308 {
1309 DBG("mp_kdp_exit()\n");
1310 atomic_decl((volatile long *)&mp_kdp_ncpus, 1);
1311 mp_kdp_trap = FALSE;
1312 __asm__ volatile("mfence");
1313
1314 /* Wait for other processors to stop spinning. XXX needs timeout */
1315 DBG("mp_kdp_exit() waiting for processors to resume\n");
1316 while (mp_kdp_ncpus > 0) {
1317 /*
1318 * a TLB shootdown request may be pending... this would result in the requesting
1319 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1320 * Process it, so it can now enter mp_kdp_wait()
1321 */
1322 handle_pending_TLB_flushes();
1323
1324 cpu_pause();
1325 }
1326 DBG("mp_kdp_exit() done\n");
1327 (void) ml_set_interrupts_enabled(mp_kdp_state);
1328 postcode(0);
1329 }
1330 #endif /* MACH_KDP */
1331
1332 /*ARGSUSED*/
1333 void
1334 init_ast_check(
1335 __unused processor_t processor)
1336 {
1337 }
1338
1339 void
1340 cause_ast_check(
1341 processor_t processor)
1342 {
1343 int cpu = PROCESSOR_DATA(processor, slot_num);
1344
1345 if (cpu != cpu_number()) {
1346 i386_signal_cpu(cpu, MP_AST, ASYNC);
1347 }
1348 }
1349
1350 #if MACH_KDB
1351 /*
1352 * invoke kdb on slave processors
1353 */
1354
1355 void
1356 remote_kdb(void)
1357 {
1358 unsigned int my_cpu = cpu_number();
1359 unsigned int cpu;
1360 int kdb_ncpus;
1361 uint64_t tsc_timeout = 0;
1362
1363 mp_kdb_trap = TRUE;
1364 mp_kdb_ncpus = 1;
1365 for (kdb_ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
1366 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1367 continue;
1368 kdb_ncpus++;
1369 i386_signal_cpu(cpu, MP_KDB, ASYNC);
1370 }
1371 DBG("remote_kdb() waiting for (%d) processors to suspend\n",kdb_ncpus);
1372
1373 tsc_timeout = rdtsc64() + (kdb_ncpus * 100 * 1000 * 1000);
1374
1375 while (mp_kdb_ncpus != kdb_ncpus && rdtsc64() < tsc_timeout) {
1376 /*
1377 * a TLB shootdown request may be pending... this would result in the requesting
1378 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1379  * Process it, so it can now enter mp_kdb_wait()
1380 */
1381 handle_pending_TLB_flushes();
1382
1383 cpu_pause();
1384 }
1385 DBG("remote_kdb() %d processors done %s\n",
1386 mp_kdb_ncpus, (mp_kdb_ncpus == kdb_ncpus) ? "OK" : "timed out");
1387 }
1388
1389 static void
1390 mp_kdb_wait(void)
1391 {
1392 DBG("mp_kdb_wait()\n");
1393
1394 panic_io_port_read();
1395
1396 atomic_incl(&mp_kdb_ncpus, 1);
1397 while (mp_kdb_trap) {
1398 /*
1399 * a TLB shootdown request may be pending... this would result in the requesting
1400 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1401  * Process it, so it can now enter mp_kdb_wait()
1402 */
1403 handle_pending_TLB_flushes();
1404
1405 cpu_pause();
1406 }
1407 atomic_decl((volatile long *)&mp_kdb_ncpus, 1);
1408 DBG("mp_kdb_wait() done\n");
1409 }
1410
1411 /*
1412 * Clear kdb interrupt
1413 */
1414
1415 void
1416 clear_kdb_intr(void)
1417 {
1418 mp_disable_preemption();
1419 i_bit_clear(MP_KDB, &current_cpu_datap()->cpu_signals);
1420 mp_enable_preemption();
1421 }
1422
1423 void
1424 mp_kdb_exit(void)
1425 {
1426 DBG("mp_kdb_exit()\n");
1427 atomic_decl((volatile long *)&mp_kdb_ncpus, 1);
1428 mp_kdb_trap = FALSE;
1429 __asm__ volatile("mfence");
1430
1431 while (mp_kdb_ncpus > 0) {
1432 /*
1433 * a TLB shootdown request may be pending... this would result in the requesting
1434 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1435  * Process it, so it can now leave mp_kdb_wait()
1436 */
1437 handle_pending_TLB_flushes();
1438
1439 cpu_pause();
1440 }
1441 DBG("mp_kdb_exit() done\n");
1442 }
1443
1444 #endif /* MACH_KDB */
1445
1446 /*
1447 * i386_init_slave() is called from pstart.
1448 * We're in the cpu's interrupt stack with interrupts disabled.
1449 * At this point we are in legacy mode. We need to switch on IA32e
1450  * if the mode is set to 64-bit.
1451 */
1452 void
1453 i386_init_slave(void)
1454 {
1455 postcode(I386_INIT_SLAVE);
1456
1457 /* Ensure that caching and write-through are enabled */
1458 set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));
1459
1460 DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
1461 get_cpu_number(), get_cpu_phys_number());
1462
1463 assert(!ml_get_interrupts_enabled());
1464 if (cpu_mode_is64bit()) {
1465 cpu_IA32e_enable(current_cpu_datap());
1466 cpu_desc_load64(current_cpu_datap());
1467 fast_syscall_init64();
1468 } else {
1469 fast_syscall_init();
1470 }
1471
1472 mca_cpu_init();
1473
1474 lapic_init();
1475 LAPIC_DUMP();
1476 LAPIC_CPU_MAP_DUMP();
1477
1478 init_fpu();
1479
1480 mtrr_update_cpu();
1481
1482 pat_init();
1483
1484 cpu_thread_init();
1485
1486 cpu_init(); /* Sets cpu_running which starter cpu waits for */
1487
1488 slave_main();
1489
1490 panic("i386_init_slave() returned from slave_main()");
1491 }
1492
1493 void
1494 slave_machine_init(void)
1495 {
1496 /*
1497 * Here in process context, but with interrupts disabled.
1498 */
1499 DBG("slave_machine_init() CPU%d\n", get_cpu_number());
1500
1501 clock_init();
1502
1503 cpu_machine_init(); /* Interrupts enabled hereafter */
1504 }
1505
1506 #undef cpu_number
1507 int cpu_number(void)
1508 {
1509 return get_cpu_number();
1510 }
1511
1512 #if MACH_KDB
1513 #include <ddb/db_output.h>
1514
1515 #define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */
1516
1517
1518 #if TRAP_DEBUG
1519 #define MTRAPS 100
1520 struct mp_trap_hist_struct {
1521 unsigned char type;
1522 unsigned char data[5];
1523 } trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
1524 *max_trap_hist = &trap_hist[MTRAPS];
1525
1526 void db_trap_hist(void);
1527
1528 /*
1529 * SPL:
1530 * 1: new spl
1531 * 2: old spl
1532 * 3: new tpr
1533 * 4: old tpr
1534 * INT:
1535 * 1: int vec
1536 * 2: old spl
1537 * 3: new spl
1538 * 4: post eoi tpr
1539 * 5: exit tpr
1540 */
1541
1542 void
1543 db_trap_hist(void)
1544 {
1545 int i,j;
1546 for(i=0;i<MTRAPS;i++)
1547 if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
1548 db_printf("%s%s",
1549 (&trap_hist[i]>=cur_trap_hist)?"*":" ",
1550 (trap_hist[i].type == 1)?"SPL":"INT");
1551 for(j=0;j<5;j++)
1552 db_printf(" %02x", trap_hist[i].data[j]);
1553 db_printf("\n");
1554 }
1555
1556 }
1557 #endif /* TRAP_DEBUG */
1558 #endif /* MACH_KDB */
1559