/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <mach_rt.h>
#include <mach_kdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>
#include <gprof.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>

#include <kern/kern_types.h>
#include <kern/startup.h>
#include <kern/processor.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <kern/pms.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <profiling/profile-mk.h>

#include <i386/mp.h>
#include <i386/mp_events.h>
#include <i386/mp_slave_boot.h>
#include <i386/apic.h>
#include <i386/ipl.h>
#include <i386/fpu.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <i386/mtrr.h>
#include <i386/postcode.h>
#include <i386/perfmon.h>
#include <i386/cpu_threads.h>
#include <i386/mp_desc.h>
#include <i386/trap.h>
#include <i386/machine_routines.h>
#include <i386/pmCPU.h>
#include <i386/hpet.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <sys/kdebug.h>
#if MACH_KDB
#include <i386/db_machdep.h>
#include <ddb/db_aout.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_expr.h>
#endif

#if MP_DEBUG
#define PAUSE		delay(1000000)
#define DBG(x...)	kprintf(x)
#else
#define DBG(x...)
#define PAUSE
#endif	/* MP_DEBUG */

/* Initialize lapic_id so cpu_number() works on non-SMP systems */
unsigned long	lapic_id_initdata = 0;
unsigned long	lapic_id = (unsigned long)&lapic_id_initdata;
vm_offset_t	lapic_start;

static i386_intr_func_t	lapic_timer_func;
static i386_intr_func_t	lapic_pmi_func;
static i386_intr_func_t	lapic_thermal_func;

/* TRUE if the local APIC was enabled by the OS, not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

/* Base vector for local APIC interrupt sources */
int lapic_interrupt_base = LAPIC_DEFAULT_INTERRUPT_BASE;
void		slave_boot_init(void);

#if MACH_KDB
static void	mp_kdb_wait(void);
volatile boolean_t	mp_kdb_trap = FALSE;
volatile long	mp_kdb_ncpus = 0;
#endif

static void	mp_kdp_wait(void);
static void	mp_rendezvous_action(void);

static int	NMIInterruptHandler(void *regs);
static boolean_t	cpu_signal_pending(int cpu, mp_event_t event);
static void	cpu_NMI_interrupt(int cpu);

boolean_t	smp_initialized = FALSE;

decl_simple_lock_data(,mp_kdp_lock);

decl_mutex_data(static, mp_cpu_boot_lock);

/* Variables needed for MP rendezvous. */
static void	(*mp_rv_setup_func)(void *arg);
static void	(*mp_rv_action_func)(void *arg);
static void	(*mp_rv_teardown_func)(void *arg);
static void	*mp_rv_func_arg;
static int	mp_rv_ncpus;
static volatile long	mp_rv_waiters[2];
decl_simple_lock_data(,mp_rv_lock);

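/*
 * Inverse maps between local APIC IDs and kernel cpu numbers,
 * maintained by lapic_cpu_map().
 */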
int		lapic_to_cpu[MAX_CPUS];
int		cpu_to_lapic[MAX_CPUS];

static void
lapic_cpu_map_init(void)
{
	int	i;

	for (i = 0; i < MAX_CPUS; i++) {
		lapic_to_cpu[i] = -1;
		cpu_to_lapic[i] = -1;
	}
}

void
lapic_cpu_map(int apic_id, int cpu)
{
	cpu_to_lapic[cpu] = apic_id;
	lapic_to_cpu[apic_id] = cpu;
}

/*
 * Retrieve the local APIC ID of a cpu.
 *
 * Returns the local APIC ID for the given processor.
 * If the processor does not exist, or its APIC is not configured, returns -1.
 */
uint32_t
ml_get_apicid(uint32_t cpu)
{
	if (cpu >= (uint32_t)MAX_CPUS)
		return 0xFFFFFFFF;	/* Return -1 (as unsigned) if cpu is out of range */

	/* Return the apic ID (or -1 if not configured) */
	return (uint32_t)cpu_to_lapic[cpu];
}

#ifdef MP_DEBUG
static void
lapic_cpu_map_dump(void)
{
	int	i;

	for (i = 0; i < MAX_CPUS; i++) {
		if (cpu_to_lapic[i] == -1)
			continue;
		kprintf("cpu_to_lapic[%d]: %d\n",
			i, cpu_to_lapic[i]);
	}
	for (i = 0; i < MAX_CPUS; i++) {
		if (lapic_to_cpu[i] == -1)
			continue;
		kprintf("lapic_to_cpu[%d]: %d\n",
			i, lapic_to_cpu[i]);
	}
}
#define LAPIC_CPU_MAP_DUMP()	lapic_cpu_map_dump()
#define LAPIC_DUMP()		lapic_dump()
#else
#define LAPIC_CPU_MAP_DUMP()
#define LAPIC_DUMP()
#endif /* MP_DEBUG */

#if GPROF
/*
 * Initialize dummy structs for profiling. These aren't used but
 * allow hertz_tick() to be built with GPROF defined.
 */
struct profile_vars _profile_vars;
struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars };
#define GPROF_INIT()							\
{									\
	int	i;							\
									\
	/* Hack to initialize pointers to unused profiling structs */	\
	for (i = 1; i < MAX_CPUS; i++)					\
		_profile_vars_cpus[i] = &_profile_vars;			\
}
#else
#define GPROF_INIT()
#endif /* GPROF */

void
smp_init(void)
{
	int		result;
	vm_map_entry_t	entry;
	uint32_t	lo;
	uint32_t	hi;
	boolean_t	is_boot_processor;
	boolean_t	is_lapic_enabled;
	vm_offset_t	lapic_base;

	simple_lock_init(&mp_kdp_lock, 0);
	simple_lock_init(&mp_rv_lock, 0);
	mutex_init(&mp_cpu_boot_lock, 0);
	console_init();

	/* Local APIC? */
	if (!lapic_probe())
		return;

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	lapic_base = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE 0x%x %s %s\n", lapic_base,
		is_lapic_enabled ? "enabled" : "disabled",
		is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled)
		panic("Unexpected local APIC state\n");

	/* Establish a map to the local apic */
	lapic_start = vm_map_min(kernel_map);
	result = vm_map_find_space(kernel_map, &lapic_start,
				   round_page(LAPIC_SIZE), 0,
				   VM_MAKE_TAG(VM_MEMORY_IOKIT), &entry);
	if (result != KERN_SUCCESS) {
		panic("smp_init: vm_map_find_space FAILED (err=%d)", result);
	}
	vm_map_unlock(kernel_map);
	/* Map in the local APIC non-cacheable, as recommended by Intel
	 * in section 8.4.1 of the "System Programming Guide".
	 */
	pmap_enter(pmap_kernel(),
		   lapic_start,
		   (ppnum_t) i386_btop(lapic_base),
		   VM_PROT_READ|VM_PROT_WRITE,
		   VM_WIMG_IO,
		   TRUE);
	lapic_id = (unsigned long)(lapic_start + LAPIC_ID);

	if ((LAPIC_REG(VERSION)&LAPIC_VERSION_MASK) != 0x14) {
		printf("Local APIC version not 0x14 as expected\n");
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
	kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);

	lapic_init();

	cpu_thread_init();

	GPROF_INIT();
	DBGLOG_CPU_INIT(master_cpu);

	slave_boot_init();

	smp_initialized = TRUE;

	return;
}


static int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_REG(ERROR_STATUS) = 0;
	return LAPIC_REG(ERROR_STATUS);
}

static void
lapic_esr_clear(void)
{
	LAPIC_REG(ERROR_STATUS) = 0;
	LAPIC_REG(ERROR_STATUS) = 0;
}
static const char *DM[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"
};

void
lapic_dump(void)
{
	int	i;

#define BOOL(a) ((a)?' ':'!')

	kprintf("LAPIC %d at 0x%x version 0x%x\n",
		(LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
		lapic_start,
		LAPIC_REG(VERSION)&LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
		LAPIC_REG(TPR)&LAPIC_TPR_MASK,
		LAPIC_REG(APR)&LAPIC_APR_MASK,
		LAPIC_REG(PPR)&LAPIC_PPR_MASK);
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
		LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT,
		LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
		BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE),
		BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)),
		LAPIC_REG(SVR) & LAPIC_SVR_MASK);
	kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
		LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED),
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot");
	kprintf("  Initial Count: 0x%08x \n", LAPIC_REG(TIMER_INITIAL_COUNT));
	kprintf("  Current Count: 0x%08x \n", LAPIC_REG(TIMER_CURRENT_COUNT));
	kprintf("  Divide Config: 0x%08x \n", LAPIC_REG(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
		LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED));
	kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
		LAPIC_REG(LVT_THERMAL)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_THERMAL)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_THERMAL)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_THERMAL)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED));
	kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
		LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED));
	kprintf("ESR: %08x \n", lapic_esr_read());
	kprintf(" ");
	for (i = 0xf; i >= 0; i--)
		kprintf("%x%x%x%x", i, i, i, i);
	kprintf("\n");
	kprintf("TMR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_REG_OFFSET(TMR_BASE, i*0x10));
	kprintf("\n");
	kprintf("IRR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_REG_OFFSET(IRR_BASE, i*0x10));
	kprintf("\n");
	kprintf("ISR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_REG_OFFSET(ISR_BASE, i*0x10));
	kprintf("\n");
}

#if MACH_KDB
/*
 * Display local APIC state.
 *
 * ddb command: da
 */
void
db_apic(__unused db_expr_t addr,
	__unused int have_addr,
	__unused db_expr_t count,
	__unused char *modif)
{
	lapic_dump();

	return;
}

#endif

boolean_t
lapic_probe(void)
{
	uint32_t	lo;
	uint32_t	hi;

	if (cpuid_features() & CPUID_FEATURE_APIC)
		return TRUE;

	if (cpuid_family() == 6 || cpuid_family() == 15) {
		/*
		 * Mobile Pentiums:
		 * There may be a local APIC which wasn't enabled by the BIOS.
		 * So we try to enable it explicitly.
		 */
		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
		lo &= ~MSR_IA32_APIC_BASE_BASE;
		lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
		wrmsr(MSR_IA32_APIC_BASE, lo, hi);

		/*
		 * Re-initialize cpu features info and re-check.
		 */
		cpuid_set_info();
		if (cpuid_features() & CPUID_FEATURE_APIC) {
			printf("Local APIC discovered and enabled\n");
			lapic_os_enabled = TRUE;
			lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
			return TRUE;
		}
	}

	return FALSE;
}

void
lapic_shutdown(void)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE)
		return;

	mp_disable_preemption();

	/* ExtINT: masked */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_REG(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_REG(LVT_LINT0) = value;
	}

	/* Timer: masked */
	LAPIC_REG(LVT_TIMER) |= LAPIC_LVT_MASKED;

	/* Perfmon: masked */
	LAPIC_REG(LVT_PERFCNT) |= LAPIC_LVT_MASKED;

	/* Error: masked */
	LAPIC_REG(LVT_ERROR) |= LAPIC_LVT_MASKED;

	/* APIC software disabled */
	LAPIC_REG(SVR) &= ~LAPIC_SVR_ENABLE;

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	cpuid_set_info();

	mp_enable_preemption();
}

void
lapic_init(void)
{
	int	value;

	/* Set flat delivery model, logical processor id */
	LAPIC_REG(DFR) = LAPIC_DFR_FLAT;
	LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT;

	/* Accept all */
	LAPIC_REG(TPR) = 0;

	LAPIC_REG(SVR) = LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE;

	/* ExtINT */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_REG(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_REG(LVT_LINT0) = value;
	}

	/* Timer: unmasked, one-shot */
	LAPIC_REG(LVT_TIMER) = LAPIC_VECTOR(TIMER);

	/* Perfmon: unmasked */
	LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);

	/* Thermal: unmasked */
	LAPIC_REG(LVT_THERMAL) = LAPIC_VECTOR(THERMAL);

	lapic_esr_clear();

	LAPIC_REG(LVT_ERROR) = LAPIC_VECTOR(ERROR);
}

void
lapic_set_timer_func(i386_intr_func_t func)
{
	lapic_timer_func = func;
}

void
lapic_set_timer(
	boolean_t		interrupt,
	lapic_timer_mode_t	mode,
	lapic_timer_divide_t	divisor,
	lapic_timer_count_t	initial_count)
{
	boolean_t	state;
	uint32_t	timer_vector;

	state = ml_set_interrupts_enabled(FALSE);
	timer_vector = LAPIC_REG(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);
	timer_vector |= interrupt ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_REG(LVT_TIMER) = timer_vector;
	LAPIC_REG(TIMER_DIVIDE_CONFIG) = divisor;
	LAPIC_REG(TIMER_INITIAL_COUNT) = initial_count;
	ml_set_interrupts_enabled(state);
}

void
lapic_get_timer(
	lapic_timer_mode_t	*mode,
	lapic_timer_divide_t	*divisor,
	lapic_timer_count_t	*initial_count,
	lapic_timer_count_t	*current_count)
{
	boolean_t	state;

	state = ml_set_interrupts_enabled(FALSE);
	if (mode)
		*mode = (LAPIC_REG(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
			periodic : one_shot;
	if (divisor)
		*divisor = LAPIC_REG(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
	if (initial_count)
		*initial_count = LAPIC_REG(TIMER_INITIAL_COUNT);
	if (current_count)
		*current_count = LAPIC_REG(TIMER_CURRENT_COUNT);
	ml_set_interrupts_enabled(state);
}
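
/*
 * Illustrative sketch (not from the original source): a client with a
 * registered handler could arm a one-shot countdown like this, where
 * my_timer_handler is a hypothetical i386_intr_func_t and "divisor" and
 * "count" stand for caller-chosen lapic_timer_divide_t and
 * lapic_timer_count_t values:
 *
 *	lapic_set_timer_func(my_timer_handler);
 *	lapic_set_timer(TRUE, one_shot, divisor, count);
 */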

void
lapic_set_pmi_func(i386_intr_func_t func)
{
	lapic_pmi_func = func;
}

void
lapic_set_thermal_func(i386_intr_func_t func)
{
	lapic_thermal_func = func;
}

static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_REG(EOI) = 0;
}

void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}

int
lapic_interrupt(int interrupt, x86_saved_state_t *state)
{
	int	retval = 0;

	/* Did we just field an interrupt for the HPET comparator? */
	if (current_cpu_datap()->cpu_pmHpetVec == ((uint32_t)interrupt - 0x40)) {
		/* Yes, go handle it... */
		retval = HPETInterrupt();
		/* Was it really handled? */
		if (retval) {
			/* If so, EOI the interrupt */
			_lapic_end_of_interrupt();
			/*
			 * and then leave,
			 * indicating that this has been handled
			 */
			return 1;
		}
	}

	interrupt -= lapic_interrupt_base;
	if (interrupt < 0) {
		if (interrupt == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base)) {
			retval = NMIInterruptHandler(state);
			_lapic_end_of_interrupt();
			return retval;
		}
		else
			return 0;
	}

	switch (interrupt) {
	case LAPIC_PERFCNT_INTERRUPT:
		if (lapic_pmi_func != NULL)
			(*lapic_pmi_func)(NULL);
		/* Clear interrupt masked */
		LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_TIMER_INTERRUPT:
		_lapic_end_of_interrupt();
		if (lapic_timer_func != NULL)
			(*lapic_timer_func)(state);
		retval = 1;
		break;
	case LAPIC_THERMAL_INTERRUPT:
		if (lapic_thermal_func != NULL)
			(*lapic_thermal_func)(NULL);
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_ERROR_INTERRUPT:
		lapic_dump();
		panic("Local APIC error\n");
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		kprintf("SPIV\n");
		/* No EOI required here */
		retval = 1;
		break;
	case LAPIC_INTERPROCESSOR_INTERRUPT:
		_lapic_end_of_interrupt();
		cpu_signal_handler(state);
		retval = 1;
		break;
	}

	return retval;
}

void
lapic_smm_restore(void)
{
	boolean_t state;

	if (lapic_os_enabled == FALSE)
		return;

	state = ml_set_interrupts_enabled(FALSE);

	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
		/*
		 * Bogus SMI handler enables interrupts but does not know about
		 * local APIC interrupt sources. When the APIC timer counts down
		 * to zero while in SMM, the local APIC ends up waiting for an
		 * EOI but no interrupt was delivered to the OS.
		 */
		_lapic_end_of_interrupt();

		/*
		 * The timer is one-shot; load a quick countdown so that
		 * another timer interrupt is generated.
		 */
		if (LAPIC_REG(TIMER_CURRENT_COUNT) == 0) {
			LAPIC_REG(TIMER_INITIAL_COUNT) = 1;
		}

		kprintf("lapic_smm_restore\n");
	}

	ml_set_interrupts_enabled(state);
}

kern_return_t
intel_startCPU(
	int	slot_num)
{
	int	i = 1000;
	int	lapic = cpu_to_lapic[slot_num];

	assert(lapic != -1);

	DBGLOG_CPU_INIT(slot_num);

	DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
	DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) IdlePTD);

	/*
	 * Initialize (or re-initialize) the descriptor tables for this cpu.
	 * Propagate processor mode to slave.
	 */
	if (cpu_mode_is64bit())
		cpu_desc_init64(cpu_datap(slot_num), FALSE);
	else
		cpu_desc_init(cpu_datap(slot_num), FALSE);

	/* Serialize use of the slave boot stack. */
	mutex_lock(&mp_cpu_boot_lock);

	mp_disable_preemption();
	if (slot_num == get_cpu_number()) {
		mp_enable_preemption();
		mutex_unlock(&mp_cpu_boot_lock);
		return KERN_SUCCESS;
	}

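	/*
	 * Start the target with the Intel MP-spec "universal startup"
	 * sequence: an INIT IPI to reset it, then two STARTUP IPIs whose
	 * vector field (MP_BOOT >> 12) is the page number of the real-mode
	 * boot code copied there by slave_boot_init().
	 */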
	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) = LAPIC_ICR_DM_INIT;
	delay(10000);

	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
	delay(200);

	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
	delay(200);

#ifdef	POSTCODE_DELAY
	/* Wait much longer if postcodes are displayed for a delay period. */
	i *= 10000;
#endif
	while (i-- > 0) {
		if (cpu_datap(slot_num)->cpu_running)
			break;
		delay(10000);
	}

	mp_enable_preemption();
	mutex_unlock(&mp_cpu_boot_lock);

	if (!cpu_datap(slot_num)->cpu_running) {
		kprintf("Failed to start CPU %02d\n", slot_num);
		printf("Failed to start CPU %02d, rebooting...\n", slot_num);
		delay(1000000);
		cpu_shutdown();
		return KERN_SUCCESS;
	} else {
		kprintf("Started cpu %d (lapic id 0x%x)\n", slot_num, lapic);
		printf("Started CPU %02d\n", slot_num);
		return KERN_SUCCESS;
	}
}

extern char	slave_boot_base[];
extern char	slave_boot_end[];
extern void	slave_pstart(void);

void
slave_boot_init(void)
{
	DBG("V(slave_boot_base)=%p P(slave_boot_base)=%p MP_BOOT=%p sz=0x%x\n",
	    slave_boot_base,
	    kvtophys((vm_offset_t) slave_boot_base),
	    MP_BOOT,
	    slave_boot_end-slave_boot_base);

	/*
	 * Copy the boot entry code to the real-mode vector area MP_BOOT.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys(kvtophys((vm_offset_t) slave_boot_base),
		   (addr64_t) MP_BOOT,
		   slave_boot_end-slave_boot_base);

	/*
	 * Zero a stack area above the boot code.
	 */
	DBG("bzero_phys 0x%x sz 0x%x\n", MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
	bzero_phys((addr64_t)MP_BOOTSTACK+MP_BOOT-0x400, 0x400);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	DBG("writing 0x%x at phys 0x%x\n",
	    kvtophys((vm_offset_t) &slave_pstart), MP_MACH_START+MP_BOOT);
	ml_phys_write_word(MP_MACH_START+MP_BOOT,
			   (unsigned int)kvtophys((vm_offset_t) &slave_pstart));

	/* Flush caches */
	__asm__("wbinvd");
}

#if	MP_DEBUG
cpu_signal_event_log_t	*cpu_signal[MAX_CPUS];
cpu_signal_event_log_t	*cpu_handle[MAX_CPUS];

MP_EVENT_NAME_DECL();

#endif	/* MP_DEBUG */

void
cpu_signal_handler(x86_saved_state_t *regs)
{
	int		my_cpu;
	volatile int	*my_word;
#if	MACH_KDB && MACH_ASSERT
	int		i = 100;
#endif	/* MACH_KDB && MACH_ASSERT */

	mp_disable_preemption();

	my_cpu = cpu_number();
	my_word = &current_cpu_datap()->cpu_signals;

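	/* Service and clear each pending signal bit, looping until none remain. */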
	do {
#if	MACH_KDB && MACH_ASSERT
		if (i-- <= 0)
			Debugger("cpu_signal_handler: signals did not clear");
#endif	/* MACH_KDB && MACH_ASSERT */
#if	MACH_KDP
		if (i_bit(MP_KDP, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_KDP);
			i_bit_clear(MP_KDP, my_word);
			/* Ensure that the i386_kernel_state at the base of the
			 * current thread's stack (if any) is synchronized with the
			 * context at the moment of the interrupt, to facilitate
			 * access through the debugger.
			 * XXX 64-bit state?
			 */
			sync_iss_to_iks(saved_state32(regs));
			mp_kdp_wait();
		} else
#endif	/* MACH_KDP */
		if (i_bit(MP_TLB_FLUSH, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
			i_bit_clear(MP_TLB_FLUSH, my_word);
			pmap_update_interrupt();
		} else if (i_bit(MP_AST, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_AST);
			i_bit_clear(MP_AST, my_word);
			ast_check(cpu_to_processor(my_cpu));
#if	MACH_KDB
		} else if (i_bit(MP_KDB, my_word)) {
			i_bit_clear(MP_KDB, my_word);
			current_cpu_datap()->cpu_kdb_is_slave++;
			mp_kdb_wait();
			current_cpu_datap()->cpu_kdb_is_slave--;
#endif	/* MACH_KDB */
		} else if (i_bit(MP_RENDEZVOUS, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
			i_bit_clear(MP_RENDEZVOUS, my_word);
			mp_rendezvous_action();
		} else if (i_bit(MP_CHUD, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_CHUD);
			i_bit_clear(MP_CHUD, my_word);
			chudxnu_cpu_signal_handler();
		}
	} while (*my_word);

	mp_enable_preemption();
}


/*
 * We want this to show up in backtraces, so mark it noinline.
 */
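/*
 * NMIs are used by mp_kdp_enter() to stop CPUs that have not yet
 * responded to the MP_KDP signal; this handler simply parks the
 * processor in mp_kdp_wait() until the debugger releases it.
 */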
static int __attribute__((noinline))
NMIInterruptHandler(void *regs)
{
	boolean_t state = ml_set_interrupts_enabled(FALSE);

	sync_iss_to_iks_unconditionally(regs);
	mp_kdp_wait();
	(void) ml_set_interrupts_enabled(state);
	return 1;
}

#ifdef	MP_DEBUG
extern int	max_lock_loops;
#endif	/* MP_DEBUG */

int	trappedalready = 0;	/* (BRINGUP) */

void
cpu_interrupt(int cpu)
{
	boolean_t state;

	if (cpu_datap(cpu)->cpu_signals & 6) {	/* (BRINGUP) */
		kprintf("cpu_interrupt: sending enter debugger signal (%08X) to cpu %d\n", cpu_datap(cpu)->cpu_signals, cpu);
	}

	if (smp_initialized) {

#if	MACH_KDB
//		if(!trappedalready && (cpu_datap(cpu)->cpu_signals & 6)) {	/* (BRINGUP) */
//			if(kdb_cpu != cpu_number()) {
//				trappedalready = 1;
//				panic("cpu_interrupt: sending enter debugger signal (%08X) to cpu %d and I do not own debugger, owner = %08X\n",
//					cpu_datap(cpu)->cpu_signals, cpu, kdb_cpu);
//			}
//		}
#endif

		/* Wait for previous interrupt to be delivered... */
#ifdef	MP_DEBUG
		int	pending_busy_count = 0;
		while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
			if (++pending_busy_count > max_lock_loops)
				panic("cpu_interrupt() deadlock\n");
#else
		while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
#endif	/* MP_DEBUG */
			cpu_pause();
		}

		state = ml_set_interrupts_enabled(FALSE);
		LAPIC_REG(ICRD) =
			cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
		LAPIC_REG(ICR) =
			LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_FIXED;
		(void) ml_set_interrupts_enabled(state);
	}
}

/*
 * Send a true NMI via the local APIC to the specified CPU.
 */
static void
cpu_NMI_interrupt(int cpu)
{
	boolean_t state;

	if (smp_initialized) {
		state = ml_set_interrupts_enabled(FALSE);
		LAPIC_REG(ICRD) =
			cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
		/*
		 * The vector is ignored in this case; the other CPU will
		 * come in on the NMI vector.
		 */
		LAPIC_REG(ICR) =
			LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_NMI;
		(void) ml_set_interrupts_enabled(state);
	}
}

void
i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
{
	volatile int	*signals = &cpu_datap(cpu)->cpu_signals;
	uint64_t	tsc_timeout;

	if (!cpu_datap(cpu)->cpu_running)
		return;

	if (event == MP_TLB_FLUSH)
		KERNEL_DEBUG(0xef800020 | DBG_FUNC_START, cpu, 0, 0, 0, 0);

	DBGLOG(cpu_signal, cpu, event);

	i_bit_set(event, signals);
	cpu_interrupt(cpu);
	if (mode == SYNC) {
	   again:
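		/* Spin for up to 10^9 TSC ticks (about a second on a 1 GHz part) before retrying. */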
		tsc_timeout = rdtsc64() + (1000*1000*1000);
		while (i_bit(event, signals) && rdtsc64() < tsc_timeout) {
			cpu_pause();
		}
		if (i_bit(event, signals)) {
			DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
				cpu, event);
			goto again;
		}
	}
	if (event == MP_TLB_FLUSH)
		KERNEL_DEBUG(0xef800020 | DBG_FUNC_END, cpu, 0, 0, 0, 0);
}

void
i386_signal_cpus(mp_event_t event, mp_sync_t mode)
{
	unsigned int	cpu;
	unsigned int	my_cpu = cpu_number();

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
			continue;
		i386_signal_cpu(cpu, event, mode);
	}
}

int
i386_active_cpus(void)
{
	unsigned int	cpu;
	unsigned int	ncpus = 0;

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu_datap(cpu)->cpu_running)
			ncpus++;
	}
	return(ncpus);
}

/*
 * All-CPU rendezvous:
 *	- CPUs are signalled,
 *	- all execute the setup function (if specified),
 *	- rendezvous (i.e. all cpus reach a barrier),
 *	- all execute the action function (if specified),
 *	- rendezvous again,
 *	- execute the teardown function (if specified), and then
 *	- resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */

static void
mp_rendezvous_action(void)
{
	/* setup function */
	if (mp_rv_setup_func != NULL)
		mp_rv_setup_func(mp_rv_func_arg);
	/* spin on entry rendezvous */
	atomic_incl(&mp_rv_waiters[0], 1);
	while (mp_rv_waiters[0] < mp_rv_ncpus) {
		boolean_t intr = ml_set_interrupts_enabled(FALSE);
		/* poll for pesky tlb flushes */
		handle_pending_TLB_flushes();
		ml_set_interrupts_enabled(intr);
		cpu_pause();
	}
	/* action function */
	if (mp_rv_action_func != NULL)
		mp_rv_action_func(mp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_incl(&mp_rv_waiters[1], 1);
	while (mp_rv_waiters[1] < mp_rv_ncpus)
		cpu_pause();
	/* teardown function */
	if (mp_rv_teardown_func != NULL)
		mp_rv_teardown_func(mp_rv_func_arg);
}

void
mp_rendezvous(void (*setup_func)(void *),
	      void (*action_func)(void *),
	      void (*teardown_func)(void *),
	      void *arg)
{
	if (!smp_initialized) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	simple_lock(&mp_rv_lock);

	/* set static function pointers */
	mp_rv_setup_func = setup_func;
	mp_rv_action_func = action_func;
	mp_rv_teardown_func = teardown_func;
	mp_rv_func_arg = arg;

	mp_rv_waiters[0] = 0;	/* entry rendezvous count */
	mp_rv_waiters[1] = 0;	/* exit  rendezvous count */
	mp_rv_ncpus = i386_active_cpus();

	/*
	 * signal other processors, which will call mp_rendezvous_action()
	 * with interrupts disabled
	 */
	i386_signal_cpus(MP_RENDEZVOUS, ASYNC);

	/* call executor function on this cpu */
	mp_rendezvous_action();

	/* release lock */
	simple_unlock(&mp_rv_lock);
}

void
mp_rendezvous_break_lock(void)
{
	simple_lock_init(&mp_rv_lock, 0);
}

static void
setup_disable_intrs(__unused void * param_not_used)
{
	/* disable interrupts before the first barrier */
	boolean_t intr = ml_set_interrupts_enabled(FALSE);

	current_cpu_datap()->cpu_iflag = intr;
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
teardown_restore_intrs(__unused void * param_not_used)
{
	/* restore interrupt flag following MTRR changes */
	ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

/*
 * A wrapper to mp_rendezvous() to call action_func() with interrupts disabled.
 * This is exported for use by kexts.
 */
void
mp_rendezvous_no_intrs(
	      void (*action_func)(void *),
	      void *arg)
{
	mp_rendezvous(setup_disable_intrs,
		      action_func,
		      teardown_restore_intrs,
		      arg);
}
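
/*
 * Illustrative sketch (not part of the original source): a kext might use
 * mp_rendezvous_no_intrs() to program an MSR identically on every CPU.
 * MY_MSR and the value are hypothetical; wrmsr() is the macro used
 * elsewhere in this file. The action function runs on all processors
 * with interrupts disabled:
 *
 *	static void
 *	set_msr_action(void *arg)
 *	{
 *		uint32_t *lo_hi = (uint32_t *) arg;
 *
 *		wrmsr(MY_MSR, lo_hi[0], lo_hi[1]);
 *	}
 *	...
 *	uint32_t value[2] = { 0x1, 0x0 };
 *	mp_rendezvous_no_intrs(set_msr_action, value);
 */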

void
handle_pending_TLB_flushes(void)
{
	volatile int	*my_word = &current_cpu_datap()->cpu_signals;

	if (i_bit(MP_TLB_FLUSH, my_word)) {
		DBGLOG(cpu_handle, cpu_number(), MP_TLB_FLUSH);
		i_bit_clear(MP_TLB_FLUSH, my_word);
		pmap_update_interrupt();
	}
}


#if MACH_KDP
volatile boolean_t	mp_kdp_trap = FALSE;
volatile long		mp_kdp_ncpus;
boolean_t		mp_kdp_state;


void
mp_kdp_enter(void)
{
	unsigned int	cpu;
	unsigned int	ncpus;
	unsigned int	my_cpu = cpu_number();
	uint64_t	tsc_timeout;

	DBG("mp_kdp_enter()\n");

	/*
	 * Here to enter the debugger.
	 * In case of races, only one cpu is allowed to enter kdp after
	 * stopping others.
	 */
	mp_kdp_state = ml_set_interrupts_enabled(FALSE);
	simple_lock(&mp_kdp_lock);

	while (mp_kdp_trap) {
		simple_unlock(&mp_kdp_lock);
		DBG("mp_kdp_enter() race lost\n");
		mp_kdp_wait();
		simple_lock(&mp_kdp_lock);
	}
	mp_kdp_ncpus = 1;	/* self */
	mp_kdp_trap = TRUE;
	simple_unlock(&mp_kdp_lock);

	/*
	 * Deliver a nudge to other cpus, counting how many
	 */
	DBG("mp_kdp_enter() signaling other processors\n");
	for (ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
			continue;
		ncpus++;
		i386_signal_cpu(cpu, MP_KDP, ASYNC);
	}
	/*
	 * Wait for other processors to synchronize
	 */
	DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);

	tsc_timeout = rdtsc64() + (ncpus * 100 * 1000 * 1000);

	while (mp_kdp_ncpus != ncpus && rdtsc64() < tsc_timeout) {
		/*
		 * A TLB shootdown request may be pending... this would
		 * result in the requesting processor waiting in
		 * PMAP_UPDATE_TLBS() until this processor deals with it.
		 * Process it, so it can now enter mp_kdp_wait()
		 */
		handle_pending_TLB_flushes();
		cpu_pause();
	}
	/* If we've timed out, and some processor(s) are still unresponsive,
	 * interrupt them with an NMI via the local APIC.
	 */
	if (mp_kdp_ncpus != ncpus) {
		for (cpu = 0; cpu < real_ncpus; cpu++) {
			if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
				continue;
			if (cpu_signal_pending(cpu, MP_KDP))
				cpu_NMI_interrupt(cpu);
		}
	}

	DBG("mp_kdp_enter() %d processors done %s\n",
	    mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");

	postcode(MP_KDP_ENTER);
}

static boolean_t
cpu_signal_pending(int cpu, mp_event_t event)
{
	volatile int	*signals = &cpu_datap(cpu)->cpu_signals;
	boolean_t	retval = FALSE;

	if (i_bit(event, signals))
		retval = TRUE;
	return retval;
}

static void
mp_kdp_wait(void)
{
	DBG("mp_kdp_wait()\n");
	atomic_incl(&mp_kdp_ncpus, 1);
	while (mp_kdp_trap) {
		/*
		 * A TLB shootdown request may be pending... this would result
		 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
		 * until this processor deals with it. Process it, so it can
		 * continue to wait here.
		 */
		handle_pending_TLB_flushes();

		cpu_pause();
	}
	atomic_decl(&mp_kdp_ncpus, 1);
	DBG("mp_kdp_wait() done\n");
}

void
mp_kdp_exit(void)
{
	DBG("mp_kdp_exit()\n");
	atomic_decl(&mp_kdp_ncpus, 1);
	mp_kdp_trap = FALSE;
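	/* Make the cleared mp_kdp_trap globally visible before waiting for spinners to exit. */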
	__asm__ volatile("mfence");

	/* Wait for other processors to stop spinning. XXX needs timeout */
	DBG("mp_kdp_exit() waiting for processors to resume\n");
	while (mp_kdp_ncpus > 0) {
		/*
		 * A TLB shootdown request may be pending... this would result
		 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
		 * until this processor deals with it. Process it, so it can
		 * exit mp_kdp_wait()
		 */
		handle_pending_TLB_flushes();

		cpu_pause();
	}
	DBG("mp_kdp_exit() done\n");
	(void) ml_set_interrupts_enabled(mp_kdp_state);
	postcode(0);
}
#endif	/* MACH_KDP */

/*ARGSUSED*/
void
init_ast_check(
	__unused processor_t	processor)
{
}

void
cause_ast_check(
	processor_t	processor)
{
	int	cpu = PROCESSOR_DATA(processor, slot_num);

	if (cpu != cpu_number()) {
		i386_signal_cpu(cpu, MP_AST, ASYNC);
	}
}

#if	MACH_KDB
/*
 * invoke kdb on slave processors
 */
void
remote_kdb(void)
{
	unsigned int	my_cpu = cpu_number();
	unsigned int	cpu;
	int		kdb_ncpus;
	uint64_t	tsc_timeout = 0;

	mp_kdb_trap = TRUE;
	mp_kdb_ncpus = 1;
	for (kdb_ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
			continue;
		kdb_ncpus++;
		i386_signal_cpu(cpu, MP_KDB, ASYNC);
	}
	DBG("remote_kdb() waiting for (%d) processors to suspend\n", kdb_ncpus);

	tsc_timeout = rdtsc64() + (kdb_ncpus * 100 * 1000 * 1000);

	while (mp_kdb_ncpus != kdb_ncpus && rdtsc64() < tsc_timeout) {
		/*
		 * A TLB shootdown request may be pending... this would result
		 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
		 * until this processor deals with it. Process it, so it can
		 * now enter mp_kdb_wait()
		 */
		handle_pending_TLB_flushes();

		cpu_pause();
	}
	DBG("remote_kdb() %d processors done %s\n",
	    mp_kdb_ncpus, (mp_kdb_ncpus == kdb_ncpus) ? "OK" : "timed out");
}

static void
mp_kdb_wait(void)
{
	DBG("mp_kdb_wait()\n");
	atomic_incl(&mp_kdb_ncpus, 1);
	while (mp_kdb_trap) {
		/*
		 * A TLB shootdown request may be pending... this would result
		 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
		 * until this processor deals with it. Process it, so it can
		 * continue to wait here.
		 */
		handle_pending_TLB_flushes();

		cpu_pause();
	}
	atomic_decl(&mp_kdb_ncpus, 1);
	DBG("mp_kdb_wait() done\n");
}

/*
 * Clear kdb interrupt
 */
void
clear_kdb_intr(void)
{
	mp_disable_preemption();
	i_bit_clear(MP_KDB, &current_cpu_datap()->cpu_signals);
	mp_enable_preemption();
}

void
mp_kdb_exit(void)
{
	DBG("mp_kdb_exit()\n");
	atomic_decl(&mp_kdb_ncpus, 1);
	mp_kdb_trap = FALSE;
	__asm__ volatile("mfence");

	while (mp_kdb_ncpus > 0) {
		/*
		 * A TLB shootdown request may be pending... this would result
		 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
		 * until this processor deals with it. Process it, so it can
		 * exit mp_kdb_wait()
		 */
		handle_pending_TLB_flushes();

		cpu_pause();
	}
	DBG("mp_kdb_exit() done\n");
}

#endif /* MACH_KDB */

/*
 * i386_init_slave() is called from pstart.
 * We're in the cpu's interrupt stack with interrupts disabled.
 * At this point we are in legacy mode. We need to switch on IA32e
 * if the mode is set to 64-bits.
 */
void
i386_init_slave(void)
{
	postcode(I386_INIT_SLAVE);

	/* Ensure that caching and write-through are enabled */
	set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));

	DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
	    get_cpu_number(), get_cpu_phys_number());

	assert(!ml_get_interrupts_enabled());
	if (cpu_mode_is64bit()) {
		cpu_IA32e_enable(current_cpu_datap());
		cpu_desc_load64(current_cpu_datap());
		fast_syscall_init64();
	} else {
		fast_syscall_init();
	}

	lapic_init();

	LAPIC_DUMP();
	LAPIC_CPU_MAP_DUMP();

	init_fpu();

	mtrr_update_cpu();

	pat_init();

	cpu_thread_init();

	cpu_init();	/* Sets cpu_running which starter cpu waits for */

	slave_main();

	panic("i386_init_slave() returned from slave_main()");
}

void
slave_machine_init(void)
{
	/*
	 * Here in process context, but with interrupts disabled.
	 */
	DBG("slave_machine_init() CPU%d\n", get_cpu_number());

	clock_init();

	cpu_machine_init();	/* Interrupts enabled hereafter */
}

#undef cpu_number
int cpu_number(void)
{
	return get_cpu_number();
}

#if	MACH_KDB
#include <ddb/db_output.h>

#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */


#if	TRAP_DEBUG
#define MTRAPS 100
struct mp_trap_hist_struct {
	unsigned char	type;
	unsigned char	data[5];
} trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
    *max_trap_hist = &trap_hist[MTRAPS];

void	db_trap_hist(void);

/*
 * SPL:
 *	1: new spl
 *	2: old spl
 *	3: new tpr
 *	4: old tpr
 * INT:
 *	1: int vec
 *	2: old spl
 *	3: new spl
 *	4: post eoi tpr
 *	5: exit tpr
 */

void
db_trap_hist(void)
{
	int i, j;

	for (i = 0; i < MTRAPS; i++)
		if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
			db_printf("%s%s",
				  (&trap_hist[i] >= cur_trap_hist) ? "*" : " ",
				  (trap_hist[i].type == 1) ? "SPL" : "INT");
			for (j = 0; j < 5; j++)
				db_printf(" %02x", trap_hist[i].data[j]);
			db_printf("\n");
		}
}
#endif	/* TRAP_DEBUG */
#endif	/* MACH_KDB */