/* osfmk/i386/mp.c (xnu-517.12.7) */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <cpus.h>
#include <mach_rt.h>
#include <mach_kdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <i386/mp.h>
#include <i386/mp_events.h>
#include <i386/mp_slave_boot.h>
#include <i386/apic.h>
#include <i386/ipl.h>
#include <i386/fpu.h>
#include <i386/pio.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <vm/vm_kern.h>
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <kern/startup.h>
#include <kern/processor.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>

#if MP_DEBUG
#define PAUSE		delay(1000000)
#define DBG(x...)	kprintf(x)
#else
#define DBG(x...)
#define PAUSE
#endif /* MP_DEBUG */

/* Initialize lapic_id so cpu_number() works on non-SMP systems */
unsigned long	lapic_id_initdata = 0;
unsigned long	lapic_id = (unsigned long)&lapic_id_initdata;
vm_offset_t	lapic_start;

void		lapic_init(void);
void		slave_boot_init(void);

static void	mp_kdp_wait(void);
static void	mp_rendezvous_action(void);

boolean_t	smp_initialized = FALSE;

decl_simple_lock_data(,mp_kdp_lock);
decl_simple_lock_data(,mp_putc_lock);

/* Variables needed for MP rendezvous. */
static void	(*mp_rv_setup_func)(void *arg);
static void	(*mp_rv_action_func)(void *arg);
static void	(*mp_rv_teardown_func)(void *arg);
static void	*mp_rv_func_arg;
static int	mp_rv_ncpus;
static volatile long	mp_rv_waiters[2];
decl_simple_lock_data(,mp_rv_lock);

int	lapic_to_cpu[LAPIC_ID_MAX+1];
int	cpu_to_lapic[NCPUS];

static void
lapic_cpu_map_init(void)
{
	int	i;

	for (i = 0; i < NCPUS; i++)
		cpu_to_lapic[i] = -1;
	for (i = 0; i <= LAPIC_ID_MAX; i++)
		lapic_to_cpu[i] = -1;
}

void
lapic_cpu_map(int apic_id, int cpu_number)
{
	cpu_to_lapic[cpu_number] = apic_id;
	lapic_to_cpu[apic_id] = cpu_number;
}

#ifdef MP_DEBUG
static void
lapic_cpu_map_dump(void)
{
	int	i;

	for (i = 0; i < NCPUS; i++) {
		if (cpu_to_lapic[i] == -1)
			continue;
		kprintf("cpu_to_lapic[%d]: %d\n",
			i, cpu_to_lapic[i]);
	}
	for (i = 0; i <= LAPIC_ID_MAX; i++) {
		if (lapic_to_cpu[i] == -1)
			continue;
		kprintf("lapic_to_cpu[%d]: %d\n",
			i, lapic_to_cpu[i]);
	}
}
#endif /* MP_DEBUG */

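/*
 * Local APIC registers are memory-mapped, 32 bits wide, and spaced
 * at 16-byte intervals.  These macros access a register as a
 * volatile 32-bit load/store through the kernel window (lapic_start)
 * that smp_init() maps over the APIC's physical page.
 */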
#define LAPIC_REG(reg) \
	(*((volatile int *)(lapic_start + LAPIC_##reg)))
#define LAPIC_REG_OFFSET(reg,off) \
	(*((volatile int *)(lapic_start + LAPIC_##reg + (off))))

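/*
 * Called on the boot processor: verify local APIC support, map and
 * initialize this cpu's APIC, stage the real-mode slave boot code,
 * and only then declare SMP initialized.
 */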
void
smp_init(void)
{
	int		result;
	vm_map_entry_t	entry;
	uint32_t	lo;
	uint32_t	hi;
	boolean_t	is_boot_processor;
	boolean_t	is_lapic_enabled;

	/* Local APIC? */
	if ((cpuid_features() & CPUID_FEATURE_APIC) == 0)
		return;

	simple_lock_init(&mp_kdp_lock, ETAP_MISC_PRINTF);
	simple_lock_init(&mp_rv_lock, ETAP_MISC_PRINTF);
	simple_lock_init(&mp_putc_lock, ETAP_MISC_PRINTF);

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	DBG("MSR_IA32_APIC_BASE 0x%x:0x%x %s %s\n", hi, lo,
		is_lapic_enabled ? "enabled" : "disabled",
		is_boot_processor ? "BSP" : "AP");
	assert(is_boot_processor);
	assert(is_lapic_enabled);

	/* Establish a map to the local apic */
	lapic_start = vm_map_min(kernel_map);
	result = vm_map_find_space(kernel_map, &lapic_start,
				   round_page(LAPIC_SIZE), 0, &entry);
	if (result != KERN_SUCCESS) {
		printf("smp_init: vm_map_find_space FAILED (err=%d). "
			"Only supporting ONE cpu.\n", result);
		return;
	}
	vm_map_unlock(kernel_map);
	pmap_enter(pmap_kernel(),
		   lapic_start,
		   (ppnum_t) i386_btop(i386_trunc_page(LAPIC_START)),
		   VM_PROT_READ|VM_PROT_WRITE,
		   VM_WIMG_USE_DEFAULT,
		   TRUE);
	lapic_id = (unsigned long)(lapic_start + LAPIC_ID);

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);

	lapic_init();

	slave_boot_init();
	master_up();

	smp_initialized = TRUE;

	return;
}

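/*
 * The error status register latches: the architecture requires a
 * write to ESR (the value written is ignored) to update its contents
 * with the errors accumulated since the last write.  Hence the
 * write-then-read in lapic_esr_read(), and the back-to-back writes
 * used to clear it.
 */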
int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_REG(ERROR_STATUS) = 0;
	return LAPIC_REG(ERROR_STATUS);
}

void
lapic_esr_clear(void)
{
	LAPIC_REG(ERROR_STATUS) = 0;
	LAPIC_REG(ERROR_STATUS) = 0;
}

static char *DM[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"};

void
lapic_dump(void)
{
	int	i;

#define BOOL(a) ((a)?' ':'!')

	kprintf("LAPIC %d at 0x%x version 0x%x\n",
		(LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
		lapic_start,
		LAPIC_REG(VERSION)&LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
		LAPIC_REG(TPR)&LAPIC_TPR_MASK,
		LAPIC_REG(APR)&LAPIC_APR_MASK,
		LAPIC_REG(PPR)&LAPIC_PPR_MASK);
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
		LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT,
		LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
		BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE),
		BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)),
		LAPIC_REG(SVR) & LAPIC_SVR_MASK);
	kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
		LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED),
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot");
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED));
	kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
		LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED));
	kprintf("ESR: %08x\n", lapic_esr_read());
	kprintf("       ");
	for(i=0xf; i>=0; i--)
		kprintf("%x%x%x%x",i,i,i,i);
	kprintf("\n");
	kprintf("TMR: 0x");
	for(i=7; i>=0; i--)
		kprintf("%08x",LAPIC_REG_OFFSET(TMR_BASE, i*0x10));
	kprintf("\n");
	kprintf("IRR: 0x");
	for(i=7; i>=0; i--)
		kprintf("%08x",LAPIC_REG_OFFSET(IRR_BASE, i*0x10));
	kprintf("\n");
	kprintf("ISR: 0x");
	for(i=7; i>=0; i--)
		kprintf("%08x",LAPIC_REG_OFFSET(ISR_BASE, i*0x10));
	kprintf("\n");
}

void
lapic_init(void)
{
	int	value;

	mp_disable_preemption();

	/* Set flat delivery model, logical processor id */
	LAPIC_REG(DFR) = LAPIC_DFR_FLAT;
	LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT;

	/* Accept all */
	LAPIC_REG(TPR) = 0;

	LAPIC_REG(SVR) = SPURIOUS_INTERRUPT | LAPIC_SVR_ENABLE;

	/* ExtINT: route legacy PIC interrupts through LINT0 on the master only */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_REG(LVT_LINT0);
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_REG(LVT_LINT0) = value;
	}

	lapic_esr_clear();

	LAPIC_REG(LVT_ERROR) = APIC_ERROR_INTERRUPT;

	mp_enable_preemption();
}


void
lapic_end_of_interrupt(void)
{
	LAPIC_REG(EOI) = 0;
}

void
lapic_interrupt(int interrupt, void *state)
{
	switch(interrupt) {
	case APIC_ERROR_INTERRUPT:
		panic("Local APIC error\n");
		break;
	case SPURIOUS_INTERRUPT:
		kprintf("SPIV\n");
		break;
	case INTERPROCESS_INTERRUPT:
		cpu_signal_handler((struct i386_interrupt_state *) state);
		break;
	}
	lapic_end_of_interrupt();
}

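/*
 * Start a slave processor with the conventional INIT/STARTUP IPI
 * sequence: an INIT IPI, a settling delay, then a STARTUP IPI whose
 * vector field is the physical page number of the boot trampoline
 * (hence MP_BOOT>>12).  Assuming delay() counts microseconds, the
 * polling loop below then waits up to roughly ten seconds for the
 * slave to set its running flag.
 */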
kern_return_t
intel_startCPU(
	int	slot_num)
{
	int	i = 1000;
	int	lapic_id = cpu_to_lapic[slot_num];

	if (slot_num == get_cpu_number())
		return KERN_SUCCESS;

	assert(lapic_id != -1);

	DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic_id);

	mp_disable_preemption();

	LAPIC_REG(ICRD) = lapic_id << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) = LAPIC_ICR_DM_INIT;
	delay(10000);

	LAPIC_REG(ICRD) = lapic_id << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
	delay(200);

	while(i-- > 0) {
		delay(10000);
		if (machine_slot[slot_num].running)
			break;
	}

	mp_enable_preemption();

	if (!machine_slot[slot_num].running) {
		DBG("Failed to start CPU %02d\n", slot_num);
		printf("Failed to start CPU %02d\n", slot_num);
		return KERN_FAILURE;
	} else {
		DBG("Started CPU %02d\n", slot_num);
		printf("Started CPU %02d\n", slot_num);
		return KERN_SUCCESS;
	}
}

void
slave_boot_init(void)
{
	extern char	slave_boot_base[];
	extern char	slave_boot_end[];
	extern void	pstart(void);

	DBG("slave_base=%p slave_end=%p MP_BOOT P=%p V=%p\n",
		slave_boot_base, slave_boot_end, MP_BOOT, phystokv(MP_BOOT));

	/*
	 * Copy the boot entry code to the real-mode vector area MP_BOOT.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, pstart().
	 */
	bcopy(slave_boot_base,
	      (char *)phystokv(MP_BOOT),
	      slave_boot_end-slave_boot_base);

	/*
	 * Zero a stack area above the boot code.
	 */
	bzero((char *)(phystokv(MP_BOOTSTACK+MP_BOOT)-0x400), 0x400);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	*((vm_offset_t *) phystokv(MP_MACH_START+MP_BOOT)) =
		kvtophys((vm_offset_t)&pstart);

	/* Flush caches so the slave sees the staged code and data */
	__asm__("wbinvd");
}

#if MP_DEBUG
cpu_signal_event_log_t	cpu_signal[NCPUS] = { 0, 0, 0 };
cpu_signal_event_log_t	cpu_handle[NCPUS] = { 0, 0, 0 };

MP_EVENT_NAME_DECL();

void
cpu_signal_dump_last(int cpu)
{
	cpu_signal_event_log_t	*logp = &cpu_signal[cpu];
	int			last;
	cpu_signal_event_t	*eventp;

	last = (logp->next_entry == 0) ?
			LOG_NENTRIES - 1 : logp->next_entry - 1;

	eventp = &logp->entry[last];

	kprintf("cpu%d: tsc=%lld cpu_signal(%d,%s)\n",
		cpu, eventp->time, eventp->cpu, mp_event_name[eventp->event]);
}

void
cpu_handle_dump_last(int cpu)
{
	cpu_signal_event_log_t	*logp = &cpu_handle[cpu];
	int			last;
	cpu_signal_event_t	*eventp;

	last = (logp->next_entry == 0) ?
			LOG_NENTRIES - 1 : logp->next_entry - 1;

	eventp = &logp->entry[last];

	kprintf("cpu%d: tsc=%lld cpu_signal_handle%s\n",
		cpu, eventp->time, mp_event_name[eventp->event]);
}
#endif /* MP_DEBUG */

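/*
 * IPI dispatcher: each cpu has a word of pending signal bits,
 * cpu_data[cpu].cpu_signals.  The handler loops, clearing and
 * servicing one event per pass, until the word drains to zero, so a
 * single interrupt can service several coalesced events.
 */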
void
cpu_signal_handler(struct i386_interrupt_state *regs)
{
	register int	my_cpu;
	volatile int	*my_word;
#if MACH_KDB && MACH_ASSERT
	int i=100;
#endif /* MACH_KDB && MACH_ASSERT */

	mp_disable_preemption();

	my_cpu = cpu_number();
	my_word = &cpu_data[my_cpu].cpu_signals;

	do {
#if MACH_KDB && MACH_ASSERT
		if (i-- <= 0)
			Debugger("cpu_signal_handler");
#endif /* MACH_KDB && MACH_ASSERT */
#if MACH_KDP
		if (i_bit(MP_KDP, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_KDP);
			i_bit_clear(MP_KDP, my_word);
			mp_kdp_wait();
		} else
#endif /* MACH_KDP */
		if (i_bit(MP_CLOCK, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_CLOCK);
			i_bit_clear(MP_CLOCK, my_word);
			hardclock(regs);
		} else if (i_bit(MP_TLB_FLUSH, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
			i_bit_clear(MP_TLB_FLUSH, my_word);
			pmap_update_interrupt();
		} else if (i_bit(MP_AST, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_AST);
			i_bit_clear(MP_AST, my_word);
			ast_check(cpu_to_processor(my_cpu));
#if MACH_KDB
		} else if (i_bit(MP_KDB, my_word)) {
			extern int kdb_is_slave[];

			i_bit_clear(MP_KDB, my_word);
			kdb_is_slave[my_cpu]++;
			kdb_kintr();
#endif /* MACH_KDB */
		} else if (i_bit(MP_RENDEZVOUS, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
			i_bit_clear(MP_RENDEZVOUS, my_word);
			mp_rendezvous_action();
		}
	} while (*my_word);

	mp_enable_preemption();
}

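/*
 * Post a fixed-delivery interprocessor interrupt to the target cpu
 * through the interrupt command register, first spinning until any
 * previously posted IPI has been delivered.  Interrupts are disabled
 * around the two ICR writes so that an interrupt taken on this cpu
 * cannot clobber the destination register between the writes.
 */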
void
cpu_interrupt(int cpu)
{
	boolean_t	state;

	if (smp_initialized) {
		/* Wait for previous interrupt to be delivered... */
		while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING)
			cpu_pause();

		state = ml_set_interrupts_enabled(FALSE);
		LAPIC_REG(ICRD) =
			cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
		LAPIC_REG(ICR) =
			INTERPROCESS_INTERRUPT | LAPIC_ICR_DM_FIXED;
		(void) ml_set_interrupts_enabled(state);
	}
}

void
slave_clock(void)
{
	int	cpu;

	/*
	 * Clock interrupts are chained from the boot processor
	 * to the next running logical processor, and from there
	 * to each further running processor in turn.
	 */
	mp_disable_preemption();
	for (cpu=cpu_number()+1; cpu<NCPUS; cpu++)
		if (machine_slot[cpu].running) {
			i386_signal_cpu(cpu, MP_CLOCK, ASYNC);
			mp_enable_preemption();
			return;
		}
	mp_enable_preemption();
}

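/*
 * Set an event bit in the target cpu's signal word and kick it with
 * an IPI.  In SYNC mode, spin until the target clears the bit; the
 * timeout is measured in TSC ticks (10^9 ticks is about one second
 * on a 1 GHz part), after which the wait is logged and retried.
 */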
void
i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
{
	volatile int	*signals = &cpu_data[cpu].cpu_signals;
	uint64_t	timeout;

	if (!cpu_data[cpu].cpu_status)
		return;

	DBGLOG(cpu_signal, cpu, event);

	i_bit_set(event, signals);
	cpu_interrupt(cpu);
	if (mode == SYNC) {
	   again:
		timeout = rdtsc64() + (1000*1000*1000);
		while (i_bit(event, signals) && rdtsc64() < timeout) {
			cpu_pause();
		}
		if (i_bit(event, signals)) {
			DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
				cpu, event);
			goto again;
		}
	}
}

void
i386_signal_cpus(mp_event_t event, mp_sync_t mode)
{
	int	cpu;
	int	my_cpu = cpu_number();

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (cpu == my_cpu || !machine_slot[cpu].running)
			continue;
		i386_signal_cpu(cpu, event, mode);
	}
}

int
i386_active_cpus(void)
{
	int	cpu;
	int	ncpus = 0;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (machine_slot[cpu].running)
			ncpus++;
	}
	return(ncpus);
}

/*
 * All-CPU rendezvous:
 *	- CPUs are signalled,
 *	- all execute the setup function (if specified),
 *	- rendezvous (i.e. all cpus reach a barrier),
 *	- all execute the action function (if specified),
 *	- rendezvous again,
 *	- execute the teardown function (if specified), and then
 *	- resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */

static void
mp_rendezvous_action(void)
{
	/* setup function */
	if (mp_rv_setup_func != NULL)
		mp_rv_setup_func(mp_rv_func_arg);
	/* spin on entry rendezvous */
	atomic_incl(&mp_rv_waiters[0], 1);
	while (mp_rv_waiters[0] < mp_rv_ncpus)
		cpu_pause();
	/* action function */
	if (mp_rv_action_func != NULL)
		mp_rv_action_func(mp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_incl(&mp_rv_waiters[1], 1);
	while (mp_rv_waiters[1] < mp_rv_ncpus)
		cpu_pause();
	/* teardown function */
	if (mp_rv_teardown_func != NULL)
		mp_rv_teardown_func(mp_rv_func_arg);
}

void
mp_rendezvous(void (*setup_func)(void *),
	      void (*action_func)(void *),
	      void (*teardown_func)(void *),
	      void *arg)
{
	if (!smp_initialized) {
		/* Not yet MP: just run the functions locally, in order */
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	simple_lock(&mp_rv_lock);

	/* set static function pointers */
	mp_rv_setup_func = setup_func;
	mp_rv_action_func = action_func;
	mp_rv_teardown_func = teardown_func;
	mp_rv_func_arg = arg;

	mp_rv_waiters[0] = 0;	/* entry rendezvous count */
	mp_rv_waiters[1] = 0;	/* exit rendezvous count */
	mp_rv_ncpus = i386_active_cpus();

	/*
	 * signal other processors, which will call mp_rendezvous_action()
	 * with interrupts disabled
	 */
	i386_signal_cpus(MP_RENDEZVOUS, ASYNC);

	/* call executor function on this cpu */
	mp_rendezvous_action();

	/* release lock */
	simple_unlock(&mp_rv_lock);
}
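
/*
 * Typical use is to run a short action on every cpu while all of
 * them are held at the barriers.  A sketch, with a hypothetical
 * action function:
 *
 *	static void
 *	update_mtrr_action(void *arg)
 *	{
 *		... apply the state described by arg on this cpu ...
 *	}
 *
 *	mp_rendezvous(NULL, update_mtrr_action, NULL, &new_state);
 *
 * A NULL setup, action or teardown function is simply skipped.
 */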

#if MACH_KDP
volatile boolean_t	mp_kdp_trap = FALSE;
long			mp_kdp_ncpus;
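
/*
 * Debugger entry protocol: the cpu entering KDP sets mp_kdp_trap and
 * signals MP_KDP to every other running cpu.  Each of those parks in
 * mp_kdp_wait(), bumping mp_kdp_ncpus so the debugger cpu can tell
 * when everyone has stopped; mp_kdp_exit() clears mp_kdp_trap to
 * release them.
 */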

void
mp_kdp_enter(void)
{
	int		cpu;
	int		ncpus;
	int		my_cpu = cpu_number();
	boolean_t	state;
	uint64_t	timeout;

	DBG("mp_kdp_enter()\n");

	/*
	 * Here to enter the debugger.
	 * In case of races, only one cpu is allowed to enter kdp after
	 * stopping others.
	 */
	state = ml_set_interrupts_enabled(FALSE);
	simple_lock(&mp_kdp_lock);
	while (mp_kdp_trap) {
		simple_unlock(&mp_kdp_lock);
		DBG("mp_kdp_enter() race lost\n");
		mp_kdp_wait();
		simple_lock(&mp_kdp_lock);
	}
	mp_kdp_ncpus = 1;	/* self */
	mp_kdp_trap = TRUE;
	simple_unlock(&mp_kdp_lock);
	(void) ml_set_interrupts_enabled(state);

	/* Deliver a nudge to other cpus, counting how many */
	DBG("mp_kdp_enter() signaling other processors\n");
	for (ncpus = 1, cpu = 0; cpu < NCPUS; cpu++) {
		if (cpu == my_cpu || !machine_slot[cpu].running)
			continue;
		ncpus++;
		i386_signal_cpu(cpu, MP_KDP, ASYNC);
	}

	/* Wait for the other processors to spin. */
	DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
	timeout = rdtsc64() + (1000*1000*1000);
	while (*((volatile long *) &mp_kdp_ncpus) != ncpus
		&& rdtsc64() < timeout) {
		cpu_pause();
	}
	DBG("mp_kdp_enter() %ld processors done %s\n",
		mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
}

static void
mp_kdp_wait(void)
{
	DBG("mp_kdp_wait()\n");
	atomic_incl(&mp_kdp_ncpus, 1);
	while (mp_kdp_trap) {
		cpu_pause();
	}
	atomic_decl(&mp_kdp_ncpus, 1);
	DBG("mp_kdp_wait() done\n");
}

void
mp_kdp_exit(void)
{
	DBG("mp_kdp_exit()\n");
	atomic_decl(&mp_kdp_ncpus, 1);
	mp_kdp_trap = FALSE;

	/* Wait for the other processors to stop spinning. XXX needs timeout */
	DBG("mp_kdp_exit() waiting for processors to resume\n");
	while (*((volatile long *) &mp_kdp_ncpus) > 0) {
		cpu_pause();
	}
	DBG("mp_kdp_exit() done\n");
}
#endif /* MACH_KDP */

void
lapic_test(void)
{
	int	cpu = 1;

	lapic_dump();
	i_bit_set(0, &cpu_data[cpu].cpu_signals);
	cpu_interrupt(1);
}

/*ARGSUSED*/
void
init_ast_check(
	processor_t	processor)
{
}

void
cause_ast_check(
	processor_t	processor)
{
	int	cpu = processor->slot_num;

	if (cpu != cpu_number()) {
		i386_signal_cpu(cpu, MP_AST, ASYNC);
	}
}

/*
 * invoke kdb on slave processors
 */

void
remote_kdb(void)
{
	int	my_cpu = cpu_number();
	int	cpu;

	mp_disable_preemption();
	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (cpu == my_cpu || !machine_slot[cpu].running)
			continue;
		i386_signal_cpu(cpu, MP_KDB, SYNC);
	}
	mp_enable_preemption();
}

/*
 * Clear kdb interrupt
 */

void
clear_kdb_intr(void)
{
	mp_disable_preemption();
	i_bit_clear(MP_KDB, &cpu_data[cpu_number()].cpu_signals);
	mp_enable_preemption();
}

void
slave_machine_init(void)
{
	int	my_cpu;

	/* Ensure that caching and write-through are enabled */
	set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));

	mp_disable_preemption();
	my_cpu = get_cpu_number();

	DBG("slave_machine_init() CPU%d: phys (%d) active.\n",
		my_cpu, get_cpu_phys_number());

	lapic_init();

	init_fpu();

	cpu_machine_init();

	mp_enable_preemption();

#ifdef MP_DEBUG
	lapic_dump();
	lapic_cpu_map_dump();
#endif /* MP_DEBUG */
}

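/*
 * cpu_number() is elsewhere provided as a macro; undefine it so a
 * genuine function of the same name can also be supplied, e.g. for
 * callers that need its address.
 */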
#undef cpu_number
int cpu_number(void)
{
	return get_cpu_number();
}

#if MACH_KDB
#include <ddb/db_output.h>

#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */

#if TRAP_DEBUG
#define MTRAPS 100
struct mp_trap_hist_struct {
	unsigned char	type;
	unsigned char	data[5];
} trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
    *max_trap_hist = &trap_hist[MTRAPS];

void db_trap_hist(void);

/*
 * SPL:
 *	1: new spl
 *	2: old spl
 *	3: new tpr
 *	4: old tpr
 * INT:
 *	1: int vec
 *	2: old spl
 *	3: new spl
 *	4: post eoi tpr
 *	5: exit tpr
 */

void
db_trap_hist(void)
{
	int i,j;

	for(i=0; i<MTRAPS; i++)
		if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
			db_printf("%s%s",
				(&trap_hist[i]>=cur_trap_hist)?"*":" ",
				(trap_hist[i].type == 1)?"SPL":"INT");
			for(j=0; j<5; j++)
				db_printf(" %02x", trap_hist[i].data[j]);
			db_printf("\n");
		}
}
#endif /* TRAP_DEBUG */

void db_lapic(int cpu);
unsigned int db_remote_read(int cpu, int reg);
void db_ioapic(unsigned int);
void kdb_console(void);

void
kdb_console(void)
{
}

#define BOOLP(a) ((a)?' ':'!')

unsigned int
db_remote_read(int cpu, int reg)
{
	return -1;
}

void
db_lapic(int cpu)
{
}

void
db_ioapic(unsigned int ind)
{
}

#endif /* MACH_KDB */