/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
22 #include <i386/machine_routines.h>
23 #include <i386/io_map_entries.h>
24 #include <i386/cpuid.h>
26 #include <kern/processor.h>
27 #include <kern/cpu_data.h>
28 #include <kern/thread_act.h>
29 #include <i386/machine_cpu.h>
31 #include <i386/mp_events.h>
/*
 * Tracks whether machine_info.max_cpus has been finalized.
 * Transitions: 0 -> MAX_CPUS_WAIT (a reader is blocked in ml_get_max_cpus)
 *                -> MAX_CPUS_SET  (ml_init_max_cpus has run).
 */
static int max_cpus_initialized = 0;

#define MAX_CPUS_SET	0x1	/* max_cpus value is final */
#define MAX_CPUS_WAIT	0x2	/* a reader is sleeping on this flag */
38 /* IO memory map services */
40 /* Map memory map IO space */
41 vm_offset_t
ml_io_map(
42 vm_offset_t phys_addr
,
45 return(io_map(phys_addr
,size
));
48 /* boot memory allocation */
49 vm_offset_t
ml_static_malloc(
52 return((vm_offset_t
)NULL
);
59 return phystokv(paddr
);
70 /* virtual to physical on wired pages */
71 vm_offset_t
ml_vtophys(
74 return kvtophys(vaddr
);
77 /* Interrupt handling */
79 /* Initialize Interrupts */
80 void ml_init_interrupt(void)
82 (void) ml_set_interrupts_enabled(TRUE
);
85 /* Get Interrupts Enabled */
86 boolean_t
ml_get_interrupts_enabled(void)
90 __asm__
volatile("pushf; popl %0" : "=r" (flags
));
91 return (flags
& EFL_IF
) != 0;
94 /* Set Interrupts Enabled */
95 boolean_t
ml_set_interrupts_enabled(boolean_t enable
)
99 __asm__
volatile("pushf; popl %0" : "=r" (flags
));
102 __asm__
volatile("sti");
104 __asm__
volatile("cli");
106 return (flags
& EFL_IF
) != 0;
109 /* Check if running at interrupt context */
110 boolean_t
ml_at_interrupt_context(void)
112 return get_interrupt_level() != 0;
/* Generate a fake interrupt */
void
ml_cause_interrupt(void)
{
	/* Not implemented for this architecture yet. */
	panic("ml_cause_interrupt not defined yet on Intel");
}
121 void ml_thread_policy(
124 unsigned policy_info
)
126 if (policy_id
== MACHINE_GROUP
)
127 thread_bind(thread
, master_processor
);
129 if (policy_info
& MACHINE_NETWORK_WORKLOOP
) {
130 spl_t s
= splsched();
134 set_priority(thread
, thread
->priority
+ 1);
136 thread_unlock(thread
);
141 /* Initialize Interrupts */
142 void ml_install_interrupt_handler(
146 IOInterruptHandler handler
,
149 boolean_t current_state
;
151 current_state
= ml_get_interrupts_enabled();
153 PE_install_interrupt_handler(nub
, source
, target
,
154 (IOInterruptHandler
) handler
, refCon
);
156 (void) ml_set_interrupts_enabled(current_state
);
158 initialize_screen(0, kPEAcquireScreen
);
164 DBGLOG(cpu_handle
, cpu_number(), MP_IDLE
);
165 __asm__
volatile("sti; hlt": : :"memory");
166 __asm__
volatile("cli");
167 DBGLOG(cpu_handle
, cpu_number(), MP_UNIDLE
);
172 processor_t processor
)
174 cpu_interrupt(processor
->slot_num
);
178 ml_processor_register(
181 processor_t
*processor
,
182 ipi_handler_t
*ipi_handler
,
188 if (cpu_register(&target_cpu
) != KERN_SUCCESS
)
191 assert((boot_cpu
&& (target_cpu
== 0)) ||
192 (!boot_cpu
&& (target_cpu
!= 0)));
194 lapic_cpu_map(lapic_id
, target_cpu
);
195 cpu_data
[target_cpu
].cpu_id
= cpu_id
;
196 cpu_data
[target_cpu
].cpu_phys_number
= lapic_id
;
197 *processor
= cpu_to_processor(target_cpu
);
204 ml_cpu_get_info(ml_cpu_info_t
*cpu_info
)
206 boolean_t os_supports_sse
;
207 i386_cpu_info_t
*cpuid_infop
;
209 if (cpu_info
== NULL
)
213 * Are we supporting XMM/SSE/SSE2?
214 * As distinct from whether the cpu has these capabilities.
216 os_supports_sse
= get_cr4() & CR4_XMM
;
217 if ((cpuid_features() & CPUID_FEATURE_SSE2
) && os_supports_sse
)
218 cpu_info
->vector_unit
= 4;
219 else if ((cpuid_features() & CPUID_FEATURE_SSE
) && os_supports_sse
)
220 cpu_info
->vector_unit
= 3;
221 else if (cpuid_features() & CPUID_FEATURE_MMX
)
222 cpu_info
->vector_unit
= 2;
224 cpu_info
->vector_unit
= 0;
226 cpuid_infop
= cpuid_info();
228 cpu_info
->cache_line_size
= cpuid_infop
->cache_linesize
;
230 cpu_info
->l1_icache_size
= cpuid_infop
->cache_size
[L1I
];
231 cpu_info
->l1_dcache_size
= cpuid_infop
->cache_size
[L1D
];
233 cpu_info
->l2_settings
= 1;
234 cpu_info
->l2_cache_size
= cpuid_infop
->cache_size
[L2U
];
237 cpu_info
->l3_settings
= 0;
238 cpu_info
->l3_cache_size
= 0xFFFFFFFF;
242 ml_init_max_cpus(unsigned long max_cpus
)
244 boolean_t current_state
;
246 current_state
= ml_set_interrupts_enabled(FALSE
);
247 if (max_cpus_initialized
!= MAX_CPUS_SET
) {
248 if (max_cpus
> 0 && max_cpus
< NCPUS
)
249 machine_info
.max_cpus
= max_cpus
;
250 if (max_cpus_initialized
== MAX_CPUS_WAIT
)
251 wakeup((event_t
)&max_cpus_initialized
);
252 max_cpus_initialized
= MAX_CPUS_SET
;
254 (void) ml_set_interrupts_enabled(current_state
);
258 ml_get_max_cpus(void)
260 boolean_t current_state
;
262 current_state
= ml_set_interrupts_enabled(FALSE
);
263 if (max_cpus_initialized
!= MAX_CPUS_SET
) {
264 max_cpus_initialized
= MAX_CPUS_WAIT
;
265 assert_wait((event_t
)&max_cpus_initialized
, THREAD_UNINT
);
266 (void)thread_block(THREAD_CONTINUE_NULL
);
268 (void) ml_set_interrupts_enabled(current_state
);
269 return(machine_info
.max_cpus
);
/* Stubs for pc tracing mechanism */

int pc_trace_cnt = 0;	/* pc tracing disabled: no entries recorded */
299 return(current_act_fast());
302 #undef current_thread
306 return(current_act_fast());