/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
25 #include <i386/machine_routines.h>
26 #include <i386/io_map_entries.h>
27 #include <i386/cpuid.h>
29 #include <kern/processor.h>
30 #include <kern/cpu_data.h>
31 #include <kern/thread_act.h>
32 #include <i386/machine_cpu.h>
34 #include <i386/mp_events.h>
/*
 * Hand-off state for the max-cpus value between ml_init_max_cpus() and
 * ml_get_max_cpus().  The variable's address also serves as the
 * sleep/wakeup event for readers that arrive before the value is set.
 */
static int max_cpus_initialized = 0;

#define MAX_CPUS_SET	0x1	/* machine_info.max_cpus is final */
#define MAX_CPUS_WAIT	0x2	/* a reader is blocked waiting for it */
41 /* IO memory map services */
43 /* Map memory map IO space */
44 vm_offset_t
ml_io_map(
45 vm_offset_t phys_addr
,
48 return(io_map(phys_addr
,size
));
51 /* boot memory allocation */
52 vm_offset_t
ml_static_malloc(
55 return((vm_offset_t
)NULL
);
62 return phystokv(paddr
);
73 /* virtual to physical on wired pages */
74 vm_offset_t
ml_vtophys(
77 return kvtophys(vaddr
);
80 /* Interrupt handling */
82 /* Initialize Interrupts */
83 void ml_init_interrupt(void)
85 (void) ml_set_interrupts_enabled(TRUE
);
88 /* Get Interrupts Enabled */
89 boolean_t
ml_get_interrupts_enabled(void)
93 __asm__
volatile("pushf; popl %0" : "=r" (flags
));
94 return (flags
& EFL_IF
) != 0;
97 /* Set Interrupts Enabled */
98 boolean_t
ml_set_interrupts_enabled(boolean_t enable
)
102 __asm__
volatile("pushf; popl %0" : "=r" (flags
));
105 __asm__
volatile("sti");
107 __asm__
volatile("cli");
109 return (flags
& EFL_IF
) != 0;
112 /* Check if running at interrupt context */
113 boolean_t
ml_at_interrupt_context(void)
115 return get_interrupt_level() != 0;
/* Generate a fake interrupt */
void
ml_cause_interrupt(void)
{
	/* Not implemented on i386 yet; any caller is a bug. */
	panic("ml_cause_interrupt not defined yet on Intel");
}
124 void ml_thread_policy(
127 unsigned policy_info
)
129 if (policy_id
== MACHINE_GROUP
)
130 thread_bind(thread
, master_processor
);
132 if (policy_info
& MACHINE_NETWORK_WORKLOOP
) {
133 spl_t s
= splsched();
137 set_priority(thread
, thread
->priority
+ 1);
139 thread_unlock(thread
);
144 /* Initialize Interrupts */
145 void ml_install_interrupt_handler(
149 IOInterruptHandler handler
,
152 boolean_t current_state
;
154 current_state
= ml_get_interrupts_enabled();
156 PE_install_interrupt_handler(nub
, source
, target
,
157 (IOInterruptHandler
) handler
, refCon
);
159 (void) ml_set_interrupts_enabled(current_state
);
161 initialize_screen(0, kPEAcquireScreen
);
167 DBGLOG(cpu_handle
, cpu_number(), MP_IDLE
);
168 __asm__
volatile("sti; hlt": : :"memory");
169 __asm__
volatile("cli");
170 DBGLOG(cpu_handle
, cpu_number(), MP_UNIDLE
);
175 processor_t processor
)
177 cpu_interrupt(processor
->slot_num
);
181 ml_processor_register(
184 processor_t
*processor
,
185 ipi_handler_t
*ipi_handler
,
191 if (cpu_register(&target_cpu
) != KERN_SUCCESS
)
194 assert((boot_cpu
&& (target_cpu
== 0)) ||
195 (!boot_cpu
&& (target_cpu
!= 0)));
197 lapic_cpu_map(lapic_id
, target_cpu
);
198 cpu_data
[target_cpu
].cpu_id
= cpu_id
;
199 cpu_data
[target_cpu
].cpu_phys_number
= lapic_id
;
200 *processor
= cpu_to_processor(target_cpu
);
207 ml_cpu_get_info(ml_cpu_info_t
*cpu_info
)
209 boolean_t os_supports_sse
;
210 i386_cpu_info_t
*cpuid_infop
;
212 if (cpu_info
== NULL
)
216 * Are we supporting XMM/SSE/SSE2?
217 * As distinct from whether the cpu has these capabilities.
219 os_supports_sse
= get_cr4() & CR4_XMM
;
220 if ((cpuid_features() & CPUID_FEATURE_SSE2
) && os_supports_sse
)
221 cpu_info
->vector_unit
= 4;
222 else if ((cpuid_features() & CPUID_FEATURE_SSE
) && os_supports_sse
)
223 cpu_info
->vector_unit
= 3;
224 else if (cpuid_features() & CPUID_FEATURE_MMX
)
225 cpu_info
->vector_unit
= 2;
227 cpu_info
->vector_unit
= 0;
229 cpuid_infop
= cpuid_info();
231 cpu_info
->cache_line_size
= cpuid_infop
->cache_linesize
;
233 cpu_info
->l1_icache_size
= cpuid_infop
->cache_size
[L1I
];
234 cpu_info
->l1_dcache_size
= cpuid_infop
->cache_size
[L1D
];
236 cpu_info
->l2_settings
= 1;
237 cpu_info
->l2_cache_size
= cpuid_infop
->cache_size
[L2U
];
240 cpu_info
->l3_settings
= 0;
241 cpu_info
->l3_cache_size
= 0xFFFFFFFF;
245 ml_init_max_cpus(unsigned long max_cpus
)
247 boolean_t current_state
;
249 current_state
= ml_set_interrupts_enabled(FALSE
);
250 if (max_cpus_initialized
!= MAX_CPUS_SET
) {
251 if (max_cpus
> 0 && max_cpus
< NCPUS
)
252 machine_info
.max_cpus
= max_cpus
;
253 if (max_cpus_initialized
== MAX_CPUS_WAIT
)
254 wakeup((event_t
)&max_cpus_initialized
);
255 max_cpus_initialized
= MAX_CPUS_SET
;
257 (void) ml_set_interrupts_enabled(current_state
);
261 ml_get_max_cpus(void)
263 boolean_t current_state
;
265 current_state
= ml_set_interrupts_enabled(FALSE
);
266 if (max_cpus_initialized
!= MAX_CPUS_SET
) {
267 max_cpus_initialized
= MAX_CPUS_WAIT
;
268 assert_wait((event_t
)&max_cpus_initialized
, THREAD_UNINT
);
269 (void)thread_block(THREAD_CONTINUE_NULL
);
271 (void) ml_set_interrupts_enabled(current_state
);
272 return(machine_info
.max_cpus
);
/* Stubs for pc tracing mechanism */

int pc_trace_cnt = 0;	/* entry count for the (stubbed-out) pc trace buffer */
302 return(current_act_fast());
305 #undef current_thread
309 return(current_act_fast());