/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/thread_act.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/mp_events.h>

static int max_cpus_initialized = 0;

#define MAX_CPUS_SET	0x1
#define MAX_CPUS_WAIT	0x2

/* IO memory map services */

/* Map memory-mapped IO space */
vm_offset_t ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size));
}
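
/*
 * Usage sketch (illustrative only; the address shown is the conventional
 * IO APIC base, not anything defined in this file):
 *
 *	vm_offset_t regs = ml_io_map(0xFEC00000, PAGE_SIZE);
 *	if (regs == 0)
 *		panic("ml_io_map failed");
 *
 * The returned virtual address maps the physical register page into the
 * kernel's IO space via io_map().
 */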

/* boot memory allocation: not implemented on i386; always fails */
vm_offset_t ml_static_malloc(
	vm_size_t size)
{
	return((vm_offset_t)NULL);
}

/* physical to kernel-virtual translation for statically mapped memory */
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return phystokv(paddr);
}

/* release boot memory back to the VM system: a no-op on i386 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	return;
}

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}

/* Interrupt handling */

/* Initialize interrupts: enable them on the calling processor */
void ml_init_interrupt(void)
{
	(void) ml_set_interrupts_enabled(TRUE);
}

/* Get the current interrupt-enable state */
boolean_t ml_get_interrupts_enabled(void)
{
	unsigned long flags;

	/* read EFLAGS and test the IF (interrupt-enable) flag */
	__asm__ volatile("pushf; popl %0" : "=r" (flags));
	return (flags & EFL_IF) != 0;
}

/* Set the interrupt-enable state; returns the previous state */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
	unsigned long flags;

	/* capture the previous state from EFLAGS before changing it */
	__asm__ volatile("pushf; popl %0" : "=r" (flags));

	if (enable)
		__asm__ volatile("sti");
	else
		__asm__ volatile("cli");

	return (flags & EFL_IF) != 0;
}
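
/*
 * Typical usage, the same save/disable/restore pattern used by
 * ml_init_max_cpus() below:
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	... critical section ...
 *	(void) ml_set_interrupts_enabled(istate);
 *
 * Returning the previous state lets critical sections nest correctly.
 */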

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
	return get_interrupt_level() != 0;
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
	panic("ml_cause_interrupt not defined yet on Intel");
}

void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{
	/* group scheduling: bind the thread to the master processor */
	if (policy_id == MACHINE_GROUP)
		thread_bind(thread, master_processor);

	/* network workloop threads get a one-step priority boost */
	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}
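
/*
 * Illustrative call (hypothetical driver code): bind a workloop thread
 * to the master processor and give it a one-step priority boost:
 *
 *	ml_thread_policy(thread, MACHINE_GROUP, MACHINE_NETWORK_WORKLOOP);
 */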

/* Install an interrupt handler, preserving the interrupt-enable state */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	PE_install_interrupt_handler(nub, source, target,
				     (IOInterruptHandler) handler, refCon);

	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(0, kPEAcquireScreen);
}

void
machine_idle(void)
{
	DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
	/*
	 * STI holds off interrupts for one more instruction, so none can
	 * be taken between enabling and the HLT; a pending interrupt
	 * instead wakes the processor out of the halt.
	 */
	__asm__ volatile("sti; hlt": : :"memory");
	__asm__ volatile("cli");
	DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
}

/* Kick an idle processor with an interprocessor interrupt */
void
machine_signal_idle(
	processor_t processor)
{
	cpu_interrupt(processor->slot_num);
}

kern_return_t
ml_processor_register(
	cpu_id_t cpu_id,
	uint32_t lapic_id,
	processor_t *processor,
	ipi_handler_t *ipi_handler,
	boolean_t boot_cpu)
{
	int target_cpu;

	if (cpu_register(&target_cpu) != KERN_SUCCESS)
		return KERN_FAILURE;

	/* the boot processor must take slot 0; secondaries must not */
	assert((boot_cpu && (target_cpu == 0)) ||
	       (!boot_cpu && (target_cpu != 0)));

	/* record the APIC-to-slot mapping and identify the cpu */
	lapic_cpu_map(lapic_id, target_cpu);
	cpu_data[target_cpu].cpu_id = cpu_id;
	cpu_data[target_cpu].cpu_phys_number = lapic_id;
	*processor = cpu_to_processor(target_cpu);
	*ipi_handler = NULL;

	return KERN_SUCCESS;
}
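
/*
 * Call sketch (hypothetical platform-startup code; the variable names
 * are illustrative): each discovered cpu is registered with its local
 * APIC id, the boot processor first:
 *
 *	processor_t	proc;
 *	ipi_handler_t	ipi;
 *
 *	if (ml_processor_register(cpu_id, lapic_id, &proc, &ipi,
 *	    is_boot_cpu) != KERN_SUCCESS)
 *		... handle the failure: no cpu slot was available ...
 */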

void
ml_cpu_get_info(ml_cpu_info_t *cpu_info)
{
	boolean_t os_supports_sse;
	i386_cpu_info_t *cpuid_infop;

	if (cpu_info == NULL)
		return;

	/*
	 * Is the OS supporting XMM/SSE/SSE2 (i.e. is SSE state enabled
	 * in CR4)?  This is distinct from whether the cpu itself has
	 * these capabilities.
	 */
	os_supports_sse = get_cr4() & CR4_XMM;
	if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
		cpu_info->vector_unit = 4;
	else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
		cpu_info->vector_unit = 3;
	else if (cpuid_features() & CPUID_FEATURE_MMX)
		cpu_info->vector_unit = 2;
	else
		cpu_info->vector_unit = 0;

	cpuid_infop = cpuid_info();

	cpu_info->cache_line_size = cpuid_infop->cache_linesize;

	cpu_info->l1_icache_size = cpuid_infop->cache_size[L1I];
	cpu_info->l1_dcache_size = cpuid_infop->cache_size[L1D];

	cpu_info->l2_settings = 1;
	cpu_info->l2_cache_size = cpuid_infop->cache_size[L2U];

	/* XXX No L3 */
	cpu_info->l3_settings = 0;
	cpu_info->l3_cache_size = 0xFFFFFFFF;
}
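
/*
 * Usage sketch:
 *
 *	ml_cpu_info_t info;
 *
 *	ml_cpu_get_info(&info);
 *	if (info.vector_unit >= 4)
 *		... both the cpu and the OS support SSE2 ...
 */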

/*
 * Record the maximum cpu count supplied by platform startup and wake
 * any thread blocked in ml_get_max_cpus() waiting for it.
 */
void
ml_init_max_cpus(unsigned long max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus < NCPUS)
			machine_info.max_cpus = max_cpus;
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}

/*
 * Return the maximum cpu count, blocking uninterruptibly until
 * ml_init_max_cpus() has published it.
 */
int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void)thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return(machine_info.max_cpus);
}
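
/*
 * Protocol sketch: platform startup publishes the limit exactly once;
 * any consumer that asks too early sleeps until it is published:
 *
 *	ml_init_max_cpus(n);		producer, called once at boot
 *	...
 *	cpus = ml_get_max_cpus();	consumer, may block
 */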

/* Stubs for pc tracing mechanism */

int *pc_trace_buf;
int pc_trace_cnt = 0;

int
set_be_bit(void)
{
	return(0);
}

int
clr_be_bit(void)
{
	return(0);
}

int
be_tracing(void)
{
	return(0);
}

/*
 * Out-of-line versions of current_act() and current_thread(); the
 * macro forms are #undef'd so real, addressable functions exist.
 */
#undef current_act
thread_act_t
current_act(void)
{
	return(current_act_fast());
}

#undef current_thread
thread_t
current_thread(void)
{
	return(current_act_fast());
}