]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/machine_routines.c
xnu-517.12.7.tar.gz
[apple/xnu.git] / osfmk / i386 / machine_routines.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 #include <i386/machine_routines.h>
23 #include <i386/io_map_entries.h>
24 #include <i386/cpuid.h>
25 #include <i386/fpu.h>
26 #include <kern/processor.h>
27 #include <kern/cpu_data.h>
28 #include <kern/thread_act.h>
29 #include <i386/machine_cpu.h>
30 #include <i386/mp.h>
31 #include <i386/mp_events.h>
32
/*
 * State for the max_cpus handshake between ml_init_max_cpus() and
 * ml_get_max_cpus().  Accessed only with interrupts disabled.
 */
static int max_cpus_initialized = 0;

#define MAX_CPUS_SET    0x1	/* machine_info.max_cpus has been finalized */
#define MAX_CPUS_WAIT   0x2	/* a thread is blocked waiting for the value */
37
38 /* IO memory map services */
39
40 /* Map memory map IO space */
41 vm_offset_t ml_io_map(
42 vm_offset_t phys_addr,
43 vm_size_t size)
44 {
45 return(io_map(phys_addr,size));
46 }
47
48 /* boot memory allocation */
49 vm_offset_t ml_static_malloc(
50 vm_size_t size)
51 {
52 return((vm_offset_t)NULL);
53 }
54
55 vm_offset_t
56 ml_static_ptovirt(
57 vm_offset_t paddr)
58 {
59 return phystokv(paddr);
60 }
61
62 void
63 ml_static_mfree(
64 vm_offset_t vaddr,
65 vm_size_t size)
66 {
67 return;
68 }
69
70 /* virtual to physical on wired pages */
71 vm_offset_t ml_vtophys(
72 vm_offset_t vaddr)
73 {
74 return kvtophys(vaddr);
75 }
76
77 /* Interrupt handling */
78
79 /* Initialize Interrupts */
80 void ml_init_interrupt(void)
81 {
82 (void) ml_set_interrupts_enabled(TRUE);
83 }
84
85 /* Get Interrupts Enabled */
86 boolean_t ml_get_interrupts_enabled(void)
87 {
88 unsigned long flags;
89
90 __asm__ volatile("pushf; popl %0" : "=r" (flags));
91 return (flags & EFL_IF) != 0;
92 }
93
94 /* Set Interrupts Enabled */
95 boolean_t ml_set_interrupts_enabled(boolean_t enable)
96 {
97 unsigned long flags;
98
99 __asm__ volatile("pushf; popl %0" : "=r" (flags));
100
101 if (enable)
102 __asm__ volatile("sti");
103 else
104 __asm__ volatile("cli");
105
106 return (flags & EFL_IF) != 0;
107 }
108
109 /* Check if running at interrupt context */
110 boolean_t ml_at_interrupt_context(void)
111 {
112 return get_interrupt_level() != 0;
113 }
114
/* Generate a fake interrupt -- unimplemented on this architecture. */
void ml_cause_interrupt(void)
{
	/* No software-interrupt mechanism exists yet; any call is fatal. */
	panic("ml_cause_interrupt not defined yet on Intel");
}
120
/*
 * Apply a machine-specific scheduling policy to a thread.
 * MACHINE_GROUP policies are satisfied by binding the thread to the
 * master processor; MACHINE_NETWORK_WORKLOOP additionally grants a
 * one-step priority boost.
 */
void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{
	if (policy_id == MACHINE_GROUP)
		thread_bind(thread, master_processor);

	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		/* Priority fields are protected by the thread lock at splsched. */
		spl_t s = splsched();

		thread_lock(thread);

		/*
		 * NOTE(review): each call bumps the priority again --
		 * presumably callers invoke this once per thread; verify
		 * against callers.
		 */
		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}
140
/*
 * Install an interrupt handler for the given nub/source through the
 * platform expert, preserving the caller's interrupt-enable state.
 * (The previous "Initialize Interrupts" comment was a copy/paste error.)
 */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	boolean_t current_state;

	/* Save the current state so it can be restored afterwards. */
	current_state = ml_get_interrupts_enabled();

	PE_install_interrupt_handler(nub, source, target,
	    (IOInterruptHandler) handler, refCon);

	(void) ml_set_interrupts_enabled(current_state);

	/* NOTE(review): screen acquisition in this path mirrors other
	   platforms' interrupt bring-up -- confirm it belongs here. */
	initialize_screen(0, kPEAcquireScreen);
}
160
/*
 * Idle this cpu until the next interrupt arrives.
 */
void
machine_idle(void)
{
	DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
	/*
	 * "sti; hlt" in a single asm: STI takes effect after the following
	 * instruction, so no interrupt can slip in between enabling and
	 * halting -- the wakeup interrupt is guaranteed to end the HLT.
	 */
	__asm__ volatile("sti; hlt": : :"memory");
	/* An interrupt woke us; mask again before returning to the caller. */
	__asm__ volatile("cli");
	DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
}
169
/*
 * Wake an idle processor by sending it an interprocessor interrupt,
 * which breaks it out of the HLT in machine_idle().
 */
void
machine_signal_idle(
	processor_t processor)
{
	cpu_interrupt(processor->slot_num);
}
176
177 kern_return_t
178 ml_processor_register(
179 cpu_id_t cpu_id,
180 uint32_t lapic_id,
181 processor_t *processor,
182 ipi_handler_t *ipi_handler,
183 boolean_t boot_cpu)
184 {
185 kern_return_t ret;
186 int target_cpu;
187
188 if (cpu_register(&target_cpu) != KERN_SUCCESS)
189 return KERN_FAILURE;
190
191 assert((boot_cpu && (target_cpu == 0)) ||
192 (!boot_cpu && (target_cpu != 0)));
193
194 lapic_cpu_map(lapic_id, target_cpu);
195 cpu_data[target_cpu].cpu_id = cpu_id;
196 cpu_data[target_cpu].cpu_phys_number = lapic_id;
197 *processor = cpu_to_processor(target_cpu);
198 *ipi_handler = NULL;
199
200 return KERN_SUCCESS;
201 }
202
203 void
204 ml_cpu_get_info(ml_cpu_info_t *cpu_info)
205 {
206 boolean_t os_supports_sse;
207 i386_cpu_info_t *cpuid_infop;
208
209 if (cpu_info == NULL)
210 return;
211
212 /*
213 * Are we supporting XMM/SSE/SSE2?
214 * As distinct from whether the cpu has these capabilities.
215 */
216 os_supports_sse = get_cr4() & CR4_XMM;
217 if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
218 cpu_info->vector_unit = 4;
219 else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
220 cpu_info->vector_unit = 3;
221 else if (cpuid_features() & CPUID_FEATURE_MMX)
222 cpu_info->vector_unit = 2;
223 else
224 cpu_info->vector_unit = 0;
225
226 cpuid_infop = cpuid_info();
227
228 cpu_info->cache_line_size = cpuid_infop->cache_linesize;
229
230 cpu_info->l1_icache_size = cpuid_infop->cache_size[L1I];
231 cpu_info->l1_dcache_size = cpuid_infop->cache_size[L1D];
232
233 cpu_info->l2_settings = 1;
234 cpu_info->l2_cache_size = cpuid_infop->cache_size[L2U];
235
236 /* XXX No L3 */
237 cpu_info->l3_settings = 0;
238 cpu_info->l3_cache_size = 0xFFFFFFFF;
239 }
240
/*
 * Finalize machine_info.max_cpus.  Called once during startup; any
 * thread already blocked in ml_get_max_cpus() is woken.  Interrupts
 * are disabled to serialize access to max_cpus_initialized.
 */
void
ml_init_max_cpus(unsigned long max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		/* Accept only values in (0, NCPUS); otherwise keep default. */
		if (max_cpus > 0 && max_cpus < NCPUS)
			machine_info.max_cpus = max_cpus;
		/* Release any waiter parked in ml_get_max_cpus(). */
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}
256
/*
 * Return machine_info.max_cpus, blocking (uninterruptibly) until
 * ml_init_max_cpus() has set it.  Interrupts are disabled to
 * serialize the handshake on max_cpus_initialized.
 */
int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		/* Not set yet: mark that a waiter exists, then block. */
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void)thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return(machine_info.max_cpus);
}
271
/* Stubs for pc tracing mechanism */

int *pc_trace_buf;	/* never allocated on i386; tracing is a no-op */
int pc_trace_cnt = 0;	/* always zero: no trace entries are recorded */
276
/*
 * Stub: branch-event tracing is not supported on i386; always
 * reports failure (0).  Fix: K&R empty parameter list replaced with a
 * proper (void) prototype.
 */
int
set_be_bit(void)
{
	return(0);
}
282
/*
 * Stub: branch-event tracing is not supported on i386; always
 * reports failure (0).  Fix: K&R empty parameter list replaced with a
 * proper (void) prototype.
 */
int
clr_be_bit(void)
{
	return(0);
}
288
/*
 * Stub: branch-event tracing is never active on i386; always 0.
 * Fix: K&R empty parameter list replaced with a proper (void)
 * prototype.
 */
int
be_tracing(void)
{
	return(0);
}
294
/*
 * Out-of-line version of current_act for callers that cannot use the
 * macro form; the #undef exposes this real function definition.
 */
#undef current_act
thread_act_t
current_act(void)
{
	return(current_act_fast());
}
301
/*
 * Out-of-line version of current_thread; the #undef exposes this real
 * function definition.  It returns current_act_fast() directly --
 * presumably thread and activation are interchangeable in this era of
 * the kernel; verify against the thread/act definitions.
 */
#undef current_thread
thread_t
current_thread(void)
{
	return(current_act_fast());
}