/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <i386/cpu_data.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/mp_events.h>
#include <i386/cpu_threads.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <mach/vm_param.h>

#define MIN(a,b) ((a)<(b)? (a) : (b))

extern void initialize_screen(Boot_Video *, unsigned int);
extern void wakeup(void *);

static int max_cpus_initialized = 0;

#define MAX_CPUS_SET	0x1
#define MAX_CPUS_WAIT	0x2

/* IO memory map services */

/* Map memory-mapped IO space */
vm_offset_t ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return (io_map(phys_addr, size));
}
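
/*
 * Usage sketch (illustrative, not part of the original source): a driver
 * attach path might map a device's register page and read a register.
 * The physical address and register offset below are hypothetical.
 *
 *	vm_offset_t regs = ml_io_map(0xFEC00000, PAGE_SIZE);
 *	uint32_t id = *(volatile uint32_t *)(regs + 0x20);
 */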

/* boot memory allocation */
vm_offset_t ml_static_malloc(
	__unused vm_size_t size)
{
	return ((vm_offset_t)NULL);
}

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return (vm_offset_t)((unsigned) paddr | LINEAR_KERNEL_ADDRESS);
}


/*
 *	Routine:	ml_static_mfree
 *	Function:	Unmap the given wired, page-aligned range and return
 *			its pages to the VM free list.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;

	if (vaddr < VM_MIN_KERNEL_ADDRESS) return;

	assert((vaddr & (PAGE_SIZE-1)) == 0); /* must be page aligned */

	for (vaddr_cur = vaddr;
	     vaddr_cur < round_page_32(vaddr+size);
	     vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, (addr64_t)vaddr_cur);
		if (ppn != (vm_offset_t)NULL) {
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
			vm_page_create(ppn, (ppn+1));
			vm_page_wire_count--;
		}
	}
}
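
/*
 * Usage sketch (illustrative): once a page-aligned boot-time region is no
 * longer needed, it can be returned to the VM system. The symbols
 * bootstrap_buf and bootstrap_buf_size are hypothetical.
 *
 *	ml_static_mfree((vm_offset_t)bootstrap_buf, bootstrap_buf_size);
 */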

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}

/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
	(void) ml_set_interrupts_enabled(TRUE);
}

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
	unsigned long flags;

	__asm__ volatile("pushf; popl %0" : "=r" (flags));
	return (flags & EFL_IF) != 0;
}

/* Set Interrupts Enabled; returns the previous state */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
	unsigned long flags;

	__asm__ volatile("pushf; popl %0" : "=r" (flags));

	if (enable)
		__asm__ volatile("sti");
	else
		__asm__ volatile("cli");

	return (flags & EFL_IF) != 0;
}
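
/*
 * Usage sketch (illustrative): since the routine returns the previous
 * interrupt state, callers bracket a critical section by saving and
 * restoring that state rather than unconditionally re-enabling, because
 * the section may be entered with interrupts already disabled:
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	... touch state that must not be preempted by an interrupt ...
 *	(void) ml_set_interrupts_enabled(istate);
 */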

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
	return get_interrupt_level() != 0;
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
	panic("ml_cause_interrupt not defined yet on Intel");
}

void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{
	if (policy_id == MACHINE_GROUP)
		thread_bind(thread, master_processor);

	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}
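
/*
 * Usage sketch (illustrative): per the code above, MACHINE_GROUP binds the
 * thread to the master processor, and MACHINE_NETWORK_WORKLOOP in
 * policy_info additionally bumps its priority by one. A network workloop
 * thread (wl_thread is hypothetical) might therefore be configured as:
 *
 *	ml_thread_policy(wl_thread, MACHINE_GROUP, MACHINE_NETWORK_WORKLOOP);
 */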

/* Install an interrupt handler for the given nub/source via the platform expert */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	PE_install_interrupt_handler(nub, source, target,
				     (IOInterruptHandler) handler, refCon);

	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(0, kPEAcquireScreen);
}

/*
 * Idle the processor. The "sti" takes effect only after the following
 * instruction, so no interrupt can be delivered in the window between
 * enabling interrupts and halting: the cpu halts and wakes atomically.
 */
static void
cpu_idle(void)
{
	__asm__ volatile("sti; hlt": : :"memory");
}
void (*cpu_idle_handler)(void) = cpu_idle;

void
machine_idle(void)
{
	cpu_core_t *my_core = cpu_core();
	int others_active;

	/*
	 * We normally halt this cpu thread. The exception is when the
	 * kernel param idlehalt is false and this is the last active
	 * thread in the core: then we avoid halting, so that the core
	 * is not placed into a low-power mode.
	 */
	others_active = !atomic_decl_and_test(
		(long *) &my_core->active_threads, 1);
	if (idlehalt || others_active) {
		DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
		cpu_idle_handler();
		DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
	} else {
		__asm__ volatile("sti");
	}
	atomic_incl((long *) &my_core->active_threads, 1);
}

void
machine_signal_idle(
	processor_t processor)
{
	cpu_interrupt(PROCESSOR_DATA(processor, slot_num));
}

kern_return_t
ml_processor_register(
	cpu_id_t cpu_id,
	uint32_t lapic_id,
	processor_t *processor_out,
	ipi_handler_t *ipi_handler,
	boolean_t boot_cpu)
{
	int target_cpu;
	cpu_data_t *this_cpu_datap;

	this_cpu_datap = cpu_data_alloc(boot_cpu);
	if (this_cpu_datap == NULL) {
		return KERN_FAILURE;
	}
	target_cpu = this_cpu_datap->cpu_number;
	assert((boot_cpu && (target_cpu == 0)) ||
	       (!boot_cpu && (target_cpu != 0)));

	lapic_cpu_map(lapic_id, target_cpu);

	this_cpu_datap->cpu_id = cpu_id;
	this_cpu_datap->cpu_phys_number = lapic_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_console_buf == NULL)
		goto failed;

	if (!boot_cpu) {
		this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
		if (this_cpu_datap->cpu_pmap == NULL)
			goto failed;

		this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
		if (this_cpu_datap->cpu_processor == NULL)
			goto failed;
		processor_init(this_cpu_datap->cpu_processor, target_cpu);
	}

	*processor_out = this_cpu_datap->cpu_processor;
	*ipi_handler = NULL;

	return KERN_SUCCESS;

failed:
	cpu_processor_free(this_cpu_datap->cpu_processor);
	pmap_cpu_free(this_cpu_datap->cpu_pmap);
	console_cpu_free(this_cpu_datap->cpu_console_buf);
	return KERN_FAILURE;
}
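
/*
 * Usage sketch (illustrative): platform startup code registers each
 * processor it discovers, boot cpu first. The variable names here are
 * hypothetical; on failure a caller would fall back to running with
 * fewer cpus.
 *
 *	processor_t	proc;
 *	ipi_handler_t	ipi;
 *
 *	if (ml_processor_register(cpu_id, lapic_id, &proc, &ipi, boot_cpu)
 *	    != KERN_SUCCESS)
 *		... give up on this cpu ...
 */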

void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
	boolean_t os_supports_sse;
	i386_cpu_info_t *cpuid_infop;

	if (cpu_infop == NULL)
		return;

	/*
	 * Are we supporting XMM/SSE/SSE2 in the OS?
	 * (As distinct from whether the cpu has these capabilities.)
	 */
	os_supports_sse = get_cr4() & CR4_XMM;
	if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
		cpu_infop->vector_unit = 4;
	else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
		cpu_infop->vector_unit = 3;
	else if (cpuid_features() & CPUID_FEATURE_MMX)
		cpu_infop->vector_unit = 2;
	else
		cpu_infop->vector_unit = 0;

	cpuid_infop = cpuid_info();

	cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

	cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
	cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

	if (cpuid_infop->cache_size[L2U] > 0) {
		cpu_infop->l2_settings = 1;
		cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
	} else {
		cpu_infop->l2_settings = 0;
		cpu_infop->l2_cache_size = 0xFFFFFFFF;
	}

	if (cpuid_infop->cache_size[L3U] > 0) {
		cpu_infop->l3_settings = 1;
		cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
	} else {
		cpu_infop->l3_settings = 0;
		cpu_infop->l3_cache_size = 0xFFFFFFFF;
	}
}
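
/*
 * Usage sketch (illustrative): a caller queries the cpu's cache geometry,
 * for example to size a working set to the L2 cache:
 *
 *	ml_cpu_info_t info;
 *
 *	ml_cpu_get_info(&info);
 *	if (info.l2_settings)
 *		... limit working set to info.l2_cache_size bytes ...
 */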

void
ml_init_max_cpus(unsigned long max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
			/*
			 * Note: max_cpus is the number of enabled processors
			 * that ACPI found; max_ncpus is the maximum number
			 * that the kernel supports or that the "cpus="
			 * boot-arg has set. Here we take the minimum.
			 */
			machine_info.max_cpus = MIN(max_cpus, max_ncpus);
		}
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}

int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void)thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return (machine_info.max_cpus);
}
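
/*
 * Usage note (illustrative): ml_get_max_cpus() blocks until
 * ml_init_max_cpus() has run, using &max_cpus_initialized as the wait
 * event for the assert_wait()/wakeup() handshake above. A subsystem
 * sizing per-cpu data might call it once cpu enumeration is possible:
 *
 *	int ncpus = ml_get_max_cpus();
 *	... allocate ncpus per-cpu slots ...
 */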

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
	return;
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	return;
}

/* Stubs for pc tracing mechanism */

int *pc_trace_buf;
int pc_trace_cnt = 0;

int
set_be_bit(void)
{
	return (0);
}

int
clr_be_bit(void)
{
	return (0);
}

int
be_tracing(void)
{
	return (0);
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
	return (current_thread_fast());
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
	return (current_thread_fast());
}