]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/machine_routines.c
xnu-792.6.56.tar.gz
[apple/xnu.git] / osfmk / i386 / machine_routines.c
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
ff6e181a
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
1c79356b 12 *
ff6e181a
A
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
ff6e181a
A
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
1c79356b
A
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23#include <i386/machine_routines.h>
24#include <i386/io_map_entries.h>
55e303ae
A
25#include <i386/cpuid.h>
26#include <i386/fpu.h>
27#include <kern/processor.h>
91447636 28#include <kern/machine.h>
1c79356b 29#include <kern/cpu_data.h>
91447636
A
30#include <kern/cpu_number.h>
31#include <kern/thread.h>
32#include <i386/cpu_data.h>
55e303ae
A
33#include <i386/machine_cpu.h>
34#include <i386/mp.h>
35#include <i386/mp_events.h>
91447636
A
36#include <i386/cpu_threads.h>
37#include <i386/pmap.h>
38#include <i386/misc_protos.h>
39#include <mach/vm_param.h>
40
/* Local MIN helper; evaluates its winning argument twice — use with care. */
#define MIN(a,b) ((a)<(b)? (a) : (b))

extern void initialize_screen(Boot_Video *, unsigned int);
extern void wakeup(void *);

/* One-time-init state for machine_info.max_cpus; see ml_init_max_cpus(). */
static int max_cpus_initialized = 0;

#define MAX_CPUS_SET    0x1	/* machine_info.max_cpus has been published */
#define MAX_CPUS_WAIT   0x2	/* a thread is blocked in ml_get_max_cpus() */
1c79356b
A
50
51/* IO memory map services */
52
53/* Map memory map IO space */
54vm_offset_t ml_io_map(
55 vm_offset_t phys_addr,
56 vm_size_t size)
57{
58 return(io_map(phys_addr,size));
59}
60
61/* boot memory allocation */
62vm_offset_t ml_static_malloc(
91447636 63 __unused vm_size_t size)
1c79356b
A
64{
65 return((vm_offset_t)NULL);
66}
67
68vm_offset_t
69ml_static_ptovirt(
70 vm_offset_t paddr)
71{
91447636 72 return (vm_offset_t)((unsigned) paddr | LINEAR_KERNEL_ADDRESS);
1c79356b
A
73}
74
91447636
A
75
/*
 *	Routine:        ml_static_mfree
 *	Function:
 *	Return a page-aligned range of boot-time static memory to the
 *	VM free list.  Each resident page in [vaddr, vaddr+size) is
 *	unmapped from the kernel pmap and handed to vm_page_create();
 *	the wire count is decremented to balance the page having been
 *	treated as wired while statically allocated.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;

	/* Refuse addresses below the kernel's address space. */
	if (vaddr < VM_MIN_KERNEL_ADDRESS) return;

	assert((vaddr & (PAGE_SIZE-1)) == 0); /* must be page aligned */

	/* Walk the range one page at a time, rounding the end up to a page. */
	for (vaddr_cur = vaddr;
	     vaddr_cur < round_page_32(vaddr+size);
	     vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, (addr64_t)vaddr_cur);
		/* Only pages actually resident in the kernel pmap are freed. */
		if (ppn != (vm_offset_t)NULL) {
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
			/* Hand the physical page to the VM free pool. */
			vm_page_create(ppn,(ppn+1));
			/* Undo the implicit wiring of static memory.
			 * NOTE(review): unlocked decrement of a global —
			 * presumably safe only in the boot context this
			 * is called from; confirm against callers. */
			vm_page_wire_count--;
		}
	}
}
103
104/* virtual to physical on wired pages */
105vm_offset_t ml_vtophys(
106 vm_offset_t vaddr)
107{
108 return kvtophys(vaddr);
109}
110
111/* Interrupt handling */
112
55e303ae
A
113/* Initialize Interrupts */
114void ml_init_interrupt(void)
115{
116 (void) ml_set_interrupts_enabled(TRUE);
117}
118
1c79356b
A
119/* Get Interrupts Enabled */
120boolean_t ml_get_interrupts_enabled(void)
121{
122 unsigned long flags;
123
124 __asm__ volatile("pushf; popl %0" : "=r" (flags));
125 return (flags & EFL_IF) != 0;
126}
127
128/* Set Interrupts Enabled */
129boolean_t ml_set_interrupts_enabled(boolean_t enable)
130{
131 unsigned long flags;
132
133 __asm__ volatile("pushf; popl %0" : "=r" (flags));
134
135 if (enable)
136 __asm__ volatile("sti");
137 else
138 __asm__ volatile("cli");
139
140 return (flags & EFL_IF) != 0;
141}
142
143/* Check if running at interrupt context */
144boolean_t ml_at_interrupt_context(void)
145{
146 return get_interrupt_level() != 0;
147}
148
/* Generate a fake interrupt — not implemented for i386; any call panics. */
void ml_cause_interrupt(void)
{
	panic("ml_cause_interrupt not defined yet on Intel");
}
154
d52fe63f
A
155void ml_thread_policy(
156 thread_t thread,
157 unsigned policy_id,
158 unsigned policy_info)
159{
55e303ae
A
160 if (policy_id == MACHINE_GROUP)
161 thread_bind(thread, master_processor);
162
163 if (policy_info & MACHINE_NETWORK_WORKLOOP) {
164 spl_t s = splsched();
165
166 thread_lock(thread);
167
168 set_priority(thread, thread->priority + 1);
169
170 thread_unlock(thread);
171 splx(s);
172 }
d52fe63f
A
173}
174
1c79356b
A
175/* Initialize Interrupts */
176void ml_install_interrupt_handler(
177 void *nub,
178 int source,
179 void *target,
180 IOInterruptHandler handler,
181 void *refCon)
182{
183 boolean_t current_state;
184
185 current_state = ml_get_interrupts_enabled();
186
187 PE_install_interrupt_handler(nub, source, target,
188 (IOInterruptHandler) handler, refCon);
189
190 (void) ml_set_interrupts_enabled(current_state);
55e303ae
A
191
192 initialize_screen(0, kPEAcquireScreen);
193}
194
91447636
A
/*
 * Default idle loop body: enable interrupts and halt until the next
 * interrupt arrives ("sti; hlt" executes atomically with respect to
 * interrupt delivery on x86).
 */
static void
cpu_idle(void)
{
	__asm__ volatile("sti; hlt": : :"memory");
}
/* Indirection point so the idle behavior can be replaced at runtime. */
void (*cpu_idle_handler)(void) = cpu_idle;
201
55e303ae
A
/*
 * Per-cpu idle entry.  Tracks how many threads in this core are
 * active so an idle cpu can avoid halting (and dropping the core
 * into a low-power state) while a sibling is still running.
 */
void
machine_idle(void)
{
	cpu_core_t *my_core = cpu_core();
	int others_active;

	/*
	 * We halt this cpu thread
	 * unless kernel param idlehalt is false and no other thread
	 * in the same core is active - if so, don't halt so that this
	 * core doesn't go into a low-power mode.
	 */
	/* NOTE(review): atomic_decl_and_test presumably returns TRUE when
	 * the count reaches zero, so others_active is set while siblings
	 * remain active — confirm against the atomic primitives. */
	others_active = !atomic_decl_and_test(
		(long *) &my_core->active_threads, 1);
	if (idlehalt || others_active) {
		DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
		cpu_idle_handler();
		DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
	} else {
		/* Not halting: just re-enable interrupts and spin onward. */
		__asm__ volatile("sti");
	}
	/* We are active again; rejoin the core's active count. */
	atomic_incl((long *) &my_core->active_threads, 1);
}
225
/*
 * Kick an idle processor by sending it an interrupt, waking it out
 * of the halted state entered in machine_idle().
 */
void
machine_signal_idle(
	processor_t processor)
{
	cpu_interrupt(PROCESSOR_DATA(processor, slot_num));
}
232
/*
 * Register a processor discovered by the platform code.
 *
 * cpu_id:        platform-assigned identifier for the cpu.
 * lapic_id:      the cpu's local APIC id, mapped to its kernel cpu number.
 * processor_out: receives the processor_t for this cpu (the boot cpu's
 *                pre-existing structure, or one allocated here).
 * ipi_handler:   always set to NULL on i386.
 * boot_cpu:      TRUE for the bootstrap processor.
 *
 * Returns KERN_SUCCESS, or KERN_FAILURE if any per-cpu allocation
 * fails (allocations already made are released via the failed: path).
 */
kern_return_t
ml_processor_register(
	cpu_id_t cpu_id,
	uint32_t lapic_id,
	processor_t *processor_out,
	ipi_handler_t *ipi_handler,
	boolean_t boot_cpu)
{
	int target_cpu;
	cpu_data_t *this_cpu_datap;

	this_cpu_datap = cpu_data_alloc(boot_cpu);
	if (this_cpu_datap == NULL) {
		return KERN_FAILURE;
	}
	target_cpu = this_cpu_datap->cpu_number;
	/* The boot cpu must be cpu 0; secondaries must not be. */
	assert((boot_cpu && (target_cpu == 0)) ||
	       (!boot_cpu && (target_cpu != 0)));

	/* Record the lapic-id -> cpu-number association. */
	lapic_cpu_map(lapic_id, target_cpu);

	this_cpu_datap->cpu_id = cpu_id;
	this_cpu_datap->cpu_phys_number = lapic_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_console_buf == NULL)
		goto failed;

	/* The boot cpu's pmap and processor already exist; only
	 * secondaries need them allocated here. */
	if (!boot_cpu) {
		this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
		if (this_cpu_datap->cpu_pmap == NULL)
			goto failed;

		this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
		if (this_cpu_datap->cpu_processor == NULL)
			goto failed;
		processor_init(this_cpu_datap->cpu_processor, target_cpu);
	}

	*processor_out = this_cpu_datap->cpu_processor;
	*ipi_handler = NULL;

	return KERN_SUCCESS;

failed:
	/* Unwind whatever was allocated above (frees tolerate NULL). */
	cpu_processor_free(this_cpu_datap->cpu_processor);
	pmap_cpu_free(this_cpu_datap->cpu_pmap);
	console_cpu_free(this_cpu_datap->cpu_console_buf);
	return KERN_FAILURE;
}
283
43866e37 284void
91447636 285ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
43866e37 286{
55e303ae
A
287 boolean_t os_supports_sse;
288 i386_cpu_info_t *cpuid_infop;
289
91447636 290 if (cpu_infop == NULL)
55e303ae
A
291 return;
292
293 /*
294 * Are we supporting XMM/SSE/SSE2?
295 * As distinct from whether the cpu has these capabilities.
296 */
297 os_supports_sse = get_cr4() & CR4_XMM;
298 if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
91447636 299 cpu_infop->vector_unit = 4;
55e303ae 300 else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
91447636 301 cpu_infop->vector_unit = 3;
55e303ae 302 else if (cpuid_features() & CPUID_FEATURE_MMX)
91447636 303 cpu_infop->vector_unit = 2;
55e303ae 304 else
91447636 305 cpu_infop->vector_unit = 0;
55e303ae
A
306
307 cpuid_infop = cpuid_info();
308
91447636 309 cpu_infop->cache_line_size = cpuid_infop->cache_linesize;
55e303ae 310
91447636
A
311 cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
312 cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];
55e303ae 313
91447636
A
314 if (cpuid_infop->cache_size[L2U] > 0) {
315 cpu_infop->l2_settings = 1;
316 cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
317 } else {
318 cpu_infop->l2_settings = 0;
319 cpu_infop->l2_cache_size = 0xFFFFFFFF;
320 }
55e303ae 321
91447636
A
322 if (cpuid_infop->cache_size[L3U] > 0) {
323 cpu_infop->l2_settings = 1;
324 cpu_infop->l2_cache_size = cpuid_infop->cache_size[L3U];
325 } else {
326 cpu_infop->l3_settings = 0;
327 cpu_infop->l3_cache_size = 0xFFFFFFFF;
328 }
43866e37
A
329}
330
331void
332ml_init_max_cpus(unsigned long max_cpus)
333{
55e303ae
A
334 boolean_t current_state;
335
336 current_state = ml_set_interrupts_enabled(FALSE);
337 if (max_cpus_initialized != MAX_CPUS_SET) {
91447636
A
338 if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
339 /*
340 * Note: max_cpus is the number of enable processors
341 * that ACPI found; max_ncpus is the maximum number
342 * that the kernel supports or that the "cpus="
343 * boot-arg has set. Here we take int minimum.
344 */
345 machine_info.max_cpus = MIN(max_cpus, max_ncpus);
346 }
55e303ae
A
347 if (max_cpus_initialized == MAX_CPUS_WAIT)
348 wakeup((event_t)&max_cpus_initialized);
349 max_cpus_initialized = MAX_CPUS_SET;
350 }
351 (void) ml_set_interrupts_enabled(current_state);
43866e37
A
352}
353
354int
355ml_get_max_cpus(void)
356{
55e303ae 357 boolean_t current_state;
43866e37 358
55e303ae
A
359 current_state = ml_set_interrupts_enabled(FALSE);
360 if (max_cpus_initialized != MAX_CPUS_SET) {
361 max_cpus_initialized = MAX_CPUS_WAIT;
362 assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
363 (void)thread_block(THREAD_CONTINUE_NULL);
364 }
365 (void) ml_set_interrupts_enabled(current_state);
366 return(machine_info.max_cpus);
43866e37
A
367}
368
91447636
A
/*
 * Called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.  All of that work is
 * deferred to cpu_thread_init(), so this is deliberately empty.
 */
void
ml_cpu_up(void)
{
}
378
/*
 * Called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.  Nothing is required
 * on i386, so this is deliberately empty.
 */
void
ml_cpu_down(void)
{
}
388
1c79356b
A
/*
 * Stubs for the pc tracing mechanism: the buffer and count exist for
 * linkage, and the branch-event controls always report "disabled".
 */

int *pc_trace_buf;
int pc_trace_cnt = 0;

/* Enable branch-event tracing — unsupported; reports 0. */
int
set_be_bit(void)
{
	return 0;
}

/* Disable branch-event tracing — unsupported; reports 0. */
int
clr_be_bit(void)
{
	return 0;
}

/* Query branch-event tracing — always off. */
int
be_tracing(void)
{
	return 0;
}
9bccf70c 411
91447636
A
/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
/* Out-of-line version of current_act() for callers that cannot
 * use the inline; simply returns the current thread. */
extern thread_t current_act(void);
thread_t
current_act(void)
{
  return(current_thread_fast());
}
55e303ae
A
422
/* Out-of-line current_thread(): the macro form is undefined first so a
 * real function symbol exists for code that takes its address or links
 * against it. */
#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
	return(current_thread_fast());
}