/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <i386/cpu_data.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/mp_events.h>
#include <i386/cpu_threads.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <mach/vm_param.h>

#define MIN(a,b) ((a)<(b)? (a) : (b))

extern void initialize_screen(Boot_Video *, unsigned int);
extern void wakeup(void *);

static int max_cpus_initialized = 0;

#define MAX_CPUS_SET 0x1
#define MAX_CPUS_WAIT 0x2

/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
        vm_offset_t phys_addr,
        vm_size_t size)
{
        return(io_map(phys_addr, size));
}

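/*
 * Boot-time ("static") allocation is not implemented on i386; the routine
 * below is a stub that always returns a NULL offset, so callers must be
 * prepared for the allocation to fail.
 */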
/* boot memory allocation */
vm_offset_t ml_static_malloc(
        __unused vm_size_t size)
{
        return((vm_offset_t)NULL);
}

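/*
 * Convert a physical address to the virtual address at which it is linearly
 * mapped into the kernel, by or-ing in LINEAR_KERNEL_ADDRESS.
 */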
vm_offset_t
ml_static_ptovirt(
        vm_offset_t paddr)
{
        return (vm_offset_t)((unsigned) paddr | LINEAR_KERNEL_ADDRESS);
}

/*
 *	Routine:	ml_static_mfree
 *	Function:	Unmap the wired pages backing [vaddr, vaddr + size)
 *			and release them to the VM free list.
 */
void
ml_static_mfree(
        vm_offset_t vaddr,
        vm_size_t size)
{
        vm_offset_t vaddr_cur;
        ppnum_t ppn;

        if (vaddr < VM_MIN_KERNEL_ADDRESS) return;

        assert((vaddr & (PAGE_SIZE-1)) == 0); /* must be page aligned */

        for (vaddr_cur = vaddr;
             vaddr_cur < round_page_32(vaddr+size);
             vaddr_cur += PAGE_SIZE) {
                ppn = pmap_find_phys(kernel_pmap, (addr64_t)vaddr_cur);
                if (ppn != (vm_offset_t)NULL) {
                        pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
                        vm_page_create(ppn, (ppn+1));
                        vm_page_wire_count--;
                }
        }
}

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
        vm_offset_t vaddr)
{
        return kvtophys(vaddr);
}

/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
        (void) ml_set_interrupts_enabled(TRUE);
}

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
        unsigned long flags;

        __asm__ volatile("pushf; popl %0" : "=r" (flags));
        return (flags & EFL_IF) != 0;
}

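/*
 * ml_set_interrupts_enabled() returns the interrupt-enable state that was in
 * effect before the call (the EFLAGS IF bit), so it can be used to bracket a
 * critical section and then restore the previous state.  Illustrative usage
 * only (the same pattern appears in ml_init_max_cpus() below):
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	... critical section ...
 *	(void) ml_set_interrupts_enabled(istate);
 */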
/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
        unsigned long flags;

        __asm__ volatile("pushf; popl %0" : "=r" (flags));

        if (enable)
                __asm__ volatile("sti");
        else
                __asm__ volatile("cli");

        return (flags & EFL_IF) != 0;
}

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
        return get_interrupt_level() != 0;
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
        panic("ml_cause_interrupt not defined yet on Intel");
}

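/*
 * ml_thread_policy(): MACHINE_GROUP binds the thread to the master
 * processor; MACHINE_NETWORK_WORKLOOP raises the thread's priority by one
 * under the thread lock.
 */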
void ml_thread_policy(
        thread_t thread,
        unsigned policy_id,
        unsigned policy_info)
{
        if (policy_id == MACHINE_GROUP)
                thread_bind(thread, master_processor);

        if (policy_info & MACHINE_NETWORK_WORKLOOP) {
                spl_t s = splsched();

                thread_lock(thread);

                set_priority(thread, thread->priority + 1);

                thread_unlock(thread);
                splx(s);
        }
}

/* Install an interrupt handler, preserving the interrupt-enable state */
void ml_install_interrupt_handler(
        void *nub,
        int source,
        void *target,
        IOInterruptHandler handler,
        void *refCon)
{
        boolean_t current_state;

        current_state = ml_get_interrupts_enabled();

        PE_install_interrupt_handler(nub, source, target,
                                     (IOInterruptHandler) handler, refCon);

        (void) ml_set_interrupts_enabled(current_state);

        initialize_screen(0, kPEAcquireScreen);
}

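/*
 * Default idle loop body.  "sti; hlt" enables interrupts and halts in one
 * step: the effect of sti is delayed until after the following instruction,
 * so an interrupt cannot slip in between the enable and the halt.
 * machine_idle() calls through cpu_idle_handler, which defaults to cpu_idle()
 * but can be repointed at an alternative idle routine.
 */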
static void
cpu_idle(void)
{
        __asm__ volatile("sti; hlt": : :"memory");
}
void (*cpu_idle_handler)(void) = cpu_idle;

void
machine_idle(void)
{
        cpu_core_t *my_core = cpu_core();
        int others_active;

        /*
         * Normally we simply halt this cpu thread.  The exception is when
         * the kernel parameter idlehalt is false and this is the last
         * active thread in the core: in that case we skip the halt so that
         * the core does not drop into a low-power mode.
         */
        others_active = !atomic_decl_and_test(
                                (long *) &my_core->active_threads, 1);
        if (idlehalt || others_active) {
                DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
                cpu_idle_handler();
                DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
        } else {
                __asm__ volatile("sti");
        }
        atomic_incl((long *) &my_core->active_threads, 1);
}

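/*
 * Wake an idle processor: send it an inter-processor interrupt so that it
 * returns from the hlt in its idle loop.
 */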
void
machine_signal_idle(
        processor_t processor)
{
        cpu_interrupt(PROCESSOR_DATA(processor, slot_num));
}

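/*
 * Register a processor with the kernel: allocate its per-cpu data and
 * console buffer, map its local APIC id to a cpu number, and (for non-boot
 * cpus) allocate and initialize its pmap and processor structures.  On
 * success the new processor_t is returned through processor_out; no
 * machine-specific IPI handler is needed, so *ipi_handler is set to NULL.
 */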
kern_return_t
ml_processor_register(
        cpu_id_t cpu_id,
        uint32_t lapic_id,
        processor_t *processor_out,
        ipi_handler_t *ipi_handler,
        boolean_t boot_cpu)
{
        int target_cpu;
        cpu_data_t *this_cpu_datap;

        this_cpu_datap = cpu_data_alloc(boot_cpu);
        if (this_cpu_datap == NULL) {
                return KERN_FAILURE;
        }
        target_cpu = this_cpu_datap->cpu_number;
        assert((boot_cpu && (target_cpu == 0)) ||
               (!boot_cpu && (target_cpu != 0)));

        lapic_cpu_map(lapic_id, target_cpu);

        this_cpu_datap->cpu_id = cpu_id;
        this_cpu_datap->cpu_phys_number = lapic_id;

        this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
        if (this_cpu_datap->cpu_console_buf == NULL)
                goto failed;

        if (!boot_cpu) {
                this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
                if (this_cpu_datap->cpu_pmap == NULL)
                        goto failed;

                this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
                if (this_cpu_datap->cpu_processor == NULL)
                        goto failed;
                processor_init(this_cpu_datap->cpu_processor, target_cpu);
        }

        *processor_out = this_cpu_datap->cpu_processor;
        *ipi_handler = NULL;

        return KERN_SUCCESS;

failed:
        cpu_processor_free(this_cpu_datap->cpu_processor);
        pmap_cpu_free(this_cpu_datap->cpu_pmap);
        console_cpu_free(this_cpu_datap->cpu_console_buf);
        return KERN_FAILURE;
}

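/*
 * Fill in an ml_cpu_info_t for the current cpu.  vector_unit encodes the
 * best vector extension the OS supports: 4 = SSE2, 3 = SSE, 2 = MMX,
 * 0 = none (SSE/SSE2 are reported only when the CR4_XMM bit shows the OS
 * has enabled them).  Cache levels that are not present are reported with
 * a *_settings value of 0 and a size of 0xFFFFFFFF.
 */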
void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
        boolean_t os_supports_sse;
        i386_cpu_info_t *cpuid_infop;

        if (cpu_infop == NULL)
                return;

        /*
         * Are we supporting XMM/SSE/SSE2?
         * As distinct from whether the cpu has these capabilities.
         */
        os_supports_sse = get_cr4() & CR4_XMM;
        if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
                cpu_infop->vector_unit = 4;
        else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
                cpu_infop->vector_unit = 3;
        else if (cpuid_features() & CPUID_FEATURE_MMX)
                cpu_infop->vector_unit = 2;
        else
                cpu_infop->vector_unit = 0;

        cpuid_infop = cpuid_info();

        cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

        cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
        cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

        if (cpuid_infop->cache_size[L2U] > 0) {
                cpu_infop->l2_settings = 1;
                cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
        } else {
                cpu_infop->l2_settings = 0;
                cpu_infop->l2_cache_size = 0xFFFFFFFF;
        }

        if (cpuid_infop->cache_size[L3U] > 0) {
                cpu_infop->l3_settings = 1;
                cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
        } else {
                cpu_infop->l3_settings = 0;
                cpu_infop->l3_cache_size = 0xFFFFFFFF;
        }
}

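/*
 * ml_init_max_cpus() is called during startup with the count of enabled
 * processors that ACPI found; ml_get_max_cpus() blocks (assert_wait()/
 * thread_block() on &max_cpus_initialized) until that value has been
 * published, then returns machine_info.max_cpus.
 */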
void
ml_init_max_cpus(unsigned long max_cpus)
{
        boolean_t current_state;

        current_state = ml_set_interrupts_enabled(FALSE);
        if (max_cpus_initialized != MAX_CPUS_SET) {
                if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
                        /*
                         * Note: max_cpus is the number of enabled processors
                         * that ACPI found; max_ncpus is the maximum number
                         * that the kernel supports or that the "cpus="
                         * boot-arg has set.  Here we take the minimum.
                         */
                        machine_info.max_cpus = MIN(max_cpus, max_ncpus);
                }
                if (max_cpus_initialized == MAX_CPUS_WAIT)
                        wakeup((event_t)&max_cpus_initialized);
                max_cpus_initialized = MAX_CPUS_SET;
        }
        (void) ml_set_interrupts_enabled(current_state);
}

int
ml_get_max_cpus(void)
{
        boolean_t current_state;

        current_state = ml_set_interrupts_enabled(FALSE);
        if (max_cpus_initialized != MAX_CPUS_SET) {
                max_cpus_initialized = MAX_CPUS_WAIT;
                assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
                (void)thread_block(THREAD_CONTINUE_NULL);
        }
        (void) ml_set_interrupts_enabled(current_state);
        return(machine_info.max_cpus);
}

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
        return;
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
        return;
}

/* Stubs for pc tracing mechanism */

int *pc_trace_buf;
int pc_trace_cnt = 0;

int
set_be_bit(void)
{
        return(0);
}

int
clr_be_bit(void)
{
        return(0);
}

int
be_tracing(void)
{
        return(0);
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
        return(current_thread_fast());
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
        return(current_thread_fast());
}