/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <i386/cpu_data.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/mp_events.h>
#include <i386/cpu_threads.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <mach/vm_param.h>

#define MIN(a,b)        ((a)<(b)? (a) : (b))

extern void initialize_screen(Boot_Video *, unsigned int);
extern void wakeup(void *);

static int max_cpus_initialized = 0;

#define MAX_CPUS_SET    0x1
#define MAX_CPUS_WAIT   0x2

/* IO memory map services */

/* Map memory-mapped IO space */
vm_offset_t ml_io_map(
        vm_offset_t phys_addr,
        vm_size_t size)
{
        return (io_map(phys_addr, size));
}
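
/*
 * Usage sketch (illustrative only; neither the address nor the caller
 * appears in this file): map a page of device registers, for example at
 * the conventional IO APIC physical base, into kernel virtual space:
 *
 *      vm_offset_t regs = ml_io_map((vm_offset_t)0xFEC00000, PAGE_SIZE);
 */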

/* boot memory allocation */
vm_offset_t ml_static_malloc(
        __unused vm_size_t size)
{
        return ((vm_offset_t)NULL);
}

vm_offset_t
ml_static_ptovirt(
        vm_offset_t paddr)
{
        return (vm_offset_t)((unsigned) paddr | LINEAR_KERNEL_ADDRESS);
}

/*
 * Routine:     ml_static_mfree
 * Function:    Return statically (boot-time) allocated, wired memory
 *              to the VM system, unmapping and freeing it page by page.
 */
void
ml_static_mfree(
        vm_offset_t vaddr,
        vm_size_t size)
{
        vm_offset_t vaddr_cur;
        ppnum_t ppn;

        if (vaddr < VM_MIN_KERNEL_ADDRESS)
                return;

        assert((vaddr & (PAGE_SIZE-1)) == 0);   /* must be page aligned */

        for (vaddr_cur = vaddr;
             vaddr_cur < round_page_32(vaddr + size);
             vaddr_cur += PAGE_SIZE) {
                ppn = pmap_find_phys(kernel_pmap, (addr64_t)vaddr_cur);
                if (ppn != (ppnum_t)NULL) {
                        pmap_remove(kernel_pmap, (addr64_t)vaddr_cur,
                                    (addr64_t)(vaddr_cur + PAGE_SIZE));
                        vm_page_create(ppn, (ppn + 1));
                        vm_page_wire_count--;
                }
        }
}
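
/*
 * Usage sketch (hypothetical; "bootstrap_table" and its size are
 * illustrative names, not symbols from this kernel): once a boot-time
 * table is no longer needed, its wired pages can be handed back:
 *
 *      ml_static_mfree((vm_offset_t) bootstrap_table,
 *                      round_page_32(bootstrap_table_size));
 */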

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
        vm_offset_t vaddr)
{
        return kvtophys(vaddr);
}

/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
        (void) ml_set_interrupts_enabled(TRUE);
}

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
        unsigned long flags;

        __asm__ volatile("pushf; popl %0" : "=r" (flags));
        return (flags & EFL_IF) != 0;
}

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
        unsigned long flags;

        /* Capture EFLAGS first so the previous IF state can be returned */
        __asm__ volatile("pushf; popl %0" : "=r" (flags));

        if (enable)
                __asm__ volatile("sti");
        else
                __asm__ volatile("cli");

        return (flags & EFL_IF) != 0;
}
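
/*
 * Usage sketch (illustrative; this is the pattern ml_init_max_cpus()
 * and ml_get_max_cpus() below follow): because the previous state is
 * returned, disable/restore pairs nest safely:
 *
 *      boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *      ... critical section that must not be interrupted ...
 *      (void) ml_set_interrupts_enabled(istate);
 */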

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
        return get_interrupt_level() != 0;
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
        panic("ml_cause_interrupt not defined yet on Intel");
}

void ml_thread_policy(
        thread_t thread,
        unsigned policy_id,
        unsigned policy_info)
{
        if (policy_id == MACHINE_GROUP)
                thread_bind(thread, master_processor);

        if (policy_info & MACHINE_NETWORK_WORKLOOP) {
                spl_t s = splsched();

                thread_lock(thread);

                set_priority(thread, thread->priority + 1);

                thread_unlock(thread);
                splx(s);
        }
}

/* Install an interrupt handler */
void ml_install_interrupt_handler(
        void *nub,
        int source,
        void *target,
        IOInterruptHandler handler,
        void *refCon)
{
        boolean_t current_state;

        current_state = ml_get_interrupts_enabled();

        PE_install_interrupt_handler(nub, source, target,
                                     (IOInterruptHandler) handler, refCon);

        (void) ml_set_interrupts_enabled(current_state);

        initialize_screen(0, kPEAcquireScreen);
}

static void
cpu_idle(void)
{
        __asm__ volatile("sti; hlt": : :"memory");
}
void (*cpu_idle_handler)(void) = cpu_idle;

void
machine_idle(void)
{
        cpu_core_t *my_core = cpu_core();
        int others_active;

        /*
         * Halt this cpu thread unless the kernel parameter idlehalt is
         * false and no other thread in the same core is active; in that
         * case, skip the hlt so that this core does not drop into a
         * low-power state.
         */
        others_active = !atomic_decl_and_test(
                                (long *) &my_core->active_threads, 1);
        if (idlehalt || others_active) {
                DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
                cpu_idle_handler();
                DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
        } else {
                __asm__ volatile("sti");
        }
        atomic_incl((long *) &my_core->active_threads, 1);
}
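
/*
 * Decision table for the halt heuristic above (derived from the code;
 * shown here for illustration):
 *
 *      idlehalt    other thread active on core    action
 *        TRUE             yes or no               hlt this thread
 *        FALSE            yes                     hlt (core stays active)
 *        FALSE            no                      sti and return, no hlt
 */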

void
machine_signal_idle(
        processor_t processor)
{
        cpu_interrupt(PROCESSOR_DATA(processor, slot_num));
}

kern_return_t
ml_processor_register(
        cpu_id_t cpu_id,
        uint32_t lapic_id,
        processor_t *processor_out,
        ipi_handler_t *ipi_handler,
        boolean_t boot_cpu)
{
        int target_cpu;
        cpu_data_t *this_cpu_datap;

        this_cpu_datap = cpu_data_alloc(boot_cpu);
        if (this_cpu_datap == NULL) {
                return KERN_FAILURE;
        }
        target_cpu = this_cpu_datap->cpu_number;
        assert((boot_cpu && (target_cpu == 0)) ||
               (!boot_cpu && (target_cpu != 0)));

        lapic_cpu_map(lapic_id, target_cpu);

        this_cpu_datap->cpu_id = cpu_id;
        this_cpu_datap->cpu_phys_number = lapic_id;

        this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
        if (this_cpu_datap->cpu_console_buf == NULL)
                goto failed;

        if (!boot_cpu) {
                this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
                if (this_cpu_datap->cpu_pmap == NULL)
                        goto failed;

                this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
                if (this_cpu_datap->cpu_processor == NULL)
                        goto failed;
                processor_init(this_cpu_datap->cpu_processor, target_cpu);
        }

        *processor_out = this_cpu_datap->cpu_processor;
        *ipi_handler = NULL;

        return KERN_SUCCESS;

failed:
        cpu_processor_free(this_cpu_datap->cpu_processor);
        pmap_cpu_free(this_cpu_datap->cpu_pmap);
        console_cpu_free(this_cpu_datap->cpu_console_buf);
        return KERN_FAILURE;
}
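
/*
 * Caller sketch (hypothetical; registration is actually driven by the
 * platform/ACPI bring-up code, and "acpi_cpu_id"/"apic_id" are
 * illustrative names): a secondary processor might be registered and
 * started roughly as follows:
 *
 *      processor_t     proc;
 *      ipi_handler_t   ipi;
 *
 *      if (ml_processor_register(acpi_cpu_id, apic_id,
 *                                &proc, &ipi, FALSE) == KERN_SUCCESS)
 *              processor_start(proc);
 */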

void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
        boolean_t os_supports_sse;
        i386_cpu_info_t *cpuid_infop;

        if (cpu_infop == NULL)
                return;

        /*
         * Are we supporting XMM/SSE/SSE2?
         * As distinct from whether the cpu has these capabilities.
         */
        os_supports_sse = get_cr4() & CR4_XMM;
        if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
                cpu_infop->vector_unit = 4;
        else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
                cpu_infop->vector_unit = 3;
        else if (cpuid_features() & CPUID_FEATURE_MMX)
                cpu_infop->vector_unit = 2;
        else
                cpu_infop->vector_unit = 0;

        cpuid_infop = cpuid_info();

        cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

        cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
        cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

        if (cpuid_infop->cache_size[L2U] > 0) {
                cpu_infop->l2_settings = 1;
                cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
        } else {
                cpu_infop->l2_settings = 0;
                cpu_infop->l2_cache_size = 0xFFFFFFFF;
        }

        if (cpuid_infop->cache_size[L3U] > 0) {
                cpu_infop->l3_settings = 1;
                cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
        } else {
                cpu_infop->l3_settings = 0;
                cpu_infop->l3_cache_size = 0xFFFFFFFF;
        }
}
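
/*
 * Caller sketch (hypothetical; the threshold and copy strategy are
 * illustrative, not from this file): a consumer might key its
 * block-copy path off the reported capabilities, e.g.
 *
 *      ml_cpu_info_t info;
 *
 *      ml_cpu_get_info(&info);
 *      if (info.vector_unit >= 3)      (SSE or better, per the code above)
 *              ... choose an XMM-based copy path, strided by
 *                  info.cache_line_size ...
 */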

void
ml_init_max_cpus(unsigned long max_cpus)
{
        boolean_t current_state;

        current_state = ml_set_interrupts_enabled(FALSE);
        if (max_cpus_initialized != MAX_CPUS_SET) {
                if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
                        /*
                         * Note: max_cpus is the number of enabled processors
                         * that ACPI found; max_ncpus is the maximum number
                         * that the kernel supports or that the "cpus="
                         * boot-arg has set. Here we take the minimum.
                         */
                        machine_info.max_cpus = MIN(max_cpus, max_ncpus);
                }
                if (max_cpus_initialized == MAX_CPUS_WAIT)
                        wakeup((event_t)&max_cpus_initialized);
                max_cpus_initialized = MAX_CPUS_SET;
        }
        (void) ml_set_interrupts_enabled(current_state);
}
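
/*
 * Worked example (illustrative): if ACPI enumerates 4 enabled
 * processors but the kernel was booted with "cpus=2" (making
 * max_ncpus 2), machine_info.max_cpus becomes MIN(4, 2) = 2.
 */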

int
ml_get_max_cpus(void)
{
        boolean_t current_state;

        current_state = ml_set_interrupts_enabled(FALSE);
        if (max_cpus_initialized != MAX_CPUS_SET) {
                /* Block until ml_init_max_cpus() publishes the count */
                max_cpus_initialized = MAX_CPUS_WAIT;
                assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
                (void) thread_block(THREAD_CONTINUE_NULL);
        }
        (void) ml_set_interrupts_enabled(current_state);
        return (machine_info.max_cpus);
}

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
        return;
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
        return;
}

/* Stubs for pc tracing mechanism */

int *pc_trace_buf;
int pc_trace_cnt = 0;

int
set_be_bit(void)
{
        return (0);
}

int
clr_be_bit(void)
{
        return (0);
}

int
be_tracing(void)
{
        return (0);
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
        return (current_thread_fast());
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
        return (current_thread_fast());
}