/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 #include <i386/machine_routines.h>
29 #include <i386/io_map_entries.h>
30 #include <i386/cpuid.h>
32 #include <kern/processor.h>
33 #include <kern/machine.h>
34 #include <kern/cpu_data.h>
35 #include <kern/cpu_number.h>
36 #include <kern/thread.h>
37 #include <i386/cpu_data.h>
38 #include <i386/machine_cpu.h>
40 #include <i386/mp_events.h>
41 #include <i386/cpu_threads.h>
42 #include <i386/pmap.h>
43 #include <i386/misc_protos.h>
44 #include <mach/vm_param.h>
46 #define MIN(a,b) ((a)<(b)? (a) : (b))
48 extern void initialize_screen(Boot_Video
*, unsigned int);
49 extern void wakeup(void *);
51 static int max_cpus_initialized
= 0;
53 #define MAX_CPUS_SET 0x1
54 #define MAX_CPUS_WAIT 0x2
56 /* IO memory map services */
58 /* Map memory map IO space */
59 vm_offset_t
ml_io_map(
60 vm_offset_t phys_addr
,
63 return(io_map(phys_addr
,size
));
66 /* boot memory allocation */
67 vm_offset_t
ml_static_malloc(
68 __unused vm_size_t size
)
70 return((vm_offset_t
)NULL
);
77 return (vm_offset_t
)((unsigned) paddr
| LINEAR_KERNEL_ADDRESS
);
82 * Routine: ml_static_mfree
90 vm_offset_t vaddr_cur
;
93 if (vaddr
< VM_MIN_KERNEL_ADDRESS
) return;
95 assert((vaddr
& (PAGE_SIZE
-1)) == 0); /* must be page aligned */
97 for (vaddr_cur
= vaddr
;
98 vaddr_cur
< round_page_32(vaddr
+size
);
99 vaddr_cur
+= PAGE_SIZE
) {
100 ppn
= pmap_find_phys(kernel_pmap
, (addr64_t
)vaddr_cur
);
101 if (ppn
!= (vm_offset_t
)NULL
) {
102 pmap_remove(kernel_pmap
, (addr64_t
)vaddr_cur
, (addr64_t
)(vaddr_cur
+PAGE_SIZE
));
103 vm_page_create(ppn
,(ppn
+1));
104 vm_page_wire_count
--;
109 /* virtual to physical on wired pages */
110 vm_offset_t
ml_vtophys(
113 return kvtophys(vaddr
);
116 /* Interrupt handling */
118 /* Initialize Interrupts */
119 void ml_init_interrupt(void)
121 (void) ml_set_interrupts_enabled(TRUE
);
124 /* Get Interrupts Enabled */
125 boolean_t
ml_get_interrupts_enabled(void)
129 __asm__
volatile("pushf; popl %0" : "=r" (flags
));
130 return (flags
& EFL_IF
) != 0;
133 /* Set Interrupts Enabled */
134 boolean_t
ml_set_interrupts_enabled(boolean_t enable
)
138 __asm__
volatile("pushf; popl %0" : "=r" (flags
));
141 __asm__
volatile("sti");
143 __asm__
volatile("cli");
145 return (flags
& EFL_IF
) != 0;
148 /* Check if running at interrupt context */
149 boolean_t
ml_at_interrupt_context(void)
151 return get_interrupt_level() != 0;
/* Generate a fake interrupt: not implemented on this architecture. */
void
ml_cause_interrupt(void)
{
	panic("ml_cause_interrupt not defined yet on Intel");
}
160 void ml_thread_policy(
163 unsigned policy_info
)
165 if (policy_id
== MACHINE_GROUP
)
166 thread_bind(thread
, master_processor
);
168 if (policy_info
& MACHINE_NETWORK_WORKLOOP
) {
169 spl_t s
= splsched();
173 set_priority(thread
, thread
->priority
+ 1);
175 thread_unlock(thread
);
180 /* Initialize Interrupts */
181 void ml_install_interrupt_handler(
185 IOInterruptHandler handler
,
188 boolean_t current_state
;
190 current_state
= ml_get_interrupts_enabled();
192 PE_install_interrupt_handler(nub
, source
, target
,
193 (IOInterruptHandler
) handler
, refCon
);
195 (void) ml_set_interrupts_enabled(current_state
);
197 initialize_screen(0, kPEAcquireScreen
);
/*
 * Default idle body: enable interrupts and halt until the next one
 * arrives.
 * NOTE(review): the enclosing function header is not visible in this
 * hunk; reconstructed as static cpu_idle(void) - confirm.
 */
static void
cpu_idle(void)
{
	__asm__ volatile("sti; hlt": : :"memory");
}

/* Indirection so the idle body can be swapped at run time. */
void (*cpu_idle_handler)(void) = cpu_idle;
210 cpu_core_t
*my_core
= cpu_core();
214 * We halt this cpu thread
215 * unless kernel param idlehalt is false and no other thread
216 * in the same core is active - if so, don't halt so that this
217 * core doesn't go into a low-power mode.
219 others_active
= !atomic_decl_and_test(
220 (long *) &my_core
->active_threads
, 1);
221 if (idlehalt
|| others_active
) {
222 DBGLOG(cpu_handle
, cpu_number(), MP_IDLE
);
224 DBGLOG(cpu_handle
, cpu_number(), MP_UNIDLE
);
226 __asm__
volatile("sti");
228 atomic_incl((long *) &my_core
->active_threads
, 1);
233 processor_t processor
)
235 cpu_interrupt(PROCESSOR_DATA(processor
, slot_num
));
239 ml_processor_register(
242 processor_t
*processor_out
,
243 ipi_handler_t
*ipi_handler
,
247 cpu_data_t
*this_cpu_datap
;
249 this_cpu_datap
= cpu_data_alloc(boot_cpu
);
250 if (this_cpu_datap
== NULL
) {
253 target_cpu
= this_cpu_datap
->cpu_number
;
254 assert((boot_cpu
&& (target_cpu
== 0)) ||
255 (!boot_cpu
&& (target_cpu
!= 0)));
257 lapic_cpu_map(lapic_id
, target_cpu
);
259 this_cpu_datap
->cpu_id
= cpu_id
;
260 this_cpu_datap
->cpu_phys_number
= lapic_id
;
262 this_cpu_datap
->cpu_console_buf
= console_cpu_alloc(boot_cpu
);
263 if (this_cpu_datap
->cpu_console_buf
== NULL
)
267 this_cpu_datap
->cpu_pmap
= pmap_cpu_alloc(boot_cpu
);
268 if (this_cpu_datap
->cpu_pmap
== NULL
)
271 this_cpu_datap
->cpu_processor
= cpu_processor_alloc(boot_cpu
);
272 if (this_cpu_datap
->cpu_processor
== NULL
)
274 processor_init(this_cpu_datap
->cpu_processor
, target_cpu
);
277 *processor_out
= this_cpu_datap
->cpu_processor
;
283 cpu_processor_free(this_cpu_datap
->cpu_processor
);
284 pmap_cpu_free(this_cpu_datap
->cpu_pmap
);
285 console_cpu_free(this_cpu_datap
->cpu_console_buf
);
290 ml_cpu_get_info(ml_cpu_info_t
*cpu_infop
)
292 boolean_t os_supports_sse
;
293 i386_cpu_info_t
*cpuid_infop
;
295 if (cpu_infop
== NULL
)
299 * Are we supporting XMM/SSE/SSE2?
300 * As distinct from whether the cpu has these capabilities.
302 os_supports_sse
= get_cr4() & CR4_XMM
;
303 if ((cpuid_features() & CPUID_FEATURE_SSE2
) && os_supports_sse
)
304 cpu_infop
->vector_unit
= 4;
305 else if ((cpuid_features() & CPUID_FEATURE_SSE
) && os_supports_sse
)
306 cpu_infop
->vector_unit
= 3;
307 else if (cpuid_features() & CPUID_FEATURE_MMX
)
308 cpu_infop
->vector_unit
= 2;
310 cpu_infop
->vector_unit
= 0;
312 cpuid_infop
= cpuid_info();
314 cpu_infop
->cache_line_size
= cpuid_infop
->cache_linesize
;
316 cpu_infop
->l1_icache_size
= cpuid_infop
->cache_size
[L1I
];
317 cpu_infop
->l1_dcache_size
= cpuid_infop
->cache_size
[L1D
];
319 if (cpuid_infop
->cache_size
[L2U
] > 0) {
320 cpu_infop
->l2_settings
= 1;
321 cpu_infop
->l2_cache_size
= cpuid_infop
->cache_size
[L2U
];
323 cpu_infop
->l2_settings
= 0;
324 cpu_infop
->l2_cache_size
= 0xFFFFFFFF;
327 if (cpuid_infop
->cache_size
[L3U
] > 0) {
328 cpu_infop
->l2_settings
= 1;
329 cpu_infop
->l2_cache_size
= cpuid_infop
->cache_size
[L3U
];
331 cpu_infop
->l3_settings
= 0;
332 cpu_infop
->l3_cache_size
= 0xFFFFFFFF;
337 ml_init_max_cpus(unsigned long max_cpus
)
339 boolean_t current_state
;
341 current_state
= ml_set_interrupts_enabled(FALSE
);
342 if (max_cpus_initialized
!= MAX_CPUS_SET
) {
343 if (max_cpus
> 0 && max_cpus
<= MAX_CPUS
) {
345 * Note: max_cpus is the number of enable processors
346 * that ACPI found; max_ncpus is the maximum number
347 * that the kernel supports or that the "cpus="
348 * boot-arg has set. Here we take int minimum.
350 machine_info
.max_cpus
= MIN(max_cpus
, max_ncpus
);
352 if (max_cpus_initialized
== MAX_CPUS_WAIT
)
353 wakeup((event_t
)&max_cpus_initialized
);
354 max_cpus_initialized
= MAX_CPUS_SET
;
356 (void) ml_set_interrupts_enabled(current_state
);
360 ml_get_max_cpus(void)
362 boolean_t current_state
;
364 current_state
= ml_set_interrupts_enabled(FALSE
);
365 if (max_cpus_initialized
!= MAX_CPUS_SET
) {
366 max_cpus_initialized
= MAX_CPUS_WAIT
;
367 assert_wait((event_t
)&max_cpus_initialized
, THREAD_UNINT
);
368 (void)thread_block(THREAD_CONTINUE_NULL
);
370 (void) ml_set_interrupts_enabled(current_state
);
371 return(machine_info
.max_cpus
);
/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */

/* Stubs for pc tracing mechanism */

int pc_trace_cnt = 0;
418 * The following are required for parts of the kernel
419 * that cannot resolve these functions as inlines:
421 extern thread_t
current_act(void);
425 return(current_thread_fast());
428 #undef current_thread
429 extern thread_t
current_thread(void);
433 return(current_thread_fast());