/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <mach/processor.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/thread_call.h>
#include <prng/random.h>
#include <i386/machine_cpu.h>
#include <i386/lapic.h>
#include <i386/bit_routines.h>
#include <i386/mp_events.h>
#include <i386/pmCPU.h>
#include <i386/trap.h>
#include <i386/tsc.h>
#include <i386/cpu_threads.h>
#include <i386/proc_reg.h>
#include <mach/vm_param.h>
#include <i386/pmap.h>
#include <i386/pmap_internal.h>
#include <i386/misc_protos.h>
#include <kern/timer_queue.h>
#if KPC
#include <kern/kpc.h>
#endif
#include <architecture/i386/pio.h>

#if DEBUG
#define DBG(x...)	kprintf("DBG: " x)
#else
#define DBG(x...)
#endif

extern void wakeup(void *);

static int max_cpus_initialized = 0;

unsigned int LockTimeOut;
unsigned int TLBTimeOut;
unsigned int LockTimeOutTSC;
unsigned int MutexSpin;
uint64_t LastDebuggerEntryAllowance;
uint64_t delay_spin_threshold;

extern uint64_t panic_restart_timeout;

boolean_t virtualized = FALSE;

decl_simple_lock_data(static, ml_timer_evaluation_slock);
uint32_t ml_timer_eager_evaluations;
uint64_t ml_timer_eager_evaluation_max;
static boolean_t ml_timer_evaluation_in_progress = FALSE;


#define MAX_CPUS_SET	0x1
#define MAX_CPUS_WAIT	0x2

/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return (io_map(phys_addr, size, VM_WIMG_IO));
}
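
/*
 * Illustrative use of ml_io_map() (a sketch, not part of the original
 * file; the physical address below is hypothetical):
 *
 *	vm_offset_t regs = ml_io_map(0xFED00000, PAGE_SIZE);
 *	uint32_t val = *(volatile uint32_t *)regs;
 *
 * The VM_WIMG_IO attribute yields an uncached mapping, which is what
 * memory-mapped device registers require.
 */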

/* boot memory allocation */
vm_offset_t ml_static_malloc(
	__unused vm_size_t size)
{
	return ((vm_offset_t)NULL);
}


void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	*phys_addr = 0;
	*size = 0;
}

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
#if defined(__x86_64__)
	return (vm_offset_t)(((unsigned long) paddr) | VM_MIN_KERNEL_ADDRESS);
#else
	return (vm_offset_t)((paddr) | LINEAR_KERNEL_ADDRESS);
#endif
}
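
/*
 * Note (illustrative): on x86_64 the static translation is a simple OR
 * with VM_MIN_KERNEL_ADDRESS, so a call such as
 *
 *	vm_offset_t va = ml_static_ptovirt((vm_offset_t)0x100000);
 *
 * (a hypothetical physical address) is valid only for memory covered
 * by the static kernel mapping.
 */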

/*
 *	Routine:	ml_static_mfree
 *	Function:	Release wired, boot-time static memory: unmap each
 *			page and return any managed pages to the VM free list.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	addr64_t vaddr_cur;
	ppnum_t ppn;
	uint32_t freed_pages = 0;

	assert(vaddr >= VM_MIN_KERNEL_ADDRESS);
	assert((vaddr & (PAGE_SIZE-1)) == 0);	/* must be page aligned */

	for (vaddr_cur = vaddr;
	     vaddr_cur < round_page_64(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t)NULL) {
			kernel_pmap->stats.resident_count++;
			if (kernel_pmap->stats.resident_count >
			    kernel_pmap->stats.resident_max) {
				kernel_pmap->stats.resident_max =
					kernel_pmap->stats.resident_count;
			}
			pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur + PAGE_SIZE);
			assert(pmap_valid_page(ppn));

			if (IS_MANAGED_PAGE(ppn)) {
				vm_page_create(ppn, (ppn + 1));
				vm_page_wire_count--;
				freed_pages++;
			}
		}
	}
#if DEBUG
	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n",
		freed_pages, (void *)vaddr, (uint64_t)size, ppn);
#endif
}

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return (vm_offset_t)kvtophys(vaddr);
}

/*
 *	Routine:	ml_nofault_copy
 *	Function:	Perform a physical mode copy if the source and
 *			destination have valid translations in the kernel pmap.
 *			If translations are present, they are assumed to be
 *			wired; i.e. no attempt is made to guarantee that the
 *			translations obtained remain valid for the duration
 *			of the copy process.
 */

vm_size_t ml_nofault_copy(
	vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			break;
		if (!(cur_phys_dst = kvtophys(virtdst)))
			break;
		if (!pmap_valid_page(i386_btop(cur_phys_dst)) ||
		    !pmap_valid_page(i386_btop(cur_phys_src)))
			break;
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			count = (uint32_t)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));
		if (count > size)
			count = (uint32_t)size;

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}
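
/*
 * Typical caller pattern (a sketch; the buffers are hypothetical):
 *
 *	vm_size_t copied = ml_nofault_copy(src_va, dst_va, len);
 *	if (copied < len)
 *		... the copy stopped at the first unmapped page ...
 *
 * A short return count indicates a partial copy, not an error code.
 */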

/*
 *	Routine:	ml_validate_nofault
 *	Function:	Validate that this address range has valid translations
 *			in the kernel pmap. If translations are present, they
 *			are assumed to be wired; i.e. no attempt is made to
 *			guarantee that the translations persist after the check.
 *	Returns:	TRUE if the range is mapped and will not cause a fault,
 *			FALSE otherwise.
 */

boolean_t ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size)
{
	addr64_t cur_phys_src;
	uint32_t count;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			return FALSE;
		if (!pmap_valid_page(i386_btop(cur_phys_src)))
			return FALSE;
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > size)
			count = (uint32_t)size;

		virtsrc += count;
		size -= count;
	}

	return TRUE;
}

/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
	(void) ml_set_interrupts_enabled(TRUE);
}


/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
	unsigned long flags;

	__asm__ volatile("pushf; pop %0" : "=r" (flags));
	return (flags & EFL_IF) != 0;
}

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
	unsigned long flags;
	boolean_t istate;

	__asm__ volatile("pushf; pop %0" : "=r" (flags));

	assert(get_interrupt_level() ? (enable == FALSE) : TRUE);

	istate = ((flags & EFL_IF) != 0);

	if (enable) {
		__asm__ volatile("sti;nop");

		if ((get_preemption_level() == 0) && (*ast_pending() & AST_URGENT))
			__asm__ volatile ("int %0" :: "N" (T_PREEMPT));
	}
	else {
		if (istate)
			__asm__ volatile("cli");
	}

	return istate;
}
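
/*
 * Canonical save/restore pattern for a short critical section
 * (a sketch of typical usage):
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	... code that must not be interrupted ...
 *	(void) ml_set_interrupts_enabled(istate);
 *
 * The previous state is returned so nested disables restore correctly.
 */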

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
	return get_interrupt_level() != 0;
}

void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
	*icp = (get_interrupt_level() != 0);
	/* These will be technically inaccurate for interrupts that occur
	 * successively within a single "idle exit" event, but shouldn't
	 * matter statistically.
	 */
	*pidlep = (current_cpu_datap()->lcpu.package->num_idle == topoParms.nLThreadsPerPackage);
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
	panic("ml_cause_interrupt not defined yet on Intel");
}

/*
 * TODO: transition users of this to kernel_thread_start_priority;
 * ml_thread_policy is an unsupported KPI.
 */
void ml_thread_policy(
	thread_t thread,
	__unused unsigned policy_id,
	unsigned policy_info)
{
	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		thread_precedence_policy_data_t info;
		__assert_only kern_return_t kret;

		info.importance = 1;

		kret = thread_policy_set_internal(thread, THREAD_PRECEDENCE_POLICY,
						  (thread_policy_t)&info,
						  THREAD_PRECEDENCE_POLICY_COUNT);
		assert(kret == KERN_SUCCESS);
	}
}
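
/*
 * Historical usage (an illustrative sketch; per the TODO above, new code
 * should use kernel_thread_start_priority() instead):
 *
 *	ml_thread_policy(current_thread(), MACHINE_GROUP,
 *			 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_WORKLOOP));
 *
 * Only the MACHINE_NETWORK_WORKLOOP bit has any effect here; it sets the
 * thread's precedence importance to 1.
 */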

/* Install an interrupt handler */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	PE_install_interrupt_handler(nub, source, target,
		(IOInterruptHandler) handler, refCon);

	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}


void
machine_signal_idle(
	processor_t processor)
{
	cpu_interrupt(processor->cpu_id);
}

static kern_return_t
register_cpu(
	uint32_t	lapic_id,
	processor_t	*processor_out,
	boolean_t	boot_cpu )
{
	int		target_cpu;
	cpu_data_t	*this_cpu_datap;

	this_cpu_datap = cpu_data_alloc(boot_cpu);
	if (this_cpu_datap == NULL) {
		return KERN_FAILURE;
	}
	target_cpu = this_cpu_datap->cpu_number;
	assert((boot_cpu && (target_cpu == 0)) ||
	       (!boot_cpu && (target_cpu != 0)));

	lapic_cpu_map(lapic_id, target_cpu);

	/* The cpu_id is not known at registration phase; just record
	 * the lapic_id for now.
	 */
	this_cpu_datap->cpu_phys_number = lapic_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_console_buf == NULL)
		goto failed;

	this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_chud == NULL)
		goto failed;

#if KPC
	this_cpu_datap->cpu_kpc_buf[0] = kpc_counterbuf_alloc();
	if (this_cpu_datap->cpu_kpc_buf[0] == NULL)
		goto failed;
	this_cpu_datap->cpu_kpc_buf[1] = kpc_counterbuf_alloc();
	if (this_cpu_datap->cpu_kpc_buf[1] == NULL)
		goto failed;

	this_cpu_datap->cpu_kpc_shadow = kpc_counterbuf_alloc();
	if (this_cpu_datap->cpu_kpc_shadow == NULL)
		goto failed;

	this_cpu_datap->cpu_kpc_reload = kpc_counterbuf_alloc();
	if (this_cpu_datap->cpu_kpc_reload == NULL)
		goto failed;
#endif

	if (!boot_cpu) {
		cpu_thread_alloc(this_cpu_datap->cpu_number);
		if (this_cpu_datap->lcpu.core == NULL)
			goto failed;

#if NCOPY_WINDOWS > 0
		this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
		if (this_cpu_datap->cpu_pmap == NULL)
			goto failed;
#endif

		this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
		if (this_cpu_datap->cpu_processor == NULL)
			goto failed;
		/*
		 * processor_init() deferred to topology start
		 * because "slot numbers" a.k.a. logical processor numbers
		 * are not yet finalized.
		 */
	}

	*processor_out = this_cpu_datap->cpu_processor;

	return KERN_SUCCESS;

failed:
	cpu_processor_free(this_cpu_datap->cpu_processor);
#if NCOPY_WINDOWS > 0
	pmap_cpu_free(this_cpu_datap->cpu_pmap);
#endif
	chudxnu_cpu_free(this_cpu_datap->cpu_chud);
	console_cpu_free(this_cpu_datap->cpu_console_buf);
#if KPC
	kpc_counterbuf_free(this_cpu_datap->cpu_kpc_buf[0]);
	kpc_counterbuf_free(this_cpu_datap->cpu_kpc_buf[1]);
	kpc_counterbuf_free(this_cpu_datap->cpu_kpc_shadow);
	kpc_counterbuf_free(this_cpu_datap->cpu_kpc_reload);
#endif

	return KERN_FAILURE;
}

kern_return_t
ml_processor_register(
	cpu_id_t	cpu_id,
	uint32_t	lapic_id,
	processor_t	*processor_out,
	boolean_t	boot_cpu,
	boolean_t	start )
{
	static boolean_t done_topo_sort = FALSE;
	static uint32_t num_registered = 0;

	/* Register all CPUs first, and track max */
	if (start == FALSE) {
		num_registered++;

		DBG( "registering CPU lapic id %d\n", lapic_id );

		return register_cpu( lapic_id, processor_out, boot_cpu );
	}

	/* Sort by topology before we start anything */
	if (!done_topo_sort) {
		DBG( "about to start CPUs. %d registered\n", num_registered );

		cpu_topology_sort( num_registered );
		done_topo_sort = TRUE;
	}

	/* Assign the cpu ID */
	uint32_t cpunum = -1;
	cpu_data_t *this_cpu_datap = NULL;

	/* find cpu num and pointer */
	cpunum = ml_get_cpuid( lapic_id );

	if (cpunum == 0xFFFFFFFF)	/* never heard of it? */
		panic( "trying to start invalid/unregistered CPU %d\n", lapic_id );

	this_cpu_datap = cpu_datap(cpunum);

	/* fix the CPU id */
	this_cpu_datap->cpu_id = cpu_id;

	/* allocate and initialize other per-cpu structures */
	if (!boot_cpu) {
		mp_cpus_call_cpu_init(cpunum);
		prng_cpu_init(cpunum);
	}

	/* output arg */
	*processor_out = this_cpu_datap->cpu_processor;

	/* OK, try and start this CPU */
	return cpu_topology_start_cpu( cpunum );
}

void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
	boolean_t	os_supports_sse;
	i386_cpu_info_t *cpuid_infop;

	if (cpu_infop == NULL)
		return;

	/*
	 * Are we supporting MMX/SSE/SSE2/SSE3?
	 * As distinct from whether the cpu has these capabilities.
	 */
	os_supports_sse = !!(get_cr4() & CR4_OSXMM);

	if (ml_fpu_avx_enabled())
		cpu_infop->vector_unit = 9;
	else if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse)
		cpu_infop->vector_unit = 8;
	else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse)
		cpu_infop->vector_unit = 7;
	else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse)
		cpu_infop->vector_unit = 6;
	else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse)
		cpu_infop->vector_unit = 5;
	else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
		cpu_infop->vector_unit = 4;
	else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
		cpu_infop->vector_unit = 3;
	else if (cpuid_features() & CPUID_FEATURE_MMX)
		cpu_infop->vector_unit = 2;
	else
		cpu_infop->vector_unit = 0;

	cpuid_infop = cpuid_info();

	cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

	cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
	cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

	if (cpuid_infop->cache_size[L2U] > 0) {
		cpu_infop->l2_settings = 1;
		cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
	} else {
		cpu_infop->l2_settings = 0;
		cpu_infop->l2_cache_size = 0xFFFFFFFF;
	}

	if (cpuid_infop->cache_size[L3U] > 0) {
		cpu_infop->l3_settings = 1;
		cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
	} else {
		cpu_infop->l3_settings = 0;
		cpu_infop->l3_cache_size = 0xFFFFFFFF;
	}
}
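
/*
 * Illustrative caller (a sketch):
 *
 *	ml_cpu_info_t info;
 *	ml_cpu_get_info(&info);
 *	if (info.vector_unit >= 5)
 *		... SSE3 or later is usable by the OS ...
 *
 * Per the ladder above: 5=SSE3, 6=SSSE3, 7=SSE4.1, 8=SSE4.2, 9=AVX.
 */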

void
ml_init_max_cpus(unsigned long max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
			/*
			 * Note: max_cpus is the number of enabled processors
			 * that ACPI found; max_ncpus is the maximum number
			 * that the kernel supports or that the "cpus="
			 * boot-arg has set. Here we take the minimum.
			 */
			machine_info.max_cpus = (integer_t)MIN(max_cpus, max_ncpus);
		}
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}

int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void)thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return (machine_info.max_cpus);
}
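
/*
 * Note: ml_get_max_cpus() blocks the calling thread (THREAD_UNINT wait
 * above) until ml_init_max_cpus() has published the count, so callers
 * must be in a context where sleeping is permitted.
 */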

/*
 *	Routine:	ml_init_lock_timeout
 *	Function:
 */
void
ml_init_lock_timeout(void)
{
	uint64_t	abstime;
	uint32_t	mtxspin;
#if DEVELOPMENT || DEBUG
	uint64_t	default_timeout_ns = NSEC_PER_SEC>>2;
#else
	uint64_t	default_timeout_ns = NSEC_PER_SEC>>1;
#endif
	uint32_t	slto;
	uint32_t	prt;

	if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto)))
		default_timeout_ns = slto * NSEC_PER_USEC;

	/* LockTimeOut is in absolute time, LockTimeOutTSC is in TSC ticks */
	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
	LockTimeOut = (uint32_t) abstime;
	LockTimeOutTSC = (uint32_t) tmrCvt(abstime, tscFCvtn2t);

	/*
	 * TLBTimeOut dictates the TLB flush timeout period. It defaults to
	 * LockTimeOut but can be overridden separately. In particular, a
	 * zero value inhibits the timeout-panic and cuts a trace event instead
	 * - see pmap_flush_tlbs().
	 */
	if (PE_parse_boot_argn("tlbto_us", &slto, sizeof (slto))) {
		default_timeout_ns = slto * NSEC_PER_USEC;
		nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
		TLBTimeOut = (uint32_t) abstime;
	} else {
		TLBTimeOut = LockTimeOut;
	}

	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;

	nanoseconds_to_absolutetime(4ULL * NSEC_PER_SEC, &LastDebuggerEntryAllowance);
	if (PE_parse_boot_argn("panic_restart_timeout", &prt, sizeof (prt)))
		nanoseconds_to_absolutetime(prt * NSEC_PER_SEC, &panic_restart_timeout);
	virtualized = ((cpuid_features() & CPUID_FEATURE_VMM) != 0);
	interrupt_latency_tracker_setup();
	simple_lock_init(&ml_timer_evaluation_slock, 0);
}
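
/*
 * Boot-arg examples (illustrative): "slto_us=500000" sets a 500ms
 * spinlock timeout; "tlbto_us=0" suppresses the TLB-flush timeout panic
 * in favor of a trace event, per the comment above.
 */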

/*
 * Threshold above which we should attempt to block
 * instead of spinning for clock_delay_until().
 */

void
ml_init_delay_spin_threshold(int threshold_us)
{
	nanoseconds_to_absolutetime(threshold_us * NSEC_PER_USEC, &delay_spin_threshold);
}

boolean_t
ml_delay_should_spin(uint64_t interval)
{
	return (interval < delay_spin_threshold) ? TRUE : FALSE;
}

/*
 * This is called from the machine-independent layer
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
	return;
}

/*
 * This is called from the machine-independent layer
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	i386_deactivate_cpu();

	return;
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
	return (current_thread_fast());
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
	return (current_thread_fast());
}


boolean_t ml_is64bit(void) {

	return (cpu_mode_is64bit());
}


boolean_t ml_thread_is64bit(thread_t thread) {

	return (thread_is_64bit(thread));
}


boolean_t ml_state_is64bit(void *saved_state) {

	return is_saved_state64(saved_state);
}

void ml_cpu_set_ldt(int selector)
{
	/*
	 * Avoid loading the LDT
	 * if we're setting the KERNEL LDT and it's already set.
	 */
	if (selector == KERNEL_LDT &&
	    current_cpu_datap()->cpu_ldt == KERNEL_LDT)
		return;

	lldt(selector);
	current_cpu_datap()->cpu_ldt = selector;
}

void ml_fp_setvalid(boolean_t value)
{
	fp_setvalid(value);
}

uint64_t ml_cpu_int_event_time(void)
{
	return current_cpu_datap()->cpu_int_event_time;
}

vm_offset_t ml_stack_remaining(void)
{
	uintptr_t local = (uintptr_t) &local;

	if (ml_at_interrupt_context() != 0) {
		return (local - (current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE));
	} else {
		return (local - current_thread()->kernel_stack);
	}
}
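
/*
 * ml_stack_remaining() approximates the current stack pointer by taking
 * the address of a local variable, then subtracts the base of whichever
 * stack is active (the interrupt stack or the thread's kernel stack).
 */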

void
kernel_preempt_check(void)
{
	boolean_t	intr;
	unsigned long	flags;

	assert(get_preemption_level() == 0);

	__asm__ volatile("pushf; pop %0" : "=r" (flags));

	intr = ((flags & EFL_IF) != 0);

	if ((*ast_pending() & AST_URGENT) && intr == TRUE) {
		/*
		 * can handle interrupts and preemptions
		 * at this point
		 */

		/*
		 * now cause the PRE-EMPTION trap
		 */
		__asm__ volatile ("int %0" :: "N" (T_PREEMPT));
	}
}

boolean_t machine_timeout_suspended(void) {
	return (virtualized || pmap_tlb_flush_timeout || spinlock_timed_out ||
		panic_active() || mp_recent_debugger_activity() || ml_recent_wake());
}

/* Eagerly evaluate all pending timer and thread callouts
 */
void ml_timer_evaluate(void) {
	KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_START, 0, 0, 0, 0, 0);

	uint64_t te_end, te_start = mach_absolute_time();
	simple_lock(&ml_timer_evaluation_slock);
	ml_timer_evaluation_in_progress = TRUE;
	thread_call_delayed_timer_rescan_all();
	mp_cpus_call(CPUMASK_ALL, ASYNC, timer_queue_expire_rescan, NULL);
	ml_timer_evaluation_in_progress = FALSE;
	ml_timer_eager_evaluations++;
	te_end = mach_absolute_time();
	ml_timer_eager_evaluation_max = MAX(ml_timer_eager_evaluation_max, (te_end - te_start));
	simple_unlock(&ml_timer_evaluation_slock);

	KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_END, 0, 0, 0, 0, 0);
}

boolean_t
ml_timer_forced_evaluation(void) {
	return ml_timer_evaluation_in_progress;
}

/* 32-bit right-rotate n bits */
static inline uint32_t ror32(uint32_t val, const unsigned int n)
{
	__asm__ volatile("rorl %%cl,%0" : "=r" (val) : "0" (val), "c" (n));
	return val;
}

void
ml_entropy_collect(void)
{
	uint32_t	tsc_lo, tsc_hi;
	uint32_t	*ep;

	assert(cpu_number() == master_cpu);

	/* update buffer pointer cyclically */
	if (EntropyData.index_ptr - EntropyData.buffer == ENTROPY_BUFFER_SIZE)
		ep = EntropyData.index_ptr = EntropyData.buffer;
	else
		ep = EntropyData.index_ptr++;

	rdtsc_nofence(tsc_lo, tsc_hi);
	*ep = ror32(*ep, 9) ^ tsc_lo;
}
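
/*
 * The rotate above could equivalently be written in portable C
 * (a sketch; the kernel uses the rorl inline asm, and the rotate count
 * here is fixed at 9, so 0 < n < 32 always holds):
 *
 *	static inline uint32_t ror32_c(uint32_t val, unsigned int n)
 *	{
 *		return (val >> n) | (val << (32 - n));
 *	}
 *
 * Each call thus folds the low TSC word into the cyclic entropy buffer
 * while preserving previously collected bits.
 */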

void
ml_gpu_stat_update(uint64_t gpu_ns_delta) {
	current_thread()->machine.thread_gpu_ns += gpu_ns_delta;
}

uint64_t
ml_gpu_stat(thread_t t) {
	return t->machine.thread_gpu_ns;
}