[apple/xnu.git] xnu-3789.70.16 / osfmk / i386 / machine_routines.c
1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <i386/machine_routines.h>
30 #include <i386/io_map_entries.h>
31 #include <i386/cpuid.h>
32 #include <i386/fpu.h>
33 #include <mach/processor.h>
34 #include <kern/processor.h>
35 #include <kern/machine.h>
36
37 #include <kern/cpu_number.h>
38 #include <kern/thread.h>
39 #include <kern/thread_call.h>
40 #include <kern/policy_internal.h>
41
42 #include <prng/random.h>
43 #include <i386/machine_cpu.h>
44 #include <i386/lapic.h>
45 #include <i386/bit_routines.h>
46 #include <i386/mp_events.h>
47 #include <i386/pmCPU.h>
48 #include <i386/trap.h>
49 #include <i386/tsc.h>
50 #include <i386/cpu_threads.h>
51 #include <i386/proc_reg.h>
52 #include <mach/vm_param.h>
53 #include <i386/pmap.h>
54 #include <i386/pmap_internal.h>
55 #include <i386/misc_protos.h>
56 #include <kern/timer_queue.h>
57 #if KPC
58 #include <kern/kpc.h>
59 #endif
60 #include <architecture/i386/pio.h>
61 #include <i386/cpu_data.h>
62 #if DEBUG
63 #define DBG(x...) kprintf("DBG: " x)
64 #else
65 #define DBG(x...)
66 #endif
67
68 extern void wakeup(void *);
69
70 static int max_cpus_initialized = 0;
71
72 uint64_t LockTimeOut;
73 uint64_t TLBTimeOut;
74 uint64_t LockTimeOutTSC;
75 uint32_t LockTimeOutUsec;
76 uint64_t MutexSpin;
77 uint64_t LastDebuggerEntryAllowance;
78 uint64_t delay_spin_threshold;
79
80 extern uint64_t panic_restart_timeout;
81
82 boolean_t virtualized = FALSE;
83
84 decl_simple_lock_data(static, ml_timer_evaluation_slock);
85 uint32_t ml_timer_eager_evaluations;
86 uint64_t ml_timer_eager_evaluation_max;
87 static boolean_t ml_timer_evaluation_in_progress = FALSE;
88
89
90 #define MAX_CPUS_SET 0x1
91 #define MAX_CPUS_WAIT 0x2
92
93 /* IO memory map services */
94
95 /* Map memory-mapped IO space */
96 vm_offset_t ml_io_map(
97 vm_offset_t phys_addr,
98 vm_size_t size)
99 {
100 return(io_map(phys_addr,size,VM_WIMG_IO));
101 }
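/*
 * Usage sketch (hypothetical caller): ml_io_map() wraps io_map() with the
 * VM_WIMG_IO caching attribute, so a driver-style consumer can map a device
 * register block and access it through the returned kernel virtual address.
 * The physical base and register offset below are illustrative only.
 *
 *	vm_offset_t regs = ml_io_map((vm_offset_t)0xFED00000UL, PAGE_SIZE);
 *	if (regs != 0) {
 *		uint32_t status = *(volatile uint32_t *)(regs + 0x10);
 *		(void) status;
 *	}
 */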
102
103 /* boot memory allocation */
104 vm_offset_t ml_static_malloc(
105 __unused vm_size_t size)
106 {
107 return((vm_offset_t)NULL);
108 }
109
110
111 void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
112 {
113 *phys_addr = 0;
114 *size = 0;
115 }
116
117
118 vm_offset_t
119 ml_static_ptovirt(
120 vm_offset_t paddr)
121 {
122 #if defined(__x86_64__)
123 return (vm_offset_t)(((unsigned long) paddr) | VM_MIN_KERNEL_ADDRESS);
124 #else
125 return (vm_offset_t)((paddr) | LINEAR_KERNEL_ADDRESS);
126 #endif
127 }
128
129
130 /*
131 * Routine: ml_static_mfree
132 * Function: Release wired boot memory back to the VM free list
133 */
134 void
135 ml_static_mfree(
136 vm_offset_t vaddr,
137 vm_size_t size)
138 {
139 addr64_t vaddr_cur;
140 ppnum_t ppn;
141 uint32_t freed_pages = 0;
142 assert(vaddr >= VM_MIN_KERNEL_ADDRESS);
143
144 assert((vaddr & (PAGE_SIZE-1)) == 0); /* must be page aligned */
145
146 for (vaddr_cur = vaddr;
147 vaddr_cur < round_page_64(vaddr+size);
148 vaddr_cur += PAGE_SIZE) {
149 ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
150 if (ppn != (vm_offset_t)NULL) {
151 kernel_pmap->stats.resident_count++;
152 if (kernel_pmap->stats.resident_count >
153 kernel_pmap->stats.resident_max) {
154 kernel_pmap->stats.resident_max =
155 kernel_pmap->stats.resident_count;
156 }
157 pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur+PAGE_SIZE);
158 assert(pmap_valid_page(ppn));
159 if (IS_MANAGED_PAGE(ppn)) {
160 vm_page_create(ppn,(ppn+1));
161 freed_pages++;
162 }
163 }
164 }
165 vm_page_lockspin_queues();
166 vm_page_wire_count -= freed_pages;
167 vm_page_wire_count_initial -= freed_pages;
168 vm_page_unlock_queues();
169
170 #if DEBUG
171 kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
172 #endif
173 }
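/*
 * Usage sketch (hypothetical): release a page-aligned region of wired boot
 * memory once it is no longer needed. The address must be page aligned and
 * above VM_MIN_KERNEL_ADDRESS (asserted above); the size is rounded up to a
 * page boundary internally.
 *
 *	extern vm_offset_t bootstrap_scratch;	// illustrative symbol
 *	ml_static_mfree(bootstrap_scratch, 3 * PAGE_SIZE);
 */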
174
175
176 /* virtual to physical on wired pages */
177 vm_offset_t ml_vtophys(
178 vm_offset_t vaddr)
179 {
180 return (vm_offset_t)kvtophys(vaddr);
181 }
182
183 /*
184 * Routine: ml_nofault_copy
185 * Function: Perform a physical mode copy if the source and
186 * destination have valid translations in the kernel pmap.
187 * If translations are present, they are assumed to
188 * be wired; i.e. no attempt is made to guarantee that the
189 * translations obtained remain valid for
190 * the duration of the copy process.
191 */
192
193 vm_size_t ml_nofault_copy(
194 vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
195 {
196 addr64_t cur_phys_dst, cur_phys_src;
197 uint32_t count, nbytes = 0;
198
199 while (size > 0) {
200 if (!(cur_phys_src = kvtophys(virtsrc)))
201 break;
202 if (!(cur_phys_dst = kvtophys(virtdst)))
203 break;
204 if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src)))
205 break;
206 count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
207 if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
208 count = (uint32_t)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));
209 if (count > size)
210 count = (uint32_t)size;
211
212 bcopy_phys(cur_phys_src, cur_phys_dst, count);
213
214 nbytes += count;
215 virtsrc += count;
216 virtdst += count;
217 size -= count;
218 }
219
220 return nbytes;
221 }
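/*
 * Usage sketch (hypothetical): because ml_nofault_copy() stops at the first
 * untranslated source or destination page, callers should compare the
 * returned byte count against the requested size rather than assume the
 * whole copy happened.
 *
 *	vm_size_t copied = ml_nofault_copy(src_va, dst_va, len);
 *	if (copied != len) {
 *		// only the first 'copied' bytes were mapped and transferred
 *	}
 */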
222
223 /*
224 * Routine: ml_validate_nofault
225 * Function: Validate that this address range has valid translations
226 * in the kernel pmap. If translations are present, they are
227 * assumed to be wired; i.e. no attempt is made to guarantee
228 * that the translations persist after the check.
229 * Returns: TRUE if the range is mapped and will not cause a fault,
230 * FALSE otherwise.
231 */
232
233 boolean_t ml_validate_nofault(
234 vm_offset_t virtsrc, vm_size_t size)
235 {
236 addr64_t cur_phys_src;
237 uint32_t count;
238
239 while (size > 0) {
240 if (!(cur_phys_src = kvtophys(virtsrc)))
241 return FALSE;
242 if (!pmap_valid_page(i386_btop(cur_phys_src)))
243 return FALSE;
244 count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
245 if (count > size)
246 count = (uint32_t)size;
247
248 virtsrc += count;
249 size -= count;
250 }
251
252 return TRUE;
253 }
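/*
 * Usage sketch (hypothetical): validate a candidate range before touching it
 * from a context that must not take a page fault, e.g. a debugger-style
 * memory dump.
 *
 *	if (ml_validate_nofault(vaddr, len)) {
 *		bcopy((const void *)vaddr, buffer, len);	// fully mapped, wired
 *	}
 */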
254
255 /* Interrupt handling */
256
257 /* Initialize Interrupts */
258 void ml_init_interrupt(void)
259 {
260 (void) ml_set_interrupts_enabled(TRUE);
261 }
262
263
264 /* Get Interrupts Enabled */
265 boolean_t ml_get_interrupts_enabled(void)
266 {
267 unsigned long flags;
268
269 __asm__ volatile("pushf; pop %0" : "=r" (flags));
270 return (flags & EFL_IF) != 0;
271 }
272
273 /* Set Interrupts Enabled */
274 boolean_t ml_set_interrupts_enabled(boolean_t enable)
275 {
276 unsigned long flags;
277 boolean_t istate;
278
279 __asm__ volatile("pushf; pop %0" : "=r" (flags));
280
281 assert(get_interrupt_level() ? (enable == FALSE) : TRUE);
282
283 istate = ((flags & EFL_IF) != 0);
284
285 if (enable) {
286 __asm__ volatile("sti;nop");
287
288 if ((get_preemption_level() == 0) && (*ast_pending() & AST_URGENT))
289 __asm__ volatile ("int %0" :: "N" (T_PREEMPT));
290 }
291 else {
292 if (istate)
293 __asm__ volatile("cli");
294 }
295
296 return istate;
297 }
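/*
 * Usage sketch: the return value is the previous interrupt state, which
 * enables the save/disable/restore pattern used elsewhere in this file
 * (see ml_install_interrupt_handler() and ml_init_max_cpus()).
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	// ... critical section that must run with interrupts masked ...
 *	(void) ml_set_interrupts_enabled(istate);
 */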
298
299 /* Check if running in interrupt context */
300 boolean_t ml_at_interrupt_context(void)
301 {
302 return get_interrupt_level() != 0;
303 }
304
305 void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
306 *icp = (get_interrupt_level() != 0);
307 /* These will be technically inaccurate for interrupts that occur
308 * successively within a single "idle exit" event, but shouldn't
309 * matter statistically.
310 */
311 *pidlep = (current_cpu_datap()->lcpu.package->num_idle == topoParms.nLThreadsPerPackage);
312 }
313
314 /* Generate a fake interrupt */
315 void ml_cause_interrupt(void)
316 {
317 panic("ml_cause_interrupt not defined yet on Intel");
318 }
319
320 /*
321 * TODO: transition users of this to kernel_thread_start_priority
322 * ml_thread_policy is an unsupported KPI
323 */
324 void ml_thread_policy(
325 thread_t thread,
326 __unused unsigned policy_id,
327 unsigned policy_info)
328 {
329 if (policy_info & MACHINE_NETWORK_WORKLOOP) {
330 thread_precedence_policy_data_t info;
331 __assert_only kern_return_t kret;
332
333 info.importance = 1;
334
335 kret = thread_policy_set_internal(thread, THREAD_PRECEDENCE_POLICY,
336 (thread_policy_t)&info,
337 THREAD_PRECEDENCE_POLICY_COUNT);
338 assert(kret == KERN_SUCCESS);
339 }
340 }
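/*
 * Usage sketch (hypothetical, and per the TODO above this is an unsupported
 * KPI): the only behavior left here is the importance boost applied when
 * MACHINE_NETWORK_WORKLOOP is set; other policy bits are ignored. A caller
 * in the networking stack might look like:
 *
 *	ml_thread_policy(current_thread(), MACHINE_GROUP,
 *			 MACHINE_NETWORK_GROUP | MACHINE_NETWORK_WORKLOOP);
 */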
341
342 /* Initialize Interrupts */
343 void ml_install_interrupt_handler(
344 void *nub,
345 int source,
346 void *target,
347 IOInterruptHandler handler,
348 void *refCon)
349 {
350 boolean_t current_state;
351
352 current_state = ml_get_interrupts_enabled();
353
354 PE_install_interrupt_handler(nub, source, target,
355 (IOInterruptHandler) handler, refCon);
356
357 (void) ml_set_interrupts_enabled(current_state);
358
359 initialize_screen(NULL, kPEAcquireScreen);
360 }
361
362
363 void
364 machine_signal_idle(
365 processor_t processor)
366 {
367 cpu_interrupt(processor->cpu_id);
368 }
369
370 void
371 machine_signal_idle_deferred(
372 __unused processor_t processor)
373 {
374 panic("Unimplemented");
375 }
376
377 void
378 machine_signal_idle_cancel(
379 __unused processor_t processor)
380 {
381 panic("Unimplemented");
382 }
383
384 static kern_return_t
385 register_cpu(
386 uint32_t lapic_id,
387 processor_t *processor_out,
388 boolean_t boot_cpu )
389 {
390 int target_cpu;
391 cpu_data_t *this_cpu_datap;
392
393 this_cpu_datap = cpu_data_alloc(boot_cpu);
394 if (this_cpu_datap == NULL) {
395 return KERN_FAILURE;
396 }
397 target_cpu = this_cpu_datap->cpu_number;
398 assert((boot_cpu && (target_cpu == 0)) ||
399 (!boot_cpu && (target_cpu != 0)));
400
401 lapic_cpu_map(lapic_id, target_cpu);
402
403 /* The cpu_id is not known at registration time. Just use the
404 * lapic_id for now.
405 */
406 this_cpu_datap->cpu_phys_number = lapic_id;
407
408 this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
409 if (this_cpu_datap->cpu_console_buf == NULL)
410 goto failed;
411
412 this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(boot_cpu);
413 if (this_cpu_datap->cpu_chud == NULL)
414 goto failed;
415
416 #if KPC
417 if (kpc_register_cpu(this_cpu_datap) != TRUE)
418 goto failed;
419 #endif
420
421 if (!boot_cpu) {
422 cpu_thread_alloc(this_cpu_datap->cpu_number);
423 if (this_cpu_datap->lcpu.core == NULL)
424 goto failed;
425
426 #if NCOPY_WINDOWS > 0
427 this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
428 if (this_cpu_datap->cpu_pmap == NULL)
429 goto failed;
430 #endif
431
432 this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
433 if (this_cpu_datap->cpu_processor == NULL)
434 goto failed;
435 /*
436 * processor_init() deferred to topology start
437 * because "slot numbers" a.k.a. logical processor numbers
438 * are not yet finalized.
439 */
440 }
441
442 *processor_out = this_cpu_datap->cpu_processor;
443
444 return KERN_SUCCESS;
445
446 failed:
447 cpu_processor_free(this_cpu_datap->cpu_processor);
448 #if NCOPY_WINDOWS > 0
449 pmap_cpu_free(this_cpu_datap->cpu_pmap);
450 #endif
451 chudxnu_cpu_free(this_cpu_datap->cpu_chud);
452 console_cpu_free(this_cpu_datap->cpu_console_buf);
453 #if KPC
454 kpc_unregister_cpu(this_cpu_datap);
455 #endif
456
457 return KERN_FAILURE;
458 }
459
460
461 kern_return_t
462 ml_processor_register(
463 cpu_id_t cpu_id,
464 uint32_t lapic_id,
465 processor_t *processor_out,
466 boolean_t boot_cpu,
467 boolean_t start )
468 {
469 static boolean_t done_topo_sort = FALSE;
470 static uint32_t num_registered = 0;
471
472 /* Register all CPUs first, and track max */
473 if( start == FALSE )
474 {
475 num_registered++;
476
477 DBG( "registering CPU lapic id %d\n", lapic_id );
478
479 return register_cpu( lapic_id, processor_out, boot_cpu );
480 }
481
482 /* Sort by topology before we start anything */
483 if( !done_topo_sort )
484 {
485 DBG( "about to start CPUs. %d registered\n", num_registered );
486
487 cpu_topology_sort( num_registered );
488 done_topo_sort = TRUE;
489 }
490
491 /* Assign the cpu ID */
492 uint32_t cpunum = -1;
493 cpu_data_t *this_cpu_datap = NULL;
494
495 /* find cpu num and pointer */
496 cpunum = ml_get_cpuid( lapic_id );
497
498 if( cpunum == 0xFFFFFFFF ) /* never heard of it? */
499 panic( "trying to start invalid/unregistered CPU %d\n", lapic_id );
500
501 this_cpu_datap = cpu_datap(cpunum);
502
503 /* fix the CPU id */
504 this_cpu_datap->cpu_id = cpu_id;
505
506 /* allocate and initialize other per-cpu structures */
507 if (!boot_cpu) {
508 mp_cpus_call_cpu_init(cpunum);
509 prng_cpu_init(cpunum);
510 }
511
512 /* output arg */
513 *processor_out = this_cpu_datap->cpu_processor;
514
515 /* OK, try and start this CPU */
516 return cpu_topology_start_cpu( cpunum );
517 }
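/*
 * Flow sketch: the platform code drives this in two passes. A hypothetical
 * caller registers every CPU with 'start' FALSE, then calls again with
 * 'start' TRUE, so the topology is sorted exactly once before any CPU is
 * actually started.
 *
 *	processor_t proc;
 *	(void) ml_processor_register(cpu_id, lapic_id, &proc, boot_cpu, FALSE);
 *	// ... all CPUs registered ...
 *	(void) ml_processor_register(cpu_id, lapic_id, &proc, boot_cpu, TRUE);
 */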
518
519
520 void
521 ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
522 {
523 boolean_t os_supports_sse;
524 i386_cpu_info_t *cpuid_infop;
525
526 if (cpu_infop == NULL)
527 return;
528
529 /*
530 * Does the OS support MMX/SSE/SSE2/SSE3,
531 * as distinct from whether the cpu has these capabilities?
532 */
533 os_supports_sse = !!(get_cr4() & CR4_OSXMM);
534
535 if (ml_fpu_avx_enabled())
536 cpu_infop->vector_unit = 9;
537 else if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse)
538 cpu_infop->vector_unit = 8;
539 else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse)
540 cpu_infop->vector_unit = 7;
541 else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse)
542 cpu_infop->vector_unit = 6;
543 else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse)
544 cpu_infop->vector_unit = 5;
545 else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
546 cpu_infop->vector_unit = 4;
547 else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
548 cpu_infop->vector_unit = 3;
549 else if (cpuid_features() & CPUID_FEATURE_MMX)
550 cpu_infop->vector_unit = 2;
551 else
552 cpu_infop->vector_unit = 0;
553
554 cpuid_infop = cpuid_info();
555
556 cpu_infop->cache_line_size = cpuid_infop->cache_linesize;
557
558 cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
559 cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];
560
561 if (cpuid_infop->cache_size[L2U] > 0) {
562 cpu_infop->l2_settings = 1;
563 cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
564 } else {
565 cpu_infop->l2_settings = 0;
566 cpu_infop->l2_cache_size = 0xFFFFFFFF;
567 }
568
569 if (cpuid_infop->cache_size[L3U] > 0) {
570 cpu_infop->l3_settings = 1;
571 cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
572 } else {
573 cpu_infop->l3_settings = 0;
574 cpu_infop->l3_cache_size = 0xFFFFFFFF;
575 }
576 }
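/*
 * Usage sketch (hypothetical): vector_unit encodes the best SIMD level the
 * OS will actually use (9 = AVX, 8 = SSE4.2, 7 = SSE4.1, 6 = SSSE3,
 * 5 = SSE3, 4 = SSE2, 3 = SSE, 2 = MMX, 0 = none).
 *
 *	ml_cpu_info_t info;
 *	ml_cpu_get_info(&info);
 *	if (info.vector_unit >= 5) {
 *		// SSE3 or better is present and enabled by the OS
 *	}
 */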
577
578 void
579 ml_init_max_cpus(unsigned long max_cpus)
580 {
581 boolean_t current_state;
582
583 current_state = ml_set_interrupts_enabled(FALSE);
584 if (max_cpus_initialized != MAX_CPUS_SET) {
585 if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
586 /*
587 * Note: max_cpus is the number of enabled processors
588 * that ACPI found; max_ncpus is the maximum number
589 * that the kernel supports or that the "cpus="
590 * boot-arg has set. Here we take the minimum.
591 */
592 machine_info.max_cpus = (integer_t)MIN(max_cpus, max_ncpus);
593 }
594 if (max_cpus_initialized == MAX_CPUS_WAIT)
595 wakeup((event_t)&max_cpus_initialized);
596 max_cpus_initialized = MAX_CPUS_SET;
597 }
598 (void) ml_set_interrupts_enabled(current_state);
599 }
600
601 int
602 ml_get_max_cpus(void)
603 {
604 boolean_t current_state;
605
606 current_state = ml_set_interrupts_enabled(FALSE);
607 if (max_cpus_initialized != MAX_CPUS_SET) {
608 max_cpus_initialized = MAX_CPUS_WAIT;
609 assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
610 (void)thread_block(THREAD_CONTINUE_NULL);
611 }
612 (void) ml_set_interrupts_enabled(current_state);
613 return(machine_info.max_cpus);
614 }
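/*
 * Interaction sketch: ml_init_max_cpus() publishes the ACPI processor count
 * and wakes any thread already blocked in ml_get_max_cpus(). A hypothetical
 * early consumer just calls the getter and may sleep until the count is
 * available.
 *
 *	int ncpus = ml_get_max_cpus();	// may block until ml_init_max_cpus()
 *	size_percpu_structures(ncpus);	// illustrative consumer
 */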
615 /*
616 * Routine: ml_init_lock_timeout
617 * Function: Initialize lock and TLB timeout values, scaled up when virtualized
618 */
619 void
620 ml_init_lock_timeout(void)
621 {
622 uint64_t abstime;
623 uint32_t mtxspin;
624 #if DEVELOPMENT || DEBUG
625 uint64_t default_timeout_ns = NSEC_PER_SEC>>2;
626 #else
627 uint64_t default_timeout_ns = NSEC_PER_SEC>>1;
628 #endif
629 uint32_t slto;
630 uint32_t prt;
631
632 if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto)))
633 default_timeout_ns = slto * NSEC_PER_USEC;
634
635 /*
636 * LockTimeOut is in absolutetime units, LockTimeOutTSC is in TSC ticks,
637 * and LockTimeOutUsec is in microseconds and is 32 bits wide.
638 */
639 LockTimeOutUsec = (uint32_t) (default_timeout_ns / NSEC_PER_USEC);
640 nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
641 LockTimeOut = abstime;
642 LockTimeOutTSC = tmrCvt(abstime, tscFCvtn2t);
643
644 /*
645 * TLBTimeOut dictates the TLB flush timeout period. It defaults to
646 * LockTimeOut but can be overridden separately. In particular, a
647 * zero value inhibits the timeout-panic and cuts a trace event instead
648 * - see pmap_flush_tlbs().
649 */
650 if (PE_parse_boot_argn("tlbto_us", &slto, sizeof (slto))) {
651 default_timeout_ns = slto * NSEC_PER_USEC;
652 nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
653 TLBTimeOut = (uint32_t) abstime;
654 } else {
655 TLBTimeOut = LockTimeOut;
656 }
657
658 #if DEVELOPMENT || DEBUG
659 reportphyreaddelayabs = LockTimeOut;
660 #endif
661 if (PE_parse_boot_argn("phyreadmaxus", &slto, sizeof (slto))) {
662 default_timeout_ns = slto * NSEC_PER_USEC;
663 nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
664 reportphyreaddelayabs = abstime;
665 }
666
667 if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
668 if (mtxspin > USEC_PER_SEC>>4)
669 mtxspin = USEC_PER_SEC>>4;
670 nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
671 } else {
672 nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
673 }
674 MutexSpin = (unsigned int)abstime;
675
676 nanoseconds_to_absolutetime(4ULL * NSEC_PER_SEC, &LastDebuggerEntryAllowance);
677 if (PE_parse_boot_argn("panic_restart_timeout", &prt, sizeof (prt)))
678 nanoseconds_to_absolutetime(prt * NSEC_PER_SEC, &panic_restart_timeout);
679
680 virtualized = ((cpuid_features() & CPUID_FEATURE_VMM) != 0);
681 if (virtualized) {
682 int vti;
683
684 if (!PE_parse_boot_argn("vti", &vti, sizeof (vti)))
685 vti = 6;
686 printf("Timeouts adjusted for virtualization (<<%d)\n", vti);
687 kprintf("Timeouts adjusted for virtualization (<<%d):\n", vti);
688 #define VIRTUAL_TIMEOUT_INFLATE64(_timeout) \
689 MACRO_BEGIN \
690 kprintf("%24s: 0x%016llx ", #_timeout, _timeout); \
691 _timeout <<= vti; \
692 kprintf("-> 0x%016llx\n", _timeout); \
693 MACRO_END
694 #define VIRTUAL_TIMEOUT_INFLATE32(_timeout) \
695 MACRO_BEGIN \
696 kprintf("%24s: 0x%08x ", #_timeout, _timeout); \
697 if ((_timeout <<vti) >> vti == _timeout) \
698 _timeout <<= vti; \
699 else \
700 _timeout = ~0; /* cap rather than overflow */ \
701 kprintf("-> 0x%08x\n", _timeout); \
702 MACRO_END
703 VIRTUAL_TIMEOUT_INFLATE32(LockTimeOutUsec);
704 VIRTUAL_TIMEOUT_INFLATE64(LockTimeOut);
705 VIRTUAL_TIMEOUT_INFLATE64(LockTimeOutTSC);
706 VIRTUAL_TIMEOUT_INFLATE64(TLBTimeOut);
707 VIRTUAL_TIMEOUT_INFLATE64(MutexSpin);
708 VIRTUAL_TIMEOUT_INFLATE64(reportphyreaddelayabs);
709 }
710
711 interrupt_latency_tracker_setup();
712 simple_lock_init(&ml_timer_evaluation_slock, 0);
713 }
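/*
 * Worked example (assumed default values): on a release kernel with no
 * "slto_us" boot-arg, default_timeout_ns is half a second, so
 * LockTimeOutUsec is 500000. Under a hypervisor with the default vti of 6,
 * every timeout is shifted left 6 bits, i.e. multiplied by 64:
 *
 *	500000 us << 6 = 32000000 us (32 seconds)
 *
 * The 32-bit inflation macro clamps to ~0 instead of wrapping when the
 * shifted value would not fit.
 */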
714
715 /*
716 * Threshold above which we should attempt to block
717 * instead of spinning for clock_delay_until().
718 */
719
720 void
721 ml_init_delay_spin_threshold(int threshold_us)
722 {
723 nanoseconds_to_absolutetime(threshold_us * NSEC_PER_USEC, &delay_spin_threshold);
724 }
725
726 boolean_t
727 ml_delay_should_spin(uint64_t interval)
728 {
729 return (interval < delay_spin_threshold) ? TRUE : FALSE;
730 }
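/*
 * Usage sketch (hypothetical delay path): spin for short waits, block for
 * anything at or above the threshold set by ml_init_delay_spin_threshold().
 *
 *	if (ml_delay_should_spin(deadline - now)) {
 *		// busy-wait; cheaper than a context switch for short intervals
 *	} else {
 *		// block the thread until the deadline instead
 *	}
 */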
731
732 /*
733 * This is called from the machine-independent layer
734 * to perform machine-dependent info updates. Defer to cpu_thread_init().
735 */
736 void
737 ml_cpu_up(void)
738 {
739 return;
740 }
741
742 /*
743 * This is called from the machine-independent layer
744 * to perform machine-dependent info updates.
745 */
746 void
747 ml_cpu_down(void)
748 {
749 i386_deactivate_cpu();
750
751 return;
752 }
753
754 /*
755 * The following are required for parts of the kernel
756 * that cannot resolve these functions as inlines:
757 */
758 extern thread_t current_act(void);
759 thread_t
760 current_act(void)
761 {
762 return(current_thread_fast());
763 }
764
765 #undef current_thread
766 extern thread_t current_thread(void);
767 thread_t
768 current_thread(void)
769 {
770 return(current_thread_fast());
771 }
772
773
774 boolean_t ml_is64bit(void) {
775
776 return (cpu_mode_is64bit());
777 }
778
779
780 boolean_t ml_thread_is64bit(thread_t thread) {
781
782 return (thread_is_64bit(thread));
783 }
784
785
786 boolean_t ml_state_is64bit(void *saved_state) {
787
788 return is_saved_state64(saved_state);
789 }
790
791 void ml_cpu_set_ldt(int selector)
792 {
793 /*
794 * Avoid loading the LDT
795 * if we're setting the KERNEL LDT and it's already set.
796 */
797 if (selector == KERNEL_LDT &&
798 current_cpu_datap()->cpu_ldt == KERNEL_LDT)
799 return;
800
801 lldt(selector);
802 current_cpu_datap()->cpu_ldt = selector;
803 }
804
805 void ml_fp_setvalid(boolean_t value)
806 {
807 fp_setvalid(value);
808 }
809
810 uint64_t ml_cpu_int_event_time(void)
811 {
812 return current_cpu_datap()->cpu_int_event_time;
813 }
814
815 vm_offset_t ml_stack_remaining(void)
816 {
817 uintptr_t local = (uintptr_t) &local;
818
819 if (ml_at_interrupt_context() != 0) {
820 return (local - (current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE));
821 } else {
822 return (local - current_thread()->kernel_stack);
823 }
824 }
825
826 void
827 kernel_preempt_check(void)
828 {
829 boolean_t intr;
830 unsigned long flags;
831
832 assert(get_preemption_level() == 0);
833
834 __asm__ volatile("pushf; pop %0" : "=r" (flags));
835
836 intr = ((flags & EFL_IF) != 0);
837
838 if ((*ast_pending() & AST_URGENT) && intr == TRUE) {
839 /*
840 * can handle interrupts and preemptions
841 * at this point
842 */
843
844 /*
845 * now cause the PRE-EMPTION trap
846 */
847 __asm__ volatile ("int %0" :: "N" (T_PREEMPT));
848 }
849 }
850
851 boolean_t machine_timeout_suspended(void) {
852 return (pmap_tlb_flush_timeout || spinlock_timed_out || panic_active() || mp_recent_debugger_activity() || ml_recent_wake());
853 }
854
855 /* Eagerly evaluate all pending timer and thread callouts
856 */
857 void ml_timer_evaluate(void) {
858 KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_START, 0, 0, 0, 0, 0);
859
860 uint64_t te_end, te_start = mach_absolute_time();
861 simple_lock(&ml_timer_evaluation_slock);
862 ml_timer_evaluation_in_progress = TRUE;
863 thread_call_delayed_timer_rescan_all();
864 mp_cpus_call(CPUMASK_ALL, ASYNC, timer_queue_expire_rescan, NULL);
865 ml_timer_evaluation_in_progress = FALSE;
866 ml_timer_eager_evaluations++;
867 te_end = mach_absolute_time();
868 ml_timer_eager_evaluation_max = MAX(ml_timer_eager_evaluation_max, (te_end - te_start));
869 simple_unlock(&ml_timer_evaluation_slock);
870
871 KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_END, 0, 0, 0, 0, 0);
872 }
873
874 boolean_t
875 ml_timer_forced_evaluation(void) {
876 return ml_timer_evaluation_in_progress;
877 }
878
879 /* 32-bit right-rotate n bits */
880 static inline uint32_t ror32(uint32_t val, const unsigned int n)
881 {
882 __asm__ volatile("rorl %%cl,%0" : "=r" (val) : "0" (val), "c" (n));
883 return val;
884 }
885
886 void
887 ml_entropy_collect(void)
888 {
889 uint32_t tsc_lo, tsc_hi;
890 uint32_t *ep;
891
892 assert(cpu_number() == master_cpu);
893
894 /* update buffer pointer cyclically */
895 if (EntropyData.index_ptr - EntropyData.buffer == ENTROPY_BUFFER_SIZE)
896 ep = EntropyData.index_ptr = EntropyData.buffer;
897 else
898 ep = EntropyData.index_ptr++;
899
900 rdtsc_nofence(tsc_lo, tsc_hi);
901 *ep = ror32(*ep, 9) ^ tsc_lo;
902 }
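/*
 * Mixing sketch: each call folds the low 32 bits of the TSC into one slot
 * of the cyclic entropy buffer with a rotate-and-xor (values illustrative):
 *
 *	old slot            = 0x12345678
 *	ror32(old slot, 9)  = 0x3C091A2B
 *	new slot            = 0x3C091A2B ^ tsc_lo
 *
 * The 9-bit rotation keeps earlier samples diffused across the word rather
 * than letting fresh TSC bits simply overwrite them.
 */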
903
904 uint64_t
905 ml_energy_stat(__unused thread_t t) {
906 return 0;
907 }
908
909 void
910 ml_gpu_stat_update(uint64_t gpu_ns_delta) {
911 current_thread()->machine.thread_gpu_ns += gpu_ns_delta;
912 }
913
914 uint64_t
915 ml_gpu_stat(thread_t t) {
916 return t->machine.thread_gpu_ns;
917 }
918
919 int plctrace_enabled = 0;
920
921 void _disable_preemption(void) {
922 disable_preemption_internal();
923 }
924
925 void _enable_preemption(void) {
926 enable_preemption_internal();
927 }
928
929 void plctrace_disable(void) {
930 plctrace_enabled = 0;
931 }