osfmk/i386/machine_routines.c (xnu-3248.30.4)
/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <mach/processor.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/thread_call.h>
#include <prng/random.h>
#include <i386/machine_cpu.h>
#include <i386/lapic.h>
#include <i386/bit_routines.h>
#include <i386/mp_events.h>
#include <i386/pmCPU.h>
#include <i386/trap.h>
#include <i386/tsc.h>
#include <i386/cpu_threads.h>
#include <i386/proc_reg.h>
#include <mach/vm_param.h>
#include <i386/pmap.h>
#include <i386/pmap_internal.h>
#include <i386/misc_protos.h>
#include <kern/timer_queue.h>
#if KPC
#include <kern/kpc.h>
#endif
#include <architecture/i386/pio.h>

#if DEBUG
#define DBG(x...)       kprintf("DBG: " x)
#else
#define DBG(x...)
#endif

extern void wakeup(void *);

static int max_cpus_initialized = 0;

unsigned int LockTimeOut;
unsigned int TLBTimeOut;
unsigned int LockTimeOutTSC;
unsigned int MutexSpin;
uint64_t LastDebuggerEntryAllowance;
uint64_t delay_spin_threshold;

extern uint64_t panic_restart_timeout;

boolean_t virtualized = FALSE;

decl_simple_lock_data(static, ml_timer_evaluation_slock);
uint32_t ml_timer_eager_evaluations;
uint64_t ml_timer_eager_evaluation_max;
static boolean_t ml_timer_evaluation_in_progress = FALSE;


#define MAX_CPUS_SET    0x1
#define MAX_CPUS_WAIT   0x2

/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
        vm_offset_t phys_addr,
        vm_size_t size)
{
        return(io_map(phys_addr, size, VM_WIMG_IO));
}

/* boot memory allocation */
vm_offset_t ml_static_malloc(
        __unused vm_size_t size)
{
        return((vm_offset_t)NULL);
}


void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
        *phys_addr = 0;
        *size = 0;
}

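/*
 * Routine:   ml_static_ptovirt
 * Function:  Translate a physical address within the statically-mapped
 *            region to its kernel virtual address, by ORing in the
 *            kernel's base address.
 */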
vm_offset_t
ml_static_ptovirt(
        vm_offset_t paddr)
{
#if defined(__x86_64__)
        return (vm_offset_t)(((unsigned long) paddr) | VM_MIN_KERNEL_ADDRESS);
#else
        return (vm_offset_t)((paddr) | LINEAR_KERNEL_ADDRESS);
#endif
}


/*
 * Routine:   ml_static_mfree
 * Function:  Release a block of statically-mapped, wired memory,
 *            returning the underlying pages to the VM free list.
 */
void
ml_static_mfree(
        vm_offset_t vaddr,
        vm_size_t size)
{
        addr64_t vaddr_cur;
        ppnum_t ppn;
        uint32_t freed_pages = 0;
        assert(vaddr >= VM_MIN_KERNEL_ADDRESS);

        assert((vaddr & (PAGE_SIZE-1)) == 0); /* must be page aligned */

        for (vaddr_cur = vaddr;
             vaddr_cur < round_page_64(vaddr+size);
             vaddr_cur += PAGE_SIZE) {
                ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
                if (ppn != (vm_offset_t)NULL) {
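                        /*
                         * The residency count is bumped here, presumably to
                         * balance the decrement performed by pmap_remove()
                         * below, since these boot-time static mappings were
                         * entered without updating the pmap statistics.
                         */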
                        kernel_pmap->stats.resident_count++;
                        if (kernel_pmap->stats.resident_count >
                            kernel_pmap->stats.resident_max) {
                                kernel_pmap->stats.resident_max =
                                    kernel_pmap->stats.resident_count;
                        }
                        pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur+PAGE_SIZE);
                        assert(pmap_valid_page(ppn));
                        if (IS_MANAGED_PAGE(ppn)) {
                                vm_page_create(ppn, (ppn+1));
                                freed_pages++;
                        }
                }
        }
        vm_page_lockspin_queues();
        vm_page_wire_count -= freed_pages;
        vm_page_wire_count_initial -= freed_pages;
        vm_page_unlock_queues();

#if DEBUG
        kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
#endif
}


/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
        vm_offset_t vaddr)
{
        return (vm_offset_t)kvtophys(vaddr);
}

/*
 * Routine:   ml_nofault_copy
 * Function:  Perform a physical mode copy if the source and destination
 *            have valid translations in the kernel pmap. If translations
 *            are present, they are assumed to be wired; i.e. no attempt
 *            is made to guarantee that the translations remain valid for
 *            the duration of the copy process.
 */

vm_size_t ml_nofault_copy(
        vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
        addr64_t cur_phys_dst, cur_phys_src;
        uint32_t count, nbytes = 0;

        while (size > 0) {
                if (!(cur_phys_src = kvtophys(virtsrc)))
                        break;
                if (!(cur_phys_dst = kvtophys(virtdst)))
                        break;
                if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src)))
                        break;
                count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
                if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
                        count = (uint32_t)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));
                if (count > size)
                        count = (uint32_t)size;

                bcopy_phys(cur_phys_src, cur_phys_dst, count);

                nbytes += count;
                virtsrc += count;
                virtdst += count;
                size -= count;
        }

        return nbytes;
}

/*
 * Routine:   ml_validate_nofault
 * Function:  Validate that this address range has valid translations in
 *            the kernel pmap. If translations are present, they are
 *            assumed to be wired; i.e. no attempt is made to guarantee
 *            that the translations persist after the check.
 * Returns:   TRUE if the range is mapped and will not cause a fault,
 *            FALSE otherwise.
 */

boolean_t ml_validate_nofault(
        vm_offset_t virtsrc, vm_size_t size)
{
        addr64_t cur_phys_src;
        uint32_t count;

        while (size > 0) {
                if (!(cur_phys_src = kvtophys(virtsrc)))
                        return FALSE;
                if (!pmap_valid_page(i386_btop(cur_phys_src)))
                        return FALSE;
                count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
                if (count > size)
                        count = (uint32_t)size;

                virtsrc += count;
                size -= count;
        }

        return TRUE;
}

/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
        (void) ml_set_interrupts_enabled(TRUE);
}


/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
        unsigned long flags;

        __asm__ volatile("pushf; pop %0" : "=r" (flags));
        return (flags & EFL_IF) != 0;
}

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
        unsigned long flags;
        boolean_t istate;

        __asm__ volatile("pushf; pop %0" : "=r" (flags));

        assert(get_interrupt_level() ? (enable == FALSE) : TRUE);

        istate = ((flags & EFL_IF) != 0);

        if (enable) {
                __asm__ volatile("sti;nop");

                if ((get_preemption_level() == 0) && (*ast_pending() & AST_URGENT))
                        __asm__ volatile ("int %0" :: "N" (T_PREEMPT));
        }
        else {
                if (istate)
                        __asm__ volatile("cli");
        }

        return istate;
}

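/*
 * Illustrative usage: callers typically save the previous interrupt state
 * and restore it on exit rather than enabling unconditionally, e.g.:
 *
 *      boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *      ... critical section ...
 *      (void) ml_set_interrupts_enabled(istate);
 */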
/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
        return get_interrupt_level() != 0;
}

void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
        *icp = (get_interrupt_level() != 0);
        /* These will be technically inaccurate for interrupts that occur
         * successively within a single "idle exit" event, but shouldn't
         * matter statistically.
         */
        *pidlep = (current_cpu_datap()->lcpu.package->num_idle == topoParms.nLThreadsPerPackage);
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
        panic("ml_cause_interrupt not defined yet on Intel");
}

/*
 * TODO: transition users of this to kernel_thread_start_priority
 * ml_thread_policy is an unsupported KPI
 */
void ml_thread_policy(
        thread_t thread,
        __unused unsigned policy_id,
        unsigned policy_info)
{
        if (policy_info & MACHINE_NETWORK_WORKLOOP) {
                thread_precedence_policy_data_t info;
                __assert_only kern_return_t kret;

                info.importance = 1;

                kret = thread_policy_set_internal(thread, THREAD_PRECEDENCE_POLICY,
                                                  (thread_policy_t)&info,
                                                  THREAD_PRECEDENCE_POLICY_COUNT);
                assert(kret == KERN_SUCCESS);
        }
}

/* Install an interrupt handler */
void ml_install_interrupt_handler(
        void *nub,
        int source,
        void *target,
        IOInterruptHandler handler,
        void *refCon)
{
        boolean_t current_state;

        current_state = ml_get_interrupts_enabled();

        PE_install_interrupt_handler(nub, source, target,
                                     (IOInterruptHandler) handler, refCon);

        (void) ml_set_interrupts_enabled(current_state);

        initialize_screen(NULL, kPEAcquireScreen);
}


void
machine_signal_idle(
        processor_t processor)
{
        cpu_interrupt(processor->cpu_id);
}

void
machine_signal_idle_deferred(
        __unused processor_t processor)
{
        panic("Unimplemented");
}

void
machine_signal_idle_cancel(
        __unused processor_t processor)
{
        panic("Unimplemented");
}

static kern_return_t
register_cpu(
        uint32_t        lapic_id,
        processor_t     *processor_out,
        boolean_t       boot_cpu )
{
        int             target_cpu;
        cpu_data_t      *this_cpu_datap;

        this_cpu_datap = cpu_data_alloc(boot_cpu);
        if (this_cpu_datap == NULL) {
                return KERN_FAILURE;
        }
        target_cpu = this_cpu_datap->cpu_number;
        assert((boot_cpu && (target_cpu == 0)) ||
               (!boot_cpu && (target_cpu != 0)));

        lapic_cpu_map(lapic_id, target_cpu);

        /* The cpu_id is not known at the registration phase. Just use the
         * lapic_id for now.
         */
        this_cpu_datap->cpu_phys_number = lapic_id;

        this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
        if (this_cpu_datap->cpu_console_buf == NULL)
                goto failed;

        this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(boot_cpu);
        if (this_cpu_datap->cpu_chud == NULL)
                goto failed;

#if KPC
        if (kpc_register_cpu(this_cpu_datap) != TRUE)
                goto failed;
#endif

        if (!boot_cpu) {
                cpu_thread_alloc(this_cpu_datap->cpu_number);
                if (this_cpu_datap->lcpu.core == NULL)
                        goto failed;

#if NCOPY_WINDOWS > 0
                this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
                if (this_cpu_datap->cpu_pmap == NULL)
                        goto failed;
#endif

                this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
                if (this_cpu_datap->cpu_processor == NULL)
                        goto failed;
                /*
                 * processor_init() deferred to topology start
                 * because "slot numbers" a.k.a. logical processor numbers
                 * are not yet finalized.
                 */
        }

        *processor_out = this_cpu_datap->cpu_processor;

        return KERN_SUCCESS;

failed:
        cpu_processor_free(this_cpu_datap->cpu_processor);
#if NCOPY_WINDOWS > 0
        pmap_cpu_free(this_cpu_datap->cpu_pmap);
#endif
        chudxnu_cpu_free(this_cpu_datap->cpu_chud);
        console_cpu_free(this_cpu_datap->cpu_console_buf);
#if KPC
        kpc_counterbuf_free(this_cpu_datap->cpu_kpc_buf[0]);
        kpc_counterbuf_free(this_cpu_datap->cpu_kpc_buf[1]);
        kpc_counterbuf_free(this_cpu_datap->cpu_kpc_shadow);
        kpc_counterbuf_free(this_cpu_datap->cpu_kpc_reload);
#endif

        return KERN_FAILURE;
}

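/*
 * ml_processor_register is called in two phases: first with start == FALSE
 * for every CPU, to allocate and register its per-cpu data, and then with
 * start == TRUE once the topology has been sorted, to assign the cpu_id and
 * bring the CPU up via cpu_topology_start_cpu().
 */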
kern_return_t
ml_processor_register(
        cpu_id_t        cpu_id,
        uint32_t        lapic_id,
        processor_t     *processor_out,
        boolean_t       boot_cpu,
        boolean_t       start )
{
        static boolean_t done_topo_sort = FALSE;
        static uint32_t num_registered = 0;

        /* Register all CPUs first, and track max */
        if( start == FALSE )
        {
                num_registered++;

                DBG( "registering CPU lapic id %d\n", lapic_id );

                return register_cpu( lapic_id, processor_out, boot_cpu );
        }

        /* Sort by topology before we start anything */
        if( !done_topo_sort )
        {
                DBG( "about to start CPUs. %d registered\n", num_registered );

                cpu_topology_sort( num_registered );
                done_topo_sort = TRUE;
        }

        /* Assign the cpu ID */
        uint32_t cpunum = -1;
        cpu_data_t *this_cpu_datap = NULL;

        /* find cpu num and pointer */
        cpunum = ml_get_cpuid( lapic_id );

        if( cpunum == 0xFFFFFFFF ) /* never heard of it? */
                panic( "trying to start invalid/unregistered CPU %d\n", lapic_id );

        this_cpu_datap = cpu_datap(cpunum);

        /* fix the CPU id */
        this_cpu_datap->cpu_id = cpu_id;

        /* allocate and initialize other per-cpu structures */
        if (!boot_cpu) {
                mp_cpus_call_cpu_init(cpunum);
                prng_cpu_init(cpunum);
        }

        /* output arg */
        *processor_out = this_cpu_datap->cpu_processor;

        /* OK, try and start this CPU */
        return cpu_topology_start_cpu( cpunum );
}

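/*
 * ml_cpu_get_info reports CPU capabilities. The vector_unit field encodes
 * the most capable SIMD level the OS will use: 0 = none, 2 = MMX, 3 = SSE,
 * 4 = SSE2, 5 = SSE3, 6 = SSSE3, 7 = SSE4.1, 8 = SSE4.2, 9 = AVX.
 */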
void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
        boolean_t       os_supports_sse;
        i386_cpu_info_t *cpuid_infop;

        if (cpu_infop == NULL)
                return;

        /*
         * Are we supporting MMX/SSE/SSE2/SSE3?
         * As distinct from whether the cpu has these capabilities.
         */
        os_supports_sse = !!(get_cr4() & CR4_OSXMM);

        if (ml_fpu_avx_enabled())
                cpu_infop->vector_unit = 9;
        else if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse)
                cpu_infop->vector_unit = 8;
        else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse)
                cpu_infop->vector_unit = 7;
        else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse)
                cpu_infop->vector_unit = 6;
        else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse)
                cpu_infop->vector_unit = 5;
        else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
                cpu_infop->vector_unit = 4;
        else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
                cpu_infop->vector_unit = 3;
        else if (cpuid_features() & CPUID_FEATURE_MMX)
                cpu_infop->vector_unit = 2;
        else
                cpu_infop->vector_unit = 0;

        cpuid_infop = cpuid_info();

        cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

        cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
        cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

        if (cpuid_infop->cache_size[L2U] > 0) {
                cpu_infop->l2_settings = 1;
                cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
        } else {
                cpu_infop->l2_settings = 0;
                cpu_infop->l2_cache_size = 0xFFFFFFFF;
        }

        if (cpuid_infop->cache_size[L3U] > 0) {
                cpu_infop->l3_settings = 1;
                cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
        } else {
                cpu_infop->l3_settings = 0;
                cpu_infop->l3_cache_size = 0xFFFFFFFF;
        }
}

void
ml_init_max_cpus(unsigned long max_cpus)
{
        boolean_t current_state;

        current_state = ml_set_interrupts_enabled(FALSE);
        if (max_cpus_initialized != MAX_CPUS_SET) {
                if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
                        /*
                         * Note: max_cpus is the number of enabled processors
                         * that ACPI found; max_ncpus is the maximum number
                         * that the kernel supports or that the "cpus="
                         * boot-arg has set. Here we take the minimum.
                         */
                        machine_info.max_cpus = (integer_t)MIN(max_cpus, max_ncpus);
                }
                if (max_cpus_initialized == MAX_CPUS_WAIT)
                        wakeup((event_t)&max_cpus_initialized);
                max_cpus_initialized = MAX_CPUS_SET;
        }
        (void) ml_set_interrupts_enabled(current_state);
}

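/*
 * ml_get_max_cpus blocks until ml_init_max_cpus() has run, then returns
 * the machine_info.max_cpus value established there.
 */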
int
ml_get_max_cpus(void)
{
        boolean_t current_state;

        current_state = ml_set_interrupts_enabled(FALSE);
        if (max_cpus_initialized != MAX_CPUS_SET) {
                max_cpus_initialized = MAX_CPUS_WAIT;
                assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
                (void)thread_block(THREAD_CONTINUE_NULL);
        }
        (void) ml_set_interrupts_enabled(current_state);
        return(machine_info.max_cpus);
}

/*
 * Routine:   ml_init_lock_timeout
 * Function:  Initialize spinlock, mutex and TLB-flush timeouts from their
 *            defaults and any overriding boot-args.
 */
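/*
 * Boot-args consulted below: "slto_us" (spinlock timeout), "tlbto_us"
 * (TLB flush timeout), "phyreadmaxus" (physical read delay reporting),
 * "mtxspin" (mutex spin duration) and "panic_restart_timeout".
 */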
void
ml_init_lock_timeout(void)
{
        uint64_t        abstime;
        uint32_t        mtxspin;
#if DEVELOPMENT || DEBUG
        uint64_t        default_timeout_ns = NSEC_PER_SEC>>2;
#else
        uint64_t        default_timeout_ns = NSEC_PER_SEC>>1;
#endif
        uint32_t        slto;
        uint32_t        prt;

        if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto)))
                default_timeout_ns = slto * NSEC_PER_USEC;

        /* LockTimeOut is in units of absolutetime; LockTimeOutTSC is in TSC ticks */
        nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
        LockTimeOut = (uint32_t) abstime;
        LockTimeOutTSC = (uint32_t) tmrCvt(abstime, tscFCvtn2t);

        /*
         * TLBTimeOut dictates the TLB flush timeout period. It defaults to
         * LockTimeOut but can be overridden separately. In particular, a
         * zero value inhibits the timeout-panic and cuts a trace event
         * instead - see pmap_flush_tlbs().
         */
        if (PE_parse_boot_argn("tlbto_us", &slto, sizeof (slto))) {
                default_timeout_ns = slto * NSEC_PER_USEC;
                nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
                TLBTimeOut = (uint32_t) abstime;
        } else {
                TLBTimeOut = LockTimeOut;
        }

        if (PE_parse_boot_argn("phyreadmaxus", &slto, sizeof (slto))) {
                default_timeout_ns = slto * NSEC_PER_USEC;
                nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
                reportphyreaddelayabs = abstime;
        }

        if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
                if (mtxspin > USEC_PER_SEC>>4)
                        mtxspin = USEC_PER_SEC>>4;
                nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
        } else {
                nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
        }
        MutexSpin = (unsigned int)abstime;

        nanoseconds_to_absolutetime(4ULL * NSEC_PER_SEC, &LastDebuggerEntryAllowance);
        if (PE_parse_boot_argn("panic_restart_timeout", &prt, sizeof (prt)))
                nanoseconds_to_absolutetime(prt * NSEC_PER_SEC, &panic_restart_timeout);
        virtualized = ((cpuid_features() & CPUID_FEATURE_VMM) != 0);
        interrupt_latency_tracker_setup();
        simple_lock_init(&ml_timer_evaluation_slock, 0);
}

/*
 * Threshold above which we should attempt to block
 * instead of spinning for clock_delay_until().
 */

void
ml_init_delay_spin_threshold(int threshold_us)
{
        nanoseconds_to_absolutetime(threshold_us * NSEC_PER_USEC, &delay_spin_threshold);
}

boolean_t
ml_delay_should_spin(uint64_t interval)
{
        return (interval < delay_spin_threshold) ? TRUE : FALSE;
}

/*
 * This is called from the machine-independent layer
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
        return;
}

/*
 * This is called from the machine-independent layer
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
        i386_deactivate_cpu();

        return;
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
        return(current_thread_fast());
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
        return(current_thread_fast());
}


boolean_t ml_is64bit(void) {

        return (cpu_mode_is64bit());
}


boolean_t ml_thread_is64bit(thread_t thread) {

        return (thread_is_64bit(thread));
}


boolean_t ml_state_is64bit(void *saved_state) {

        return is_saved_state64(saved_state);
}

void ml_cpu_set_ldt(int selector)
{
        /*
         * Avoid loading the LDT
         * if we're setting the KERNEL LDT and it's already set.
         */
        if (selector == KERNEL_LDT &&
            current_cpu_datap()->cpu_ldt == KERNEL_LDT)
                return;

        lldt(selector);
        current_cpu_datap()->cpu_ldt = selector;
}

void ml_fp_setvalid(boolean_t value)
{
        fp_setvalid(value);
}

uint64_t ml_cpu_int_event_time(void)
{
        return current_cpu_datap()->cpu_int_event_time;
}

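/*
 * ml_stack_remaining returns the number of bytes left between the current
 * stack pointer and the base of the active stack (the interrupt stack when
 * called at interrupt context, the thread's kernel stack otherwise).
 */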
vm_offset_t ml_stack_remaining(void)
{
        uintptr_t local = (uintptr_t) &local;

        if (ml_at_interrupt_context() != 0) {
                return (local - (current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE));
        } else {
                return (local - current_thread()->kernel_stack);
        }
}

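/*
 * kernel_preempt_check: called once the preemption level has dropped to
 * zero (asserted below); if an urgent AST is pending and interrupts are
 * enabled, take the preemption trap now.
 */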
void
kernel_preempt_check(void)
{
        boolean_t       intr;
        unsigned long   flags;

        assert(get_preemption_level() == 0);

        __asm__ volatile("pushf; pop %0" : "=r" (flags));

        intr = ((flags & EFL_IF) != 0);

        if ((*ast_pending() & AST_URGENT) && intr == TRUE) {
                /*
                 * can handle interrupts and preemptions
                 * at this point
                 */

                /*
                 * now cause the PRE-EMPTION trap
                 */
                __asm__ volatile ("int %0" :: "N" (T_PREEMPT));
        }
}

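/*
 * machine_timeout_suspended returns TRUE when watchdog/spin timeouts should
 * be ignored, e.g. when running virtualized, after a TLB-flush or spinlock
 * timeout has fired, during a panic, or following recent debugger or wake
 * activity.
 */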
boolean_t machine_timeout_suspended(void) {
        return (virtualized || pmap_tlb_flush_timeout || spinlock_timed_out || panic_active() || mp_recent_debugger_activity() || ml_recent_wake());
}

/* Eagerly evaluate all pending timer and thread callouts */
void ml_timer_evaluate(void) {
        KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_START, 0, 0, 0, 0, 0);

        uint64_t te_end, te_start = mach_absolute_time();
        simple_lock(&ml_timer_evaluation_slock);
        ml_timer_evaluation_in_progress = TRUE;
        thread_call_delayed_timer_rescan_all();
        mp_cpus_call(CPUMASK_ALL, ASYNC, timer_queue_expire_rescan, NULL);
        ml_timer_evaluation_in_progress = FALSE;
        ml_timer_eager_evaluations++;
        te_end = mach_absolute_time();
        ml_timer_eager_evaluation_max = MAX(ml_timer_eager_evaluation_max, (te_end - te_start));
        simple_unlock(&ml_timer_evaluation_slock);

        KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_END, 0, 0, 0, 0, 0);
}

boolean_t
ml_timer_forced_evaluation(void) {
        return ml_timer_evaluation_in_progress;
}

/* 32-bit right-rotate n bits */
static inline uint32_t ror32(uint32_t val, const unsigned int n)
{
        __asm__ volatile("rorl %%cl,%0" : "=r" (val) : "0" (val), "c" (n));
        return val;
}

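/*
 * ml_entropy_collect mixes the low 32 bits of the TSC into the next slot of
 * the cyclic entropy buffer; it must run on the master CPU (asserted below).
 */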
void
ml_entropy_collect(void)
{
        uint32_t        tsc_lo, tsc_hi;
        uint32_t        *ep;

        assert(cpu_number() == master_cpu);

        /* update buffer pointer cyclically */
        if (EntropyData.index_ptr - EntropyData.buffer == ENTROPY_BUFFER_SIZE)
                ep = EntropyData.index_ptr = EntropyData.buffer;
        else
                ep = EntropyData.index_ptr++;

        rdtsc_nofence(tsc_lo, tsc_hi);
        *ep = ror32(*ep, 9) ^ tsc_lo;
}

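/*
 * Per-thread GPU time accounting: ml_gpu_stat_update() accumulates GPU
 * nanoseconds against the current thread; ml_gpu_stat() reports the total
 * for a given thread.
 */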
void
ml_gpu_stat_update(uint64_t gpu_ns_delta) {
        current_thread()->machine.thread_gpu_ns += gpu_ns_delta;
}

uint64_t
ml_gpu_stat(thread_t t) {
        return t->machine.thread_gpu_ns;
}