/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>

#include <mach/processor.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/thread_call.h>
#include <prng/random.h>
#include <i386/machine_cpu.h>
#include <i386/lapic.h>
#include <i386/bit_routines.h>
#include <i386/mp_events.h>
#include <i386/pmCPU.h>
#include <i386/trap.h>

#include <i386/cpu_threads.h>
#include <i386/proc_reg.h>
#include <mach/vm_param.h>
#include <i386/pmap.h>
#include <i386/pmap_internal.h>
#include <i386/misc_protos.h>
#include <kern/timer_queue.h>

#include <architecture/i386/pio.h>
#define DBG(x...)	kprintf("DBG: " x)

extern void	wakeup(void *);
static int max_cpus_initialized = 0;

unsigned int LockTimeOut;
unsigned int TLBTimeOut;
unsigned int LockTimeOutTSC;
unsigned int MutexSpin;
uint64_t LastDebuggerEntryAllowance;
uint64_t delay_spin_threshold;

extern uint64_t panic_restart_timeout;

boolean_t virtualized = FALSE;

decl_simple_lock_data(static, ml_timer_evaluation_slock);
uint32_t ml_timer_eager_evaluations;
uint64_t ml_timer_eager_evaluation_max;
static boolean_t ml_timer_evaluation_in_progress = FALSE;

#define MAX_CPUS_SET	0x1
#define MAX_CPUS_WAIT	0x2
/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return (io_map(phys_addr, size, VM_WIMG_IO));
}
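
/*
 * Illustrative sketch (not in the original source): a driver-style caller
 * might use ml_io_map() to obtain a kernel virtual window onto a device's
 * memory-mapped registers and then access them through a volatile pointer.
 * The physical base address and register offset below are hypothetical.
 */
#if 0	/* example only */
static uint32_t
example_read_device_status(void)
{
	vm_offset_t regs = ml_io_map(0xFED00000, PAGE_SIZE);	/* hypothetical MMIO base */
	volatile uint32_t *status = (volatile uint32_t *)(regs + 0x10);	/* hypothetical offset */

	return *status;
}
#endif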
/* boot memory allocation */
vm_offset_t ml_static_malloc(
	__unused vm_size_t size)
{
	return ((vm_offset_t)NULL);
}
void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	*phys_addr = 0;
	*size = 0;
}
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
#if defined(__x86_64__)
	return (vm_offset_t)(((unsigned long) paddr) | VM_MIN_KERNEL_ADDRESS);
#else
	return (vm_offset_t)((paddr) | LINEAR_KERNEL_ADDRESS);
#endif
}
/*
 *	Routine:	ml_static_mfree
 *	Function:	Release wired pages of static (boot-time) memory
 *			back to the VM system.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	addr64_t vaddr_cur;
	ppnum_t ppn;
	uint32_t freed_pages = 0;

	assert(vaddr >= VM_MIN_KERNEL_ADDRESS);
	assert((vaddr & (PAGE_SIZE-1)) == 0);	/* must be page aligned */

	for (vaddr_cur = vaddr;
	     vaddr_cur < round_page_64(vaddr+size);
	     vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t)NULL) {
			kernel_pmap->stats.resident_count++;
			if (kernel_pmap->stats.resident_count >
			    kernel_pmap->stats.resident_max) {
				kernel_pmap->stats.resident_max =
					kernel_pmap->stats.resident_count;
			}
			pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur+PAGE_SIZE);
			assert(pmap_valid_page(ppn));

			if (IS_MANAGED_PAGE(ppn)) {
				vm_page_create(ppn, (ppn+1));
				vm_page_wire_count--;
				freed_pages++;
			}
		}
	}
	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n",
	    freed_pages, (void *)vaddr, (uint64_t)size, ppn);
}
/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return (vm_offset_t)kvtophys(vaddr);
}
/*
 *	Routine:	ml_nofault_copy
 *	Function:	Perform a physical mode copy if the source and
 *			destination have valid translations in the kernel pmap.
 *			If translations are present, they are assumed to
 *			be wired; i.e. no attempt is made to guarantee that the
 *			translations obtained remain valid for
 *			the duration of the copy process.
 */
vm_size_t ml_nofault_copy(
	vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			break;
		if (!(cur_phys_dst = kvtophys(virtdst)))
			break;
		if (!pmap_valid_page(i386_btop(cur_phys_dst)) ||
		    !pmap_valid_page(i386_btop(cur_phys_src)))
			break;
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			count = (uint32_t)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));
		if (count > size)
			count = (uint32_t)size;

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}
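
/*
 * Illustrative sketch (not in the original source): a debugger-style caller
 * could use ml_nofault_copy() to snapshot memory that may or may not be
 * mapped, treating a short return as a failed or partial copy.
 */
#if 0	/* example only */
static boolean_t
example_nofault_snapshot(void *src, void *dst, vm_size_t len)
{
	vm_size_t copied;

	copied = ml_nofault_copy((vm_offset_t)src, (vm_offset_t)dst, len);
	return (copied == len);	/* a short copy means a translation was missing */
}
#endif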
/*
 *	Routine:	ml_validate_nofault
 *	Function:	Validate that this address range has valid translations
 *			in the kernel pmap. If translations are present, they are
 *			assumed to be wired; i.e. no attempt is made to guarantee
 *			that the translations persist after the check.
 *	Returns:	TRUE if the range is mapped and will not cause a fault,
 *			FALSE otherwise.
 */
boolean_t ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size)
{
	addr64_t cur_phys_src;
	uint32_t count;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			return FALSE;
		if (!pmap_valid_page(i386_btop(cur_phys_src)))
			return FALSE;
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > size)
			count = (uint32_t)size;

		virtsrc += count;
		size -= count;
	}

	return TRUE;
}
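
/*
 * Illustrative sketch (not in the original source): a caller that wants to
 * dump an arbitrary kernel range without risking a fault can gate the access
 * on ml_validate_nofault(); addr and len below are hypothetical locals.
 */
#if 0	/* example only */
	if (ml_validate_nofault((vm_offset_t)addr, len)) {
		/* safe to read [addr, addr + len) without faulting right now */
	}
#endif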
/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
	(void) ml_set_interrupts_enabled(TRUE);
}


/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
	unsigned long flags;

	__asm__ volatile("pushf; pop %0" : "=r" (flags));
	return (flags & EFL_IF) != 0;
}
/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
	unsigned long flags;
	boolean_t istate;

	__asm__ volatile("pushf; pop %0" : "=r" (flags));

	assert(get_interrupt_level() ? (enable == FALSE) : TRUE);

	istate = ((flags & EFL_IF) != 0);

	if (enable) {
		__asm__ volatile("sti;nop");

		if ((get_preemption_level() == 0) && (*ast_pending() & AST_URGENT))
			__asm__ volatile ("int %0" :: "N" (T_PREEMPT));
	} else {
		if (istate)
			__asm__ volatile("cli");
	}

	return istate;
}
/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
	return get_interrupt_level() != 0;
}

void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
	*icp = (get_interrupt_level() != 0);
	/* These will be technically inaccurate for interrupts that occur
	 * successively within a single "idle exit" event, but shouldn't
	 * matter statistically.
	 */
	*pidlep = (current_cpu_datap()->lcpu.package->num_idle == topoParms.nLThreadsPerPackage);
}
/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
	panic("ml_cause_interrupt not defined yet on Intel");
}

/*
 * TODO: transition users of this to kernel_thread_start_priority
 * ml_thread_policy is an unsupported KPI
 */
void ml_thread_policy(
	thread_t thread,
	__unused unsigned policy_id,
	unsigned policy_info)
{
	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		thread_precedence_policy_data_t info;
		__assert_only kern_return_t kret;

		info.importance = 1;

		kret = thread_policy_set_internal(thread, THREAD_PRECEDENCE_POLICY,
		    (thread_policy_t)&info,
		    THREAD_PRECEDENCE_POLICY_COUNT);
		assert(kret == KERN_SUCCESS);
	}
}
/* Initialize Interrupts */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	PE_install_interrupt_handler(nub, source, target,
	    (IOInterruptHandler) handler, refCon);

	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}
void
machine_signal_idle(
	processor_t processor)
{
	cpu_interrupt(processor->cpu_id);
}
static kern_return_t
register_cpu(
	uint32_t	lapic_id,
	processor_t	*processor_out,
	boolean_t	boot_cpu)
{
	int		target_cpu;
	cpu_data_t	*this_cpu_datap;

	this_cpu_datap = cpu_data_alloc(boot_cpu);
	if (this_cpu_datap == NULL) {
		return KERN_FAILURE;
	}
	target_cpu = this_cpu_datap->cpu_number;
	assert((boot_cpu && (target_cpu == 0)) ||
	       (!boot_cpu && (target_cpu != 0)));

	lapic_cpu_map(lapic_id, target_cpu);

	/* The cpu_id is not known at registration phase. Just do
	 * lapic_id for now.
	 */
	this_cpu_datap->cpu_phys_number = lapic_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_console_buf == NULL)
		goto failed;

	this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_chud == NULL)
		goto failed;

	this_cpu_datap->cpu_kpc_buf[0] = kpc_counterbuf_alloc();
	if (this_cpu_datap->cpu_kpc_buf[0] == NULL)
		goto failed;
	this_cpu_datap->cpu_kpc_buf[1] = kpc_counterbuf_alloc();
	if (this_cpu_datap->cpu_kpc_buf[1] == NULL)
		goto failed;

	this_cpu_datap->cpu_kpc_shadow = kpc_counterbuf_alloc();
	if (this_cpu_datap->cpu_kpc_shadow == NULL)
		goto failed;

	this_cpu_datap->cpu_kpc_reload = kpc_counterbuf_alloc();
	if (this_cpu_datap->cpu_kpc_reload == NULL)
		goto failed;

	if (!boot_cpu) {
		cpu_thread_alloc(this_cpu_datap->cpu_number);
		if (this_cpu_datap->lcpu.core == NULL)
			goto failed;

#if NCOPY_WINDOWS > 0
		this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
		if (this_cpu_datap->cpu_pmap == NULL)
			goto failed;
#endif

		this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
		if (this_cpu_datap->cpu_processor == NULL)
			goto failed;
		/*
		 * processor_init() deferred to topology start
		 * because "slot numbers" a.k.a. logical processor numbers
		 * are not yet finalized.
		 */
	}

	*processor_out = this_cpu_datap->cpu_processor;

	return KERN_SUCCESS;

failed:
	cpu_processor_free(this_cpu_datap->cpu_processor);
#if NCOPY_WINDOWS > 0
	pmap_cpu_free(this_cpu_datap->cpu_pmap);
#endif
	chudxnu_cpu_free(this_cpu_datap->cpu_chud);
	console_cpu_free(this_cpu_datap->cpu_console_buf);
	kpc_counterbuf_free(this_cpu_datap->cpu_kpc_buf[0]);
	kpc_counterbuf_free(this_cpu_datap->cpu_kpc_buf[1]);
	kpc_counterbuf_free(this_cpu_datap->cpu_kpc_shadow);
	kpc_counterbuf_free(this_cpu_datap->cpu_kpc_reload);

	return KERN_FAILURE;
}
kern_return_t
ml_processor_register(
	cpu_id_t	cpu_id,
	uint32_t	lapic_id,
	processor_t	*processor_out,
	boolean_t	boot_cpu,
	boolean_t	start)
{
	static boolean_t done_topo_sort = FALSE;
	static uint32_t num_registered = 0;

	/* Register all CPUs first, and track max */
	if (start == FALSE) {
		num_registered++;

		DBG( "registering CPU lapic id %d\n", lapic_id );

		return register_cpu( lapic_id, processor_out, boot_cpu );
	}

	/* Sort by topology before we start anything */
	if( !done_topo_sort )
	{
		DBG( "about to start CPUs. %d registered\n", num_registered );

		cpu_topology_sort( num_registered );
		done_topo_sort = TRUE;
	}

	/* Assign the cpu ID */
	uint32_t cpunum = -1;
	cpu_data_t	*this_cpu_datap = NULL;

	/* find cpu num and pointer */
	cpunum = ml_get_cpuid( lapic_id );

	if( cpunum == 0xFFFFFFFF ) /* never heard of it? */
		panic( "trying to start invalid/unregistered CPU %d\n", lapic_id );

	this_cpu_datap = cpu_datap(cpunum);

	this_cpu_datap->cpu_id = cpu_id;

	/* allocate and initialize other per-cpu structures */
	if (!boot_cpu) {
		mp_cpus_call_cpu_init(cpunum);
		prng_cpu_init(cpunum);
	}

	*processor_out = this_cpu_datap->cpu_processor;

	/* OK, try and start this CPU */
	return cpu_topology_start_cpu( cpunum );
}
void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
	boolean_t	os_supports_sse;
	i386_cpu_info_t *cpuid_infop;

	if (cpu_infop == NULL)
		return;

	/*
	 * Are we supporting MMX/SSE/SSE2/SSE3?
	 * As distinct from whether the cpu has these capabilities.
	 */
	os_supports_sse = !!(get_cr4() & CR4_OSXMM);

	if (ml_fpu_avx_enabled())
		cpu_infop->vector_unit = 9;
	else if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse)
		cpu_infop->vector_unit = 8;
	else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse)
		cpu_infop->vector_unit = 7;
	else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse)
		cpu_infop->vector_unit = 6;
	else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse)
		cpu_infop->vector_unit = 5;
	else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
		cpu_infop->vector_unit = 4;
	else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
		cpu_infop->vector_unit = 3;
	else if (cpuid_features() & CPUID_FEATURE_MMX)
		cpu_infop->vector_unit = 2;
	else
		cpu_infop->vector_unit = 0;

	cpuid_infop = cpuid_info();

	cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

	cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
	cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

	if (cpuid_infop->cache_size[L2U] > 0) {
		cpu_infop->l2_settings = 1;
		cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
	} else {
		cpu_infop->l2_settings = 0;
		cpu_infop->l2_cache_size = 0xFFFFFFFF;
	}

	if (cpuid_infop->cache_size[L3U] > 0) {
		cpu_infop->l3_settings = 1;
		cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
	} else {
		cpu_infop->l3_settings = 0;
		cpu_infop->l3_cache_size = 0xFFFFFFFF;
	}
}
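
/*
 * Illustrative sketch (not in the original source): the vector_unit values
 * assigned above form a small ordinal scale. A hypothetical helper that turns
 * them back into names might look like this.
 */
#if 0	/* example only */
static const char *
example_vector_unit_name(int vu)
{
	switch (vu) {
	case 9:  return "AVX";
	case 8:  return "SSE4.2";
	case 7:  return "SSE4.1";
	case 6:  return "SSSE3";
	case 5:  return "SSE3";
	case 4:  return "SSE2";
	case 3:  return "SSE";
	case 2:  return "MMX";
	default: return "none";
	}
}
#endif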
void
ml_init_max_cpus(unsigned long max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
			/*
			 * Note: max_cpus is the number of enabled processors
			 * that ACPI found; max_ncpus is the maximum number
			 * that the kernel supports or that the "cpus="
			 * boot-arg has set. Here we take the minimum.
			 */
			machine_info.max_cpus = (integer_t)MIN(max_cpus, max_ncpus);
		}
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}
int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void)thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return (machine_info.max_cpus);
}
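
/*
 * Illustrative note (not in the original source): the two routines above form
 * a small handshake. ml_init_max_cpus() publishes the ACPI CPU count and wakes
 * any waiter; ml_get_max_cpus() blocks (MAX_CPUS_WAIT) until that has
 * happened, so a boot-time caller can simply do:
 */
#if 0	/* example only */
	int ncpus = ml_get_max_cpus();	/* sleeps until ml_init_max_cpus() has run */
#endif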
/*
 *	Routine:	ml_init_lock_timeout
 */
void
ml_init_lock_timeout(void)
{
	uint64_t	abstime;
	uint32_t	mtxspin;
#if DEVELOPMENT || DEBUG
	uint64_t	default_timeout_ns = NSEC_PER_SEC>>2;
#else
	uint64_t	default_timeout_ns = NSEC_PER_SEC>>1;
#endif
	uint32_t	slto;
	uint32_t	prt;

	if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto)))
		default_timeout_ns = slto * NSEC_PER_USEC;

	/* LockTimeOut is in absolute time units; LockTimeOutTSC is in TSC ticks */
	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
	LockTimeOut = (uint32_t) abstime;
	LockTimeOutTSC = (uint32_t) tmrCvt(abstime, tscFCvtn2t);

	/*
	 * TLBTimeOut dictates the TLB flush timeout period. It defaults to
	 * LockTimeOut but can be overridden separately. In particular, a
	 * zero value inhibits the timeout-panic and cuts a trace event instead
	 * - see pmap_flush_tlbs().
	 */
	if (PE_parse_boot_argn("tlbto_us", &slto, sizeof (slto))) {
		default_timeout_ns = slto * NSEC_PER_USEC;
		nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
		TLBTimeOut = (uint32_t) abstime;
	} else {
		TLBTimeOut = LockTimeOut;
	}

	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;

	nanoseconds_to_absolutetime(4ULL * NSEC_PER_SEC, &LastDebuggerEntryAllowance);
	if (PE_parse_boot_argn("panic_restart_timeout", &prt, sizeof (prt)))
		nanoseconds_to_absolutetime(prt * NSEC_PER_SEC, &panic_restart_timeout);
	virtualized = ((cpuid_features() & CPUID_FEATURE_VMM) != 0);
	interrupt_latency_tracker_setup();
	simple_lock_init(&ml_timer_evaluation_slock, 0);
}
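
/*
 * Illustrative sketch (not in the original source): keeping LockTimeOutTSC in
 * TSC ticks lets spin paths compare directly against the timestamp counter
 * without converting time bases on every iteration. Assuming the kernel's
 * rdtsc64()/cpu_pause() helpers, a deadline-bounded spin might look like this;
 * try_grab() is hypothetical.
 */
#if 0	/* example only */
	uint64_t tsc_deadline = rdtsc64() + LockTimeOutTSC;

	while (!try_grab()) {		/* hypothetical acquire attempt */
		cpu_pause();
		if (rdtsc64() > tsc_deadline)
			panic("example: spin timeout");
	}
#endif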
/*
 * Threshold above which we should attempt to block
 * instead of spinning for clock_delay_until().
 */
void
ml_init_delay_spin_threshold(int threshold_us)
{
	nanoseconds_to_absolutetime(threshold_us * NSEC_PER_USEC, &delay_spin_threshold);
}

boolean_t
ml_delay_should_spin(uint64_t interval)
{
	return (interval < delay_spin_threshold) ? TRUE : FALSE;
}
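
/*
 * Illustrative sketch (not in the original source): a delay routine in the
 * spirit of clock_delay_until() could consult ml_delay_should_spin() to pick
 * between busy-waiting and blocking; deadline is an assumed local and
 * example_block_until() is hypothetical.
 */
#if 0	/* example only */
	uint64_t now = mach_absolute_time();

	if (deadline > now && ml_delay_should_spin(deadline - now)) {
		while (mach_absolute_time() < deadline)
			cpu_pause();		/* short interval: spin */
	} else {
		example_block_until(deadline);	/* hypothetical blocking wait */
	}
#endif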
/*
 *	This is called from the machine-independent layer
 *	to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
	return;
}

/*
 *	This is called from the machine-independent layer
 *	to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	i386_deactivate_cpu();
}
/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
	return (current_thread_fast());
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
	return (current_thread_fast());
}

boolean_t ml_is64bit(void) {
	return (cpu_mode_is64bit());
}

boolean_t ml_thread_is64bit(thread_t thread) {
	return (thread_is_64bit(thread));
}

boolean_t ml_state_is64bit(void *saved_state) {
	return is_saved_state64(saved_state);
}
void ml_cpu_set_ldt(int selector)
{
	/*
	 * Avoid loading the LDT
	 * if we're setting the KERNEL LDT and it's already set.
	 */
	if (selector == KERNEL_LDT &&
	    current_cpu_datap()->cpu_ldt == KERNEL_LDT)
		return;

	lldt(selector);
	current_cpu_datap()->cpu_ldt = selector;
}

void ml_fp_setvalid(boolean_t value)
{
	fp_setvalid(value);
}

uint64_t ml_cpu_int_event_time(void)
{
	return current_cpu_datap()->cpu_int_event_time;
}
vm_offset_t ml_stack_remaining(void)
{
	uintptr_t local = (uintptr_t) &local;

	if (ml_at_interrupt_context() != 0) {
		return (local - (current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE));
	} else {
		return (local - current_thread()->kernel_stack);
	}
}
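
/*
 * Illustrative sketch (not in the original source): a recursive or deeply
 * nested path could use ml_stack_remaining() as a cheap guard before going
 * deeper. The threshold below is hypothetical.
 */
#if 0	/* example only */
	if (ml_stack_remaining() < (PAGE_SIZE / 2)) {
		/* too close to the bottom of the stack; take a shallow path */
	}
#endif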
void
kernel_preempt_check(void)
{
	boolean_t	intr;
	unsigned long	flags;

	assert(get_preemption_level() == 0);

	__asm__ volatile("pushf; pop %0" : "=r" (flags));

	intr = ((flags & EFL_IF) != 0);

	if ((*ast_pending() & AST_URGENT) && intr == TRUE) {
		/*
		 * can handle interrupts and preemptions
		 * at this point
		 */

		/*
		 * now cause the PRE-EMPTION trap
		 */
		__asm__ volatile ("int %0" :: "N" (T_PREEMPT));
	}
}
boolean_t machine_timeout_suspended(void) {
	return (virtualized || pmap_tlb_flush_timeout || spinlock_timed_out ||
	    panic_active() || mp_recent_debugger_activity() || ml_recent_wake());
}
/* Eagerly evaluate all pending timer and thread callouts
 */
void ml_timer_evaluate(void) {
	KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_START, 0, 0, 0, 0, 0);

	uint64_t te_end, te_start = mach_absolute_time();
	simple_lock(&ml_timer_evaluation_slock);
	ml_timer_evaluation_in_progress = TRUE;
	thread_call_delayed_timer_rescan_all();
	mp_cpus_call(CPUMASK_ALL, ASYNC, timer_queue_expire_rescan, NULL);
	ml_timer_evaluation_in_progress = FALSE;
	ml_timer_eager_evaluations++;
	te_end = mach_absolute_time();
	ml_timer_eager_evaluation_max = MAX(ml_timer_eager_evaluation_max, (te_end - te_start));
	simple_unlock(&ml_timer_evaluation_slock);

	KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_END, 0, 0, 0, 0, 0);
}

boolean_t
ml_timer_forced_evaluation(void) {
	return ml_timer_evaluation_in_progress;
}
/* 32-bit right-rotate n bits */
static inline uint32_t ror32(uint32_t val, const unsigned int n)
{
	__asm__ volatile("rorl %%cl,%0" : "=r" (val) : "0" (val), "c" (n));
	return val;
}
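
/*
 * Illustrative note (not in the original source): ror32(v, n) rotates the
 * 32-bit value right, wrapping the low bits into the high bits, e.g.
 * ror32(0x00000001, 1) == 0x80000000 and ror32(0xABCD1234, 8) == 0x34ABCD12.
 */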
void
ml_entropy_collect(void)
{
	uint32_t	tsc_lo, tsc_hi;
	uint32_t	*ep;

	assert(cpu_number() == master_cpu);

	/* update buffer pointer cyclically */
	if (EntropyData.index_ptr - EntropyData.buffer == ENTROPY_BUFFER_SIZE)
		ep = EntropyData.index_ptr = EntropyData.buffer;
	else
		ep = EntropyData.index_ptr++;

	rdtsc_nofence(tsc_lo, tsc_hi);
	*ep = ror32(*ep, 9) ^ tsc_lo;
}
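
/*
 * Illustrative note (not in the original source): each call mixes the low
 * 32 bits of the TSC into one slot of the entropy ring buffer by rotating the
 * previous contents 9 bits and XORing in the fresh timestamp, so the buffer
 * accumulates timing jitter across many calls rather than being overwritten
 * each time.
 */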
void
ml_gpu_stat_update(uint64_t gpu_ns_delta) {
	current_thread()->machine.thread_gpu_ns += gpu_ns_delta;
}

uint64_t
ml_gpu_stat(thread_t t) {
	return t->machine.thread_gpu_ns;
}