/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/io_map_entries.h>
#include <ppc/misc_protos.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/new_screen.h>
#include <ppc/proc_reg.h>
#include <ppc/machine_cpu.h>    /* for cpu_signal_handler() */
#include <ppc/fpu_protos.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/machine.h>

#include <vm/vm_page.h>
unsigned int    LockTimeOut = 1250000000;
unsigned int    MutexSpin = 0;

static int      max_cpus_initialized = 0;

uint32_t        warFlags = 0;
#define warDisMBpoff    0x80000000

#define MAX_CPUS_SET    0x01
#define MAX_CPUS_WAIT   0x02
decl_simple_lock_data(, spsLock);
unsigned int    spsLockInit = 0;
extern unsigned int hwllckPatch_isync;
extern unsigned int hwulckPatch_isync;
extern unsigned int hwulckbPatch_isync;
extern unsigned int hwlmlckPatch_isync;
extern unsigned int hwltlckPatch_isync;
extern unsigned int hwcsatomicPatch_isync;
extern unsigned int mlckePatch_isync;
extern unsigned int mlckPatch_isync;
extern unsigned int mltelckPatch_isync;
extern unsigned int mltlckPatch_isync;
extern unsigned int mulckePatch_isync;
extern unsigned int mulckPatch_isync;
extern unsigned int slckPatch_isync;
extern unsigned int stlckPatch_isync;
extern unsigned int sulckPatch_isync;
extern unsigned int rwlePatch_isync;
extern unsigned int rwlsPatch_isync;
extern unsigned int rwlsePatch_isync;
extern unsigned int rwlesPatch_isync;
extern unsigned int rwtlePatch_isync;
extern unsigned int rwtlsPatch_isync;
extern unsigned int rwldPatch_isync;
extern unsigned int hwulckPatch_eieio;
extern unsigned int mulckPatch_eieio;
extern unsigned int mulckePatch_eieio;
extern unsigned int sulckPatch_eieio;
extern unsigned int rwlesPatch_eieio;
extern unsigned int rwldPatch_eieio;
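/*
 * struct patch_up pairs the address of a patchable instruction in the lock
 * primitives with the instruction word to store there.  The layout here is
 * the one implied by the table initializers that follow and by the
 * NULL-terminated scan in ml_init_max_cpus(): an address member (addr) and
 * a data word (data).
 */
struct patch_up {
    uint32_t    *addr;
    uint32_t    data;
};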
typedef struct patch_up patch_up_t;

patch_up_t patch_up_table[] = {
    {&hwllckPatch_isync,        0x60000000},
    {&hwulckPatch_isync,        0x60000000},
    {&hwulckbPatch_isync,       0x60000000},
    {&hwlmlckPatch_isync,       0x60000000},
    {&hwltlckPatch_isync,       0x60000000},
    {&hwcsatomicPatch_isync,    0x60000000},
    {&mlckePatch_isync,         0x60000000},
    {&mlckPatch_isync,          0x60000000},
    {&mltelckPatch_isync,       0x60000000},
    {&mltlckPatch_isync,        0x60000000},
    {&mulckePatch_isync,        0x60000000},
    {&mulckPatch_isync,         0x60000000},
    {&slckPatch_isync,          0x60000000},
    {&stlckPatch_isync,         0x60000000},
    {&sulckPatch_isync,         0x60000000},
    {&rwlePatch_isync,          0x60000000},
    {&rwlsPatch_isync,          0x60000000},
    {&rwlsePatch_isync,         0x60000000},
    {&rwlesPatch_isync,         0x60000000},
    {&rwtlePatch_isync,         0x60000000},
    {&rwtlsPatch_isync,         0x60000000},
    {&rwldPatch_isync,          0x60000000},
    {&hwulckPatch_eieio,        0x60000000},
    {&hwulckPatch_eieio,        0x60000000},
    {&mulckPatch_eieio,         0x60000000},
    {&mulckePatch_eieio,        0x60000000},
    {&sulckPatch_eieio,         0x60000000},
    {&rwlesPatch_eieio,         0x60000000},
    {&rwldPatch_eieio,          0x60000000},
    {NULL,                      0x00000000}
};
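/*
 * Each entry's data word, 0x60000000, is the PowerPC nop encoding
 * (ori 0,0,0).  ml_init_max_cpus() below walks this table when only one
 * logical CPU is configured and overwrites the isync/eieio barriers in the
 * lock paths with nops, since those barriers are not needed on a
 * uniprocessor.
 */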
extern boolean_t pmap_initialized;

/* Map memory map IO space */
vm_offset_t
ml_io_map(
    vm_offset_t phys_addr,
    vm_size_t size)
{
    return(io_map(phys_addr, size, VM_WIMG_IO));
}
void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
/*
 *  Routine:    ml_static_malloc
 *  Function:   static memory allocation
 */
vm_offset_t
ml_static_malloc(
    vm_size_t size)
{
    vm_offset_t vaddr;

    if (pmap_initialized)
        return((vm_offset_t)NULL);

    vaddr = static_memory_end;
    static_memory_end = round_page(vaddr + size);

    return(vaddr);
}
/*
 *  Routine:    ml_static_ptovirt
 */
vm_offset_t
ml_static_ptovirt(
    vm_offset_t paddr)
{
    vm_offset_t vaddr;

    /* Static memory is mapped V=R */
    vaddr = paddr;
    if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr) == paddr) )
        return(vaddr);

    return((vm_offset_t)NULL);
}
/*
 *  Routine:    ml_static_mfree
 *  Function:   return wired, statically mapped pages to the VM free list
 */
void
ml_static_mfree(
    vm_offset_t vaddr,
    vm_size_t size)
{
    vm_offset_t paddr_cur, vaddr_cur;

    for (vaddr_cur = round_page_32(vaddr);
         vaddr_cur < trunc_page_32(vaddr + size);
         vaddr_cur += PAGE_SIZE) {
        paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
        if (paddr_cur != (vm_offset_t)NULL) {
            vm_page_wire_count--;
            pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur + PAGE_SIZE));
            /* vm_page_create() takes 4K page frame indices, hence the shifts by 12 */
            vm_page_create(paddr_cur >> 12, (paddr_cur + PAGE_SIZE) >> 12);
        }
    }
}
/*
 *  Routine:    ml_vtophys
 *  Function:   virtual to physical on static pages
 */
vm_offset_t ml_vtophys(
    vm_offset_t vaddr)
{
    return(pmap_extract(kernel_pmap, vaddr));
}
/*
 *  Routine:    ml_install_interrupt_handler
 *  Function:   Initialize Interrupt Handler
 */
void ml_install_interrupt_handler(
    void *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon)
{
    struct per_proc_info    *proc_info;
    boolean_t               current_state;

    current_state = ml_get_interrupts_enabled();
    proc_info = getPerProc();

    proc_info->interrupt_nub     = nub;
    proc_info->interrupt_source  = source;
    proc_info->interrupt_target  = target;
    proc_info->interrupt_handler = handler;
    proc_info->interrupt_refCon  = refCon;

    proc_info->interrupts_enabled = TRUE;
    (void) ml_set_interrupts_enabled(current_state);

    initialize_screen(NULL, kPEAcquireScreen);
}
/*
 *  Routine:    ml_nofault_copy
 *  Function:   Perform a physical mode copy if the source and
 *              destination have valid translations in the kernel pmap.
 *              If translations are present, they are assumed to
 *              be wired; i.e. no attempt is made to guarantee that the
 *              translations obtained remain valid for
 *              the duration of their use.
 */
vm_size_t ml_nofault_copy(
    vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
    addr64_t cur_phys_dst, cur_phys_src;
    uint32_t count, pindex, nbytes = 0;

    while (size > 0) {
        if (!(cur_phys_src = kvtophys(virtsrc)))
            break;
        if (!(cur_phys_dst = kvtophys(virtdst)))
            break;
        if (!mapping_phys_lookup((cur_phys_src >> 12), &pindex) ||
            !mapping_phys_lookup((cur_phys_dst >> 12), &pindex))
            break;
        /* Copy no more than the remainder of the smaller of the two pages */
        count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
        if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
            count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
        if (count > size)
            count = size;

        bcopy_phys(cur_phys_src, cur_phys_dst, count);

        nbytes += count;
        virtsrc += count;
        virtdst += count;
        size -= count;
    }

    return nbytes;
}
/*
 *  Routine:    ml_init_interrupt
 *  Function:   Initialize Interrupts
 */
void ml_init_interrupt(void)
{
    boolean_t current_state;

    current_state = ml_get_interrupts_enabled();

    getPerProc()->interrupts_enabled = TRUE;
    (void) ml_set_interrupts_enabled(current_state);
}
/*
 *  Routine:    ml_get_interrupts_enabled
 *  Function:   Get Interrupts Enabled
 */
boolean_t ml_get_interrupts_enabled(void)
{
    /* MSR[EE] is the external-interrupt enable bit */
    return((mfmsr() & MASK(MSR_EE)) != 0);
}
/*
 *  Routine:    ml_at_interrupt_context
 *  Function:   Check if running at interrupt context
 */
boolean_t ml_at_interrupt_context(void)
{
    boolean_t   ret;
    boolean_t   current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    /* istackptr is cleared while the per-cpu interrupt stack is in use */
    ret = (getPerProc()->istackptr == 0);
    ml_set_interrupts_enabled(current_state);
    return(ret);
}
/*
 *  Routine:    ml_cause_interrupt
 *  Function:   Generate a fake interrupt
 */
void ml_cause_interrupt(void)
/*
 *  Routine:    ml_thread_policy
 */
void ml_thread_policy(
    thread_t thread,
    __unused unsigned policy_id,
    unsigned policy_info)
{
    if (policy_info & MACHINE_NETWORK_WORKLOOP) {
        spl_t s = splsched();

        thread_lock(thread);

        set_priority(thread, thread->priority + 1);

        thread_unlock(thread);
        splx(s);
    }
}
/*
 *  Routine:    machine_signal_idle
 */
void
machine_signal_idle(
    processor_t processor)
{
    struct per_proc_info    *proc_info;

    proc_info = PROCESSOR_TO_PER_PROC(processor);

    /* Wake the target only if it may be dozing or napping */
    if (proc_info->pf.Available & (pfCanDoze|pfWillNap))
        (void)cpu_signal(proc_info->cpu_number, SIGPwake, 0, 0);
}
/*
 *  Routine:    ml_processor_register
 */
kern_return_t
ml_processor_register(
    ml_processor_info_t     *in_processor_info,
    processor_t             *processor_out,
    ipi_handler_t           *ipi_handler)
{
    struct per_proc_info    *proc_info;
    int                     donap;
    boolean_t               current_state;
    boolean_t               boot_processor;

    if (in_processor_info->boot_cpu == FALSE) {
        if (spsLockInit == 0) {
            spsLockInit = 1;
            simple_lock_init(&spsLock, 0);
        }
        boot_processor = FALSE;
        proc_info = cpu_per_proc_alloc();
        if (proc_info == (struct per_proc_info *)NULL)
            return KERN_FAILURE;
        proc_info->pp_cbfr = console_per_proc_alloc(FALSE);
        if (proc_info->pp_cbfr == (void *)NULL)
            goto processor_register_error;
    } else {
        boot_processor = TRUE;
        proc_info = PerProcTable[master_cpu].ppe_vaddr;
    }

    proc_info->pp_chud = chudxnu_per_proc_alloc(boot_processor);
    if (proc_info->pp_chud == (void *)NULL)
        goto processor_register_error;

    if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)
        goto processor_register_error;

    proc_info->cpu_id = in_processor_info->cpu_id;
    proc_info->start_paddr = in_processor_info->start_paddr;
    if(in_processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
        proc_info->time_base_enable = in_processor_info->time_base_enable;
    else
        proc_info->time_base_enable = (void(*)(cpu_id_t, boolean_t))NULL;

    if((proc_info->pf.pfPowerModes & pmType) == pmPowerTune) {
        proc_info->pf.pfPowerTune0 = in_processor_info->power_mode_0;
        proc_info->pf.pfPowerTune1 = in_processor_info->power_mode_1;
    }

    donap = in_processor_info->supports_nap;    /* Assume we use requested nap */
    if(forcenap) donap = forcenap - 1;          /* If there was an override, use that */
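    /*
     * forcenap is a global override: 0 means no override, so the requested
     * supports_nap value is used; a non-zero value n forces nap to (n - 1),
     * i.e. 1 forces nap off and 2 forces it on.
     */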
    if((proc_info->pf.Available & pfCanNap)
       && (donap)) {
        proc_info->pf.Available |= pfWillNap;
        current_state = ml_set_interrupts_enabled(FALSE);
        if(proc_info == getPerProc())
            __asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));  /* Set live value */
        (void) ml_set_interrupts_enabled(current_state);
    }

    if (!boot_processor) {
        (void)hw_atomic_add(&saveanchor.savetarget, FreeListMin);  /* saveareas for this processor */
        processor_init((struct processor *)proc_info->processor,
                       proc_info->cpu_number, processor_pset(master_processor));
    }

    *processor_out = (struct processor *)proc_info->processor;
    *ipi_handler = cpu_signal_handler;

    return KERN_SUCCESS;

processor_register_error:
    if (proc_info->pp_cbfr != (void *)NULL)
        console_per_proc_free(proc_info->pp_cbfr);
    if (proc_info->pp_chud != (void *)NULL)
        chudxnu_per_proc_free(proc_info->pp_chud);
    if (!boot_processor)
        cpu_per_proc_free(proc_info);
    return KERN_FAILURE;
}
/*
 *  Routine:    ml_enable_nap
 *  Function:   enable or disable processor nap; returns the previous setting
 */
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
    struct per_proc_info    *proc_info;
    boolean_t               prev_value;
    boolean_t               current_state;

    proc_info = PerProcTable[target_cpu].ppe_vaddr;

    prev_value = (proc_info->pf.Available & pfCanNap) && (proc_info->pf.Available & pfWillNap);

    if(forcenap) nap_enabled = forcenap - 1;    /* If we are to force nap on or off, do it */

    if(proc_info->pf.Available & pfCanNap) {    /* Can the processor nap? */
        if (nap_enabled) proc_info->pf.Available |= pfWillNap;  /* Nap requested: set the will-nap bit */
        else proc_info->pf.Available &= ~pfWillNap;             /* Clear it if not */
    }

    current_state = ml_set_interrupts_enabled(FALSE);
    if(proc_info == getPerProc())
        __asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));  /* Set live value */
    (void) ml_set_interrupts_enabled(current_state);

    return (prev_value);
}
/*
 *  Routine:    ml_init_max_cpus
 */
void
ml_init_max_cpus(unsigned int max_cpus)
{
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    if (max_cpus_initialized != MAX_CPUS_SET) {
        if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
            /*
             * Note: max_ncpus is the maximum number
             * that the kernel supports or that the "cpus="
             * boot-arg has set. Here we take the minimum.
             */
            machine_info.max_cpus = MIN(max_cpus, max_ncpus);
            machine_info.physical_cpu_max = max_cpus;
            machine_info.logical_cpu_max = max_cpus;
        }
        if (max_cpus_initialized == MAX_CPUS_WAIT)
            wakeup((event_t)&max_cpus_initialized);
        max_cpus_initialized = MAX_CPUS_SET;
    }

    if (machine_info.logical_cpu_max == 1) {
        struct patch_up *patch_up_ptr = &patch_up_table[0];

        while (patch_up_ptr->addr != NULL) {
            /*
             * Patch for V=R kernel text section
             */
            bcopy_phys((addr64_t)((unsigned int)(&patch_up_ptr->data)),
                       (addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
            /* Flush the patched word so the new instruction is fetched */
            sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
            patch_up_ptr++;
        }
    }

    (void) ml_set_interrupts_enabled(current_state);
}
/*
 *  Routine:    ml_get_max_cpus
 */
unsigned int
ml_get_max_cpus(void)
{
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    if (max_cpus_initialized != MAX_CPUS_SET) {
        max_cpus_initialized = MAX_CPUS_WAIT;
        assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
        (void)thread_block(THREAD_CONTINUE_NULL);
    }
    (void) ml_set_interrupts_enabled(current_state);
    return(machine_info.max_cpus);
}
/*
 *  This is called from the machine-independent routine cpu_up()
 *  to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
    (void)hw_atomic_add(&machine_info.physical_cpu, 1);
    (void)hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 *  This is called from the machine-independent routine cpu_down()
 *  to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
    (void)hw_atomic_sub(&machine_info.physical_cpu, 1);
    (void)hw_atomic_sub(&machine_info.logical_cpu, 1);
}
/*
 *  Routine:    ml_cpu_get_info
 */
void
ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info)
{
    struct per_proc_info    *proc_info;

    if (ml_cpu_info == 0) return;

    proc_info = PerProcTable[master_cpu].ppe_vaddr;
    ml_cpu_info->vector_unit = (proc_info->pf.Available & pfAltivec) != 0;
    ml_cpu_info->cache_line_size = proc_info->pf.lineSize;
    ml_cpu_info->l1_icache_size = proc_info->pf.l1iSize;
    ml_cpu_info->l1_dcache_size = proc_info->pf.l1dSize;

    if (proc_info->pf.Available & pfL2) {
        ml_cpu_info->l2_settings = proc_info->pf.l2cr;
        ml_cpu_info->l2_cache_size = proc_info->pf.l2Size;
    } else {
        ml_cpu_info->l2_settings = 0;
        ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
    }
    if (proc_info->pf.Available & pfL3) {
        ml_cpu_info->l3_settings = proc_info->pf.l3cr;
        ml_cpu_info->l3_cache_size = proc_info->pf.l3Size;
    } else {
        ml_cpu_info->l3_settings = 0;
        ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
    }
}
/*
 *  Routine:    ml_enable_cache_level
 */
#define l2em 0x80000000
#define l3em 0x80000000

int
ml_enable_cache_level(int cache_level, int enable)
{
    boolean_t old_mode;
    unsigned long available, ccr;
    struct per_proc_info    *proc_info;

    if (real_ncpus != 1) return -1;    /* XXX: This test is not safe */

    proc_info = PerProcTable[master_cpu].ppe_vaddr;
    available = proc_info->pf.Available;

    if ((cache_level == 2) && (available & pfL2)) {
        ccr = proc_info->pf.l2cr;
        old_mode = (ccr & l2em) ? TRUE : FALSE;
        if (old_mode != enable) {
            if (enable) ccr = proc_info->pf.l2crOriginal;
            else ccr = 0;
            proc_info->pf.l2cr = ccr;
            cacheInit();
        }

        return old_mode;
    }

    if ((cache_level == 3) && (available & pfL3)) {
        ccr = proc_info->pf.l3cr;
        old_mode = (ccr & l3em) ? TRUE : FALSE;
        if (old_mode != enable) {
            if (enable) ccr = proc_info->pf.l3crOriginal;
            else ccr = 0;
            proc_info->pf.l3cr = ccr;
            cacheInit();
        }

        return old_mode;
    }

    return -1;
}
/*
 *  Routine:    ml_set_processor_speed
 */
void
ml_set_processor_speed(unsigned long speed)
{
    struct per_proc_info    *proc_info;
    unsigned int            cpu;
    kern_return_t           result;
    boolean_t               current_state;
    unsigned int            i;

    proc_info = PerProcTable[master_cpu].ppe_vaddr;

    switch (proc_info->pf.pfPowerModes & pmType) {    /* Figure specific type */
        case pmDualPLL:

            ml_set_processor_speed_dpll(speed);
            break;

        case pmDFS:

            for (cpu = 0; cpu < real_ncpus; cpu++) {
                /*
                 * cpu_signal() returns after .5ms if it fails to signal a running cpu
                 * retry cpu_signal() for .1s to deal with long interrupt latency at boot
                 */
                for (i = 200; i > 0; i--) {
                    current_state = ml_set_interrupts_enabled(FALSE);
                    if (cpu != (unsigned)cpu_number()) {
                        if (PerProcTable[cpu].ppe_vaddr->cpu_flags & SignalReady)
                            /*
                             * Target cpu is off-line, skip
                             */
                            result = KERN_SUCCESS;
                        else {
                            simple_lock(&spsLock);
                            result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
                            if (result == KERN_SUCCESS)
                                thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
                            simple_unlock(&spsLock);
                        }
                    } else {
                        ml_set_processor_speed_dfs(speed);
                        result = KERN_SUCCESS;
                    }
                    (void) ml_set_interrupts_enabled(current_state);
                    if (result == KERN_SUCCESS)
                        break;
                }
                if (result != KERN_SUCCESS)
                    panic("ml_set_processor_speed(): Fail to set cpu%d speed\n", cpu);
            }
            break;

        case pmPowerTune:

            ml_set_processor_speed_powertune(speed);
            break;

        default:
            break;
    }
}
/*
 *  Routine:    ml_set_processor_speed_slave
 */
void
ml_set_processor_speed_slave(unsigned long speed)
{
    ml_set_processor_speed_dfs(speed);

    simple_lock(&spsLock);
    thread_wakeup(&spsLock);
    simple_unlock(&spsLock);
}
/*
 *  Routine:    ml_init_lock_timeout
 */
void
ml_init_lock_timeout(void)
{
    uint64_t    abstime;
    uint32_t    mtxspin;

    nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &abstime);
    LockTimeOut = (unsigned int)abstime;

    if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
        if (mtxspin > USEC_PER_SEC >> 4)
            mtxspin = USEC_PER_SEC >> 4;
        nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
    } else {
        nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
    }
    MutexSpin = (unsigned int)abstime;
}
/*
 *  Routine:    init_ast_check
 */
void
init_ast_check(
    __unused processor_t processor)
{
}
/*
 *  Routine:    cause_ast_check
 */
void
cause_ast_check(
    processor_t processor)
{
    struct per_proc_info    *proc_info;

    proc_info = PROCESSOR_TO_PER_PROC(processor);

    if (proc_info != getPerProc()
        && proc_info->interrupts_enabled == TRUE)
        cpu_signal(proc_info->cpu_number, SIGPast, (unsigned int)NULL, (unsigned int)NULL);
}
/*
 *  Routine:    machine_processor_shutdown
 */
thread_t
machine_processor_shutdown(
    __unused thread_t thread,
    __unused void (*doshutdown)(processor_t),
    __unused processor_t processor)
{
    return((thread_t)(getPerProc()->old_thread));
}
void ml_mem_backoff(void) {

    if(warFlags & warDisMBpoff) return;    /* If backoff disabled, exit */

    __asm__ volatile("sync");     /* Wait for all prior storage accesses to complete */
    __asm__ volatile("isync");    /* Discard any prefetched instructions */
}
/*
 * Stubs for CPU Stepper
 */
void
machine_run_count(__unused uint32_t count)
{
}

boolean_t
machine_cpu_is_inactive(__unused int num)
{
    return(FALSE);
}
vm_offset_t
ml_stack_remaining(void)
{
    /* The address of a local variable approximates the current stack pointer */
    uintptr_t local = (uintptr_t) &local;

    if (ml_at_interrupt_context()) {
        return (local - (getPerProc()->intstack_top_ss - INTSTACK_SIZE));
    } else {
        return (local - current_thread()->kernel_stack);
    }
}