 * Copyright (c) 2007-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <sys/errno.h>
	.globl	EXT(machine_set_current_thread)
LEXT(machine_set_current_thread)
	ldr	r1, [r0, ACT_CPUDATAP]		// Get the thread's cpu data
	str	r0, [r1, CPU_ACTIVE_THREAD]	// Publish as the active thread
	mcr	p15, 0, r0, c13, c0, 4		// Write TPIDRPRW
	ldr	r1, [r0, TH_CTH_SELF]
	mrc	p15, 0, r2, c13, c0, 3		// Read TPIDRURO
	and	r2, r2, #3			// Extract cpu number
	mcr	p15, 0, r1, c13, c0, 3		// Write TPIDRURO
	mcr	p15, 0, r1, c13, c0, 2		// Write TPIDRURW
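
/*
 * Illustrative sketch (not assembled): how the TPIDR writes above are
 * consumed. TPIDRPRW holds the kernel's current-thread pointer, while
 * TPIDRURO/TPIDRURW carry the user-visible thread handle (with the cpu
 * number folded into TPIDRURO's low bits). The C helper below is an
 * assumption for illustration, not a declaration from this file.
 *
 *	static inline struct thread *
 *	current_thread_sketch(void)
 *	{
 *		struct thread *th;
 *		// Read TPIDRPRW, the slot machine_set_current_thread() writes.
 *		__asm__ volatile("mrc p15, 0, %0, c13, c0, 4" : "=r"(th));
 *		return th;
 *	}
 */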
 * void machine_idle(void)
	.globl	EXT(machine_idle)
	cpsid	if				// Disable FIQ IRQ
	cpsie	if				// Enable FIQ IRQ
 * void cpu_idle_wfi(boolean_t wfi_fast):
 *
 * cpu_idle is the only function that should call this.
	.globl	EXT(cpu_idle_wfi)
 * We export the address of the WFI instruction so that it can be patched; this will be
 * ugly from a debugging perspective.
#if (__ARM_ARCH__ >= 7)
	mcr	p15, 0, r0, c7, c10, 4		// CP15 DSB encoding (pre-ARMv7 form)
	mcr	p15, 0, r0, c7, c0, 4		// CP15 WFI encoding (pre-ARMv7 form)
	.globl	EXT(timer_grab)
	ldr	r2, [r0, TIMER_HIGH]		// Load the high word
	ldr	r3, [r0, TIMER_LOW]		// Load the low word
	ldr	r1, [r0, TIMER_HIGHCHK]		// Reload the high-word check value

	.globl	EXT(timer_advance_internal_32)
LEXT(timer_advance_internal_32)
	str	r1, [r0, TIMER_HIGHCHK]		// Update the check word first
	str	r2, [r0, TIMER_LOW]		// ...then the low word
	str	r1, [r0, TIMER_HIGH]		// ...then the high word
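
/*
 * Illustrative sketch (not assembled): the lock-free protocol the two
 * timer routines above implement with 32-bit loads and stores. The writer
 * (timer_advance_internal_32) stores HIGHCHK, then LOW, then HIGH; the
 * reader (timer_grab) retries until HIGH matches HIGHCHK, so a torn read
 * is always detected. Struct and helper names are assumed for illustration.
 *
 *	struct timer_sketch { uint32_t low, high, highchk; };
 *
 *	static uint64_t
 *	timer_grab_sketch(volatile struct timer_sketch *t)
 *	{
 *		uint32_t hi, lo;
 *		do {
 *			hi = t->high;			// TIMER_HIGH
 *			lo = t->low;			// TIMER_LOW
 *		} while (hi != t->highchk);		// retry on a torn read
 *		return ((uint64_t)hi << 32) | lo;
 *	}
 */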
	.globl	EXT(get_vfp_enabled)
LEXT(get_vfp_enabled)
	and	r1, r0, #FPEXC_EN		// Extract previous VFP enable state
	mov	r0, r1, LSR #FPEXC_EN_BIT	// Return 1 if enabled, 0 if disabled
	mov	r0, #0				// Return FALSE
/* This is no longer useful (but is exported, so removing it may require kext cleanup). */
	.globl	EXT(enable_kernel_vfp_context)
LEXT(enable_kernel_vfp_context)
/* uint32_t get_fpscr(void):
 * Returns the current state of the FPSCR register.
 */
	.globl	EXT(get_fpscr)
	.globl	EXT(set_fpscr)
/* void set_fpscr(uint32_t value):
 * Set the FPSCR register.
 */
 * void OSSynchronizeIO(void)
	.globl	EXT(OSSynchronizeIO)
LEXT(OSSynchronizeIO)
	.macro	SYNC_TLB_FLUSH
 * void sync_tlb_flush
 *
 * Synchronize one or more prior TLB flush operations
	.globl	EXT(sync_tlb_flush)
	mcr	p15, 0, r0, c8, c3, 0		// Invalidate Inner Shareable entire TLBs
	mcr	p15, 0, r0, c8, c7, 0		// Invalidate entire TLB
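
/*
 * Illustrative sketch (not assembled): the intended pairing of the _async
 * flush entry points below with sync_tlb_flush(). The async variants only
 * post the invalidate; sync_tlb_flush() provides the barrier that
 * guarantees completion before translations are relied upon again.
 *
 *	// Hypothetical caller, names as exported below:
 *	flush_mmu_tlb_async();		// post the invalidate(s)
 *	// ...optionally batch further invalidates...
 *	sync_tlb_flush();		// wait for all of them to complete
 */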
 * void flush_mmu_tlb_async(void)
 *
 * Flush all TLBs, don't wait for completion
	.globl	EXT(flush_mmu_tlb_async)
LEXT(flush_mmu_tlb_async)
 * void flush_mmu_tlb(void)
	.globl	EXT(flush_mmu_tlb)
	.macro	FLUSH_CORE_TLB
	mcr	p15, 0, r0, c8, c7, 0		// Invalidate entire TLB
 * void flush_core_tlb_async(void)
 *
 * Flush local core's TLB, don't wait for completion
	.globl	EXT(flush_core_tlb_async)
LEXT(flush_core_tlb_async)
 * void flush_core_tlb(void)
 *
 * Flush local core's TLB
	.globl	EXT(flush_core_tlb)
	.macro	FLUSH_MMU_TLB_ENTRY
	mcr	p15, 0, r0, c8, c3, 1		// Invalidate TLB Inner Shareable entry
	mcr	p15, 0, r0, c8, c7, 1		// Invalidate TLB entry
 * void flush_mmu_tlb_entry_async(uint32_t)
 *
 * Flush TLB entry, don't wait for completion
	.globl	EXT(flush_mmu_tlb_entry_async)
LEXT(flush_mmu_tlb_entry_async)
 * void flush_mmu_tlb_entry(uint32_t)
	.globl	EXT(flush_mmu_tlb_entry)
LEXT(flush_mmu_tlb_entry)
	.macro	FLUSH_MMU_TLB_ENTRIES
	mcr	p15, 0, r0, c8, c3, 1		// Invalidate TLB Inner Shareable entry
	mcr	p15, 0, r0, c8, c7, 1		// Invalidate TLB entry
	add	r0, r0, ARM_PGBYTES		// Increment to the next page
	cmp	r0, r1				// Loop if current address < end address
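
/*
 * Illustrative sketch (not assembled): C equivalent of the
 * FLUSH_MMU_TLB_ENTRIES loop above, which walks [start, end) one page at
 * a time and posts a per-page invalidate. The helper name is an
 * assumption for illustration.
 *
 *	static void
 *	flush_range_sketch(uintptr_t start, uintptr_t end)
 *	{
 *		for (uintptr_t va = start; va < end; va += ARM_PGBYTES)
 *			tlbi_mva(va);	// per-page invalidate, as in the macro
 *	}
 */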
 * void flush_mmu_tlb_entries_async(uint32_t, uint32_t)
 *
 * Flush TLB entries for address range, don't wait for completion
	.globl	EXT(flush_mmu_tlb_entries_async)
LEXT(flush_mmu_tlb_entries_async)
	FLUSH_MMU_TLB_ENTRIES
 * void flush_mmu_tlb_entries(uint32_t, uint32_t)
 *
 * Flush TLB entries for address range
	.globl	EXT(flush_mmu_tlb_entries)
LEXT(flush_mmu_tlb_entries)
	FLUSH_MMU_TLB_ENTRIES
	.macro	FLUSH_MMU_TLB_MVA_ENTRIES
	mcr	p15, 0, r0, c8, c3, 3		// Invalidate TLB Inner Shareable entries by mva
	mcr	p15, 0, r0, c8, c7, 3		// Invalidate TLB entries by mva
 * void flush_mmu_tlb_mva_entries_async(uint32_t)
 *
 * Flush TLB entries for mva, don't wait for completion
	.globl	EXT(flush_mmu_tlb_mva_entries_async)
LEXT(flush_mmu_tlb_mva_entries_async)
	FLUSH_MMU_TLB_MVA_ENTRIES
 * void flush_mmu_tlb_mva_entries(uint32_t)
 *
 * Flush TLB entries for mva
	.globl	EXT(flush_mmu_tlb_mva_entries)
LEXT(flush_mmu_tlb_mva_entries)
	FLUSH_MMU_TLB_MVA_ENTRIES
	.macro	FLUSH_MMU_TLB_ASID
	mcr	p15, 0, r0, c8, c3, 2		// Invalidate TLB Inner Shareable entries by asid
	mcr	p15, 0, r0, c8, c7, 2		// Invalidate TLB entries by asid
 * void flush_mmu_tlb_asid_async(uint32_t)
 *
 * Flush TLB entries for asid, don't wait for completion
	.globl	EXT(flush_mmu_tlb_asid_async)
LEXT(flush_mmu_tlb_asid_async)
 * void flush_mmu_tlb_asid(uint32_t)
 *
 * Flush TLB entries for asid
	.globl	EXT(flush_mmu_tlb_asid)
LEXT(flush_mmu_tlb_asid)
	.macro	FLUSH_CORE_TLB_ASID
	mcr	p15, 0, r0, c8, c7, 2		// Invalidate TLB entries by asid
 * void flush_core_tlb_asid_async(uint32_t)
 *
 * Flush local core TLB entries for asid, don't wait for completion
	.globl	EXT(flush_core_tlb_asid_async)
LEXT(flush_core_tlb_asid_async)
 * void flush_core_tlb_asid(uint32_t)
 *
 * Flush local core TLB entries for asid
	.globl	EXT(flush_core_tlb_asid)
LEXT(flush_core_tlb_asid)
 * Set MMU Translation Table Base
	.globl	EXT(set_mmu_ttb)
	orr	r0, r0, #(TTBR_SETUP & 0xFF)	// Setup PTWs memory attribute
	orr	r0, r0, #(TTBR_SETUP & 0xFF00)	// Setup PTWs memory attribute
	mcr	p15, 0, r0, c2, c0, 0		// write r0 to translation table 0
 * Set MMU Translation Table Base Alternate
	.globl	EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
	orr	r0, r0, #(TTBR_SETUP & 0xFF)	// Setup PTWs memory attribute
	orr	r0, r0, #(TTBR_SETUP & 0xFF00)	// Setup PTWs memory attribute
	mcr	p15, 0, r0, c2, c0, 1		// write r0 to translation table 1
 * Get MMU Translation Table Base
	.globl	EXT(get_mmu_ttb)
	mrc	p15, 0, r0, c2, c0, 0		// read translation table 0 into r0
 * get auxiliary control register
	.globl	EXT(get_aux_control)
LEXT(get_aux_control)
	mrc	p15, 0, r0, c1, c0, 1		// read aux control into r0
	bx	lr				// return old bits in r0
 * set auxiliary control register
	.globl	EXT(set_aux_control)
LEXT(set_aux_control)
	mcr	p15, 0, r0, c1, c0, 1		// write r0 back to aux control
 * get MMU control register
	.globl	EXT(get_mmu_control)
LEXT(get_mmu_control)
	mrc	p15, 0, r0, c1, c0, 0		// read mmu control into r0
	bx	lr				// return old bits in r0
 * set MMU control register
	.globl	EXT(set_mmu_control)
LEXT(set_mmu_control)
	mcr	p15, 0, r0, c1, c0, 0		// write r0 back to mmu control
 * MMU kernel virtual to physical address translation
	.globl	EXT(mmu_kvtop)
	mrs	r3, cpsr			// Read cpsr
	cpsid	if				// Disable FIQ IRQ
	mcr	p15, 0, r1, c7, c8, 0		// Write V2PCWPR
	mrc	p15, 0, r0, c7, c4, 0		// Read PAR
	ands	r2, r0, #0x1			// Test conversion aborted
	bne	mmu_kvtophys_fail
	ands	r2, r0, #0x2			// Test for supersection
	mvnne	r2, #0xFF000000			// Supersection: keep low 24 bits
	moveq	r2, #0x000000FF			// Section/page: build 0x00000FFF mask
	orreq	r2, r2, #0x00000F00
	bics	r0, r0, r2			// Clear lower bits
	beq	mmu_kvtophys_fail		// Fail if no PA bits remain
	msr	cpsr, r3			// Restore cpsr
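
/*
 * Illustrative sketch (not assembled): how the PAR value read above is
 * decoded. Bit 0 set means the translation aborted; bit 1 selects the
 * supersection format (24 low attribute/offset bits) over the
 * section/page format (12 bits). The valid PA bits are then spliced with
 * the low bits of the original VA. Names are assumed for illustration.
 *
 *	static uintptr_t
 *	par_to_pa_sketch(uint32_t par, uintptr_t va)
 *	{
 *		if (par & 0x1)
 *			return 0;			// translation aborted
 *		uint32_t mask = (par & 0x2) ? 0x00FFFFFF : 0x00000FFF;
 *		return (par & ~mask) | (va & mask);	// PA base | VA offset
 *	}
 */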
 * MMU user virtual to physical address translation
	.globl	EXT(mmu_uvtop)
	mrs	r3, cpsr			// Read cpsr
	cpsid	if				// Disable FIQ IRQ
	mcr	p15, 0, r1, c7, c8, 2		// Write V2PCWUR
	mrc	p15, 0, r0, c7, c4, 0		// Read PAR
	ands	r2, r0, #0x1			// Test conversion aborted
	bne	mmu_uvtophys_fail
	ands	r2, r0, #0x2			// Test super section
	mvnne	r2, #0xFF000000
	moveq	r2, #0x000000FF
	orreq	r2, r2, #0x00000F00
	bics	r0, r0, r2			// Clear lower bits
	beq	mmu_uvtophys_fail
	msr	cpsr, r3			// Restore cpsr
 * MMU kernel virtual to physical address preflight write access
	.globl	EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
	mrs	r3, cpsr			// Read cpsr
	cpsid	if				// Disable FIQ IRQ
	mcr	p15, 0, r1, c7, c8, 1		// Write V2PCWPW
	mrc	p15, 0, r0, c7, c4, 0		// Read PAR
	ands	r2, r0, #0x1			// Test conversion aborted
	bne	mmu_kvtophys_wpreflight_fail
	ands	r2, r0, #0x2			// Test super section
	mvnne	r2, #0xFF000000
	moveq	r2, #0x000000FF
	orreq	r2, r2, #0x00000F00
	bics	r0, r0, r2			// Clear lower bits
	beq	mmu_kvtophys_wpreflight_fail	// Fail if no PA bits remain
	b	mmu_kvtophys_wpreflight_ret
mmu_kvtophys_wpreflight_fail:
mmu_kvtophys_wpreflight_ret:
	msr	cpsr, r3			// Restore cpsr
 * set context id register
	.globl	EXT(set_context_id)
	mcr	p15, 0, r0, c13, c0, 1		// Write CONTEXTIDR
 * arg0: prefix of the external validator function (copyin or copyout)
 * arg1: 0-based index of highest argument register that must be preserved
	.macro	COPYIO_VALIDATE
	/* call NAME_validate to check the arguments */
	push	{r0-r$1, r7, lr}
	add	r7, sp, #(($1 + 1) * 4)
	addne	sp, #(($1 + 1) * 4)
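
/*
 * Illustrative sketch (not assembled): what one COPYIO_VALIDATE expansion
 * looks like. "COPYIO_VALIDATE copyin, 2" preserves r0-r2 (arg1 = 2), r7,
 * and lr, calls the external validator named by arg0 (copyin_validate),
 * and pops the saved arguments on the error path.
 *
 *	//	push	{r0-r2, r7, lr}
 *	//	add	r7, sp, #12
 *	//	bl	EXT(copyin_validate)
 *	//	...
 *	//	addne	sp, #12
 */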
#define COPYIO_SET_RECOVER() \
	/* set recovery address */ ;\
	stmfd	sp!, { r4, r5, r6 } ;\
	adr	r3, copyio_error ;\
	mrc	p15, 0, r12, c13, c0, 4		// Read TPIDRPRW ;\
	ldr	r4, [r12, TH_RECOVER]		// Save the old recovery address ;\
	str	r3, [r12, TH_RECOVER]		// Set the new recovery address
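
/*
 * Illustrative sketch (not assembled): the fault-recovery pattern behind
 * COPYIO_SET_RECOVER()/COPYIO_RESTORE_RECOVER(). The thread's TH_RECOVER
 * slot is pointed at copyio_error for the duration of the user access; a
 * fault on user memory resumes there instead of panicking, and the saved
 * value is restored afterwards. C-level shape, names assumed:
 *
 *	uintptr_t saved = thread->recover;
 *	thread->recover = (uintptr_t)&copyio_error;	// faults land here
 *	// ...touch user memory...
 *	thread->recover = saved;			// restore on exit
 */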
#define COPYIO_TRY_KERNEL() \
	/* if (current_thread()->map->pmap == kernel_pmap) copyio_kernel() */ ;\
	mrc	p15, 0, r12, c13, c0, 4		// Read TPIDRPRW ;\
	ldr	r3, [r12, ACT_MAP] ;\
	ldr	r3, [r3, MAP_PMAP] ;\
	LOAD_ADDR(ip, kernel_pmap_store) ;\

#if __ARM_USER_PROTECT__
#define COPYIO_MAP_USER() \
	/* disable interrupts to prevent expansion to 2GB at L1 ;\
	 * between loading ttep and storing it in ttbr0.*/ ;\
	ldr	r3, [r12, ACT_UPTW_TTB] ;\
	mcr	p15, 0, r3, c2, c0, 0 ;\
	ldr	r3, [r12, ACT_ASID] ;\
	mcr	p15, 0, r3, c13, c0, 1 ;\
#else
#define COPYIO_MAP_USER()
#endif
#define COPYIO_HEADER() ;\
	/* test for zero len */ ;\

	/* if len is less than 16 bytes, just do a simple copy */
	/* test for src and dest of the same word alignment */
	/* 16 bytes at a time */
	ldmia	r0!, { r3, r5, r6, r12 }
	stmia	r1!, { r3, r5, r6, r12 }
	bge	L$0_wordwise_loop
	/* fixup the len and test for completion */
	/* copy 2 bytes at a time */

#if __ARM_USER_PROTECT__
#define COPYIO_UNMAP_USER() \
	mrc	p15, 0, r12, c13, c0, 4		// Read TPIDRPRW ;\
	ldr	r3, [r12, ACT_KPTW_TTB] ;\
	mcr	p15, 0, r3, c2, c0, 0 ;\
	mcr	p15, 0, r3, c13, c0, 1 ;\
#else
#define COPYIO_UNMAP_USER() \
	mrc	p15, 0, r12, c13, c0, 4
#endif

#define COPYIO_RESTORE_RECOVER() \
	/* restore the recovery address */ ;\
	str	r4, [r12, TH_RECOVER] ;\
	ldmfd	sp!, { r4, r5, r6 }
 * const user_addr_t user_addr,
	.globl	EXT(copyinstr)
	moveq	r0, #ENAMETOOLONG
	COPYIO_VALIDATE copyin_user, 3
	stmfd	sp!, { r4, r5, r6 }
	adr	r3, copyinstr_error		// Get address for recover
	mrc	p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr	r4, [r12, TH_RECOVER]		// Save the old recovery address
	str	r3, [r12, TH_RECOVER]		// Set the new recovery address
	mov	r12, #0				// Number of bytes copied so far
	ldrb	r3, [r0], #1			// Load a byte from the source (user)
	strb	r3, [r1], #1			// Store a byte to the destination (kernel)
	cmp	r12, r2				// Room to copy more bytes?
	// Ran out of space in the destination buffer, so return ENAMETOOLONG.
	mov	r3, #ENAMETOOLONG
	// When we get here, we have finished copying the string. We came here from
	// either the "beq copyinstr_done" above, in which case r3 == 0 (which is also
	// the function result for success), or falling through from copyinstr_too_long,
	// in which case r3 == ENAMETOOLONG.
	str	r12, [r6]			// Save the count for actual
	mov	r0, r3				// Return error code from r3
	str	r4, [r12, TH_RECOVER]		// Restore the old recovery address
	ldmfd	sp!, { r4, r5, r6 }
/* set error, exit routine */
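
/*
 * Illustrative sketch (not assembled): the contract the copyinstr body
 * above implements. At most "max" bytes are copied including the
 * terminating NUL; the byte count is reported through "actual"; the
 * return value is 0, ENAMETOOLONG, or the fault error from the recovery
 * path. Parameter names follow the doc comment above.
 *
 *	int copyinstr_sketch(const char *u, char *k, size_t max, size_t *actual)
 *	{
 *		size_t n = 0;
 *		while (n < max) {
 *			char c = u[n];		// may fault -> copyinstr_error
 *			k[n++] = c;
 *			if (c == '\0') { *actual = n; return 0; }
 *		}
 *		*actual = n;
 *		return ENAMETOOLONG;		// destination buffer exhausted
 *	}
 */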
 * int copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
	COPYIO_VALIDATE copyin, 2
	COPYIO_RESTORE_RECOVER()
 * int copyout(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
	COPYIO_VALIDATE copyout, 2
	COPYIO_RESTORE_RECOVER()
 * int copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
	.globl	EXT(copyin_atomic32)
LEXT(copyin_atomic32)
	tst	r0, #3				// Test alignment of user address
	COPYIO_VALIDATE copyin_user, 1
	ldr	r2, [r0]			// Load word from user
	str	r2, [r1]			// Store to kernel_addr
	mov	r0, #0				// Success
	COPYIO_RESTORE_RECOVER()
2:						// misaligned copyin
 * int copyin_atomic32_wait_if_equals(const char *src, uint32_t value)
	.globl	EXT(copyin_atomic32_wait_if_equals)
LEXT(copyin_atomic32_wait_if_equals)
	tst	r0, #3				// Test alignment of user address
	COPYIO_VALIDATE copyio_user, 1		// validate user address (uses r2, r3)
	COPYIO_RESTORE_RECOVER()
2:						// misaligned copyin
 * int copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
	.globl	EXT(copyin_atomic64)
LEXT(copyin_atomic64)
	tst	r0, #7				// Test alignment of user address
	COPYIO_VALIDATE copyin_user, 1
1:						// ldrex/strex retry loop
	ldrexd	r2, r3, [r0]			// Load double word from user
	strexd	r5, r2, r3, [r0]		// (the COPYIO_*() macros make r5 safe to use as a scratch register here)
	stm	r1, {r2, r3}			// Store to kernel_addr
	mov	r0, #0				// Success
	COPYIO_RESTORE_RECOVER()
2:						// misaligned copyin
	str	r4, [r12, TH_RECOVER]
	ldmfd	sp!, { r4, r5, r6 }
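
/*
 * Illustrative sketch (not assembled): the shape of the ldrex/strex retry
 * loop in copyin_atomic64 above. The STREXD writes back the value just
 * loaded, so user memory is unchanged; its success confirms that no other
 * agent touched the doubleword between the two exclusive accesses, which
 * is the usual way to get a reliably atomic 64-bit read on ARMv7.
 * Pseudocode, helper names assumed:
 *
 *	do {
 *		v = load_exclusive_64(uaddr);		// LDREXD
 *	} while (store_exclusive_64(uaddr, v) != 0);	// STREXD, 0 = success
 */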
 * int copyout_atomic32(uint32_t value, user_addr_t user_addr)
	.globl	EXT(copyout_atomic32)
LEXT(copyout_atomic32)
	tst	r1, #3				// Test alignment of user address
	COPYIO_VALIDATE copyio_user, 1		// validate user address (uses r2, r3)
	str	r0, [r1]			// Store word to user
	mov	r0, #0				// Success
	COPYIO_RESTORE_RECOVER()
2:						// misaligned copyout
 * int copyout_atomic64(uint64_t value, user_addr_t user_addr)
	.globl	EXT(copyout_atomic64)
LEXT(copyout_atomic64)
	tst	r2, #7				// Test alignment of user address
	COPYIO_VALIDATE copyio_user, 2		// validate user address (uses r2, r3)
1:						// ldrex/strex retry loop
	strexd	r3, r0, r1, [r2]		// Atomically store double word to user
	mov	r0, #0				// Success
	COPYIO_RESTORE_RECOVER()
2:						// misaligned copyout
 * int copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
	.globl	EXT(copyin_kern)
 * int copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
	.globl	EXT(copyout_kern)
	stmfd	sp!, { r5, r6 }
	COPYIO_BODY copyio_kernel
	ldmfd	sp!, { r5, r6 }
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr)
 *
 * Safely copy eight bytes (the fixed top of an ARM frame) from
 * either user or kernel memory.
	.globl	EXT(copyinframe)
	COPYIO_SET_RECOVER()
 * uint32_t arm_debug_read_dscr(void)
	.globl	EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)
#if __ARM_DEBUG__ >= 6
	mrc	p14, 0, r0, c0, c1		// Read DBGDSCR
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 * Set debug registers to match the current thread state
 * (NULL to disable). Assume 6 breakpoints and 2
 * watchpoints, since that has been the case in all cores
	.globl	EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
#if __ARM_DEBUG__ >= 6
	mrc	p15, 0, r1, c13, c0, 4		// Read TPIDRPRW
	ldr	r2, [r1, ACT_CPUDATAP]		// Get current cpu
	str	r0, [r2, CPU_USER_DEBUG]	// Set current user debug

	// Lock the debug registers
	mcr	p14, 0, ip, c1, c0, 4

	// enable monitor mode (needed to set and use debug registers)
	mrc	p14, 0, ip, c0, c1, 0
	orr	ip, ip, #0x8000			// set MDBGen = 1
#if __ARM_DEBUG__ >= 7
	mcr	p14, 0, ip, c0, c2, 2
#else
	mcr	p14, 0, ip, c0, c1, 0
#endif
	// first turn off all breakpoints/watchpoints
	mcr	p14, 0, r1, c0, c0, 5		// BCR0
	mcr	p14, 0, r1, c0, c1, 5		// BCR1
	mcr	p14, 0, r1, c0, c2, 5		// BCR2
	mcr	p14, 0, r1, c0, c3, 5		// BCR3
	mcr	p14, 0, r1, c0, c4, 5		// BCR4
	mcr	p14, 0, r1, c0, c5, 5		// BCR5
	mcr	p14, 0, r1, c0, c0, 7		// WCR0
	mcr	p14, 0, r1, c0, c1, 7		// WCR1
	// if (debug_state == NULL) disable monitor mode and return;
	biceq	ip, ip, #0x8000			// set MDBGen = 0
#if __ARM_DEBUG__ >= 7
	mcreq	p14, 0, ip, c0, c2, 2
#else
	mcreq	p14, 0, ip, c0, c1, 0
#endif
	ldmia	r0!, {r1, r2, r3, ip}
	mcr	p14, 0, r1, c0, c0, 4		// BVR0
	mcr	p14, 0, r2, c0, c1, 4		// BVR1
	mcr	p14, 0, r3, c0, c2, 4		// BVR2
	mcr	p14, 0, ip, c0, c3, 4		// BVR3
	mcr	p14, 0, r1, c0, c4, 4		// BVR4
	mcr	p14, 0, r2, c0, c5, 4		// BVR5
	add	r0, r0, #40			// advance to bcr[0]
	ldmia	r0!, {r1, r2, r3, ip}
	mcr	p14, 0, r1, c0, c0, 5		// BCR0
	mcr	p14, 0, r2, c0, c1, 5		// BCR1
	mcr	p14, 0, r3, c0, c2, 5		// BCR2
	mcr	p14, 0, ip, c0, c3, 5		// BCR3
	mcr	p14, 0, r1, c0, c4, 5		// BCR4
	mcr	p14, 0, r2, c0, c5, 5		// BCR5
	add	r0, r0, #40			// advance to wvr[0]
	mcr	p14, 0, r1, c0, c0, 6		// WVR0
	mcr	p14, 0, r2, c0, c1, 6		// WVR1
	add	r0, r0, #56			// advance to wcr[0]
	mcr	p14, 0, r1, c0, c0, 7		// WCR0
	mcr	p14, 0, r2, c0, c1, 7		// WCR1

	// Unlock debug registers
	mcr	p14, 0, ip, c1, c0, 4
 * void fiq_context_init(boolean_t enable_fiq)
	.globl	EXT(fiq_context_init)
LEXT(fiq_context_init)
	mrs	r3, cpsr			// Save current CPSR
	cmp	r0, #0				// Test enable_fiq
	bicne	r3, r3, #PSR_FIQF		// Enable FIQ if not FALSE
	mrc	p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr	r2, [r12, ACT_CPUDATAP]		// Get current cpu data

	/* Despite the fact that we use the physical timebase
	 * register as the basis for time on our platforms, we
	 * end up using the virtual timer in order to manage
	 * deadlines. This is due to the fact that for our
	 * current platforms, the interrupt generated by the
	 * physical timer is not hooked up to anything, and is
	 * therefore dropped on the floor. Therefore, for
	 * timers to function they MUST be based on the virtual
	 * timer.
	 */
	mov	r0, #1				// Enable Timer
	mcr	p15, 0, r0, c14, c3, 1		// Write to CNTV_CTL

	/* Enable USER access to the physical timebase (PL0PCTEN).
	 * The rationale for providing access to the physical
	 * timebase is that the virtual timebase is broken for
	 * some platforms. Maintaining the offset ourselves isn't
	 * expensive, so mandate that the userspace implementation
	 * do timebase_phys+offset rather than trying to propagate
	 * all of the information about what works up to USER.
	 */
	mcr	p15, 0, r0, c14, c1, 0		// Set CNTKCTL.PL0PCTEN (CNTKCTL[0])
#else /* ! __ARM_TIME__ */
	msr	cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF)	// Change mode to FIQ with FIQ/IRQ disabled
	mov	r8, r2				// Load the BootCPUData address
	ldr	r9, [r2, CPU_GET_FIQ_HANDLER]	// Load fiq function address
	ldr	r10, [r2, CPU_TBD_HARDWARE_ADDR]	// Load the hardware address
	ldr	r11, [r2, CPU_TBD_HARDWARE_VAL]	// Load the hardware value
#endif /* __ARM_TIME__ */
	msr	cpsr_c, r3			// Restore saved CPSR
 * void reenable_async_aborts(void)
	.globl	EXT(reenable_async_aborts)
LEXT(reenable_async_aborts)
	cpsie	a				// Re-enable async aborts
 * uint64_t ml_get_timebase(void)
	.globl	EXT(ml_get_timebase)
LEXT(ml_get_timebase)
	mrc	p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr	r3, [r12, ACT_CPUDATAP]		// Get current cpu data
#if __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__
	isb					// Required by ARMV7C.b section B8.1.2, ARMv8 section D6.1.2
1:
	mrrc	p15, 0, r3, r1, c14		// Read the Time Base (CNTPCT), high => r1
	mrrc	p15, 0, r0, r3, c14		// Read the Time Base (CNTPCT), low => r0
	mrrc	p15, 0, r3, r2, c14		// Read the Time Base (CNTPCT), high => r2
	bne	1b				// Loop until both high values are the same
	ldr	r3, [r12, ACT_CPUDATAP]		// Get current cpu data
	ldr	r2, [r3, CPU_BASE_TIMEBASE_LOW]	// Add in the offset to
	adds	r0, r0, r2			// convert to
	ldr	r2, [r3, CPU_BASE_TIMEBASE_HIGH] // mach_absolute_time
#else /* ! __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__ */
	ldr	r2, [r3, CPU_TIMEBASE_HIGH]	// Get the saved TBU value
	ldr	r0, [r3, CPU_TIMEBASE_LOW]	// Get the saved TBL value
	ldr	r1, [r3, CPU_TIMEBASE_HIGH]	// Get the saved TBU value
	cmp	r1, r2				// Make sure TB has not rolled over
#endif /* __ARM_TIME__ */
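
/*
 * Illustrative sketch (not assembled): the high/low/high pattern used
 * above to read a 64-bit counter with 32-bit accesses. If the two high
 * reads disagree, the low word may have wrapped between them, so the
 * read is retried. The same idea covers both the CNTPCT path and the
 * saved CPU_TIMEBASE_LOW/HIGH path. Helper names are assumed.
 *
 *	static uint64_t
 *	read_tb_sketch(void)
 *	{
 *		uint32_t hi, lo, chk;
 *		do {
 *			hi  = tb_high();	// first high read
 *			lo  = tb_low();
 *			chk = tb_high();	// second high read
 *		} while (hi != chk);		// retry if low wrapped
 *		return ((uint64_t)hi << 32) | lo;
 *	}
 */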
 * uint32_t ml_get_decrementer(void)
	.globl	EXT(ml_get_decrementer)
LEXT(ml_get_decrementer)
	mrc	p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr	r3, [r12, ACT_CPUDATAP]		// Get current cpu data
	ldr	r2, [r3, CPU_GET_DECREMENTER_FUNC]	// Get get_decrementer_func
	bxne	r2				// Call it if there is one
	mrc	p15, 0, r0, c14, c3, 0		// Read the Decrementer (CNTV_TVAL)
	ldr	r0, [r3, CPU_DECREMENTER]	// Get the saved dec value
 * void ml_set_decrementer(uint32_t dec_value)
	.globl	EXT(ml_set_decrementer)
LEXT(ml_set_decrementer)
	mrc	p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr	r3, [r12, ACT_CPUDATAP]		// Get current cpu data
	ldr	r2, [r3, CPU_SET_DECREMENTER_FUNC]	// Get set_decrementer_func
	bxne	r2				// Call it if there is one
	str	r0, [r3, CPU_DECREMENTER]	// Save the new dec value
	mcr	p15, 0, r0, c14, c3, 0		// Write the Decrementer (CNTV_TVAL)
	mrs	r2, cpsr			// Save current CPSR
	msr	cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF)	// Change mode to FIQ with FIQ/IRQ disabled
	mov	r12, r0				// Set the DEC value
	str	r12, [r8, CPU_DECREMENTER]	// Store DEC
	msr	cpsr_c, r2			// Restore saved CPSR
 * boolean_t ml_get_interrupts_enabled(void)
	.globl	EXT(ml_get_interrupts_enabled)
LEXT(ml_get_interrupts_enabled)
	bic	r0, r0, r2, lsr #PSR_IRQFb
 * Platform Specific Timebase & Decrementer Functions

#if defined(ARM_BOARD_CLASS_S7002)
	.globl	EXT(fleh_fiq_s7002)
LEXT(fleh_fiq_s7002)
	str	r11, [r10, #PMGR_INTERVAL_TMR_CTL_OFFSET]	// Clear the decrementer interrupt
	str	r13, [r8, CPU_DECREMENTER]

	.globl	EXT(s7002_get_decrementer)
LEXT(s7002_get_decrementer)
	ldr	ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	add	ip, ip, #PMGR_INTERVAL_TMR_OFFSET
	ldr	r0, [ip]			// Get the Decrementer

	.globl	EXT(s7002_set_decrementer)
LEXT(s7002_set_decrementer)
	str	r0, [r3, CPU_DECREMENTER]	// Save the new dec value
	ldr	ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	str	r0, [ip, #PMGR_INTERVAL_TMR_OFFSET]	// Store the new Decrementer
#endif /* defined(ARM_BOARD_CLASS_S7002) */

#if defined(ARM_BOARD_CLASS_T8002)
	.globl	EXT(fleh_fiq_t8002)
LEXT(fleh_fiq_t8002)
	mov	r13, #kAICTmrIntStat
	str	r11, [r10, r13]			// Clear the decrementer interrupt
	str	r13, [r8, CPU_DECREMENTER]

	.globl	EXT(t8002_get_decrementer)
LEXT(t8002_get_decrementer)
	ldr	ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	ldr	r0, [ip]			// Get the Decrementer

	.globl	EXT(t8002_set_decrementer)
LEXT(t8002_set_decrementer)
	str	r0, [r3, CPU_DECREMENTER]	// Save the new dec value
	ldr	ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	str	r0, [ip, r5]			// Store the new Decrementer
#endif /* defined(ARM_BOARD_CLASS_T8002) */
LOAD_ADDR_GEN_DEF(kernel_pmap_store)

#include "globals_asm.h"

/* vim: set ts=4: */