/*
 * Copyright (c) 2007-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <sys/errno.h>
	.globl	EXT(machine_set_current_thread)
LEXT(machine_set_current_thread)
	ldr	r1, [r0, ACT_CPUDATAP]
	str	r0, [r1, CPU_ACTIVE_THREAD]
	mcr	p15, 0, r0, c13, c0, 4		// Write TPIDRPRW
	ldr	r1, [r0, TH_CTH_SELF]
	mrc	p15, 0, r2, c13, c0, 3		// Read TPIDRURO
	and	r2, r2, #3			// Extract cpu number
	mcr	p15, 0, r1, c13, c0, 3		// Write TPIDRURO
	mcr	p15, 0, r1, c13, c0, 2		// Write TPIDRURW
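
/*
 * Note on the three thread registers written above: TPIDRPRW holds the
 * kernel's current-thread pointer, while TPIDRURO/TPIDRURW expose the
 * thread's cthread self pointer (TH_CTH_SELF) to userspace; the low two
 * bits of TPIDRURO carry the cpu number extracted above. A minimal C
 * sketch of the kernel-side read-back (illustrative only; the real
 * accessor lives in the C headers):
 *
 *	static inline struct thread *current_thread_sketch(void)
 *	{
 *		struct thread *t;
 *		__asm__ volatile ("mrc p15, 0, %0, c13, c0, 4" : "=r"(t));
 *		return t;	// value written by machine_set_current_thread
 *	}
 */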
/*
 * void machine_idle(void)
 */
	.globl	EXT(machine_idle)
LEXT(machine_idle)
	cpsid	if				// Disable FIQ IRQ
	cpsie	if				// Enable FIQ IRQ

/*
 * void cpu_idle_wfi(boolean_t wfi_fast):
 *	cpu_idle is the only function that should call this.
 */
	.globl	EXT(cpu_idle_wfi)
LEXT(cpu_idle_wfi)
/*
 * We export the address of the WFI instruction so that it can be patched; this will be
 * ugly from a debugging perspective.
 */
#if (__ARM_ARCH__ >= 7)
	mcr	p15, 0, r0, c7, c10, 4		// Data synchronization barrier (CP15 encoding)
	mcr	p15, 0, r0, c7, c0, 4		// Wait for interrupt (CP15 encoding)
	.globl	EXT(timer_grab)
LEXT(timer_grab)
	ldr	r2, [r0, TIMER_HIGH]
	ldr	r3, [r0, TIMER_LOW]
	ldr	r1, [r0, TIMER_HIGHCHK]

	.globl	EXT(timer_advance_internal_32)
LEXT(timer_advance_internal_32)
	str	r1, [r0, TIMER_HIGHCHK]
	str	r2, [r0, TIMER_LOW]
	str	r1, [r0, TIMER_HIGH]
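
/*
 * timer_grab()/timer_advance_internal_32() above implement a
 * seqlock-style protocol so a 64-bit timer can be read with 32-bit
 * accesses and no lock: the writer stores HIGHCHK, then LOW, then HIGH;
 * the reader samples HIGH, LOW, HIGHCHK and retries until the two high
 * words agree. A minimal C sketch of the reader (field names follow the
 * asm offsets; the barriers between accesses are elided here):
 *
 *	uint64_t timer_grab_sketch(const volatile struct timer *t)
 *	{
 *		uint32_t hi, lo;
 *		do {
 *			hi = t->high;		// TIMER_HIGH
 *			lo = t->low;		// TIMER_LOW
 *		} while (hi != t->high_chk);	// TIMER_HIGHCHK
 *		return ((uint64_t)hi << 32) | lo;
 *	}
 */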
	.globl	EXT(get_vfp_enabled)
LEXT(get_vfp_enabled)
	and	r1, r0, #FPEXC_EN		// Extract the VFP enable bit from the previous state
	mov	r0, r1, LSR #FPEXC_EN_BIT	// Return 1 if enabled, 0 if disabled
	mov	r0, #0				// Return FALSE (no VFP support built in)

/* This function is no longer useful (but is exported, so this may require kext cleanup). */
	.globl	EXT(enable_kernel_vfp_context)
LEXT(enable_kernel_vfp_context)
/*
 * uint32_t get_fpscr(void):
 *	Returns the current state of the FPSCR register.
 */
	.globl	EXT(get_fpscr)
LEXT(get_fpscr)

	.globl	EXT(set_fpscr)
/*
 * void set_fpscr(uint32_t value):
 *	Set the FPSCR register.
 */
LEXT(set_fpscr)

/*
 * void OSSynchronizeIO(void)
 */
	.globl	EXT(OSSynchronizeIO)
LEXT(OSSynchronizeIO)
.macro SYNC_TLB_FLUSH

/*
 * void sync_tlb_flush(void)
 *
 *	Synchronize one or more prior TLB flush operations
 */
	.globl	EXT(sync_tlb_flush)
LEXT(sync_tlb_flush)

.macro FLUSH_MMU_TLB
	mcr	p15, 0, r0, c8, c3, 0		// Invalidate Inner Shareable entire TLBs
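
/*
 * Naming convention for the TLB maintenance routines below: each
 * flush_*_async variant issues only the invalidate operation and
 * returns without waiting for completion, while the plain variant
 * issues the same operation followed by SYNC_TLB_FLUSH so the flush
 * has completed before it returns.
 */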
/*
 * void flush_mmu_tlb_async(void)
 *
 *	Flush all TLBs, don't wait for completion
 */
	.globl	EXT(flush_mmu_tlb_async)
LEXT(flush_mmu_tlb_async)

/*
 * void flush_mmu_tlb(void)
 *
 *	Flush all TLBs
 */
	.globl	EXT(flush_mmu_tlb)
LEXT(flush_mmu_tlb)

.macro FLUSH_CORE_TLB
	mcr	p15, 0, r0, c8, c7, 0		// Invalidate entire TLB

/*
 * void flush_core_tlb_async(void)
 *
 *	Flush local core's TLB, don't wait for completion
 */
	.globl	EXT(flush_core_tlb_async)
LEXT(flush_core_tlb_async)

/*
 * void flush_core_tlb(void)
 *
 *	Flush local core's TLB
 */
	.globl	EXT(flush_core_tlb)
LEXT(flush_core_tlb)

.macro FLUSH_MMU_TLB_ENTRY
	mcr	p15, 0, r0, c8, c3, 1		// Invalidate TLB Inner Shareable entry

/*
 * void flush_mmu_tlb_entry_async(uint32_t)
 *
 *	Flush TLB entry, don't wait for completion
 */
	.globl	EXT(flush_mmu_tlb_entry_async)
LEXT(flush_mmu_tlb_entry_async)

/*
 * void flush_mmu_tlb_entry(uint32_t)
 *
 *	Flush TLB entry
 */
	.globl	EXT(flush_mmu_tlb_entry)
LEXT(flush_mmu_tlb_entry)
.macro FLUSH_MMU_TLB_ENTRIES
1:
	mcr	p15, 0, r0, c8, c3, 1		// Invalidate TLB Inner Shareable entry
	add	r0, r0, ARM_PGBYTES		// Increment to the next page
	cmp	r0, r1				// Loop if current address < end address
	blt	1b

/*
 * void flush_mmu_tlb_entries_async(uint32_t, uint32_t)
 *
 *	Flush TLB entries for address range, don't wait for completion
 */
	.globl	EXT(flush_mmu_tlb_entries_async)
LEXT(flush_mmu_tlb_entries_async)
	FLUSH_MMU_TLB_ENTRIES

/*
 * void flush_mmu_tlb_entries(uint32_t, uint32_t)
 *
 *	Flush TLB entries for address range
 */
	.globl	EXT(flush_mmu_tlb_entries)
LEXT(flush_mmu_tlb_entries)
	FLUSH_MMU_TLB_ENTRIES

.macro FLUSH_MMU_TLB_MVA_ENTRIES
	mcr	p15, 0, r0, c8, c3, 3		// Invalidate TLB Inner Shareable entries by mva

/*
 * void flush_mmu_tlb_mva_entries_async(uint32_t)
 *
 *	Flush TLB entries for mva, don't wait for completion
 */
	.globl	EXT(flush_mmu_tlb_mva_entries_async)
LEXT(flush_mmu_tlb_mva_entries_async)
	FLUSH_MMU_TLB_MVA_ENTRIES

/*
 * void flush_mmu_tlb_mva_entries(uint32_t)
 *
 *	Flush TLB entries for mva
 */
	.globl	EXT(flush_mmu_tlb_mva_entries)
LEXT(flush_mmu_tlb_mva_entries)
	FLUSH_MMU_TLB_MVA_ENTRIES
.macro FLUSH_MMU_TLB_ASID
	mcr	p15, 0, r0, c8, c3, 2		// Invalidate TLB Inner Shareable entries by asid

/*
 * void flush_mmu_tlb_asid_async(uint32_t)
 *
 *	Flush TLB entries for asid, don't wait for completion
 */
	.globl	EXT(flush_mmu_tlb_asid_async)
LEXT(flush_mmu_tlb_asid_async)

/*
 * void flush_mmu_tlb_asid(uint32_t)
 *
 *	Flush TLB entries for asid
 */
	.globl	EXT(flush_mmu_tlb_asid)
LEXT(flush_mmu_tlb_asid)

.macro FLUSH_CORE_TLB_ASID
	mcr	p15, 0, r0, c8, c7, 2		// Invalidate TLB entries by asid

/*
 * void flush_core_tlb_asid_async(uint32_t)
 *
 *	Flush local core TLB entries for asid, don't wait for completion
 */
	.globl	EXT(flush_core_tlb_asid_async)
LEXT(flush_core_tlb_asid_async)

/*
 * void flush_core_tlb_asid(uint32_t)
 *
 *	Flush local core TLB entries for asid
 */
	.globl	EXT(flush_core_tlb_asid)
LEXT(flush_core_tlb_asid)
/*
 * Set MMU Translation Table Base
 */
	.globl	EXT(set_mmu_ttb)
LEXT(set_mmu_ttb)
	orr	r0, r0, #(TTBR_SETUP & 0xFF)	// Setup PTWs memory attribute
	orr	r0, r0, #(TTBR_SETUP & 0xFF00)	// Setup PTWs memory attribute
	mcr	p15, 0, r0, c2, c0, 0		// write r0 to translation table 0 (TTBR0)

/*
 * Set MMU Translation Table Base Alternate
 */
	.globl	EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
	orr	r0, r0, #(TTBR_SETUP & 0xFF)	// Setup PTWs memory attribute
	orr	r0, r0, #(TTBR_SETUP & 0xFF00)	// Setup PTWs memory attribute
	mcr	p15, 0, r0, c2, c0, 1		// write r0 to translation table 1 (TTBR1)

/*
 * Get MMU Translation Table Base
 */
	.globl	EXT(get_mmu_ttb)
LEXT(get_mmu_ttb)
	mrc	p15, 0, r0, c2, c0, 0		// read translation table 0 (TTBR0) into r0
/*
 * get auxiliary control register
 */
	.globl	EXT(get_aux_control)
LEXT(get_aux_control)
	mrc	p15, 0, r0, c1, c0, 1		// read aux control into r0
	bx	lr				// return old bits in r0

/*
 * set auxiliary control register
 */
	.globl	EXT(set_aux_control)
LEXT(set_aux_control)
	mcr	p15, 0, r0, c1, c0, 1		// write r0 back to aux control

/*
 * get MMU control register
 */
	.globl	EXT(get_mmu_control)
LEXT(get_mmu_control)
	mrc	p15, 0, r0, c1, c0, 0		// read mmu control into r0
	bx	lr				// return old bits in r0

/*
 * set MMU control register
 */
	.globl	EXT(set_mmu_control)
LEXT(set_mmu_control)
	mcr	p15, 0, r0, c1, c0, 0		// write r0 back to mmu control
/*
 * MMU kernel virtual to physical address translation
 */
	.globl	EXT(mmu_kvtop)
LEXT(mmu_kvtop)
	mrs	r3, cpsr			// Read cpsr
	cpsid	if				// Disable FIQ IRQ
	mcr	p15, 0, r1, c7, c8, 0		// Write V2PCWPR
	mrc	p15, 0, r0, c7, c4, 0		// Read PAR
	ands	r2, r0, #0x1			// Test conversion aborted
	bne	mmu_kvtophys_fail
	ands	r2, r0, #0x2			// Test super section
	mvnne	r2, #0xFF000000			// super section mask: keep the top 8 bits
	moveq	r2, #0x000000FF			// 4KB page mask, low byte...
	orreq	r2, r2, #0x00000F00		// ...extended to the full 12 bits
	bics	r0, r0, r2			// Clear lower bits
	beq	mmu_kvtophys_fail
	msr	cpsr, r3			// Restore cpsr
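
/*
 * PAR decoding used by the translation routines above and below, as a
 * hedged C sketch (the asm also merges the in-page offset from the
 * original virtual address on the success path): PAR bit 0 set means
 * the translation aborted, and bit 1 distinguishes a 16MB supersection
 * (24-bit offset) from a 4KB page (12-bit offset).
 *
 *	uint32_t par_to_pa_sketch(uint32_t par, uint32_t vaddr)
 *	{
 *		if (par & 1)
 *			return 0;			// translation aborted
 *		uint32_t mask = (par & 2) ? 0x00FFFFFF	// supersection
 *		                          : 0x00000FFF;	// 4KB page
 *		return (par & ~mask) | (vaddr & mask);
 *	}
 */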
/*
 * MMU user virtual to physical address translation
 */
	.globl	EXT(mmu_uvtop)
LEXT(mmu_uvtop)
	mrs	r3, cpsr			// Read cpsr
	cpsid	if				// Disable FIQ IRQ
	mcr	p15, 0, r1, c7, c8, 2		// Write V2PCWUR
	mrc	p15, 0, r0, c7, c4, 0		// Read PAR
	ands	r2, r0, #0x1			// Test conversion aborted
	bne	mmu_uvtophys_fail
	ands	r2, r0, #0x2			// Test super section
	mvnne	r2, #0xFF000000
	moveq	r2, #0x000000FF
	orreq	r2, r2, #0x00000F00
	bics	r0, r0, r2			// Clear lower bits
	beq	mmu_uvtophys_fail
	msr	cpsr, r3			// Restore cpsr

/*
 * MMU kernel virtual to physical address preflight write access
 */
	.globl	EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
	mrs	r3, cpsr			// Read cpsr
	cpsid	if				// Disable FIQ IRQ
	mcr	p15, 0, r1, c7, c8, 1		// Write V2PCWPW
	mrc	p15, 0, r0, c7, c4, 0		// Read PAR
	ands	r2, r0, #0x1			// Test conversion aborted
	bne	mmu_kvtophys_wpreflight_fail
	ands	r2, r0, #0x2			// Test super section
	mvnne	r2, #0xFF000000
	moveq	r2, #0x000000FF
	orreq	r2, r2, #0x00000F00
	bics	r0, r0, r2			// Clear lower bits
	beq	mmu_kvtophys_wpreflight_fail	// Sanity check: successful access must deliver zero low bits
	b	mmu_kvtophys_wpreflight_ret
mmu_kvtophys_wpreflight_fail:
mmu_kvtophys_wpreflight_ret:
	msr	cpsr, r3			// Restore cpsr

/*
 * set context id register
 */
	.globl	EXT(set_context_id)
LEXT(set_context_id)
	mcr	p15, 0, r0, c13, c0, 1		// Write CONTEXTIDR
/*
 * arg0: prefix of the external validator function (copyin or copyout)
 * arg1: 0-based index of highest argument register that must be preserved
 */
.macro COPYIO_VALIDATE
	/* call NAME_validate to check the arguments */
	push	{r0-r$1, r7, lr}
	add	r7, sp, #(($1 + 1) * 4)
	addne	sp, #(($1 + 1) * 4)
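
/*
 * For example, "COPYIO_VALIDATE copyin, 2" saves r0-r2 plus r7/lr,
 * builds a frame, and calls the copyin_validate() routine named by the
 * prefix; on a non-zero (error) return the addne above discards the
 * saved argument registers so the validator's error can be returned
 * directly.
 */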
#define	COPYIO_SET_RECOVER()						\
	/* set recovery address */					;\
	stmfd	sp!, { r4, r5, r6 }					;\
	adr	r3, copyio_error					;\
	mrc	p15, 0, r12, c13, c0, 4		/* Read TPIDRPRW */	;\
	ldr	r4, [r12, TH_RECOVER]					;\
	str	r3, [r12, TH_RECOVER]

#define	COPYIO_TRY_KERNEL()						\
	/* if (current_thread()->map->pmap == kernel_pmap) copyio_kernel() */ ;\
	mrc	p15, 0, r12, c13, c0, 4		/* Read TPIDRPRW */	;\
	ldr	r3, [r12, ACT_MAP]					;\
	ldr	r3, [r3, MAP_PMAP]					;\
	LOAD_ADDR(ip, kernel_pmap_store)
#if __ARM_USER_PROTECT__
#define	COPYIO_MAP_USER()						\
	/* disable interrupts to prevent expansion to 2GB at L1	;\
	 * between loading ttep and storing it in ttbr0. */		;\
	ldr	r3, [r12, ACT_UPTW_TTB]		/* Get user TTB */	;\
	mcr	p15, 0, r3, c2, c0, 0		/* Set TTBR0 */		;\
	ldr	r3, [r12, ACT_ASID]		/* Get user ASID */	;\
	mcr	p15, 0, r3, c13, c0, 1		/* Set CONTEXTIDR */
#else
#define	COPYIO_MAP_USER()
#endif

#define	COPYIO_HEADER()							;\
	/* test for zero len */

.macro COPYIO_BODY
	/* if len is less than 16 bytes, just do a simple copy */
	/* test for src and dest of the same word alignment */
	/* 16 bytes at a time */
	ldmia	r0!, { r3, r5, r6, r12 }
	stmia	r1!, { r3, r5, r6, r12 }
	bge	L$0_wordwise_loop
	/* fixup the len and test for completion */
	/* copy 2 bytes at a time */

#if __ARM_USER_PROTECT__
#define	COPYIO_UNMAP_USER()						\
	mrc	p15, 0, r12, c13, c0, 4		/* Read TPIDRPRW */	;\
	ldr	r3, [r12, ACT_KPTW_TTB]		/* Get kernel TTB */	;\
	mcr	p15, 0, r3, c2, c0, 0		/* Restore TTBR0 */	;\
	mcr	p15, 0, r3, c13, c0, 1		/* Restore CONTEXTIDR */
#else
#define	COPYIO_UNMAP_USER()						\
	mrc	p15, 0, r12, c13, c0, 4		/* Read TPIDRPRW */
#endif

#define	COPYIO_RESTORE_RECOVER()					\
	/* restore the recovery address */				;\
	str	r4, [r12, TH_RECOVER]					;\
	ldmfd	sp!, { r4, r5, r6 }
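
/*
 * Taken together, the copyio helpers above form a pipeline used by the
 * routines below: COPYIO_HEADER (trivial-length check), COPYIO_VALIDATE
 * (range check), COPYIO_SET_RECOVER (point TH_RECOVER at copyio_error
 * so a fault on the user address unwinds safely), COPYIO_MAP_USER
 * (install the user TTB/ASID when __ARM_USER_PROTECT__ keeps user
 * mappings out of the kernel's address space), the COPYIO_BODY copy
 * loop, and then the unmap and recover-restore steps in reverse.
 */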
/*
 * int copyinstr(
 *	const user_addr_t user_addr,
 *	char *kernel_addr,
 *	vm_size_t max,
 *	vm_size_t *actual)
 */
	.globl	EXT(copyinstr)
LEXT(copyinstr)
	moveq	r0, #ENAMETOOLONG
	COPYIO_VALIDATE copyin_user, 3
	stmfd	sp!, { r4, r5, r6 }

	adr	r3, copyinstr_error		// Get address for recover
	mrc	p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr	r4, [r12, TH_RECOVER]		// Save the existing recovery address
	str	r3, [r12, TH_RECOVER]		// Set recovery to copyinstr_error

	mov	r12, #0				// Number of bytes copied so far
	ldrb	r3, [r0], #1			// Load a byte from the source (user)
	strb	r3, [r1], #1			// Store a byte to the destination (kernel)
	cmp	r12, r2				// Room to copy more bytes?

	//
	// Ran out of space in the destination buffer, so return ENAMETOOLONG.
	//
copyinstr_too_long:
	mov	r3, #ENAMETOOLONG

copyinstr_done:
	//
	// When we get here, we have finished copying the string. We came here from
	// either the "beq copyinstr_done" above, in which case r3 == 0 (which is also
	// the function result for success), or falling through from copyinstr_too_long,
	// in which case r3 == ENAMETOOLONG.
	//
	str	r12, [r6]			// Save the count for actual
	mov	r0, r3				// Return error code from r3

	str	r4, [r12, TH_RECOVER]		// Restore the prior recovery address
	ldmfd	sp!, { r4, r5, r6 }

copyinstr_error:
	/* set error, exit routine */
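
/*
 * Typical use of copyinstr() from C, for reference (a hedged sketch;
 * the buffer and variable names are hypothetical):
 *
 *	char name[128];
 *	vm_size_t actual;
 *	int err = copyinstr(user_path, name, sizeof(name), &actual);
 *	// err == 0: success, with 'actual' holding the byte count
 *	// (including the terminating NUL); ENAMETOOLONG: the string
 *	// did not fit; a faulting user address unwinds through the
 *	// recovery path above.
 */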
/*
 * int copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
 */
	COPYIO_VALIDATE copyin, 2
	COPYIO_RESTORE_RECOVER()
/*
 * int copyout(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
 */
	COPYIO_VALIDATE copyout, 2
	COPYIO_RESTORE_RECOVER()
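
/*
 * Kernel-side use of copyin()/copyout(), per the prototypes above
 * (a hedged sketch; the struct and variable names are hypothetical):
 *
 *	struct args a;
 *	int err = copyin(user_args, (char *)&a, sizeof(a));
 *	if (err)
 *		return err;		// error from the copy routine
 *	...
 *	err = copyout((const char *)&result, user_result, sizeof(result));
 */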
/*
 * int copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
 */
	.globl	EXT(copyin_atomic32)
LEXT(copyin_atomic32)
	tst	r0, #3				// Test alignment of user address
	COPYIO_VALIDATE copyin_user, 1
	ldr	r2, [r0]			// Load word from user
	str	r2, [r1]			// Store to kernel_addr
	mov	r0, #0				// Success
	COPYIO_RESTORE_RECOVER()
2:						// misaligned copyin

/*
 * int copyin_atomic32_wait_if_equals(const char *src, uint32_t value)
 */
	.globl	EXT(copyin_atomic32_wait_if_equals)
LEXT(copyin_atomic32_wait_if_equals)
	tst	r0, #3				// Test alignment of user address
	COPYIO_VALIDATE copyio_user, 1		// validate user address (uses r2, r3)
	COPYIO_RESTORE_RECOVER()
2:						// misaligned copyin

/*
 * int copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
 */
	.globl	EXT(copyin_atomic64)
LEXT(copyin_atomic64)
	tst	r0, #7				// Test alignment of user address
	COPYIO_VALIDATE copyin_user, 1
1:						// ldrex/strex retry loop
	ldrexd	r2, r3, [r0]			// Load double word from user
	strexd	r5, r2, r3, [r0]		// (the COPYIO_*() macros make r5 safe to use as a scratch register here)
	stm	r1, {r2, r3}			// Store to kernel_addr
	mov	r0, #0				// Success
	COPYIO_RESTORE_RECOVER()
2:						// misaligned copyin

copyio_error:
	str	r4, [r12, TH_RECOVER]		// Restore the prior recovery address
	ldmfd	sp!, { r4, r5, r6 }
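
/*
 * Note on the ldrexd/strexd pair in copyin_atomic64 above: ARMv7 does
 * not guarantee that a plain LDRD is single-copy atomic, so the value
 * is read with LDREXD and immediately stored back with STREXD; the
 * exclusive store succeeds only if the doubleword was untouched in
 * between, which is what makes the 64-bit load atomic. The exclusive
 * pair is used in the store direction by copyout_atomic64 below.
 */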
/*
 * int copyout_atomic32(uint32_t value, user_addr_t user_addr)
 */
	.globl	EXT(copyout_atomic32)
LEXT(copyout_atomic32)
	tst	r1, #3				// Test alignment of user address
	COPYIO_VALIDATE copyio_user, 1		// validate user address (uses r2, r3)
	str	r0, [r1]			// Store word to user
	mov	r0, #0				// Success
	COPYIO_RESTORE_RECOVER()
2:						// misaligned copyout

/*
 * int copyout_atomic64(uint64_t value, user_addr_t user_addr)
 */
	.globl	EXT(copyout_atomic64)
LEXT(copyout_atomic64)
	tst	r2, #7				// Test alignment of user address
	COPYIO_VALIDATE copyio_user, 2		// validate user address (uses r2, r3)
1:						// ldrex/strex retry loop
	strexd	r3, r0, r1, [r2]		// Atomically store double word to user
	mov	r0, #0				// Success
	COPYIO_RESTORE_RECOVER()
2:						// misaligned copyout

/*
 * int copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
 */
	.globl	EXT(copyin_kern)
LEXT(copyin_kern)

/*
 * int copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
 */
	.globl	EXT(copyout_kern)
LEXT(copyout_kern)

	stmfd	sp!, { r5, r6 }
	COPYIO_BODY copyio_kernel
	ldmfd	sp!, { r5, r6 }

/*
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr)
 *
 *	Safely copy eight bytes (the fixed top of an ARM frame) from
 *	either user or kernel memory.
 */
	.globl	EXT(copyinframe)
LEXT(copyinframe)
/*
 * uint32_t arm_debug_read_dscr(void)
 */
	.globl	EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)
#if __ARM_DEBUG__ >= 6
	mrc	p14, 0, r0, c0, c1		// Read DSCR

/*
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 *	Set debug registers to match the current thread state
 *	(NULL to disable). Assume 6 breakpoints and 2
 *	watchpoints, since that has been the case in all cores
 *	we have seen so far.
 */
	.globl	EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
#if __ARM_DEBUG__ >= 6
	mrc	p15, 0, r1, c13, c0, 4		// Read TPIDRPRW
	ldr	r2, [r1, ACT_CPUDATAP]		// Get current cpu
	str	r0, [r2, CPU_USER_DEBUG]	// Set current user debug

	// Lock the debug registers
	mcr	p14, 0, ip, c1, c0, 4		// Write the lock access register (DBGOSLAR)

	// enable monitor mode (needed to set and use debug registers)
	mrc	p14, 0, ip, c0, c1, 0
	orr	ip, ip, #0x8000			// set MDBGen = 1
#if __ARM_DEBUG__ >= 7
	mcr	p14, 0, ip, c0, c2, 2
#else
	mcr	p14, 0, ip, c0, c1, 0
#endif

	// first turn off all breakpoints/watchpoints
	mov	r1, #0
	mcr	p14, 0, r1, c0, c0, 5		// BCR0
	mcr	p14, 0, r1, c0, c1, 5		// BCR1
	mcr	p14, 0, r1, c0, c2, 5		// BCR2
	mcr	p14, 0, r1, c0, c3, 5		// BCR3
	mcr	p14, 0, r1, c0, c4, 5		// BCR4
	mcr	p14, 0, r1, c0, c5, 5		// BCR5
	mcr	p14, 0, r1, c0, c0, 7		// WCR0
	mcr	p14, 0, r1, c0, c1, 7		// WCR1

	// if (debug_state == NULL) disable monitor mode and return;
	cmp	r0, #0
	biceq	ip, ip, #0x8000			// set MDBGen = 0
#if __ARM_DEBUG__ >= 7
	mcreq	p14, 0, ip, c0, c2, 2
#else
	mcreq	p14, 0, ip, c0, c1, 0
#endif
	bxeq	lr

	ldmia	r0!, {r1, r2, r3, ip}
	mcr	p14, 0, r1, c0, c0, 4		// BVR0
	mcr	p14, 0, r2, c0, c1, 4		// BVR1
	mcr	p14, 0, r3, c0, c2, 4		// BVR2
	mcr	p14, 0, ip, c0, c3, 4		// BVR3
	ldmia	r0!, {r1, r2}
	mcr	p14, 0, r1, c0, c4, 4		// BVR4
	mcr	p14, 0, r2, c0, c5, 4		// BVR5
	add	r0, r0, #40			// advance to bcr[0]
	ldmia	r0!, {r1, r2, r3, ip}
	mcr	p14, 0, r1, c0, c0, 5		// BCR0
	mcr	p14, 0, r2, c0, c1, 5		// BCR1
	mcr	p14, 0, r3, c0, c2, 5		// BCR2
	mcr	p14, 0, ip, c0, c3, 5		// BCR3
	ldmia	r0!, {r1, r2}
	mcr	p14, 0, r1, c0, c4, 5		// BCR4
	mcr	p14, 0, r2, c0, c5, 5		// BCR5
	add	r0, r0, #40			// advance to wvr[0]
	ldmia	r0!, {r1, r2}
	mcr	p14, 0, r1, c0, c0, 6		// WVR0
	mcr	p14, 0, r2, c0, c1, 6		// WVR1
	add	r0, r0, #56			// advance to wcr[0]
	ldmia	r0!, {r1, r2}
	mcr	p14, 0, r1, c0, c0, 7		// WCR0
	mcr	p14, 0, r2, c0, c1, 7		// WCR1

	// Unlock debug registers
	mcr	p14, 0, ip, c1, c0, 4		// Write the lock access register (DBGOSLAR)
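
/*
 * The loads above walk an arm_debug_state_t laid out as fixed 16-entry
 * arrays (bvr[16], bcr[16], wvr[16], wcr[16]). Only bvr[0-5]/bcr[0-5]
 * and wvr[0-1]/wcr[0-1] are programmed, so after consuming six value
 * words the code skips the ten unused slots ("add r0, r0, #40") to
 * reach the next array, and after two watchpoint words it skips
 * fourteen slots ("add r0, r0, #56").
 */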
/*
 * void fiq_context_init(boolean_t enable_fiq)
 */
	.globl	EXT(fiq_context_init)
LEXT(fiq_context_init)
	mrs	r3, cpsr			// Save current CPSR
	cmp	r0, #0				// Test enable_fiq
	bicne	r3, r3, #PSR_FIQF		// Enable FIQ if not FALSE
	mrc	p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr	r2, [r12, ACT_CPUDATAP]		// Get current cpu data
#if __ARM_TIME__
	/* Despite the fact that we use the physical timebase
	 * register as the basis for time on our platforms, we
	 * end up using the virtual timer in order to manage
	 * deadlines. This is due to the fact that for our
	 * current platforms, the interrupt generated by the
	 * physical timer is not hooked up to anything, and is
	 * therefore dropped on the floor. Therefore, for
	 * timers to function they MUST be based on the virtual
	 * timer.
	 */
	mov	r0, #1				// Enable Timer
	mcr	p15, 0, r0, c14, c3, 1		// Write to CNTV_CTL

	/* Enable USER access to the physical timebase (PL0PCTEN).
	 * The rationale for providing access to the physical
	 * timebase being that the virtual timebase is broken for
	 * some platforms. Maintaining the offset ourselves isn't
	 * expensive, so mandate that the userspace implementation
	 * do timebase_phys+offset rather than trying to propagate
	 * all of the information about what works up to USER.
	 */
	mcr	p15, 0, r0, c14, c1, 0		// Set CNTKCTL.PL0PCTEN (CNTKCTL[0])
#else /* ! __ARM_TIME__ */
	msr	cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF)	// Change mode to FIQ with FIQ/IRQ disabled
	mov	r8, r2						// Load the BootCPUData address
	ldr	r9, [r2, CPU_GET_FIQ_HANDLER]			// Load fiq function address
	ldr	r10, [r2, CPU_TBD_HARDWARE_ADDR]		// Load the hardware address
	ldr	r11, [r2, CPU_TBD_HARDWARE_VAL]			// Load the hardware value
#endif /* __ARM_TIME__ */

	msr	cpsr_c, r3					// Restore saved CPSR
/*
 * void reenable_async_aborts(void)
 */
	.globl	EXT(reenable_async_aborts)
LEXT(reenable_async_aborts)
	cpsie	a				// Re-enable async aborts
/*
 * uint64_t ml_get_timebase(void)
 */
	.globl	EXT(ml_get_timebase)
LEXT(ml_get_timebase)
	mrc	p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr	r3, [r12, ACT_CPUDATAP]		// Get current cpu data
#if __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__
	isb					// Required by ARMv7 C.b section B8.1.2, ARMv8 section D6.1.2
1:
	mrrc	p15, 0, r3, r1, c14		// Read the Time Base (CNTPCT), high => r1
	mrrc	p15, 0, r0, r3, c14		// Read the Time Base (CNTPCT), low => r0
	mrrc	p15, 0, r3, r2, c14		// Read the Time Base (CNTPCT), high => r2
	cmp	r1, r2
	bne	1b				// Loop until both high values are the same
	ldr	r3, [r12, ACT_CPUDATAP]		// Get current cpu data
	ldr	r2, [r3, CPU_BASE_TIMEBASE_LOW]	// Add in the offset to
	adds	r0, r0, r2			// convert to
	ldr	r2, [r3, CPU_BASE_TIMEBASE_HIGH] // mach_absolute_time
	adc	r1, r1, r2
#else /* ! __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__ */
1:
	ldr	r2, [r3, CPU_TIMEBASE_HIGH]	// Get the saved TBU value
	ldr	r0, [r3, CPU_TIMEBASE_LOW]	// Get the saved TBL value
	ldr	r1, [r3, CPU_TIMEBASE_HIGH]	// Get the saved TBU value
	cmp	r1, r2				// Make sure TB has not rolled over
	bne	1b
#endif /* __ARM_TIME__ */
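
/*
 * Both paths of ml_get_timebase above guard against a torn 64-bit read
 * the same way timer_grab does: sample high, then low, then high
 * again, and retry until the two high samples match.
 */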
/*
 * uint32_t ml_get_decrementer(void)
 */
	.globl	EXT(ml_get_decrementer)
LEXT(ml_get_decrementer)
	mrc	p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr	r3, [r12, ACT_CPUDATAP]		// Get current cpu data
	ldr	r2, [r3, CPU_GET_DECREMENTER_FUNC]	// Get get_decrementer_func
	cmp	r2, #0
	bxne	r2				// Call it if there is one
#if __ARM_TIME__
	mrc	p15, 0, r0, c14, c3, 0		// Read the Decrementer (CNTV_TVAL)
#else
	ldr	r0, [r3, CPU_DECREMENTER]	// Get the saved dec value
#endif
/*
 * void ml_set_decrementer(uint32_t dec_value)
 */
	.globl	EXT(ml_set_decrementer)
LEXT(ml_set_decrementer)
	mrc	p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr	r3, [r12, ACT_CPUDATAP]		// Get current cpu data
	ldr	r2, [r3, CPU_SET_DECREMENTER_FUNC]	// Get set_decrementer_func
	cmp	r2, #0
	bxne	r2				// Call it if there is one
#if __ARM_TIME__
	str	r0, [r3, CPU_DECREMENTER]	// Save the new dec value
	mcr	p15, 0, r0, c14, c3, 0		// Write the Decrementer (CNTV_TVAL)
#else
	mrs	r2, cpsr			// Save current CPSR
	msr	cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF)	// Change mode to FIQ with FIQ/IRQ disabled.
	mov	r12, r0				// Set the DEC value
	str	r12, [r8, CPU_DECREMENTER]	// Store DEC
	msr	cpsr_c, r2			// Restore saved CPSR
#endif
/*
 * boolean_t ml_get_interrupts_enabled(void)
 */
	.globl	EXT(ml_get_interrupts_enabled)
LEXT(ml_get_interrupts_enabled)
	mrs	r2, cpsr			// Read CPSR
	mov	r0, #1
	bic	r0, r0, r2, lsr #PSR_IRQFb	// Clear the shifted-down IRQ-disable bit: nonzero iff enabled
/*
 * Platform Specific Timebase & Decrementer Functions
 */

#if defined(ARM_BOARD_CLASS_T8002)
	.globl	EXT(fleh_fiq_t8002)
LEXT(fleh_fiq_t8002)
	mov	r13, #kAICTmrIntStat		// r13 here is the banked FIQ-mode SP, used as a scratch register
	str	r11, [r10, r13]			// Clear the decrementer interrupt
	str	r13, [r8, CPU_DECREMENTER]
	.globl	EXT(t8002_get_decrementer)
LEXT(t8002_get_decrementer)
	ldr	ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	ldr	r0, [ip]			// Get the Decrementer

	.globl	EXT(t8002_set_decrementer)
LEXT(t8002_set_decrementer)
	str	r0, [r3, CPU_DECREMENTER]	// Save the new dec value
	ldr	ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	str	r0, [ip, r5]			// Store the new Decrementer
#endif /* defined(ARM_BOARD_CLASS_T8002) */

LOAD_ADDR_GEN_DEF(kernel_pmap_store)

#include "globals_asm.h"

/* vim: set ts=4: */