apple/xnu: osfmk/arm/machine_routines_asm.s (xnu-7195.101.1)
1/*
2 * Copyright (c) 2007-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <machine/asm.h>
30#include <arm/proc_reg.h>
31#include <arm/pmap.h>
32#include <sys/errno.h>
33#include "assym.s"
34
35 .align 2
36 .globl EXT(machine_set_current_thread)
37LEXT(machine_set_current_thread)
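/*
 * r0 holds the new thread. It is recorded in the per-cpu active-thread slot
 * and in TPIDRPRW (the kernel-only thread register); the user-readable
 * TPIDRURO keeps its low two bits (the cpu number) and takes the thread's
 * cthread self pointer, while TPIDRURW is cleared.
 */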
38 ldr r1, [r0, ACT_CPUDATAP]
39 str r0, [r1, CPU_ACTIVE_THREAD]
40 mcr p15, 0, r0, c13, c0, 4 // Write TPIDRPRW
41 ldr r1, [r0, TH_CTH_SELF]
42 mrc p15, 0, r2, c13, c0, 3 // Read TPIDRURO
43 and r2, r2, #3 // Extract cpu number
44 orr r1, r1, r2 //
45 mcr p15, 0, r1, c13, c0, 3 // Write TPIDRURO
 46 mov r1, #0
47 mcr p15, 0, r1, c13, c0, 2 // Write TPIDRURW
48 bx lr
49
50/*
51 * void machine_idle(void)
52 */
53 .text
54 .align 2
55 .globl EXT(machine_idle)
56LEXT(machine_idle)
57 cpsid if // Disable FIQ IRQ
58 mov ip, lr
59 bl EXT(Idle_context)
60 mov lr, ip
61 cpsie if // Enable FIQ IRQ
62 bx lr
63
64/*
65 * void cpu_idle_wfi(boolean_t wfi_fast):
66 * cpu_idle is the only function that should call this.
67 */
68 .text
69 .align 2
70 .globl EXT(cpu_idle_wfi)
71LEXT(cpu_idle_wfi)
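/*
 * A sketch of the flow below: when wfi_fast is non-zero, WFI is issued
 * immediately and the routine returns after a one-iteration delay; when it is
 * zero, the core first spins ~32 iterations, then issues WFI, then spins
 * ~1200 iterations before returning.
 */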
72 mov r1, #32
73 mov r2, #1200
74 cmp r0, #0
75 beq 3f
76 mov r1, #1
77 b 2f
78 .align 5
791:
80 add r0, r0, #1
81 mov r1, r2
822:
83
84/*
85 * We export the address of the WFI instruction so that it can be patched; this will be
86 * ugly from a debugging perspective.
87 */
88
89#if (__ARM_ARCH__ >= 7)
90 dsb
91 .globl EXT(wfi_inst)
92LEXT(wfi_inst)
93 wfi
94#else
95 mcr p15, 0, r0, c7, c10, 4
96 .globl EXT(wfi_inst)
97LEXT(wfi_inst)
98 mcr p15, 0, r0, c7, c0, 4
99#endif
1003:
101 subs r1, r1, #1
102 bne 3b
103 nop
104 nop
105 nop
106 nop
107 nop
108 cmp r0, #0
109 beq 1b
110 bx lr
111
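/*
 * timer_grab: return a consistent 64-bit timer value (low word in r0, high in
 * r1) without taking a lock. TIMER_HIGH is read before TIMER_LOW, and
 * TIMER_HIGHCHK (written first by timer_advance_internal_32 below) is compared
 * against it afterwards; a mismatch means an update raced the read, so retry.
 */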
112 .align 2
113 .globl EXT(timer_grab)
114LEXT(timer_grab)
1150:
116 ldr r2, [r0, TIMER_HIGH]
117 ldr r3, [r0, TIMER_LOW]
 118 dmb ish // dmb ish
119 ldr r1, [r0, TIMER_HIGHCHK]
120 cmp r1, r2
121 bne 0b
122 mov r0, r3
123 bx lr
124
125 .align 2
126 .globl EXT(timer_advance_internal_32)
127LEXT(timer_advance_internal_32)
 128 str r1, [r0, TIMER_HIGHCHK]
 129 dmb ish // dmb ish
 130 str r2, [r0, TIMER_LOW]
 131 dmb ish // dmb ish
132 str r1, [r0, TIMER_HIGH]
133 bx lr
134
135 .align 2
136 .globl EXT(get_vfp_enabled)
137LEXT(get_vfp_enabled)
138#if __ARM_VFP__
139 fmrx r0, fpexc
 140 and r1, r0, #FPEXC_EN // Extract vfp enable previous state
141 mov r0, r1, LSR #FPEXC_EN_BIT // Return 1 if enabled, 0 if disabled
142#else
143 mov r0, #0 // return false
144#endif
145 bx lr
146
147/* This is no longer useful (but is exported, so this may require kext cleanup). */
148 .align 2
149 .globl EXT(enable_kernel_vfp_context)
150LEXT(enable_kernel_vfp_context)
151 bx lr
152
153/* uint32_t get_fpscr(void):
154 * Returns the current state of the FPSCR register.
155 */
156 .align 2
157 .globl EXT(get_fpscr)
158LEXT(get_fpscr)
159#if __ARM_VFP__
160 fmrx r0, fpscr
161#endif
162 bx lr
163 .align 2
164 .globl EXT(set_fpscr)
165/* void set_fpscr(uint32_t value):
166 * Set the FPSCR register.
167 */
168LEXT(set_fpscr)
169#if __ARM_VFP__
170 fmxr fpscr, r0
171#else
172 mov r0, #0
173#endif
174 bx lr
175
176/*
177 * void OSSynchronizeIO(void)
178 */
179 .text
180 .align 2
181 .globl EXT(OSSynchronizeIO)
182LEXT(OSSynchronizeIO)
183 .align 2
184 dsb
185 bx lr
186
187.macro SYNC_TLB_FLUSH
188 dsb ish
189 isb
190.endmacro
191
 192/*
 193 * void sync_tlb_flush(void)
 194 *
 195 * Synchronize one or more prior TLB flush operations
 196 */
197 .text
198 .align 2
199 .globl EXT(sync_tlb_flush)
200LEXT(sync_tlb_flush)
201 SYNC_TLB_FLUSH
202 bx lr
203
204.macro FLUSH_MMU_TLB
 205 mov r0, #0
 206 mcr p15, 0, r0, c8, c3, 0 // Invalidate entire TLB, Inner Shareable (TLBIALLIS)
207.endmacro
208
209/*
210 * void flush_mmu_tlb_async(void)
211 *
212 * Flush all TLBs, don't wait for completion
213 */
214 .text
215 .align 2
216 .globl EXT(flush_mmu_tlb_async)
217LEXT(flush_mmu_tlb_async)
218 FLUSH_MMU_TLB
219 bx lr
220
221/*
222 * void flush_mmu_tlb(void)
223 *
224 * Flush all TLBs
225 */
226 .text
227 .align 2
228 .globl EXT(flush_mmu_tlb)
229LEXT(flush_mmu_tlb)
230 FLUSH_MMU_TLB
231 SYNC_TLB_FLUSH
232 bx lr
233
234.macro FLUSH_CORE_TLB
235 mov r0, #0
236 mcr p15, 0, r0, c8, c7, 0 // Invalidate entire TLB
237.endmacro
238
239/*
240 *
241 * void flush_core_tlb_async(void)
242 *
243 * Flush local core's TLB, don't wait for completion
244 */
245 .text
246 .align 2
247 .globl EXT(flush_core_tlb_async)
248LEXT(flush_core_tlb_async)
249 FLUSH_CORE_TLB
250 bx lr
251
252/*
253 * void flush_core_tlb(void)
254 *
 255 * Flush local core's TLB
256 */
257 .text
258 .align 2
259 .globl EXT(flush_core_tlb)
260LEXT(flush_core_tlb)
261 FLUSH_CORE_TLB
262 SYNC_TLB_FLUSH
263 bx lr
264
265.macro FLUSH_MMU_TLB_ENTRY
 266 mcr p15, 0, r0, c8, c3, 1 // Invalidate TLB Inner Shareable entry
267.endmacro
268/*
269 * void flush_mmu_tlb_entry_async(uint32_t)
270 *
271 * Flush TLB entry, don't wait for completion
272 */
273 .text
274 .align 2
275 .globl EXT(flush_mmu_tlb_entry_async)
276LEXT(flush_mmu_tlb_entry_async)
277 FLUSH_MMU_TLB_ENTRY
278 bx lr
279
280/*
281 * void flush_mmu_tlb_entry(uint32_t)
282 *
283 * Flush TLB entry
284 */
285 .text
286 .align 2
287 .globl EXT(flush_mmu_tlb_entry)
288LEXT(flush_mmu_tlb_entry)
289 FLUSH_MMU_TLB_ENTRY
290 SYNC_TLB_FLUSH
291 bx lr
292
293.macro FLUSH_MMU_TLB_ENTRIES
2941:
 295 mcr p15, 0, r0, c8, c3, 1 // Invalidate TLB Inner Shareable entry
296 add r0, r0, ARM_PGBYTES // Increment to the next page
297 cmp r0, r1 // Loop if current address < end address
298 blt 1b
299.endmacro
300
301/*
302 * void flush_mmu_tlb_entries_async(uint32_t, uint32_t)
303 *
304 * Flush TLB entries for address range, don't wait for completion
305 */
306 .text
307 .align 2
308 .globl EXT(flush_mmu_tlb_entries_async)
309LEXT(flush_mmu_tlb_entries_async)
310 FLUSH_MMU_TLB_ENTRIES
311 bx lr
312
313/*
314 * void flush_mmu_tlb_entries(uint32_t, uint32_t)
315 *
 316 * Flush TLB entries for address range
317 */
318 .text
319 .align 2
320 .globl EXT(flush_mmu_tlb_entries)
321LEXT(flush_mmu_tlb_entries)
322 FLUSH_MMU_TLB_ENTRIES
323 SYNC_TLB_FLUSH
324 bx lr
325
326
327.macro FLUSH_MMU_TLB_MVA_ENTRIES
 328 mcr p15, 0, r0, c8, c3, 3 // Invalidate TLB Inner Shareable entries by mva
 329.endmacro
 330
331/*
332 * void flush_mmu_tlb_mva_entries_async(uint32_t)
333 *
334 * Flush TLB entries for mva, don't wait for completion
335 */
336 .text
337 .align 2
338 .globl EXT(flush_mmu_tlb_mva_entries_async)
339LEXT(flush_mmu_tlb_mva_entries_async)
340 FLUSH_MMU_TLB_MVA_ENTRIES
341 bx lr
342
343/*
 344 * void flush_mmu_tlb_mva_entries(uint32_t)
345 *
346 * Flush TLB entries for mva
347 */
348 .text
349 .align 2
350 .globl EXT(flush_mmu_tlb_mva_entries)
351LEXT(flush_mmu_tlb_mva_entries)
352 FLUSH_MMU_TLB_MVA_ENTRIES
353 SYNC_TLB_FLUSH
354 bx lr
355
356.macro FLUSH_MMU_TLB_ASID
 357 mcr p15, 0, r0, c8, c3, 2 // Invalidate TLB Inner Shareable entries by asid
358.endmacro
359
360/*
361 * void flush_mmu_tlb_asid_async(uint32_t)
362 *
363 * Flush TLB entries for asid, don't wait for completion
364 */
365 .text
366 .align 2
367 .globl EXT(flush_mmu_tlb_asid_async)
368LEXT(flush_mmu_tlb_asid_async)
369 FLUSH_MMU_TLB_ASID
370 bx lr
371
372/*
373 * void flush_mmu_tlb_asid(uint32_t)
374 *
 375 * Flush TLB entries for asid
376 */
377 .text
378 .align 2
379 .globl EXT(flush_mmu_tlb_asid)
380LEXT(flush_mmu_tlb_asid)
381 FLUSH_MMU_TLB_ASID
382 SYNC_TLB_FLUSH
383 bx lr
384
385.macro FLUSH_CORE_TLB_ASID
 386 mcr p15, 0, r0, c8, c7, 2 // Invalidate TLB entries by asid
387.endmacro
388
389/*
390 * void flush_core_tlb_asid_async(uint32_t)
391 *
392 * Flush local core TLB entries for asid, don't wait for completion
393 */
394 .text
395 .align 2
396 .globl EXT(flush_core_tlb_asid_async)
397LEXT(flush_core_tlb_asid_async)
398 FLUSH_CORE_TLB_ASID
399 bx lr
400
401/*
402 * void flush_core_tlb_asid(uint32_t)
403 *
 404 * Flush local core TLB entries for asid
405 */
406 .text
407 .align 2
408 .globl EXT(flush_core_tlb_asid)
409LEXT(flush_core_tlb_asid)
410 FLUSH_CORE_TLB_ASID
411 SYNC_TLB_FLUSH
412 bx lr
413
414/*
415 * Set MMU Translation Table Base
416 */
417 .text
418 .align 2
419 .globl EXT(set_mmu_ttb)
420LEXT(set_mmu_ttb)
421 orr r0, r0, #(TTBR_SETUP & 0xFF) // Setup PTWs memory attribute
422 orr r0, r0, #(TTBR_SETUP & 0xFF00) // Setup PTWs memory attribute
423 mcr p15, 0, r0, c2, c0, 0 // write r0 to translation table 0
424 dsb ish
425 isb
426 bx lr
427
428/*
429 * Set MMU Translation Table Base Alternate
430 */
431 .text
432 .align 2
433 .globl EXT(set_mmu_ttb_alternate)
434LEXT(set_mmu_ttb_alternate)
435 orr r0, r0, #(TTBR_SETUP & 0xFF) // Setup PTWs memory attribute
436 orr r0, r0, #(TTBR_SETUP & 0xFF00) // Setup PTWs memory attribute
437 mcr p15, 0, r0, c2, c0, 1 // write r0 to translation table 1
438 dsb ish
439 isb
440 bx lr
441
442/*
443 * Set MMU Translation Table Base
444 */
445 .text
446 .align 2
447 .globl EXT(get_mmu_ttb)
448LEXT(get_mmu_ttb)
449 mrc p15, 0, r0, c2, c0, 0 // translation table to r0
450 bx lr
451
452/*
453 * get MMU control register
454 */
455 .text
456 .align 2
457 .globl EXT(get_aux_control)
458LEXT(get_aux_control)
459 mrc p15, 0, r0, c1, c0, 1 // read aux control into r0
460 bx lr // return old bits in r0
461
462/*
463 * set MMU control register
464 */
465 .text
466 .align 2
467 .globl EXT(set_aux_control)
468LEXT(set_aux_control)
469 mcr p15, 0, r0, c1, c0, 1 // write r0 back to aux control
470 isb
471 bx lr
472
473
474/*
475 * get MMU control register
476 */
477 .text
478 .align 2
479 .globl EXT(get_mmu_control)
480LEXT(get_mmu_control)
481 mrc p15, 0, r0, c1, c0, 0 // read mmu control into r0
482 bx lr // return old bits in r0
483
484/*
485 * set MMU control register
486 */
487 .text
488 .align 2
489 .globl EXT(set_mmu_control)
490LEXT(set_mmu_control)
491 mcr p15, 0, r0, c1, c0, 0 // write r0 back to mmu control
492 isb
493 bx lr
494
495/*
496 * MMU kernel virtual to physical address translation
497 */
498 .text
499 .align 2
500 .globl EXT(mmu_kvtop)
501LEXT(mmu_kvtop)
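/*
 * Rough flow: the virtual address is passed to the privileged-read address
 * translation operation (V2PCWPR) and the result is read back from PAR.
 * PAR bit 0 set means the translation aborted; bit 1 selects the supersection
 * format (24-bit page offset) over the 4KB mask used otherwise. The physical
 * frame from PAR is then merged with the offset bits of the original VA.
 */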
502 mrs r3, cpsr // Read cpsr
503 cpsid if // Disable FIQ IRQ
504 mov r1, r0
505 mcr p15, 0, r1, c7, c8, 0 // Write V2PCWPR
506 isb
507 mrc p15, 0, r0, c7, c4, 0 // Read PAR
508 ands r2, r0, #0x1 // Test conversion aborted
509 bne mmu_kvtophys_fail
510 ands r2, r0, #0x2 // Test super section
511 mvnne r2, #0xFF000000
512 moveq r2, #0x000000FF
513 orreq r2, r2, #0x00000F00
514 bics r0, r0, r2 // Clear lower bits
515 beq mmu_kvtophys_fail
516 and r1, r1, r2
517 orr r0, r0, r1
518 b mmu_kvtophys_ret
519mmu_kvtophys_fail:
520 mov r0, #0
521mmu_kvtophys_ret:
522 msr cpsr, r3 // Restore cpsr
523 bx lr
524
525/*
526 * MMU user virtual to physical address translation
527 */
528 .text
529 .align 2
530 .globl EXT(mmu_uvtop)
531LEXT(mmu_uvtop)
532 mrs r3, cpsr // Read cpsr
533 cpsid if // Disable FIQ IRQ
534 mov r1, r0
535 mcr p15, 0, r1, c7, c8, 2 // Write V2PCWUR
536 isb
537 mrc p15, 0, r0, c7, c4, 0 // Read PAR
538 ands r2, r0, #0x1 // Test conversion aborted
539 bne mmu_uvtophys_fail
540 ands r2, r0, #0x2 // Test super section
541 mvnne r2, #0xFF000000
542 moveq r2, #0x000000FF
543 orreq r2, r2, #0x00000F00
544 bics r0, r0, r2 // Clear lower bits
545 beq mmu_uvtophys_fail
546 and r1, r1, r2
547 orr r0, r0, r1
548 b mmu_uvtophys_ret
549mmu_uvtophys_fail:
550 mov r0, #0
551mmu_uvtophys_ret:
552 msr cpsr, r3 // Restore cpsr
553 bx lr
554
555/*
556 * MMU kernel virtual to physical address preflight write access
557 */
558 .text
559 .align 2
560 .globl EXT(mmu_kvtop_wpreflight)
561LEXT(mmu_kvtop_wpreflight)
562 mrs r3, cpsr // Read cpsr
563 cpsid if // Disable FIQ IRQ
564 mov r1, r0
565 mcr p15, 0, r1, c7, c8, 1 // Write V2PCWPW
566 isb
567 mrc p15, 0, r0, c7, c4, 0 // Read PAR
568 ands r2, r0, #0x1 // Test conversion aborted
569 bne mmu_kvtophys_wpreflight_fail
570 ands r2, r0, #0x2 // Test super section
571 mvnne r2, #0xFF000000
572 moveq r2, #0x000000FF
573 orreq r2, r2, #0x00000F00
574 bics r0, r0, r2 // Clear lower bits
575 beq mmu_kvtophys_wpreflight_fail // Sanity check: successful access must deliver zero low bits
576 and r1, r1, r2
577 orr r0, r0, r1
578 b mmu_kvtophys_wpreflight_ret
579mmu_kvtophys_wpreflight_fail:
580 mov r0, #0
581mmu_kvtophys_wpreflight_ret:
582 msr cpsr, r3 // Restore cpsr
583 bx lr
584
 585/*
 586 * set context id register
 587 */
591 .text
592 .align 2
593 .globl EXT(set_context_id)
594LEXT(set_context_id)
595 mcr p15, 0, r0, c13, c0, 1
596 isb
597 bx lr
598
599/*
600 * arg0: prefix of the external validator function (copyin or copyout)
601 * arg1: 0-based index of highest argument register that must be preserved
602 */
603.macro COPYIO_VALIDATE
604 /* call NAME_validate to check the arguments */
605 push {r0-r$1, r7, lr}
606 add r7, sp, #(($1 + 1) * 4)
607 blx EXT($0_validate)
608 cmp r0, #0
609 addne sp, #(($1 + 1) * 4)
610 popne {r7, pc}
611 pop {r0-r$1, r7, lr}
612.endmacro
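/*
 * If the validator returns non-zero, the saved argument registers are simply
 * discarded and that value is returned straight to the original caller
 * (the popne {r7, pc}); otherwise the arguments are restored and the copy
 * routine continues.
 */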
613
614
615#define COPYIO_SET_RECOVER() \
616 /* set recovery address */ ;\
617 stmfd sp!, { r4, r5, r6 } ;\
618 adr r3, copyio_error ;\
619 mrc p15, 0, r12, c13, c0, 4 ;\
620 ldr r4, [r12, TH_RECOVER] ;\
621 str r3, [r12, TH_RECOVER]
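/*
 * COPYIO_SET_RECOVER saves the thread's current recovery pointer in r4 and
 * installs copyio_error in its place, so a fault while touching user memory
 * unwinds to copyio_error (below) and the routine returns EFAULT rather than
 * taking an unhandled kernel fault.
 */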
622
623#define COPYIO_TRY_KERNEL() \
624 /* if (current_thread()->map->pmap == kernel_pmap) copyio_kernel() */ ;\
625 mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW ;\
626 ldr r3, [r12, ACT_MAP] ;\
627 ldr r3, [r3, MAP_PMAP] ;\
628 LOAD_ADDR(ip, kernel_pmap_store) ;\
629 cmp r3, ip ;\
630 beq copyio_kern_body
631
632#if __ARM_USER_PROTECT__
633#define COPYIO_MAP_USER() \
634 /* disable interrupts to prevent expansion to 2GB at L1 ;\
635 * between loading ttep and storing it in ttbr0.*/ ;\
636 mrs r5, cpsr ;\
637 cpsid if ;\
638 ldr r3, [r12, ACT_UPTW_TTB] ;\
639 mcr p15, 0, r3, c2, c0, 0 ;\
640 msr cpsr, r5 ;\
641 ldr r3, [r12, ACT_ASID] ;\
642 mcr p15, 0, r3, c13, c0, 1 ;\
643 isb
644#else
645#define COPYIO_MAP_USER()
646#endif
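/*
 * With __ARM_USER_PROTECT__, the user address space is not normally mapped
 * while running in the kernel; COPYIO_MAP_USER temporarily points TTBR0 at
 * the thread's user page tables and restores its ASID so user addresses can
 * be dereferenced directly, and COPYIO_UNMAP_USER (below) switches back to
 * the kernel tables.
 */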
647
 648#define COPYIO_HEADER() ;\
649 /* test for zero len */ ;\
650 cmp r2, #0 ;\
651 moveq r0, #0 ;\
652 bxeq lr
653
654.macro COPYIO_BODY
655 /* if len is less than 16 bytes, just do a simple copy */
656 cmp r2, #16
657 blt L$0_bytewise
658 /* test for src and dest of the same word alignment */
659 orr r3, r0, r1
660 tst r3, #3
661 bne L$0_bytewise
662L$0_wordwise:
663 sub r2, r2, #16
664L$0_wordwise_loop:
665 /* 16 bytes at a time */
666 ldmia r0!, { r3, r5, r6, r12 }
667 stmia r1!, { r3, r5, r6, r12 }
668 subs r2, r2, #16
669 bge L$0_wordwise_loop
670 /* fixup the len and test for completion */
671 adds r2, r2, #16
672 beq L$0_noerror
673L$0_bytewise:
674 /* copy 2 bytes at a time */
675 subs r2, r2, #2
676 ldrb r3, [r0], #1
677 ldrbpl r12, [r0], #1
678 strb r3, [r1], #1
679 strbpl r12, [r1], #1
680 bhi L$0_bytewise
681L$0_noerror:
682 mov r0, #0
683.endmacro
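/*
 * Copy strategy used above: when at least 16 bytes remain and source and
 * destination share word alignment, data is moved 16 bytes at a time with
 * ldmia/stmia; everything else falls back to the two-bytes-per-iteration
 * bytewise loop.
 */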
684
685#if __ARM_USER_PROTECT__
686#define COPYIO_UNMAP_USER() \
687 mrc p15, 0, r12, c13, c0, 4 ;\
688 ldr r3, [r12, ACT_KPTW_TTB] ;\
689 mcr p15, 0, r3, c2, c0, 0 ;\
690 mov r3, #0 ;\
691 mcr p15, 0, r3, c13, c0, 1 ;\
692 isb
693#else
694#define COPYIO_UNMAP_USER() \
695 mrc p15, 0, r12, c13, c0, 4
696#endif
697
698#define COPYIO_RESTORE_RECOVER() \
699 /* restore the recovery address */ ;\
700 str r4, [r12, TH_RECOVER] ;\
701 ldmfd sp!, { r4, r5, r6 }
702
703/*
704 * int copyinstr(
705 * const user_addr_t user_addr,
706 * char *kernel_addr,
707 * vm_size_t max,
708 * vm_size_t *actual)
709 */
710 .text
711 .align 2
712 .globl EXT(copyinstr)
713LEXT(copyinstr)
714 cmp r2, #0
715 moveq r0, #ENAMETOOLONG
716 moveq r12, #0
717 streq r12, [r3]
718 bxeq lr
 719 COPYIO_VALIDATE copyin_user, 3
720 stmfd sp!, { r4, r5, r6 }
721
722 mov r6, r3
723 adr r3, copyinstr_error // Get address for recover
724 mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
 725 ldr r4, [r12, TH_RECOVER] // Save the thread's current recovery handler
 726 str r3, [r12, TH_RECOVER] // Install copyinstr_error as the recovery handler
727 COPYIO_MAP_USER()
728 mov r12, #0 // Number of bytes copied so far
729copyinstr_loop:
730 ldrb r3, [r0], #1 // Load a byte from the source (user)
731 strb r3, [r1], #1 // Store a byte to the destination (kernel)
732 add r12, r12, #1
733 cmp r3, #0
734 beq copyinstr_done
735 cmp r12, r2 // Room to copy more bytes?
736 bne copyinstr_loop
737//
738// Ran out of space in the destination buffer, so return ENAMETOOLONG.
739//
740copyinstr_too_long:
741 mov r3, #ENAMETOOLONG
742copyinstr_done:
743//
744// When we get here, we have finished copying the string. We came here from
 745// either the "beq copyinstr_done" above, in which case r3 == 0 (which is also
 746// the function result for success), or falling through from copyinstr_too_long,
 747// in which case r3 == ENAMETOOLONG.
748//
749 str r12, [r6] // Save the count for actual
750 mov r0, r3 // Return error code from r3
751copyinstr_exit:
752 COPYIO_UNMAP_USER()
753 str r4, [r12, TH_RECOVER]
754 ldmfd sp!, { r4, r5, r6 }
755 bx lr
756
757copyinstr_error:
758 /* set error, exit routine */
759 mov r0, #EFAULT
760 b copyinstr_exit
761
762/*
763 * int copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
764 */
765 .text
766 .align 2
767 .globl EXT(copyin)
768LEXT(copyin)
 769 COPYIO_HEADER()
 770 COPYIO_VALIDATE copyin, 2
 771 COPYIO_TRY_KERNEL()
772 COPYIO_SET_RECOVER()
773 COPYIO_MAP_USER()
774 COPYIO_BODY copyin
775 COPYIO_UNMAP_USER()
776 COPYIO_RESTORE_RECOVER()
777 bx lr
778
779/*
780 * int copyout(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
781 */
782 .text
783 .align 2
784 .globl EXT(copyout)
785LEXT(copyout)
 786 COPYIO_HEADER()
 787 COPYIO_VALIDATE copyout, 2
 788 COPYIO_TRY_KERNEL()
789 COPYIO_SET_RECOVER()
790 COPYIO_MAP_USER()
791 COPYIO_BODY copyout
792 COPYIO_UNMAP_USER()
793 COPYIO_RESTORE_RECOVER()
794 bx lr
795
796
797/*
798 * int copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
799 * r0: user_addr
800 * r1: kernel_addr
801 */
802 .text
803 .align 2
804 .globl EXT(copyin_atomic32)
805LEXT(copyin_atomic32)
806 tst r0, #3 // Test alignment of user address
807 bne 2f
808
809 mov r2, #4
810 COPYIO_VALIDATE copyin_user, 1
811 COPYIO_SET_RECOVER()
812 COPYIO_MAP_USER()
813
814 ldr r2, [r0] // Load word from user
815 str r2, [r1] // Store to kernel_addr
816 mov r0, #0 // Success
817
818 COPYIO_UNMAP_USER()
819 COPYIO_RESTORE_RECOVER()
820 bx lr
8212: // misaligned copyin
822 mov r0, #EINVAL
823 bx lr
824
825/*
826 * int copyin_atomic32_wait_if_equals(const char *src, uint32_t value)
827 * r0: user_addr
828 * r1: value
829 */
830 .text
831 .align 2
832 .globl EXT(copyin_atomic32_wait_if_equals)
833LEXT(copyin_atomic32_wait_if_equals)
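/*
 * The user word is load-exclusived; if it no longer equals the expected value,
 * ESTALE is returned. Otherwise WFE is executed with the exclusive monitor
 * still armed, so the core can sleep until the monitor is cleared (for example
 * by another agent writing that location) or some other event arrives.
 */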
834 tst r0, #3 // Test alignment of user address
835 bne 2f
836
837 mov r2, r0
838 mov r3, #4
839 COPYIO_VALIDATE copyio_user, 1 // validate user address (uses r2, r3)
840 COPYIO_SET_RECOVER()
841 COPYIO_MAP_USER()
842
843 ldrex r2, [r0]
844 cmp r2, r1
 845 movne r0, #ESTALE // Value no longer matches the caller's expectation
846 bne 1f
847 mov r0, #0
848 wfe
8491:
850 clrex
851
852 COPYIO_UNMAP_USER()
853 COPYIO_RESTORE_RECOVER()
854 bx lr
8552: // misaligned copyin
856 mov r0, #EINVAL
857 bx lr
858
859/*
860 * int copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
861 * r0: user_addr
862 * r1: kernel_addr
863 */
864 .text
865 .align 2
866 .globl EXT(copyin_atomic64)
867LEXT(copyin_atomic64)
868 tst r0, #7 // Test alignment of user address
869 bne 2f
 870
871 mov r2, #8
872 COPYIO_VALIDATE copyin_user, 1
873 COPYIO_SET_RECOVER()
874 COPYIO_MAP_USER()
875
8761: // ldrex/strex retry loop
877 ldrexd r2, r3, [r0] // Load double word from user
878 strexd r5, r2, r3, [r0] // (the COPYIO_*() macros make r5 safe to use as a scratch register here)
879 cmp r5, #0
880 bne 1b
881 stm r1, {r2, r3} // Store to kernel_addr
882 mov r0, #0 // Success
883
884 COPYIO_UNMAP_USER()
885 COPYIO_RESTORE_RECOVER()
886 bx lr
 8872: // misaligned copyin
888 mov r0, #EINVAL
889 bx lr
890
891
892copyio_error:
893 mov r0, #EFAULT
894 COPYIO_UNMAP_USER()
895 str r4, [r12, TH_RECOVER]
896 ldmfd sp!, { r4, r5, r6 }
897 bx lr
898
899
900/*
901 * int copyout_atomic32(uint32_t value, user_addr_t user_addr)
902 * r0: value
903 * r1: user_addr
904 */
905 .text
906 .align 2
907 .globl EXT(copyout_atomic32)
908LEXT(copyout_atomic32)
909 tst r1, #3 // Test alignment of user address
910 bne 2f
911
912 mov r2, r1
913 mov r3, #4
914 COPYIO_VALIDATE copyio_user, 1 // validate user address (uses r2, r3)
915 COPYIO_SET_RECOVER()
916 COPYIO_MAP_USER()
917
918 str r0, [r1] // Store word to user
919 mov r0, #0 // Success
920
921 COPYIO_UNMAP_USER()
922 COPYIO_RESTORE_RECOVER()
923 bx lr
9242: // misaligned copyout
925 mov r0, #EINVAL
926 bx lr
927
928
929/*
930 * int copyout_atomic64(uint64_t value, user_addr_t user_addr)
931 * r0, r1: value
932 * r2: user_addr
933 */
934 .text
935 .align 2
936 .globl EXT(copyout_atomic64)
937LEXT(copyout_atomic64)
938 tst r2, #7 // Test alignment of user address
939 bne 2f
940
941 mov r3, #8
942 COPYIO_VALIDATE copyio_user, 2 // validate user address (uses r2, r3)
943 COPYIO_SET_RECOVER()
944 COPYIO_MAP_USER()
945
9461: // ldrex/strex retry loop
947 ldrexd r4, r5, [r2]
948 strexd r3, r0, r1, [r2] // Atomically store double word to user
949 cmp r3, #0
950 bne 1b
951
952 mov r0, #0 // Success
953
954 COPYIO_UNMAP_USER()
955 COPYIO_RESTORE_RECOVER()
956 bx lr
9572: // misaligned copyout
958 mov r0, #EINVAL
959 bx lr
960
961
962/*
963 * int copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
964 */
965 .text
966 .align 2
967 .globl EXT(copyin_kern)
968LEXT(copyin_kern)
969 COPYIO_HEADER()
970 b copyio_kern_body
5ba3f43e
A
971
972/*
973 * int copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
974 */
975 .text
976 .align 2
977 .globl EXT(copyout_kern)
978LEXT(copyout_kern)
979 COPYIO_HEADER()
980 b copyio_kern_body
 981
 982copyio_kern_body:
983 stmfd sp!, { r5, r6 }
984 COPYIO_BODY copyio_kernel
985 ldmfd sp!, { r5, r6 }
986 bx lr
987
988/*
989 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr)
990 *
991 * Safely copy eight bytes (the fixed top of an ARM frame) from
992 * either user or kernel memory.
993 */
994 .text
995 .align 2
996 .globl EXT(copyinframe)
997LEXT(copyinframe)
998 COPYIO_SET_RECOVER()
999 COPYIO_MAP_USER()
1000 ldmia r0, {r2, r3}
1001 stmia r1, {r2, r3}
1002 b Lcopyin_noerror
1003
1004/*
1005 * uint32_t arm_debug_read_dscr(void)
1006 */
1007 .text
1008 .align 2
1009 .globl EXT(arm_debug_read_dscr)
1010LEXT(arm_debug_read_dscr)
1011#if __ARM_DEBUG__ >= 6
1012 mrc p14, 0, r0, c0, c1
1013#else
1014 mov r0, #0
1015#endif
1016 bx lr
1017
1018/*
1019 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
1020 *
1021 * Set debug registers to match the current thread state
1022 * (NULL to disable). Assume 6 breakpoints and 2
1023 * watchpoints, since that has been the case in all cores
1024 * thus far.
1025 */
1026 .text
1027 .align 2
1028 .globl EXT(arm_debug_set_cp14)
1029LEXT(arm_debug_set_cp14)
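/*
 * The register programming below implies a debug_state layout with 16 slots
 * per register class: 6 BVRs are written and 10 skipped (the "add #40"),
 * 6 BCRs likewise, then 2 WVRs with 14 skipped (the "add #56"), then 2 WCRs.
 */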
1030#if __ARM_DEBUG__ >= 6
1031 mrc p15, 0, r1, c13, c0, 4 // Read TPIDRPRW
1032 ldr r2, [r1, ACT_CPUDATAP] // Get current cpu
1033 str r0, [r2, CPU_USER_DEBUG] // Set current user debug
1034
1035 // Lock the debug registers
1036 movw ip, #0xCE55
1037 movt ip, #0xC5AC
1038 mcr p14, 0, ip, c1, c0, 4
1039
1040 // enable monitor mode (needed to set and use debug registers)
1041 mrc p14, 0, ip, c0, c1, 0
1042 orr ip, ip, #0x8000 // set MDBGen = 1
1043#if __ARM_DEBUG__ >= 7
1044 mcr p14, 0, ip, c0, c2, 2
1045#else
1046 mcr p14, 0, ip, c0, c1, 0
1047#endif
1048 // first turn off all breakpoints/watchpoints
1049 mov r1, #0
1050 mcr p14, 0, r1, c0, c0, 5 // BCR0
1051 mcr p14, 0, r1, c0, c1, 5 // BCR1
1052 mcr p14, 0, r1, c0, c2, 5 // BCR2
1053 mcr p14, 0, r1, c0, c3, 5 // BCR3
1054 mcr p14, 0, r1, c0, c4, 5 // BCR4
1055 mcr p14, 0, r1, c0, c5, 5 // BCR5
1056 mcr p14, 0, r1, c0, c0, 7 // WCR0
1057 mcr p14, 0, r1, c0, c1, 7 // WCR1
1058 // if (debug_state == NULL) disable monitor mode and return;
1059 cmp r0, #0
1060 biceq ip, ip, #0x8000 // set MDBGen = 0
1061#if __ARM_DEBUG__ >= 7
1062 mcreq p14, 0, ip, c0, c2, 2
1063#else
1064 mcreq p14, 0, ip, c0, c1, 0
1065#endif
1066 bxeq lr
1067 ldmia r0!, {r1, r2, r3, ip}
1068 mcr p14, 0, r1, c0, c0, 4 // BVR0
1069 mcr p14, 0, r2, c0, c1, 4 // BVR1
1070 mcr p14, 0, r3, c0, c2, 4 // BVR2
1071 mcr p14, 0, ip, c0, c3, 4 // BVR3
1072 ldmia r0!, {r1, r2}
1073 mcr p14, 0, r1, c0, c4, 4 // BVR4
1074 mcr p14, 0, r2, c0, c5, 4 // BVR5
1075 add r0, r0, #40 // advance to bcr[0]
1076 ldmia r0!, {r1, r2, r3, ip}
1077 mcr p14, 0, r1, c0, c0, 5 // BCR0
1078 mcr p14, 0, r2, c0, c1, 5 // BCR1
1079 mcr p14, 0, r3, c0, c2, 5 // BCR2
1080 mcr p14, 0, ip, c0, c3, 5 // BCR3
1081 ldmia r0!, {r1, r2}
1082 mcr p14, 0, r1, c0, c4, 5 // BCR4
1083 mcr p14, 0, r2, c0, c5, 5 // BCR5
1084 add r0, r0, #40 // advance to wvr[0]
1085 ldmia r0!, {r1, r2}
1086 mcr p14, 0, r1, c0, c0, 6 // WVR0
1087 mcr p14, 0, r2, c0, c1, 6 // WVR1
1088 add r0, r0, #56 // advance to wcr[0]
1089 ldmia r0!, {r1, r2}
1090 mcr p14, 0, r1, c0, c0, 7 // WCR0
1091 mcr p14, 0, r2, c0, c1, 7 // WCR1
1092
1093 // Unlock debug registers
1094 mov ip, #0
1095 mcr p14, 0, ip, c1, c0, 4
1096#endif
1097 bx lr
1098
1099/*
1100 * void fiq_context_init(boolean_t enable_fiq)
1101 */
1102 .text
1103 .align 2
1104 .globl EXT(fiq_context_init)
1105LEXT(fiq_context_init)
1106 mrs r3, cpsr // Save current CPSR
1107 cmp r0, #0 // Test enable_fiq
1108 bicne r3, r3, #PSR_FIQF // Enable FIQ if not FALSE
1109 mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
1110 ldr r2, [r12, ACT_CPUDATAP] // Get current cpu data
1111
1112#if __ARM_TIME__
1113 /* Despite the fact that we use the physical timebase
1114 * register as the basis for time on our platforms, we
1115 * end up using the virtual timer in order to manage
1116 * deadlines. This is due to the fact that for our
1117 * current platforms, the interrupt generated by the
1118 * physical timer is not hooked up to anything, and is
1119 * therefore dropped on the floor. Therefore, for
1120 * timers to function they MUST be based on the virtual
1121 * timer.
1122 */
1123
1124 mov r0, #1 // Enable Timer
1125 mcr p15, 0, r0, c14, c3, 1 // Write to CNTV_CTL
1126
1127 /* Enable USER access to the physical timebase (PL0PCTEN).
1128 * The rationale for providing access to the physical
1129 * timebase being that the virtual timebase is broken for
1130 * some platforms. Maintaining the offset ourselves isn't
1131 * expensive, so mandate that the userspace implementation
 1132 * do timebase_phys+offset rather than trying to propagate
 1133 * all of the information about what works up to USER.
1134 */
1135 mcr p15, 0, r0, c14, c1, 0 // Set CNTKCTL.PL0PCTEN (CNTKCTL[0])
1136
1137#else /* ! __ARM_TIME__ */
1138 msr cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF) // Change mode to FIQ with FIQ/IRQ disabled
1139 mov r8, r2 // Load the BootCPUData address
1140 ldr r9, [r2, CPU_GET_FIQ_HANDLER] // Load fiq function address
1141 ldr r10, [r2, CPU_TBD_HARDWARE_ADDR] // Load the hardware address
1142 ldr r11, [r2, CPU_TBD_HARDWARE_VAL] // Load the hardware value
1143#endif /* __ARM_TIME__ */
1144
1145 msr cpsr_c, r3 // Restore saved CPSR
1146 bx lr
1147
1148/*
1149 * void reenable_async_aborts(void)
1150 */
1151 .text
1152 .align 2
1153 .globl EXT(reenable_async_aborts)
1154LEXT(reenable_async_aborts)
1155 cpsie a // Re-enable async aborts
1156 bx lr
1157
1158/*
 1159 * uint64_t ml_get_speculative_timebase(void)
1160 */
1161 .text
1162 .align 2
1163 .globl EXT(ml_get_speculative_timebase)
1164LEXT(ml_get_speculative_timebase)
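/*
 * CNTPCT is read with a high/low/high sequence and re-read until the two high
 * words match, so a carry between the two 32-bit halves cannot produce a torn
 * value; the per-cpu base timebase offset is then added to yield
 * mach_absolute_time units.
 */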
11651:
1166 mrrc p15, 0, r3, r1, c14 // Read the Time Base (CNTPCT), high => r1
1167 mrrc p15, 0, r0, r3, c14 // Read the Time Base (CNTPCT), low => r0
1168 mrrc p15, 0, r3, r2, c14 // Read the Time Base (CNTPCT), high => r2
1169 cmp r1, r2
1170 bne 1b // Loop until both high values are the same
1171
 1172 mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
1173 ldr r3, [r12, ACT_CPUDATAP] // Get current cpu data
1174 ldr r2, [r3, CPU_BASE_TIMEBASE_LOW] // Add in the offset to
1175 adds r0, r0, r2 // convert to
1176 ldr r2, [r3, CPU_BASE_TIMEBASE_HIGH] // mach_absolute_time
1177 adc r1, r1, r2 //
1178 bx lr // return
1179
1180/*
1181 * uint64_t ml_get_timebase(void)
1182 */
1183 .text
1184 .align 2
1185 .globl EXT(ml_get_timebase)
1186LEXT(ml_get_timebase)
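/*
 * The ISB keeps the counter read from being performed speculatively, ahead of
 * program order, which ml_get_speculative_timebase above explicitly allows.
 */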
 1187 isb // Required by the ARMv7 ARM (issue C.b) section B8.1.2 and the ARMv8 ARM section D6.1.2.
1188 b EXT(ml_get_speculative_timebase)
1189
1190/*
1191 * uint64_t ml_get_timebase_entropy(void)
1192 */
1193 .text
1194 .align 2
1195 .globl EXT(ml_get_timebase_entropy)
1196LEXT(ml_get_timebase_entropy)
1197 b EXT(ml_get_speculative_timebase)
1198
1199/*
1200 * uint32_t ml_get_decrementer(void)
1201 */
1202 .text
1203 .align 2
1204 .globl EXT(ml_get_decrementer)
1205LEXT(ml_get_decrementer)
1206 mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
1207 ldr r3, [r12, ACT_CPUDATAP] // Get current cpu data
1208 ldr r2, [r3, CPU_GET_DECREMENTER_FUNC] // Get get_decrementer_func
1209 cmp r2, #0
1210 bxne r2 // Call it if there is one
1211#if __ARM_TIME__
1212 mrc p15, 0, r0, c14, c3, 0 // Read the Decrementer (CNTV_TVAL)
1213#else
1214 ldr r0, [r3, CPU_DECREMENTER] // Get the saved dec value
1215#endif
1216 bx lr // return
1217
1218
1219/*
1220 * void ml_set_decrementer(uint32_t dec_value)
1221 */
1222 .text
1223 .align 2
1224 .globl EXT(ml_set_decrementer)
1225LEXT(ml_set_decrementer)
1226 mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
1227 ldr r3, [r12, ACT_CPUDATAP] // Get current cpu data
1228 ldr r2, [r3, CPU_SET_DECREMENTER_FUNC] // Get set_decrementer_func
1229 cmp r2, #0
1230 bxne r2 // Call it if there is one
1231#if __ARM_TIME__
1232 str r0, [r3, CPU_DECREMENTER] // Save the new dec value
1233 mcr p15, 0, r0, c14, c3, 0 // Write the Decrementer (CNTV_TVAL)
1234#else
1235 mrs r2, cpsr // Save current CPSR
1236 msr cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF) // Change mode to FIQ with FIQ/IRQ disabled.
1237 mov r12, r0 // Set the DEC value
1238 str r12, [r8, CPU_DECREMENTER] // Store DEC
1239 msr cpsr_c, r2 // Restore saved CPSR
1240#endif
1241 bx lr
1242
1243
1244/*
1245 * boolean_t ml_get_interrupts_enabled(void)
1246 */
1247 .text
1248 .align 2
1249 .globl EXT(ml_get_interrupts_enabled)
1250LEXT(ml_get_interrupts_enabled)
1251 mrs r2, cpsr
1252 mov r0, #1
1253 bic r0, r0, r2, lsr #PSR_IRQFb
1254 bx lr
1255
1256/*
1257 * Platform Specific Timebase & Decrementer Functions
1258 *
1259 */
1260
1261#if defined(ARM_BOARD_CLASS_T8002)
1262 .text
1263 .align 2
1264 .globl EXT(fleh_fiq_t8002)
1265LEXT(fleh_fiq_t8002)
1266 mov r13, #kAICTmrIntStat
1267 str r11, [r10, r13] // Clear the decrementer interrupt
1268 mvn r13, #0
1269 str r13, [r8, CPU_DECREMENTER]
1270 b EXT(fleh_dec)
1271
1272 .text
1273 .align 2
1274 .globl EXT(t8002_get_decrementer)
1275LEXT(t8002_get_decrementer)
1276 ldr ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
1277 mov r0, #kAICTmrCnt
1278 add ip, ip, r0
1279 ldr r0, [ip] // Get the Decrementer
1280 bx lr
1281
1282 .text
1283 .align 2
1284 .globl EXT(t8002_set_decrementer)
1285LEXT(t8002_set_decrementer)
1286 str r0, [r3, CPU_DECREMENTER] // Save the new dec value
1287 ldr ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
1288 mov r5, #kAICTmrCnt
1289 str r0, [ip, r5] // Store the new Decrementer
1290 bx lr
1291#endif /* defined(ARM_BOARD_CLASS_T8002) */
1292
1293LOAD_ADDR_GEN_DEF(kernel_pmap_store)
1294
1295#include "globals_asm.h"
1296
1297/* vim: set ts=4: */