/*
 * Copyright (c) 2007-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <arm/pmap.h>
#include <sys/errno.h>
#include "assym.s"

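/*
 * void machine_set_current_thread(thread_t thread)
 *
 * Stash the thread_t in TPIDRPRW, publish the thread's cthread self
 * pointer (keeping the cpu number in its low two bits) via TPIDRURO,
 * and load TPIDRURW with the user-writable thread data value.
 */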
        .align 2
        .globl EXT(machine_set_current_thread)
LEXT(machine_set_current_thread)
        mcr     p15, 0, r0, c13, c0, 4          // Write TPIDRPRW
        ldr     r1, [r0, TH_CTH_SELF]
        mrc     p15, 0, r2, c13, c0, 3          // Read TPIDRURO
        and     r2, r2, #3                      // Extract cpu number
        orr     r1, r1, r2                      // Insert cpu number into cthread self
        mcr     p15, 0, r1, c13, c0, 3          // Write TPIDRURO
        ldr     r1, [r0, TH_CTH_DATA]
        mcr     p15, 0, r1, c13, c0, 2          // Write TPIDRURW
        bx      lr

/*
 * void machine_idle(void)
 */
        .text
        .align 2
        .globl EXT(machine_idle)
LEXT(machine_idle)
        cpsid   if                              // Disable FIQ and IRQ
        mov     ip, lr
        bl      EXT(Idle_context)
        mov     lr, ip
        cpsie   if                              // Enable FIQ and IRQ
        bx      lr

/*
 * void cpu_idle_wfi(boolean_t wfi_fast):
 *	cpu_idle is the only function that should call this.
 */
        .text
        .align 2
        .globl EXT(cpu_idle_wfi)
LEXT(cpu_idle_wfi)
        mov     r1, #32
        mov     r2, #1200
        cmp     r0, #0
        beq     3f
        mov     r1, #1
        b       2f
        .align 5
1:
        add     r0, r0, #1
        mov     r1, r2
2:

/*
 * We export the address of the WFI instruction so that it can be patched; this will be
 * ugly from a debugging perspective.
 */

#if (__ARM_ARCH__ >= 7)
        dsb
        .globl EXT(wfi_inst)
LEXT(wfi_inst)
        wfi
#else
        mcr     p15, 0, r0, c7, c10, 4          // Data synchronization barrier (pre-v7 encoding)
        .globl EXT(wfi_inst)
LEXT(wfi_inst)
        mcr     p15, 0, r0, c7, c0, 4           // Wait for interrupt (pre-v7 encoding)
#endif
3:
        subs    r1, r1, #1
        bne     3b
        nop
        nop
        nop
        nop
        nop
        cmp     r0, #0
        beq     1b
        bx      lr

        .align 2
        .globl EXT(timer_grab)
LEXT(timer_grab)
0:
        ldr     r2, [r0, TIMER_HIGH]
        ldr     r3, [r0, TIMER_LOW]
#if __ARM_SMP__
        dmb     ish
#endif
        ldr     r1, [r0, TIMER_HIGHCHK]
        cmp     r1, r2
        bne     0b
        mov     r0, r3
        bx      lr

        .align 2
        .globl EXT(timer_update)
LEXT(timer_update)
        str     r1, [r0, TIMER_HIGHCHK]
#if __ARM_SMP__
        dmb     ish
#endif
        str     r2, [r0, TIMER_LOW]
#if __ARM_SMP__
        dmb     ish
#endif
        str     r1, [r0, TIMER_HIGH]
        bx      lr

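/*
 * The pair above implements a seqlock-style consistent read of a
 * 64-bit timer that is updated without locks. A hedged C sketch of
 * the same protocol (timer_sketch_t is hypothetical; the real field
 * offsets are TIMER_HIGH/TIMER_LOW/TIMER_HIGHCHK from assym.s):
 *
 *	typedef struct {
 *		volatile uint32_t high, low, highchk;
 *	} timer_sketch_t;
 *
 *	static uint64_t
 *	timer_grab_sketch(timer_sketch_t *t)
 *	{
 *		uint32_t hi, lo;
 *		do {
 *			hi = t->high;
 *			lo = t->low;
 *			__asm__ volatile("dmb ish" ::: "memory");
 *		} while (t->highchk != hi);	// retry if an update raced us
 *		return ((uint64_t)hi << 32) | lo;
 *	}
 *
 *	static void
 *	timer_update_sketch(timer_sketch_t *t, uint32_t hi, uint32_t lo)
 *	{
 *		t->highchk = hi;		// announce the new high word
 *		__asm__ volatile("dmb ish" ::: "memory");
 *		t->low = lo;
 *		__asm__ volatile("dmb ish" ::: "memory");
 *		t->high = hi;			// readers accept once high == highchk
 *	}
 */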
        .align 2
        .globl EXT(get_vfp_enabled)
LEXT(get_vfp_enabled)
#if __ARM_VFP__
        fmrx    r0, fpexc
        and     r1, r0, #FPEXC_EN               // Extract vfp enable previous state
        mov     r0, r1, LSR #FPEXC_EN_BIT       // Return 1 if enabled, 0 if disabled
#else
        mov     r0, #0                          // return false
#endif
        bx      lr

/* This is no longer useful (but is exported, so this may require kext cleanup). */
        .align 2
        .globl EXT(enable_kernel_vfp_context)
LEXT(enable_kernel_vfp_context)
        bx      lr

/* uint32_t get_fpscr(void):
 *	Returns the current state of the FPSCR register.
 */
        .align 2
        .globl EXT(get_fpscr)
LEXT(get_fpscr)
#if __ARM_VFP__
        fmrx    r0, fpscr
#endif
        bx      lr

/* void set_fpscr(uint32_t value):
 *	Set the FPSCR register.
 */
        .align 2
        .globl EXT(set_fpscr)
LEXT(set_fpscr)
#if __ARM_VFP__
        fmxr    fpscr, r0
#else
        mov     r0, #0
#endif
        bx      lr

#if (__ARM_VFP__ >= 3)
        .align 2
        .globl EXT(get_mvfr0)
LEXT(get_mvfr0)
        vmrs    r0, mvfr0
        bx      lr
        .globl EXT(get_mvfr1)
LEXT(get_mvfr1)
        vmrs    r0, mvfr1
        bx      lr
#endif

/*
 * void OSSynchronizeIO(void)
 */
        .text
        .align 2
        .globl EXT(OSSynchronizeIO)
LEXT(OSSynchronizeIO)
        dsb
        bx      lr

/*
 * void flush_mmu_tlb(void)
 *
 *	Flush all TLBs
 */
        .text
        .align 2
        .globl EXT(flush_mmu_tlb)
LEXT(flush_mmu_tlb)
        mov     r0, #0
#if __ARM_SMP__
        mcr     p15, 0, r0, c8, c3, 0           // Invalidate Inner Shareable entire TLBs
#else
        mcr     p15, 0, r0, c8, c7, 0           // Invalidate entire TLB
#endif
        dsb     ish
        isb
        bx      lr

/*
 * void flush_core_tlb(void)
 *
 *	Flush core TLB
 */
        .text
        .align 2
        .globl EXT(flush_core_tlb)
LEXT(flush_core_tlb)
        mov     r0, #0
        mcr     p15, 0, r0, c8, c7, 0           // Invalidate entire TLB
        dsb     ish
        isb
        bx      lr

/*
 * void flush_mmu_tlb_entry(uint32_t)
 *
 *	Flush TLB entry
 */
        .text
        .align 2
        .globl EXT(flush_mmu_tlb_entry)
LEXT(flush_mmu_tlb_entry)
#if __ARM_SMP__
        mcr     p15, 0, r0, c8, c3, 1           // Invalidate TLB Inner Shareable entry
#else
        mcr     p15, 0, r0, c8, c7, 1           // Invalidate TLB entry
#endif
        dsb     ish
        isb
        bx      lr

/*
 * void flush_mmu_tlb_entries(uint32_t, uint32_t)
 *
 *	Flush TLB entries
 */
        .text
        .align 2
        .globl EXT(flush_mmu_tlb_entries)
LEXT(flush_mmu_tlb_entries)
1:
#if __ARM_SMP__
        mcr     p15, 0, r0, c8, c3, 1           // Invalidate TLB Inner Shareable entry
#else
        mcr     p15, 0, r0, c8, c7, 1           // Invalidate TLB entry
#endif
        add     r0, r0, ARM_PGBYTES             // Increment to the next page
        cmp     r0, r1                          // Loop if current address < end address
        blt     1b
        dsb     ish                             // Synchronize
        isb
        bx      lr


/*
 * void flush_mmu_tlb_mva_entries(uint32_t)
 *
 *	Flush TLB entries for mva
 */
        .text
        .align 2
        .globl EXT(flush_mmu_tlb_mva_entries)
LEXT(flush_mmu_tlb_mva_entries)
#if __ARM_SMP__
        mcr     p15, 0, r0, c8, c3, 3           // Invalidate TLB Inner Shareable entries by mva
#else
        mcr     p15, 0, r0, c8, c7, 3           // Invalidate TLB entries by mva
#endif
        dsb     ish
        isb
        bx      lr

/*
 * void flush_mmu_tlb_asid(uint32_t)
 *
 *	Flush TLB entries for requested asid
 */
        .text
        .align 2
        .globl EXT(flush_mmu_tlb_asid)
LEXT(flush_mmu_tlb_asid)
#if __ARM_SMP__
        mcr     p15, 0, r0, c8, c3, 2           // Invalidate TLB Inner Shareable entries by asid
#else
        mcr     p15, 0, r0, c8, c7, 2           // Invalidate TLB entries by asid
#endif
        dsb     ish
        isb
        bx      lr

/*
 * void flush_core_tlb_asid(uint32_t)
 *
 *	Flush this core's TLB entries for the requested asid
 */
        .text
        .align 2
        .globl EXT(flush_core_tlb_asid)
LEXT(flush_core_tlb_asid)
        mcr     p15, 0, r0, c8, c7, 2           // Invalidate TLB entries by asid
        dsb     ish
        isb
        bx      lr

/*
 * Set MMU Translation Table Base
 */
        .text
        .align 2
        .globl EXT(set_mmu_ttb)
LEXT(set_mmu_ttb)
        orr     r0, r0, #(TTBR_SETUP & 0xFF)    // Setup PTWs memory attribute
        orr     r0, r0, #(TTBR_SETUP & 0xFF00)  // Setup PTWs memory attribute
        mcr     p15, 0, r0, c2, c0, 0           // write r0 to translation table 0
        dsb     ish
        isb
        bx      lr

/*
 * Set MMU Translation Table Base Alternate
 */
        .text
        .align 2
        .globl EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
        orr     r0, r0, #(TTBR_SETUP & 0xFF)    // Setup PTWs memory attribute
        orr     r0, r0, #(TTBR_SETUP & 0xFF00)  // Setup PTWs memory attribute
        mcr     p15, 0, r0, c2, c0, 1           // write r0 to translation table 1
        dsb     ish
        isb
        bx      lr

/*
 * Get MMU Translation Table Base
 */
        .text
        .align 2
        .globl EXT(get_mmu_ttb)
LEXT(get_mmu_ttb)
        mrc     p15, 0, r0, c2, c0, 0           // read translation table base 0 into r0
        isb
        bx      lr

/*
 * get auxiliary control register
 */
        .text
        .align 2
        .globl EXT(get_aux_control)
LEXT(get_aux_control)
        mrc     p15, 0, r0, c1, c0, 1           // read aux control into r0
        bx      lr                              // return old bits in r0

/*
 * set auxiliary control register
 */
        .text
        .align 2
        .globl EXT(set_aux_control)
LEXT(set_aux_control)
        mcr     p15, 0, r0, c1, c0, 1           // write r0 back to aux control
        isb
        bx      lr


/*
 * get MMU control register
 */
        .text
        .align 2
        .globl EXT(get_mmu_control)
LEXT(get_mmu_control)
        mrc     p15, 0, r0, c1, c0, 0           // read mmu control into r0
        bx      lr                              // return old bits in r0

/*
 * set MMU control register
 */
        .text
        .align 2
        .globl EXT(set_mmu_control)
LEXT(set_mmu_control)
        mcr     p15, 0, r0, c1, c0, 0           // write r0 back to mmu control
        isb
        bx      lr

/*
 * MMU kernel virtual to physical address translation
 */
        .text
        .align 2
        .globl EXT(mmu_kvtop)
LEXT(mmu_kvtop)
        mrs     r3, cpsr                        // Read cpsr
        cpsid   if                              // Disable FIQ IRQ
        mov     r1, r0
        mcr     p15, 0, r1, c7, c8, 0           // Write V2PCWPR (privileged read translation)
        isb
        mrc     p15, 0, r0, c7, c4, 0           // Read PAR
        ands    r2, r0, #0x1                    // Test conversion aborted
        bne     mmu_kvtophys_fail
        ands    r2, r0, #0x2                    // Test super section
        mvnne   r2, #0xFF000000                 // super section: mask of low 24 bits
        moveq   r2, #0x000000FF                 // small page: mask of ...
        orreq   r2, r2, #0x00000F00             // ... low 12 bits
        bics    r0, r0, r2                      // Clear lower bits
        beq     mmu_kvtophys_fail
        and     r1, r1, r2                      // Keep page offset from the VA
        orr     r0, r0, r1                      // Combine PA and offset
        b       mmu_kvtophys_ret
mmu_kvtophys_fail:
        mov     r0, #0
mmu_kvtophys_ret:
        msr     cpsr, r3                        // Restore cpsr
        bx      lr

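/*
 * A hedged C sketch of the PAR decoding used by mmu_kvtop above and
 * mmu_uvtop below (illustrative only; par_read_sketch() stands in
 * for the V2PCW write / isb / PAR read sequence):
 *
 *	uint32_t kvtop_sketch(uint32_t va)
 *	{
 *		uint32_t par = par_read_sketch(va);
 *		if (par & 0x1)
 *			return 0;			// translation aborted
 *		uint32_t mask = (par & 0x2) ? 0x00FFFFFF  // 16MB supersection
 *					    : 0x00000FFF; // 4KB page
 *		if ((par & ~mask) == 0)
 *			return 0;			// sanity check
 *		return (par & ~mask) | (va & mask);	// PA | page offset
 *	}
 */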
/*
 * MMU user virtual to physical address translation
 */
        .text
        .align 2
        .globl EXT(mmu_uvtop)
LEXT(mmu_uvtop)
        mrs     r3, cpsr                        // Read cpsr
        cpsid   if                              // Disable FIQ IRQ
        mov     r1, r0
        mcr     p15, 0, r1, c7, c8, 2           // Write V2PCWUR (user read translation)
        isb
        mrc     p15, 0, r0, c7, c4, 0           // Read PAR
        ands    r2, r0, #0x1                    // Test conversion aborted
        bne     mmu_uvtophys_fail
        ands    r2, r0, #0x2                    // Test super section
        mvnne   r2, #0xFF000000                 // super section: mask of low 24 bits
        moveq   r2, #0x000000FF                 // small page: mask of ...
        orreq   r2, r2, #0x00000F00             // ... low 12 bits
        bics    r0, r0, r2                      // Clear lower bits
        beq     mmu_uvtophys_fail
        and     r1, r1, r2                      // Keep page offset from the VA
        orr     r0, r0, r1                      // Combine PA and offset
        b       mmu_uvtophys_ret
mmu_uvtophys_fail:
        mov     r0, #0
mmu_uvtophys_ret:
        msr     cpsr, r3                        // Restore cpsr
        bx      lr

/*
 * MMU kernel virtual to physical address preflight write access
 */
        .text
        .align 2
        .globl EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
        mrs     r3, cpsr                        // Read cpsr
        cpsid   if                              // Disable FIQ IRQ
        mov     r1, r0
        mcr     p15, 0, r1, c7, c8, 1           // Write V2PCWPW (privileged write translation)
        isb
        mrc     p15, 0, r0, c7, c4, 0           // Read PAR
        ands    r2, r0, #0x1                    // Test conversion aborted
        bne     mmu_kvtophys_wpreflight_fail
        ands    r2, r0, #0x2                    // Test super section
        mvnne   r2, #0xFF000000                 // super section: mask of low 24 bits
        moveq   r2, #0x000000FF                 // small page: mask of ...
        orreq   r2, r2, #0x00000F00             // ... low 12 bits
        bics    r0, r0, r2                      // Clear lower bits
        beq     mmu_kvtophys_wpreflight_fail    // Sanity check: successful access must deliver zero low bits
        and     r1, r1, r2                      // Keep page offset from the VA
        orr     r0, r0, r1                      // Combine PA and offset
        b       mmu_kvtophys_wpreflight_ret
mmu_kvtophys_wpreflight_fail:
        mov     r0, #0
mmu_kvtophys_wpreflight_ret:
        msr     cpsr, r3                        // Restore cpsr
        bx      lr

/*
 * set context id register
 */
        .text
        .align 2
        .globl EXT(set_context_id)
LEXT(set_context_id)
        mcr     p15, 0, r0, c13, c0, 1          // Write CONTEXTIDR
        isb
        bx      lr

#define COPYIO_HEADER(rUser, kLabel)                                    \
        /* test for zero len */                                         ;\
        cmp     r2, #0                                                  ;\
        moveq   r0, #0                                                  ;\
        bxeq    lr                                                      ;\
        /* test user_addr, user_addr+len to see if it's in kernel space */ ;\
        add     r12, rUser, r2                                          ;\
        cmp     r12, KERNELBASE                                         ;\
        bhs     kLabel                                                  ;\
        cmp     r12, rUser                                              ;\
        bcc     kLabel

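/*
 * Hedged C equivalent of the COPYIO_HEADER checks (illustrative;
 * user_addr and len stand for the rUser and r2 arguments):
 *
 *	if (len == 0)
 *		return 0;			// nothing to copy
 *	uint32_t end = user_addr + len;
 *	if (end >= KERNELBASE || end < user_addr)
 *		goto kernel_copy_path;		// reaches kernel VA, or wrapped
 */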
#define COPYIO_VALIDATE(NAME, SIZE)                                     \
        /* branch around for small sizes */                             ;\
        cmp     r2, #(SIZE)                                             ;\
        bls     L##NAME##_validate_done                                 ;\
        /* call NAME_validate to check the arguments */                 ;\
        push    {r0, r1, r2, r7, lr}                                    ;\
        add     r7, sp, #12                                             ;\
        blx     EXT(NAME##_validate)                                    ;\
        cmp     r0, #0                                                  ;\
        addne   sp, #12                                                 ;\
        popne   {r7, pc}                                                ;\
        pop     {r0, r1, r2, r7, lr}                                    ;\
L##NAME##_validate_done:

#define COPYIO_SET_RECOVER()                                            \
        /* set recovery address */                                      ;\
        stmfd   sp!, { r4, r5, r6 }                                     ;\
        adr     r3, copyio_error                                        ;\
        mrc     p15, 0, r12, c13, c0, 4                                 ;\
        ldr     r4, [r12, TH_RECOVER]                                   ;\
        str     r3, [r12, TH_RECOVER]

#if __ARM_USER_PROTECT__
#define COPYIO_MAP_USER()                                               \
        /* disable interrupts to prevent expansion to 2GB at L1        ;\
         * between loading ttep and storing it in ttbr0. */             ;\
        mrs     r5, cpsr                                                ;\
        cpsid   if                                                      ;\
        ldr     r3, [r12, ACT_UPTW_TTB]                                 ;\
        mcr     p15, 0, r3, c2, c0, 0                                   ;\
        msr     cpsr, r5                                                ;\
        ldr     r3, [r12, ACT_ASID]                                     ;\
        mcr     p15, 0, r3, c13, c0, 1                                  ;\
        isb
#else
#define COPYIO_MAP_USER()
#endif

#define COPYIO_HEADER_KERN()                                            \
        /* test for zero len */                                         ;\
        cmp     r2, #0                                                  ;\
        moveq   r0, #0                                                  ;\
        bxeq    lr

.macro COPYIO_BODY
        /* if len is less than 16 bytes, just do a simple copy */
        cmp     r2, #16
        blt     L$0_bytewise
        /* test for src and dest of the same word alignment */
        orr     r3, r0, r1
        tst     r3, #3
        bne     L$0_bytewise
L$0_wordwise:
        sub     r2, r2, #16
L$0_wordwise_loop:
        /* 16 bytes at a time */
        ldmia   r0!, { r3, r5, r6, r12 }
        stmia   r1!, { r3, r5, r6, r12 }
        subs    r2, r2, #16
        bge     L$0_wordwise_loop
        /* fixup the len and test for completion */
        adds    r2, r2, #16
        beq     L$0_noerror
L$0_bytewise:
        /* copy 2 bytes at a time */
        subs    r2, r2, #2
        ldrb    r3, [r0], #1
        ldrbpl  r12, [r0], #1
        strb    r3, [r1], #1
        strbpl  r12, [r1], #1
        bhi     L$0_bytewise
L$0_noerror:
        mov     r0, #0
.endmacro

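/*
 * COPYIO_BODY's copy strategy in hedged C form (illustrative only;
 * memcpy_16_bytes stands in for the 4-word ldmia/stmia pair above):
 *
 *	if (len >= 16 && (((uintptr_t)src | (uintptr_t)dst) & 3) == 0) {
 *		while (len >= 16) {		// mutually word-aligned
 *			memcpy_16_bytes(dst, src);
 *			src += 16; dst += 16; len -= 16;
 *		}
 *	}
 *	while (len > 0) {			// residue or unaligned input
 *		*dst++ = *src++;		// the asm moves up to 2 bytes per pass
 *		len--;
 *	}
 *	return 0;
 */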
#if __ARM_USER_PROTECT__
#define COPYIO_UNMAP_USER()                                             \
        mrc     p15, 0, r12, c13, c0, 4                                 ;\
        ldr     r3, [r12, ACT_KPTW_TTB]                                 ;\
        mcr     p15, 0, r3, c2, c0, 0                                   ;\
        mov     r3, #0                                                  ;\
        mcr     p15, 0, r3, c13, c0, 1                                  ;\
        isb
#else
#define COPYIO_UNMAP_USER()                                             \
        mrc     p15, 0, r12, c13, c0, 4
#endif

#define COPYIO_RESTORE_RECOVER()                                        \
        /* restore the recovery address */                              ;\
        str     r4, [r12, TH_RECOVER]                                   ;\
        ldmfd   sp!, { r4, r5, r6 }

/*
 * int copyinstr(
 *	const user_addr_t user_addr,
 *	char *kernel_addr,
 *	vm_size_t max,
 *	vm_size_t *actual)
 */
        .text
        .align 2
        .globl EXT(copyinstr)
LEXT(copyinstr)
        stmfd   sp!, { r4, r5, r6 }

        mov     r6, r3
        add     r3, r0, r2                      // user_addr + max
        cmp     r3, KERNELBASE                  // Check KERNELBASE < user_addr + max
        bhs     copyinstr_param_error           // Drop out if it is
        cmp     r3, r0                          // Check we're copying from user space
        bcc     copyinstr_param_error           // Drop out if we aren't
        adr     r3, copyinstr_error             // Get address for recover
        mrc     p15, 0, r12, c13, c0, 4         // Read TPIDRPRW
        ldr     r4, [r12, TH_RECOVER]           // Save previous recovery address
        str     r3, [r12, TH_RECOVER]           // Set new recovery address
        COPYIO_MAP_USER()
        mov     r12, #0                         // Number of bytes copied so far
        cmp     r2, #0
        beq     copyinstr_too_long
copyinstr_loop:
        ldrb    r3, [r0], #1                    // Load a byte from the source (user)
        strb    r3, [r1], #1                    // Store a byte to the destination (kernel)
        add     r12, r12, #1
        cmp     r3, #0
        beq     copyinstr_done
        cmp     r12, r2                         // Room to copy more bytes?
        bne     copyinstr_loop
//
// Ran out of space in the destination buffer, so return ENAMETOOLONG.
//
copyinstr_too_long:
        mov     r3, #ENAMETOOLONG
copyinstr_done:
//
// When we get here, we have finished copying the string. We came here from
// either the "beq copyinstr_done" above, in which case r3 == 0 (which is also
// the function result for success), or falling through from copyinstr_too_long,
// in which case r3 == ENAMETOOLONG.
//
        str     r12, [r6]                       // Save the count for actual
        mov     r0, r3                          // Return error code from r3
copyinstr_exit:
        COPYIO_UNMAP_USER()
        str     r4, [r12, TH_RECOVER]           // Restore previous recovery address
copyinstr_exit2:
        ldmfd   sp!, { r4, r5, r6 }
        bx      lr

copyinstr_error:
        /* set error, exit routine */
        mov     r0, #EFAULT
        b       copyinstr_exit

copyinstr_param_error:
        /* set error, exit routine */
        mov     r0, #EFAULT
        b       copyinstr_exit2

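/*
 * Hedged caller-side usage sketch for copyinstr (illustrative only;
 * user_ptr is a hypothetical user_addr_t):
 *
 *	char      buf[MAXPATHLEN];
 *	vm_size_t done;
 *	int       err = copyinstr(user_ptr, buf, sizeof(buf), &done);
 *	// err == 0 on success, ENAMETOOLONG if the buffer filled before a
 *	// NUL was found, EFAULT on a bad user address. On success or
 *	// ENAMETOOLONG, done receives the byte count including the NUL.
 */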
/*
 * int copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
 */
        .text
        .align 2
        .globl EXT(copyin)
LEXT(copyin)
        COPYIO_HEADER(r0,copyio_kernel)
        COPYIO_VALIDATE(copyin,4096)
        COPYIO_SET_RECOVER()
        COPYIO_MAP_USER()
        COPYIO_BODY copyin
        COPYIO_UNMAP_USER()
        COPYIO_RESTORE_RECOVER()
        bx      lr

/*
 * int copyout(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
 */
        .text
        .align 2
        .globl EXT(copyout)
LEXT(copyout)
        COPYIO_HEADER(r1,copyio_kernel)
        COPYIO_VALIDATE(copyout,4096)
        COPYIO_SET_RECOVER()
        COPYIO_MAP_USER()
        COPYIO_BODY copyout
        COPYIO_UNMAP_USER()
        COPYIO_RESTORE_RECOVER()
        bx      lr

/*
 * int copyin_word(const user_addr_t user_addr, uint64_t *kernel_addr, vm_size_t nbytes)
 */
        .text
        .align 2
        .globl EXT(copyin_word)
LEXT(copyin_word)
        cmp     r2, #4                          // Test if size is 4 or 8
        cmpne   r2, #8
        bne     L_copyin_invalid
        sub     r3, r2, #1
        tst     r0, r3                          // Test alignment of user address
        bne     L_copyin_invalid

        COPYIO_HEADER(r0,L_copyin_word_fault)
        COPYIO_SET_RECOVER()
        COPYIO_MAP_USER()

        mov     r3, #0                          // Clear high register
        cmp     r2, #4                          // If size is 4
        ldreq   r2, [r0]                        // Load word from user
        ldrdne  r2, r3, [r0]                    // Else load double word from user
        stm     r1, {r2, r3}                    // Store to kernel_addr
        mov     r0, #0                          // Success

        COPYIO_UNMAP_USER()
        COPYIO_RESTORE_RECOVER()
        bx      lr
L_copyin_invalid:
        mov     r0, #EINVAL
        bx      lr
L_copyin_word_fault:
        mov     r0, #EFAULT
        bx      lr


copyio_error:
        mov     r0, #EFAULT
        COPYIO_UNMAP_USER()
        str     r4, [r12, TH_RECOVER]           // Restore previous recovery address
        ldmfd   sp!, { r4, r5, r6 }
        bx      lr

/*
 * int copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
 */
        .text
        .align 2
        .globl EXT(copyin_kern)
LEXT(copyin_kern)
        COPYIO_HEADER_KERN()
        b       bypass_check

/*
 * int copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
 */
        .text
        .align 2
        .globl EXT(copyout_kern)
LEXT(copyout_kern)
        COPYIO_HEADER_KERN()
        b       bypass_check

copyio_kernel_error:
        mov     r0, #EFAULT
        bx      lr

copyio_kernel:
        /* if (current_thread()->map->pmap != kernel_pmap) return EFAULT */
        mrc     p15, 0, r12, c13, c0, 4         // Read TPIDRPRW
        ldr     r3, [r12, ACT_MAP]
        ldr     r3, [r3, MAP_PMAP]
        LOAD_ADDR(ip, kernel_pmap_store)
        cmp     r3, ip
        bne     copyio_kernel_error

bypass_check:
        stmfd   sp!, { r5, r6 }
        COPYIO_BODY copyio_kernel
        ldmfd   sp!, { r5, r6 }
        bx      lr

/*
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr)
 *
 *	Safely copy eight bytes (the fixed top of an ARM frame) from
 *	either user or kernel memory.
 */
        .text
        .align 2
        .globl EXT(copyinframe)
LEXT(copyinframe)
        COPYIO_SET_RECOVER()
        COPYIO_MAP_USER()
        ldmia   r0, {r2, r3}
        stmia   r1, {r2, r3}
        b       Lcopyin_noerror                 // Reuse copyin's epilogue (unmap, restore recovery, return 0)

/*
 * uint32_t arm_debug_read_dscr(void)
 */
        .text
        .align 2
        .globl EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)
#if __ARM_DEBUG__ >= 6
        mrc     p14, 0, r0, c0, c1
#else
        mov     r0, #0
#endif
        bx      lr

/*
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 *	Set debug registers to match the current thread state
 *	(NULL to disable). Assume 6 breakpoints and 2
 *	watchpoints, since that has been the case in all cores
 *	thus far.
 */
        .text
        .align 2
        .globl EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
#if __ARM_DEBUG__ >= 6
        mrc     p15, 0, r1, c13, c0, 4          // Read TPIDRPRW
        ldr     r2, [r1, ACT_CPUDATAP]          // Get current cpu
        str     r0, [r2, CPU_USER_DEBUG]        // Set current user debug

        // Lock the debug registers
        movw    ip, #0xCE55
        movt    ip, #0xC5AC
        mcr     p14, 0, ip, c1, c0, 4

        // enable monitor mode (needed to set and use debug registers)
        mrc     p14, 0, ip, c0, c1, 0
        orr     ip, ip, #0x8000                 // set MDBGen = 1
#if __ARM_DEBUG__ >= 7
        mcr     p14, 0, ip, c0, c2, 2
#else
        mcr     p14, 0, ip, c0, c1, 0
#endif
        // first turn off all breakpoints/watchpoints
        mov     r1, #0
        mcr     p14, 0, r1, c0, c0, 5           // BCR0
        mcr     p14, 0, r1, c0, c1, 5           // BCR1
        mcr     p14, 0, r1, c0, c2, 5           // BCR2
        mcr     p14, 0, r1, c0, c3, 5           // BCR3
        mcr     p14, 0, r1, c0, c4, 5           // BCR4
        mcr     p14, 0, r1, c0, c5, 5           // BCR5
        mcr     p14, 0, r1, c0, c0, 7           // WCR0
        mcr     p14, 0, r1, c0, c1, 7           // WCR1
        // if (debug_state == NULL) disable monitor mode and return;
        cmp     r0, #0
        biceq   ip, ip, #0x8000                 // set MDBGen = 0
#if __ARM_DEBUG__ >= 7
        mcreq   p14, 0, ip, c0, c2, 2
#else
        mcreq   p14, 0, ip, c0, c1, 0
#endif
        bxeq    lr
        ldmia   r0!, {r1, r2, r3, ip}
        mcr     p14, 0, r1, c0, c0, 4           // BVR0
        mcr     p14, 0, r2, c0, c1, 4           // BVR1
        mcr     p14, 0, r3, c0, c2, 4           // BVR2
        mcr     p14, 0, ip, c0, c3, 4           // BVR3
        ldmia   r0!, {r1, r2}
        mcr     p14, 0, r1, c0, c4, 4           // BVR4
        mcr     p14, 0, r2, c0, c5, 4           // BVR5
        add     r0, r0, #40                     // advance to bcr[0]
        ldmia   r0!, {r1, r2, r3, ip}
        mcr     p14, 0, r1, c0, c0, 5           // BCR0
        mcr     p14, 0, r2, c0, c1, 5           // BCR1
        mcr     p14, 0, r3, c0, c2, 5           // BCR2
        mcr     p14, 0, ip, c0, c3, 5           // BCR3
        ldmia   r0!, {r1, r2}
        mcr     p14, 0, r1, c0, c4, 5           // BCR4
        mcr     p14, 0, r2, c0, c5, 5           // BCR5
        add     r0, r0, #40                     // advance to wvr[0]
        ldmia   r0!, {r1, r2}
        mcr     p14, 0, r1, c0, c0, 6           // WVR0
        mcr     p14, 0, r2, c0, c1, 6           // WVR1
        add     r0, r0, #56                     // advance to wcr[0]
        ldmia   r0!, {r1, r2}
        mcr     p14, 0, r1, c0, c0, 7           // WCR0
        mcr     p14, 0, r2, c0, c1, 7           // WCR1

        // Unlock debug registers
        mov     ip, #0
        mcr     p14, 0, ip, c1, c0, 4
#endif
        bx      lr

/*
 * void fiq_context_init(boolean_t enable_fiq)
 */
        .text
        .align 2
        .globl EXT(fiq_context_init)
LEXT(fiq_context_init)
        mrs     r3, cpsr                        // Save current CPSR
        cmp     r0, #0                          // Test enable_fiq
        bicne   r3, r3, #PSR_FIQF               // Enable FIQ if not FALSE
        mrc     p15, 0, r12, c13, c0, 4         // Read TPIDRPRW
        ldr     r2, [r12, ACT_CPUDATAP]         // Get current cpu data

#if __ARM_TIME__
        /* Despite the fact that we use the physical timebase
         * register as the basis for time on our platforms, we
         * end up using the virtual timer in order to manage
         * deadlines. This is due to the fact that for our
         * current platforms, the interrupt generated by the
         * physical timer is not hooked up to anything, and is
         * therefore dropped on the floor. Therefore, for
         * timers to function they MUST be based on the virtual
         * timer.
         */

        mov     r0, #1                          // Enable Timer
        mcr     p15, 0, r0, c14, c3, 1          // Write to CNTV_CTL

        /* Enable USER access to the physical timebase (PL0PCTEN).
         * The rationale for providing access to the physical
         * timebase being that the virtual timebase is broken for
         * some platforms. Maintaining the offset ourselves isn't
         * expensive, so mandate that the userspace implementation
         * do timebase_phys+offset rather than trying to propagate
         * all of the information about what works up to USER.
         */
        mcr     p15, 0, r0, c14, c1, 0          // Set CNTKCTL.PL0PCTEN (CNTKCTL[0])

#else /* ! __ARM_TIME__ */
        msr     cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF) // Change mode to FIQ with FIQ/IRQ disabled
        mov     r8, r2                          // Load the BootCPUData address
        ldr     r9, [r2, CPU_GET_FIQ_HANDLER]   // Load fiq function address
        ldr     r10, [r2, CPU_TBD_HARDWARE_ADDR] // Load the hardware address
        ldr     r11, [r2, CPU_TBD_HARDWARE_VAL] // Load the hardware value
#endif /* __ARM_TIME__ */

        msr     cpsr_c, r3                      // Restore saved CPSR
        bx      lr

/*
 * void reenable_async_aborts(void)
 */
        .text
        .align 2
        .globl EXT(reenable_async_aborts)
LEXT(reenable_async_aborts)
        cpsie   a                               // Re-enable async aborts
        bx      lr

/*
 * uint64_t ml_get_timebase(void)
 */
        .text
        .align 2
        .globl EXT(ml_get_timebase)
LEXT(ml_get_timebase)
        mrc     p15, 0, r12, c13, c0, 4         // Read TPIDRPRW
        ldr     r3, [r12, ACT_CPUDATAP]         // Get current cpu data
#if __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__
        isb                                     // Required by ARMv7 C.b section B8.1.2, ARMv8 section D6.1.2
1:
        mrrc    p15, 0, r3, r1, c14             // Read the Time Base (CNTPCT), high => r1
        mrrc    p15, 0, r0, r3, c14             // Read the Time Base (CNTPCT), low => r0
        mrrc    p15, 0, r3, r2, c14             // Read the Time Base (CNTPCT), high => r2
        cmp     r1, r2
        bne     1b                              // Loop until both high values are the same

        ldr     r3, [r12, ACT_CPUDATAP]         // Get current cpu data
        ldr     r2, [r3, CPU_BASE_TIMEBASE_LOW] // Add in the offset to
        adds    r0, r0, r2                      // convert to
        ldr     r2, [r3, CPU_BASE_TIMEBASE_HIGH] // mach_absolute_time
        adc     r1, r1, r2
#else /* ! __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__ */
1:
        ldr     r2, [r3, CPU_TIMEBASE_HIGH]     // Get the saved TBU value
        ldr     r0, [r3, CPU_TIMEBASE_LOW]      // Get the saved TBL value
        ldr     r1, [r3, CPU_TIMEBASE_HIGH]     // Get the saved TBU value
        cmp     r1, r2                          // Make sure TB has not rolled over
        bne     1b
#endif /* __ARM_TIME__ */
        bx      lr                              // return

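/*
 * Hedged C sketch of the high/low/high timebase read above
 * (cntpct_high(), cntpct_low(), and cpu_base_timebase() are
 * hypothetical helpers standing in for the mrrc reads and the
 * per-cpu base offset):
 *
 *	uint64_t ml_get_timebase_sketch(void)
 *	{
 *		uint32_t hi1, hi2, lo;
 *		do {
 *			hi1 = cntpct_high();
 *			lo  = cntpct_low();
 *			hi2 = cntpct_high();
 *		} while (hi1 != hi2);	// re-read if low carried into high
 *		return (((uint64_t)hi1 << 32) | lo) + cpu_base_timebase();
 *	}
 */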
/*
 * uint32_t ml_get_decrementer(void)
 */
        .text
        .align 2
        .globl EXT(ml_get_decrementer)
LEXT(ml_get_decrementer)
        mrc     p15, 0, r12, c13, c0, 4         // Read TPIDRPRW
        ldr     r3, [r12, ACT_CPUDATAP]         // Get current cpu data
        ldr     r2, [r3, CPU_GET_DECREMENTER_FUNC] // Get get_decrementer_func
        cmp     r2, #0
        bxne    r2                              // Call it if there is one
#if __ARM_TIME__
        mrc     p15, 0, r0, c14, c3, 0          // Read the Decrementer (CNTV_TVAL)
#else
        ldr     r0, [r3, CPU_DECREMENTER]       // Get the saved dec value
#endif
        bx      lr                              // return

/*
 * void ml_set_decrementer(uint32_t dec_value)
 */
        .text
        .align 2
        .globl EXT(ml_set_decrementer)
LEXT(ml_set_decrementer)
        mrc     p15, 0, r12, c13, c0, 4         // Read TPIDRPRW
        ldr     r3, [r12, ACT_CPUDATAP]         // Get current cpu data
        ldr     r2, [r3, CPU_SET_DECREMENTER_FUNC] // Get set_decrementer_func
        cmp     r2, #0
        bxne    r2                              // Call it if there is one
#if __ARM_TIME__
        str     r0, [r3, CPU_DECREMENTER]       // Save the new dec value
        mcr     p15, 0, r0, c14, c3, 0          // Write the Decrementer (CNTV_TVAL)
#else
        mrs     r2, cpsr                        // Save current CPSR
        msr     cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF) // Change mode to FIQ with FIQ/IRQ disabled
        mov     r12, r0                         // Set the DEC value
        str     r12, [r8, CPU_DECREMENTER]      // Store DEC
        msr     cpsr_c, r2                      // Restore saved CPSR
#endif
        bx      lr

/*
 * boolean_t ml_get_interrupts_enabled(void)
 */
        .text
        .align 2
        .globl EXT(ml_get_interrupts_enabled)
LEXT(ml_get_interrupts_enabled)
        mrs     r2, cpsr
        mov     r0, #1
        bic     r0, r0, r2, lsr #PSR_IRQFb      // Clear bit 0 if CPSR.I is set (IRQs disabled)
        bx      lr

/*
 * Platform Specific Timebase & Decrementer Functions
 */

#if defined(ARM_BOARD_CLASS_S7002)
        .text
        .align 2
        .globl EXT(fleh_fiq_s7002)
LEXT(fleh_fiq_s7002)
        str     r11, [r10, #PMGR_INTERVAL_TMR_CTL_OFFSET] // Clear the decrementer interrupt
        mvn     r13, #0
        str     r13, [r8, CPU_DECREMENTER]
        b       EXT(fleh_dec)

        .text
        .align 2
        .globl EXT(s7002_get_decrementer)
LEXT(s7002_get_decrementer)
        ldr     ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
        add     ip, ip, #PMGR_INTERVAL_TMR_OFFSET
        ldr     r0, [ip]                        // Get the Decrementer
        bx      lr

        .text
        .align 2
        .globl EXT(s7002_set_decrementer)
LEXT(s7002_set_decrementer)
        str     r0, [r3, CPU_DECREMENTER]       // Save the new dec value
        ldr     ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
        str     r0, [ip, #PMGR_INTERVAL_TMR_OFFSET] // Store the new Decrementer
        bx      lr
#endif /* defined(ARM_BOARD_CLASS_S7002) */

#if defined(ARM_BOARD_CLASS_T8002)
        .text
        .align 2
        .globl EXT(fleh_fiq_t8002)
LEXT(fleh_fiq_t8002)
        mov     r13, #kAICTmrIntStat
        str     r11, [r10, r13]                 // Clear the decrementer interrupt
        mvn     r13, #0
        str     r13, [r8, CPU_DECREMENTER]
        b       EXT(fleh_dec)

        .text
        .align 2
        .globl EXT(t8002_get_decrementer)
LEXT(t8002_get_decrementer)
        ldr     ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
        mov     r0, #kAICTmrCnt
        add     ip, ip, r0
        ldr     r0, [ip]                        // Get the Decrementer
        bx      lr

        .text
        .align 2
        .globl EXT(t8002_set_decrementer)
LEXT(t8002_set_decrementer)
        str     r0, [r3, CPU_DECREMENTER]       // Save the new dec value
        ldr     ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
        mov     r5, #kAICTmrCnt
        str     r0, [ip, r5]                    // Store the new Decrementer
        bx      lr
#endif /* defined(ARM_BOARD_CLASS_T8002) */

LOAD_ADDR_GEN_DEF(kernel_pmap_store)

#include "globals_asm.h"

/* vim: set ts=4: */