apple/xnu (xnu-6153.121.1) - osfmk/arm/machine_routines_asm.s
1/*
2 * Copyright (c) 2007-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <machine/asm.h>
30#include <arm/proc_reg.h>
31#include <arm/pmap.h>
32#include <sys/errno.h>
33#include "assym.s"
34
35 .align 2
36 .globl EXT(machine_set_current_thread)
37LEXT(machine_set_current_thread)
38 ldr r1, [r0, ACT_CPUDATAP]
39 str r0, [r1, CPU_ACTIVE_THREAD]
40 mcr p15, 0, r0, c13, c0, 4 // Write TPIDRPRW
41 ldr r1, [r0, TH_CTH_SELF]
42 mrc p15, 0, r2, c13, c0, 3 // Read TPIDRURO
43 and r2, r2, #3 // Extract cpu number
44 orr r1, r1, r2 //
45 mcr p15, 0, r1, c13, c0, 3 // Write TPIDRURO
 46	mov	r1, #0
47 mcr p15, 0, r1, c13, c0, 2 // Write TPIDRURW
48 bx lr
49
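/*
 * Illustrative sketch (not part of this file), assuming the TPIDRURO layout
 * established above: the user thread pointer (TH_CTH_SELF) is stored with the
 * cpu number packed into its two low bits, so a hypothetical reader would
 * unpack it as:
 *
 *	uintptr_t tpidruro = read_tpidruro();	// hypothetical accessor for TPIDRURO
 *	unsigned int cpu   = tpidruro & 3;	// low two bits = cpu number
 *	uintptr_t cthread  = tpidruro & ~3UL;	// remaining bits = cthread self pointer
 */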
50/*
51 * void machine_idle(void)
52 */
53 .text
54 .align 2
55 .globl EXT(machine_idle)
56LEXT(machine_idle)
57 cpsid if // Disable FIQ IRQ
58 mov ip, lr
59 bl EXT(Idle_context)
60 mov lr, ip
61 cpsie if // Enable FIQ IRQ
62 bx lr
63
64/*
65 * void cpu_idle_wfi(boolean_t wfi_fast):
66 * cpu_idle is the only function that should call this.
67 */
68 .text
69 .align 2
70 .globl EXT(cpu_idle_wfi)
71LEXT(cpu_idle_wfi)
72 mov r1, #32
73 mov r2, #1200
74 cmp r0, #0
75 beq 3f
76 mov r1, #1
77 b 2f
78 .align 5
791:
80 add r0, r0, #1
81 mov r1, r2
822:
83
84/*
85 * We export the address of the WFI instruction so that it can be patched; this will be
86 * ugly from a debugging perspective.
87 */
88
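/*
 * Hedged sketch of how the exported symbol might be consumed (an assumption,
 * not shown in this file): a C caller could take the address of the exported
 * instruction word when preparing a patch.
 *
 *	extern unsigned int wfi_inst;		// instruction word exported below
 *	unsigned int *patch_site = &wfi_inst;	// candidate patch location
 */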
89#if (__ARM_ARCH__ >= 7)
90 dsb
91 .globl EXT(wfi_inst)
92LEXT(wfi_inst)
93 wfi
94#else
95 mcr p15, 0, r0, c7, c10, 4
96 .globl EXT(wfi_inst)
97LEXT(wfi_inst)
98 mcr p15, 0, r0, c7, c0, 4
99#endif
1003:
101 subs r1, r1, #1
102 bne 3b
103 nop
104 nop
105 nop
106 nop
107 nop
108 cmp r0, #0
109 beq 1b
110 bx lr
111
112 .align 2
113 .globl EXT(timer_grab)
114LEXT(timer_grab)
1150:
116 ldr r2, [r0, TIMER_HIGH]
117 ldr r3, [r0, TIMER_LOW]
118#if __ARM_SMP__
119 dmb ish // dmb ish
120#endif
121 ldr r1, [r0, TIMER_HIGHCHK]
122 cmp r1, r2
123 bne 0b
124 mov r0, r3
125 bx lr
126
127 .align 2
128 .globl EXT(timer_advance_internal_32)
129LEXT(timer_advance_internal_32)
130 str r1, [r0, TIMER_HIGHCHK]
131#if __ARM_SMP__
132 dmb ish // dmb ish
133#endif
134 str r2, [r0, TIMER_LOW]
135#if __ARM_SMP__
136 dmb ish // dmb ish
137#endif
138 str r1, [r0, TIMER_HIGH]
139 bx lr
140
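/*
 * Illustrative C sketch of the timer_grab / timer_advance_internal_32
 * protocol above (assumption: field names mirror the TIMER_* offsets): the
 * writer publishes HIGHCHK, then LOW, then HIGH with barriers in between;
 * the reader retries until the two high words match, yielding a consistent
 * 64-bit snapshot without a lock.
 *
 *	uint32_t hi, lo;
 *	do {
 *		hi = timer->high;		// TIMER_HIGH
 *		lo = timer->low;		// TIMER_LOW
 *	} while (hi != timer->high_chk);	// TIMER_HIGHCHK: retry on torn update
 *	value = ((uint64_t)hi << 32) | lo;
 */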
141 .align 2
142 .globl EXT(get_vfp_enabled)
143LEXT(get_vfp_enabled)
144#if __ARM_VFP__
145 fmrx r0, fpexc
 146	and	r1, r0, #FPEXC_EN			// Extract vfp enable previous state
147 mov r0, r1, LSR #FPEXC_EN_BIT // Return 1 if enabled, 0 if disabled
148#else
149 mov r0, #0 // return false
150#endif
151 bx lr
152
153/* This is no longer useful (but is exported, so this may require kext cleanup). */
154 .align 2
155 .globl EXT(enable_kernel_vfp_context)
156LEXT(enable_kernel_vfp_context)
157 bx lr
158
159/* uint32_t get_fpscr(void):
160 * Returns the current state of the FPSCR register.
161 */
162 .align 2
163 .globl EXT(get_fpscr)
164LEXT(get_fpscr)
165#if __ARM_VFP__
166 fmrx r0, fpscr
167#endif
168 bx lr
169 .align 2
170 .globl EXT(set_fpscr)
171/* void set_fpscr(uint32_t value):
172 * Set the FPSCR register.
173 */
174LEXT(set_fpscr)
175#if __ARM_VFP__
176 fmxr fpscr, r0
177#else
178 mov r0, #0
179#endif
180 bx lr
181
182/*
183 * void OSSynchronizeIO(void)
184 */
185 .text
186 .align 2
187 .globl EXT(OSSynchronizeIO)
188LEXT(OSSynchronizeIO)
189 .align 2
190 dsb
191 bx lr
192
193.macro SYNC_TLB_FLUSH
194 dsb ish
195 isb
196.endmacro
197
 198/*
 199 * void sync_tlb_flush(void)
 200 *
 201 * Synchronize one or more prior TLB flush operations
 202 */
203 .text
204 .align 2
205 .globl EXT(sync_tlb_flush)
206LEXT(sync_tlb_flush)
207 SYNC_TLB_FLUSH
208 bx lr
209
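/*
 * Hedged usage sketch (C side, not part of this file): the *_async flush
 * variants below issue TLB maintenance without waiting; a single
 * sync_tlb_flush() (dsb ish + isb) then completes one or more of them.
 *
 *	flush_mmu_tlb_entry_async(va0);
 *	flush_mmu_tlb_entry_async(va1);
 *	sync_tlb_flush();		// wait for both invalidates to complete
 */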
210.macro FLUSH_MMU_TLB
211 mov r0, #0
212#if __ARM_SMP__
213 mcr p15, 0, r0, c8, c3, 0 // Invalidate Inner Shareable entire TLBs
214#else
215 mcr p15, 0, r0, c8, c7, 0 // Invalidate entire TLB
216#endif
217.endmacro
218
219/*
220 * void flush_mmu_tlb_async(void)
221 *
222 * Flush all TLBs, don't wait for completion
223 */
224 .text
225 .align 2
226 .globl EXT(flush_mmu_tlb_async)
227LEXT(flush_mmu_tlb_async)
228 FLUSH_MMU_TLB
229 bx lr
230
231/*
232 * void flush_mmu_tlb(void)
233 *
234 * Flush all TLBs
235 */
236 .text
237 .align 2
238 .globl EXT(flush_mmu_tlb)
239LEXT(flush_mmu_tlb)
240 FLUSH_MMU_TLB
241 SYNC_TLB_FLUSH
242 bx lr
243
244.macro FLUSH_CORE_TLB
245 mov r0, #0
246 mcr p15, 0, r0, c8, c7, 0 // Invalidate entire TLB
247.endmacro
248
249/*
250 *
251 * void flush_core_tlb_async(void)
252 *
253 * Flush local core's TLB, don't wait for completion
254 */
255 .text
256 .align 2
257 .globl EXT(flush_core_tlb_async)
258LEXT(flush_core_tlb_async)
259 FLUSH_CORE_TLB
260 bx lr
261
262/*
263 * void flush_core_tlb(void)
264 *
 265 * Flush local core's TLB
266 */
267 .text
268 .align 2
269 .globl EXT(flush_core_tlb)
270LEXT(flush_core_tlb)
271 FLUSH_CORE_TLB
272 SYNC_TLB_FLUSH
273 bx lr
274
275.macro FLUSH_MMU_TLB_ENTRY
276#if __ARM_SMP__
 277	mcr	p15, 0, r0, c8, c3, 1			// Invalidate TLB Inner Shareable entry
278#else
279 mcr p15, 0, r0, c8, c7, 1 // Invalidate TLB entry
280#endif
281.endmacro
282/*
283 * void flush_mmu_tlb_entry_async(uint32_t)
284 *
285 * Flush TLB entry, don't wait for completion
286 */
287 .text
288 .align 2
289 .globl EXT(flush_mmu_tlb_entry_async)
290LEXT(flush_mmu_tlb_entry_async)
291 FLUSH_MMU_TLB_ENTRY
292 bx lr
293
294/*
295 * void flush_mmu_tlb_entry(uint32_t)
296 *
297 * Flush TLB entry
298 */
299 .text
300 .align 2
301 .globl EXT(flush_mmu_tlb_entry)
302LEXT(flush_mmu_tlb_entry)
303 FLUSH_MMU_TLB_ENTRY
304 SYNC_TLB_FLUSH
305 bx lr
306
307.macro FLUSH_MMU_TLB_ENTRIES
3081:
 309#if __ARM_SMP__
 310	mcr	p15, 0, r0, c8, c3, 1			// Invalidate TLB Inner Shareable entry
311#else
312 mcr p15, 0, r0, c8, c7, 1 // Invalidate TLB entry
313#endif
314 add r0, r0, ARM_PGBYTES // Increment to the next page
315 cmp r0, r1 // Loop if current address < end address
316 blt 1b
317.endmacro
318
319/*
320 * void flush_mmu_tlb_entries_async(uint32_t, uint32_t)
321 *
322 * Flush TLB entries for address range, don't wait for completion
323 */
324 .text
325 .align 2
326 .globl EXT(flush_mmu_tlb_entries_async)
327LEXT(flush_mmu_tlb_entries_async)
328 FLUSH_MMU_TLB_ENTRIES
329 bx lr
330
331/*
332 * void flush_mmu_tlb_entries(uint32_t, uint32_t)
333 *
 334 * Flush TLB entries for address range
335 */
336 .text
337 .align 2
338 .globl EXT(flush_mmu_tlb_entries)
339LEXT(flush_mmu_tlb_entries)
340 FLUSH_MMU_TLB_ENTRIES
341 SYNC_TLB_FLUSH
342 bx lr
343
344
345.macro FLUSH_MMU_TLB_MVA_ENTRIES
 346#if __ARM_SMP__
 347	mcr	p15, 0, r0, c8, c3, 3			// Invalidate TLB Inner Shareable entries by mva
 348#else
 349	mcr	p15, 0, r0, c8, c7, 3			// Invalidate TLB entries by mva
 350#endif
 351.endmacro
 352
353/*
354 * void flush_mmu_tlb_mva_entries_async(uint32_t)
355 *
356 * Flush TLB entries for mva, don't wait for completion
357 */
358 .text
359 .align 2
360 .globl EXT(flush_mmu_tlb_mva_entries_async)
361LEXT(flush_mmu_tlb_mva_entries_async)
362 FLUSH_MMU_TLB_MVA_ENTRIES
363 bx lr
364
365/*
 366 * void flush_mmu_tlb_mva_entries(uint32_t)
367 *
368 * Flush TLB entries for mva
369 */
370 .text
371 .align 2
372 .globl EXT(flush_mmu_tlb_mva_entries)
373LEXT(flush_mmu_tlb_mva_entries)
374 FLUSH_MMU_TLB_MVA_ENTRIES
375 SYNC_TLB_FLUSH
376 bx lr
377
378.macro FLUSH_MMU_TLB_ASID
 379#if __ARM_SMP__
 380	mcr	p15, 0, r0, c8, c3, 2			// Invalidate TLB Inner Shareable entries by asid
 381#else
 382	mcr	p15, 0, r0, c8, c7, 2			// Invalidate TLB entries by asid
 383#endif
384.endmacro
385
386/*
387 * void flush_mmu_tlb_asid_async(uint32_t)
388 *
389 * Flush TLB entries for asid, don't wait for completion
390 */
391 .text
392 .align 2
393 .globl EXT(flush_mmu_tlb_asid_async)
394LEXT(flush_mmu_tlb_asid_async)
395 FLUSH_MMU_TLB_ASID
396 bx lr
397
398/*
399 * void flush_mmu_tlb_asid(uint32_t)
400 *
 401 * Flush TLB entries for asid
402 */
403 .text
404 .align 2
405 .globl EXT(flush_mmu_tlb_asid)
406LEXT(flush_mmu_tlb_asid)
407 FLUSH_MMU_TLB_ASID
408 SYNC_TLB_FLUSH
409 bx lr
410
411.macro FLUSH_CORE_TLB_ASID
 412	mcr	p15, 0, r0, c8, c7, 2			// Invalidate TLB entries by asid
413.endmacro
414
415/*
416 * void flush_core_tlb_asid_async(uint32_t)
417 *
418 * Flush local core TLB entries for asid, don't wait for completion
419 */
420 .text
421 .align 2
422 .globl EXT(flush_core_tlb_asid_async)
423LEXT(flush_core_tlb_asid_async)
424 FLUSH_CORE_TLB_ASID
425 bx lr
426
427/*
428 * void flush_core_tlb_asid(uint32_t)
429 *
 430 * Flush local core TLB entries for asid
431 */
432 .text
433 .align 2
434 .globl EXT(flush_core_tlb_asid)
435LEXT(flush_core_tlb_asid)
436 FLUSH_CORE_TLB_ASID
437 SYNC_TLB_FLUSH
438 bx lr
439
440/*
441 * Set MMU Translation Table Base
442 */
443 .text
444 .align 2
445 .globl EXT(set_mmu_ttb)
446LEXT(set_mmu_ttb)
447 orr r0, r0, #(TTBR_SETUP & 0xFF) // Setup PTWs memory attribute
448 orr r0, r0, #(TTBR_SETUP & 0xFF00) // Setup PTWs memory attribute
449 mcr p15, 0, r0, c2, c0, 0 // write r0 to translation table 0
450 dsb ish
451 isb
452 bx lr
453
454/*
455 * Set MMU Translation Table Base Alternate
456 */
457 .text
458 .align 2
459 .globl EXT(set_mmu_ttb_alternate)
460LEXT(set_mmu_ttb_alternate)
461 orr r0, r0, #(TTBR_SETUP & 0xFF) // Setup PTWs memory attribute
462 orr r0, r0, #(TTBR_SETUP & 0xFF00) // Setup PTWs memory attribute
463 mcr p15, 0, r0, c2, c0, 1 // write r0 to translation table 1
464 dsb ish
465 isb
466 bx lr
467
468/*
 469 * Get MMU Translation Table Base
470 */
471 .text
472 .align 2
473 .globl EXT(get_mmu_ttb)
474LEXT(get_mmu_ttb)
475 mrc p15, 0, r0, c2, c0, 0 // translation table to r0
476 isb
477 bx lr
478
479/*
 480 * get auxiliary control register
481 */
482 .text
483 .align 2
484 .globl EXT(get_aux_control)
485LEXT(get_aux_control)
486 mrc p15, 0, r0, c1, c0, 1 // read aux control into r0
487 bx lr // return old bits in r0
488
489/*
 490 * set auxiliary control register
491 */
492 .text
493 .align 2
494 .globl EXT(set_aux_control)
495LEXT(set_aux_control)
496 mcr p15, 0, r0, c1, c0, 1 // write r0 back to aux control
497 isb
498 bx lr
499
500
501/*
502 * get MMU control register
503 */
504 .text
505 .align 2
506 .globl EXT(get_mmu_control)
507LEXT(get_mmu_control)
508 mrc p15, 0, r0, c1, c0, 0 // read mmu control into r0
509 bx lr // return old bits in r0
510
511/*
512 * set MMU control register
513 */
514 .text
515 .align 2
516 .globl EXT(set_mmu_control)
517LEXT(set_mmu_control)
518 mcr p15, 0, r0, c1, c0, 0 // write r0 back to mmu control
519 isb
520 bx lr
521
522/*
523 * MMU kernel virtual to physical address translation
524 */
525 .text
526 .align 2
527 .globl EXT(mmu_kvtop)
528LEXT(mmu_kvtop)
529 mrs r3, cpsr // Read cpsr
530 cpsid if // Disable FIQ IRQ
531 mov r1, r0
532 mcr p15, 0, r1, c7, c8, 0 // Write V2PCWPR
533 isb
534 mrc p15, 0, r0, c7, c4, 0 // Read PAR
535 ands r2, r0, #0x1 // Test conversion aborted
536 bne mmu_kvtophys_fail
537 ands r2, r0, #0x2 // Test super section
538 mvnne r2, #0xFF000000
539 moveq r2, #0x000000FF
540 orreq r2, r2, #0x00000F00
541 bics r0, r0, r2 // Clear lower bits
542 beq mmu_kvtophys_fail
543 and r1, r1, r2
544 orr r0, r0, r1
545 b mmu_kvtophys_ret
546mmu_kvtophys_fail:
547 mov r0, #0
548mmu_kvtophys_ret:
549 msr cpsr, r3 // Restore cpsr
550 bx lr
551
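/*
 * Illustrative C equivalent of the PAR decoding above (assumption: va is the
 * input virtual address, par the Physical Address Register value): bit 0 set
 * means the translation aborted, bit 1 selects the supersection mask.
 *
 *	if (par & 1)
 *		return 0;				// translation aborted
 *	mask = (par & 2) ? 0x00FFFFFF : 0x00000FFF;	// supersection vs. small page
 *	return (par & ~mask) | (va & mask);		// zero result is also treated as failure
 */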
552/*
553 * MMU user virtual to physical address translation
554 */
555 .text
556 .align 2
557 .globl EXT(mmu_uvtop)
558LEXT(mmu_uvtop)
559 mrs r3, cpsr // Read cpsr
560 cpsid if // Disable FIQ IRQ
561 mov r1, r0
562 mcr p15, 0, r1, c7, c8, 2 // Write V2PCWUR
563 isb
564 mrc p15, 0, r0, c7, c4, 0 // Read PAR
565 ands r2, r0, #0x1 // Test conversion aborted
566 bne mmu_uvtophys_fail
567 ands r2, r0, #0x2 // Test super section
568 mvnne r2, #0xFF000000
569 moveq r2, #0x000000FF
570 orreq r2, r2, #0x00000F00
571 bics r0, r0, r2 // Clear lower bits
572 beq mmu_uvtophys_fail
573 and r1, r1, r2
574 orr r0, r0, r1
575 b mmu_uvtophys_ret
576mmu_uvtophys_fail:
577 mov r0, #0
578mmu_uvtophys_ret:
579 msr cpsr, r3 // Restore cpsr
580 bx lr
581
582/*
583 * MMU kernel virtual to physical address preflight write access
584 */
585 .text
586 .align 2
587 .globl EXT(mmu_kvtop_wpreflight)
588LEXT(mmu_kvtop_wpreflight)
589 mrs r3, cpsr // Read cpsr
590 cpsid if // Disable FIQ IRQ
591 mov r1, r0
592 mcr p15, 0, r1, c7, c8, 1 // Write V2PCWPW
593 isb
594 mrc p15, 0, r0, c7, c4, 0 // Read PAR
595 ands r2, r0, #0x1 // Test conversion aborted
596 bne mmu_kvtophys_wpreflight_fail
597 ands r2, r0, #0x2 // Test super section
598 mvnne r2, #0xFF000000
599 moveq r2, #0x000000FF
600 orreq r2, r2, #0x00000F00
601 bics r0, r0, r2 // Clear lower bits
602 beq mmu_kvtophys_wpreflight_fail // Sanity check: successful access must deliver zero low bits
603 and r1, r1, r2
604 orr r0, r0, r1
605 b mmu_kvtophys_wpreflight_ret
606mmu_kvtophys_wpreflight_fail:
607 mov r0, #0
608mmu_kvtophys_wpreflight_ret:
609 msr cpsr, r3 // Restore cpsr
610 bx lr
611
612/*
613 * set context id register
614 */
618 .text
619 .align 2
620 .globl EXT(set_context_id)
621LEXT(set_context_id)
622 mcr p15, 0, r0, c13, c0, 1
623 isb
624 bx lr
625
626/*
627 * arg0: prefix of the external validator function (copyin or copyout)
628 * arg1: 0-based index of highest argument register that must be preserved
629 */
630.macro COPYIO_VALIDATE
631 /* call NAME_validate to check the arguments */
632 push {r0-r$1, r7, lr}
633 add r7, sp, #(($1 + 1) * 4)
634 blx EXT($0_validate)
635 cmp r0, #0
636 addne sp, #(($1 + 1) * 4)
637 popne {r7, pc}
638 pop {r0-r$1, r7, lr}
639.endmacro
640
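/*
 * Expansion sketch (illustrative): "COPYIO_VALIDATE copyin, 2" saves
 * {r0-r2, r7, lr}, calls EXT(copyin_validate) with the original arguments,
 * and, if the validator returns non-zero, drops the saved argument registers
 * and returns to the caller with that error still in r0; otherwise the saved
 * registers are restored and the copy routine continues.
 */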
641
642#define COPYIO_SET_RECOVER() \
643 /* set recovery address */ ;\
644 stmfd sp!, { r4, r5, r6 } ;\
645 adr r3, copyio_error ;\
646 mrc p15, 0, r12, c13, c0, 4 ;\
647 ldr r4, [r12, TH_RECOVER] ;\
648 str r3, [r12, TH_RECOVER]
649
650#define COPYIO_TRY_KERNEL() \
651 /* if (current_thread()->map->pmap == kernel_pmap) copyio_kernel() */ ;\
652 mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW ;\
653 ldr r3, [r12, ACT_MAP] ;\
654 ldr r3, [r3, MAP_PMAP] ;\
655 LOAD_ADDR(ip, kernel_pmap_store) ;\
656 cmp r3, ip ;\
657 beq copyio_kern_body
658
659#if __ARM_USER_PROTECT__
660#define COPYIO_MAP_USER() \
661 /* disable interrupts to prevent expansion to 2GB at L1 ;\
662 * between loading ttep and storing it in ttbr0.*/ ;\
663 mrs r5, cpsr ;\
664 cpsid if ;\
665 ldr r3, [r12, ACT_UPTW_TTB] ;\
666 mcr p15, 0, r3, c2, c0, 0 ;\
667 msr cpsr, r5 ;\
668 ldr r3, [r12, ACT_ASID] ;\
669 mcr p15, 0, r3, c13, c0, 1 ;\
670 isb
671#else
672#define COPYIO_MAP_USER()
673#endif
674
 675#define COPYIO_HEADER()						;\
676 /* test for zero len */ ;\
677 cmp r2, #0 ;\
678 moveq r0, #0 ;\
679 bxeq lr
680
681.macro COPYIO_BODY
682 /* if len is less than 16 bytes, just do a simple copy */
683 cmp r2, #16
684 blt L$0_bytewise
685 /* test for src and dest of the same word alignment */
686 orr r3, r0, r1
687 tst r3, #3
688 bne L$0_bytewise
689L$0_wordwise:
690 sub r2, r2, #16
691L$0_wordwise_loop:
692 /* 16 bytes at a time */
693 ldmia r0!, { r3, r5, r6, r12 }
694 stmia r1!, { r3, r5, r6, r12 }
695 subs r2, r2, #16
696 bge L$0_wordwise_loop
697 /* fixup the len and test for completion */
698 adds r2, r2, #16
699 beq L$0_noerror
700L$0_bytewise:
701 /* copy 2 bytes at a time */
702 subs r2, r2, #2
703 ldrb r3, [r0], #1
704 ldrbpl r12, [r0], #1
705 strb r3, [r1], #1
706 strbpl r12, [r1], #1
707 bhi L$0_bytewise
708L$0_noerror:
709 mov r0, #0
710.endmacro
711
712#if __ARM_USER_PROTECT__
713#define COPYIO_UNMAP_USER() \
714 mrc p15, 0, r12, c13, c0, 4 ;\
715 ldr r3, [r12, ACT_KPTW_TTB] ;\
716 mcr p15, 0, r3, c2, c0, 0 ;\
717 mov r3, #0 ;\
718 mcr p15, 0, r3, c13, c0, 1 ;\
719 isb
720#else
721#define COPYIO_UNMAP_USER() \
722 mrc p15, 0, r12, c13, c0, 4
723#endif
724
725#define COPYIO_RESTORE_RECOVER() \
726 /* restore the recovery address */ ;\
727 str r4, [r12, TH_RECOVER] ;\
728 ldmfd sp!, { r4, r5, r6 }
729
730/*
731 * int copyinstr(
732 * const user_addr_t user_addr,
733 * char *kernel_addr,
734 * vm_size_t max,
735 * vm_size_t *actual)
736 */
737 .text
738 .align 2
739 .globl EXT(copyinstr)
740LEXT(copyinstr)
741 cmp r2, #0
742 moveq r0, #ENAMETOOLONG
743 moveq r12, #0
744 streq r12, [r3]
745 bxeq lr
 746	COPYIO_VALIDATE copyin_user, 3
747 stmfd sp!, { r4, r5, r6 }
748
749 mov r6, r3
750 adr r3, copyinstr_error // Get address for recover
751 mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
752 ldr r4, [r12, TH_RECOVER] ;\
753 str r3, [r12, TH_RECOVER]
754 COPYIO_MAP_USER()
755 mov r12, #0 // Number of bytes copied so far
756copyinstr_loop:
757 ldrb r3, [r0], #1 // Load a byte from the source (user)
758 strb r3, [r1], #1 // Store a byte to the destination (kernel)
759 add r12, r12, #1
760 cmp r3, #0
761 beq copyinstr_done
762 cmp r12, r2 // Room to copy more bytes?
763 bne copyinstr_loop
764//
765// Ran out of space in the destination buffer, so return ENAMETOOLONG.
766//
767copyinstr_too_long:
768 mov r3, #ENAMETOOLONG
769copyinstr_done:
770//
771// When we get here, we have finished copying the string. We came here from
 772// either the "beq copyinstr_done" above, in which case r3 == 0 (which is also
 773// the function result for success), or falling through from copyinstr_too_long,
 774// in which case r3 == ENAMETOOLONG.
775//
776 str r12, [r6] // Save the count for actual
777 mov r0, r3 // Return error code from r3
778copyinstr_exit:
779 COPYIO_UNMAP_USER()
780 str r4, [r12, TH_RECOVER]
781 ldmfd sp!, { r4, r5, r6 }
782 bx lr
783
784copyinstr_error:
785 /* set error, exit routine */
786 mov r0, #EFAULT
787 b copyinstr_exit
788
789/*
790 * int copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
791 */
792 .text
793 .align 2
794 .globl EXT(copyin)
795LEXT(copyin)
 796	COPYIO_HEADER()
 797	COPYIO_VALIDATE copyin, 2
 798	COPYIO_TRY_KERNEL()
799 COPYIO_SET_RECOVER()
800 COPYIO_MAP_USER()
801 COPYIO_BODY copyin
802 COPYIO_UNMAP_USER()
803 COPYIO_RESTORE_RECOVER()
804 bx lr
805
806/*
807 * int copyout(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
808 */
809 .text
810 .align 2
811 .globl EXT(copyout)
812LEXT(copyout)
 813	COPYIO_HEADER()
 814	COPYIO_VALIDATE copyout, 2
 815	COPYIO_TRY_KERNEL()
816 COPYIO_SET_RECOVER()
817 COPYIO_MAP_USER()
818 COPYIO_BODY copyout
819 COPYIO_UNMAP_USER()
820 COPYIO_RESTORE_RECOVER()
821 bx lr
822
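/*
 * Fault-handling sketch for copyin/copyout above (illustrative): the recover
 * address installed in TH_RECOVER makes a data abort on the user access
 * resume at copyio_error, which unwinds and returns EFAULT to the caller
 * rather than panicking.
 *
 *	if (copyin(uaddr, kbuf, len) == EFAULT) {
 *		// user address was unmapped or protected; caller handles the error
 *	}
 */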
823
824/*
825 * int copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
826 * r0: user_addr
827 * r1: kernel_addr
828 */
829 .text
830 .align 2
831 .globl EXT(copyin_atomic32)
832LEXT(copyin_atomic32)
833 tst r0, #3 // Test alignment of user address
834 bne 2f
835
836 mov r2, #4
837 COPYIO_VALIDATE copyin_user, 1
838 COPYIO_SET_RECOVER()
839 COPYIO_MAP_USER()
840
841 ldr r2, [r0] // Load word from user
842 str r2, [r1] // Store to kernel_addr
843 mov r0, #0 // Success
844
845 COPYIO_UNMAP_USER()
846 COPYIO_RESTORE_RECOVER()
847 bx lr
8482: // misaligned copyin
849 mov r0, #EINVAL
850 bx lr
851
852/*
853 * int copyin_atomic32_wait_if_equals(const char *src, uint32_t value)
854 * r0: user_addr
855 * r1: value
856 */
857 .text
858 .align 2
859 .globl EXT(copyin_atomic32_wait_if_equals)
860LEXT(copyin_atomic32_wait_if_equals)
861 tst r0, #3 // Test alignment of user address
862 bne 2f
863
864 mov r2, r0
865 mov r3, #4
866 COPYIO_VALIDATE copyio_user, 1 // validate user address (uses r2, r3)
867 COPYIO_SET_RECOVER()
868 COPYIO_MAP_USER()
869
870 ldrex r2, [r0]
871 cmp r2, r1
872 movne r0, ESTALE
873 bne 1f
874 mov r0, #0
875 wfe
8761:
877 clrex
878
879 COPYIO_UNMAP_USER()
880 COPYIO_RESTORE_RECOVER()
881 bx lr
8822: // misaligned copyin
883 mov r0, #EINVAL
884 bx lr
885
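/*
 * Semantics sketch (illustrative) for copyin_atomic32_wait_if_equals above:
 * the user word is loaded exclusively; if it no longer equals the expected
 * value the routine returns ESTALE, otherwise it executes wfe with the
 * exclusive monitor armed so a store to that word (or an event) wakes the core.
 *
 *	if (*uaddr != value)
 *		return ESTALE;		// changed since the caller last looked
 *	wfe();				// woken by a write to *uaddr or an event
 *	return 0;
 */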
886/*
887 * int copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
888 * r0: user_addr
889 * r1: kernel_addr
890 */
891 .text
892 .align 2
893 .globl EXT(copyin_atomic64)
894LEXT(copyin_atomic64)
895 tst r0, #7 // Test alignment of user address
896 bne 2f
 897
898 mov r2, #8
899 COPYIO_VALIDATE copyin_user, 1
900 COPYIO_SET_RECOVER()
901 COPYIO_MAP_USER()
902
9031: // ldrex/strex retry loop
904 ldrexd r2, r3, [r0] // Load double word from user
905 strexd r5, r2, r3, [r0] // (the COPYIO_*() macros make r5 safe to use as a scratch register here)
906 cmp r5, #0
907 bne 1b
908 stm r1, {r2, r3} // Store to kernel_addr
909 mov r0, #0 // Success
910
911 COPYIO_UNMAP_USER()
912 COPYIO_RESTORE_RECOVER()
913 bx lr
 9142:	// misaligned copyin
915 mov r0, #EINVAL
916 bx lr
917
918
919copyio_error:
920 mov r0, #EFAULT
921 COPYIO_UNMAP_USER()
922 str r4, [r12, TH_RECOVER]
923 ldmfd sp!, { r4, r5, r6 }
924 bx lr
925
926
927/*
928 * int copyout_atomic32(uint32_t value, user_addr_t user_addr)
929 * r0: value
930 * r1: user_addr
931 */
932 .text
933 .align 2
934 .globl EXT(copyout_atomic32)
935LEXT(copyout_atomic32)
936 tst r1, #3 // Test alignment of user address
937 bne 2f
938
939 mov r2, r1
940 mov r3, #4
941 COPYIO_VALIDATE copyio_user, 1 // validate user address (uses r2, r3)
942 COPYIO_SET_RECOVER()
943 COPYIO_MAP_USER()
944
945 str r0, [r1] // Store word to user
946 mov r0, #0 // Success
947
948 COPYIO_UNMAP_USER()
949 COPYIO_RESTORE_RECOVER()
950 bx lr
9512: // misaligned copyout
952 mov r0, #EINVAL
953 bx lr
954
955
956/*
957 * int copyout_atomic64(uint64_t value, user_addr_t user_addr)
958 * r0, r1: value
959 * r2: user_addr
960 */
961 .text
962 .align 2
963 .globl EXT(copyout_atomic64)
964LEXT(copyout_atomic64)
965 tst r2, #7 // Test alignment of user address
966 bne 2f
967
968 mov r3, #8
969 COPYIO_VALIDATE copyio_user, 2 // validate user address (uses r2, r3)
970 COPYIO_SET_RECOVER()
971 COPYIO_MAP_USER()
972
9731: // ldrex/strex retry loop
974 ldrexd r4, r5, [r2]
975 strexd r3, r0, r1, [r2] // Atomically store double word to user
976 cmp r3, #0
977 bne 1b
978
979 mov r0, #0 // Success
980
981 COPYIO_UNMAP_USER()
982 COPYIO_RESTORE_RECOVER()
983 bx lr
9842: // misaligned copyout
985 mov r0, #EINVAL
986 bx lr
987
988
989/*
990 * int copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
991 */
992 .text
993 .align 2
994 .globl EXT(copyin_kern)
995LEXT(copyin_kern)
996 COPYIO_HEADER()
997 b copyio_kern_body
998
999/*
1000 * int copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
1001 */
1002 .text
1003 .align 2
1004 .globl EXT(copyout_kern)
1005LEXT(copyout_kern)
1006 COPYIO_HEADER()
1007 b copyio_kern_body
 1008
 1009copyio_kern_body:
1010 stmfd sp!, { r5, r6 }
1011 COPYIO_BODY copyio_kernel
1012 ldmfd sp!, { r5, r6 }
1013 bx lr
1014
1015/*
1016 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr)
1017 *
1018 * Safely copy eight bytes (the fixed top of an ARM frame) from
1019 * either user or kernel memory.
1020 */
1021 .text
1022 .align 2
1023 .globl EXT(copyinframe)
1024LEXT(copyinframe)
1025 COPYIO_SET_RECOVER()
1026 COPYIO_MAP_USER()
1027 ldmia r0, {r2, r3}
1028 stmia r1, {r2, r3}
1029 b Lcopyin_noerror
1030
1031/*
1032 * uint32_t arm_debug_read_dscr(void)
1033 */
1034 .text
1035 .align 2
1036 .globl EXT(arm_debug_read_dscr)
1037LEXT(arm_debug_read_dscr)
1038#if __ARM_DEBUG__ >= 6
1039 mrc p14, 0, r0, c0, c1
1040#else
1041 mov r0, #0
1042#endif
1043 bx lr
1044
1045/*
1046 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
1047 *
1048 * Set debug registers to match the current thread state
1049 * (NULL to disable). Assume 6 breakpoints and 2
1050 * watchpoints, since that has been the case in all cores
1051 * thus far.
1052 */
1053 .text
1054 .align 2
1055 .globl EXT(arm_debug_set_cp14)
1056LEXT(arm_debug_set_cp14)
1057#if __ARM_DEBUG__ >= 6
1058 mrc p15, 0, r1, c13, c0, 4 // Read TPIDRPRW
1059 ldr r2, [r1, ACT_CPUDATAP] // Get current cpu
1060 str r0, [r2, CPU_USER_DEBUG] // Set current user debug
1061
1062 // Lock the debug registers
1063 movw ip, #0xCE55
1064 movt ip, #0xC5AC
1065 mcr p14, 0, ip, c1, c0, 4
1066
1067 // enable monitor mode (needed to set and use debug registers)
1068 mrc p14, 0, ip, c0, c1, 0
1069 orr ip, ip, #0x8000 // set MDBGen = 1
1070#if __ARM_DEBUG__ >= 7
1071 mcr p14, 0, ip, c0, c2, 2
1072#else
1073 mcr p14, 0, ip, c0, c1, 0
1074#endif
1075 // first turn off all breakpoints/watchpoints
1076 mov r1, #0
1077 mcr p14, 0, r1, c0, c0, 5 // BCR0
1078 mcr p14, 0, r1, c0, c1, 5 // BCR1
1079 mcr p14, 0, r1, c0, c2, 5 // BCR2
1080 mcr p14, 0, r1, c0, c3, 5 // BCR3
1081 mcr p14, 0, r1, c0, c4, 5 // BCR4
1082 mcr p14, 0, r1, c0, c5, 5 // BCR5
1083 mcr p14, 0, r1, c0, c0, 7 // WCR0
1084 mcr p14, 0, r1, c0, c1, 7 // WCR1
1085 // if (debug_state == NULL) disable monitor mode and return;
1086 cmp r0, #0
1087 biceq ip, ip, #0x8000 // set MDBGen = 0
1088#if __ARM_DEBUG__ >= 7
1089 mcreq p14, 0, ip, c0, c2, 2
1090#else
1091 mcreq p14, 0, ip, c0, c1, 0
1092#endif
1093 bxeq lr
1094 ldmia r0!, {r1, r2, r3, ip}
1095 mcr p14, 0, r1, c0, c0, 4 // BVR0
1096 mcr p14, 0, r2, c0, c1, 4 // BVR1
1097 mcr p14, 0, r3, c0, c2, 4 // BVR2
1098 mcr p14, 0, ip, c0, c3, 4 // BVR3
1099 ldmia r0!, {r1, r2}
1100 mcr p14, 0, r1, c0, c4, 4 // BVR4
1101 mcr p14, 0, r2, c0, c5, 4 // BVR5
1102 add r0, r0, #40 // advance to bcr[0]
1103 ldmia r0!, {r1, r2, r3, ip}
1104 mcr p14, 0, r1, c0, c0, 5 // BCR0
1105 mcr p14, 0, r2, c0, c1, 5 // BCR1
1106 mcr p14, 0, r3, c0, c2, 5 // BCR2
1107 mcr p14, 0, ip, c0, c3, 5 // BCR3
1108 ldmia r0!, {r1, r2}
1109 mcr p14, 0, r1, c0, c4, 5 // BCR4
1110 mcr p14, 0, r2, c0, c5, 5 // BCR5
1111 add r0, r0, #40 // advance to wvr[0]
1112 ldmia r0!, {r1, r2}
1113 mcr p14, 0, r1, c0, c0, 6 // WVR0
1114 mcr p14, 0, r2, c0, c1, 6 // WVR1
1115 add r0, r0, #56 // advance to wcr[0]
1116 ldmia r0!, {r1, r2}
1117 mcr p14, 0, r1, c0, c0, 7 // WCR0
1118 mcr p14, 0, r2, c0, c1, 7 // WCR1
1119
1120 // Unlock debug registers
1121 mov ip, #0
1122 mcr p14, 0, ip, c1, c0, 4
1123#endif
1124 bx lr
1125
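/*
 * Layout note (illustrative inference, not stated in this file): the loads in
 * arm_debug_set_cp14 walk an arm_debug_state_t whose bvr/bcr/wvr/wcr arrays
 * each appear to hold 16 words; only 6 breakpoint and 2 watchpoint slots are
 * programmed, so the "add r0, r0, #40" and "add r0, r0, #56" instructions
 * skip the unused tail of each array (10*4 and 14*4 bytes respectively).
 */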
1126/*
1127 * void fiq_context_init(boolean_t enable_fiq)
1128 */
1129 .text
1130 .align 2
1131 .globl EXT(fiq_context_init)
1132LEXT(fiq_context_init)
1133 mrs r3, cpsr // Save current CPSR
1134 cmp r0, #0 // Test enable_fiq
1135 bicne r3, r3, #PSR_FIQF // Enable FIQ if not FALSE
1136 mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
1137 ldr r2, [r12, ACT_CPUDATAP] // Get current cpu data
1138
1139#if __ARM_TIME__
1140 /* Despite the fact that we use the physical timebase
1141 * register as the basis for time on our platforms, we
1142 * end up using the virtual timer in order to manage
1143 * deadlines. This is due to the fact that for our
1144 * current platforms, the interrupt generated by the
1145 * physical timer is not hooked up to anything, and is
1146 * therefore dropped on the floor. Therefore, for
1147 * timers to function they MUST be based on the virtual
1148 * timer.
1149 */
1150
1151 mov r0, #1 // Enable Timer
1152 mcr p15, 0, r0, c14, c3, 1 // Write to CNTV_CTL
1153
1154 /* Enable USER access to the physical timebase (PL0PCTEN).
1155 * The rationale for providing access to the physical
1156 * timebase being that the virtual timebase is broken for
1157 * some platforms. Maintaining the offset ourselves isn't
1158 * expensive, so mandate that the userspace implementation
 1159 * do timebase_phys+offset rather than trying to propagate
 1160 * all of the information about what works up to USER.
1161 */
1162 mcr p15, 0, r0, c14, c1, 0 // Set CNTKCTL.PL0PCTEN (CNTKCTL[0])
1163
1164#else /* ! __ARM_TIME__ */
1165 msr cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF) // Change mode to FIQ with FIQ/IRQ disabled
1166 mov r8, r2 // Load the BootCPUData address
1167 ldr r9, [r2, CPU_GET_FIQ_HANDLER] // Load fiq function address
1168 ldr r10, [r2, CPU_TBD_HARDWARE_ADDR] // Load the hardware address
1169 ldr r11, [r2, CPU_TBD_HARDWARE_VAL] // Load the hardware value
1170#endif /* __ARM_TIME__ */
1171
1172 msr cpsr_c, r3 // Restore saved CPSR
1173 bx lr
1174
1175/*
1176 * void reenable_async_aborts(void)
1177 */
1178 .text
1179 .align 2
1180 .globl EXT(reenable_async_aborts)
1181LEXT(reenable_async_aborts)
1182 cpsie a // Re-enable async aborts
1183 bx lr
1184
1185/*
1186 * uint64_t ml_get_timebase(void)
1187 */
1188 .text
1189 .align 2
1190 .globl EXT(ml_get_timebase)
1191LEXT(ml_get_timebase)
1192 mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
1193 ldr r3, [r12, ACT_CPUDATAP] // Get current cpu data
1194#if __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__
1195 isb // Required by ARMV7C.b section B8.1.2, ARMv8 section D6.1.2.
11961:
1197 mrrc p15, 0, r3, r1, c14 // Read the Time Base (CNTPCT), high => r1
1198 mrrc p15, 0, r0, r3, c14 // Read the Time Base (CNTPCT), low => r0
1199 mrrc p15, 0, r3, r2, c14 // Read the Time Base (CNTPCT), high => r2
1200 cmp r1, r2
1201 bne 1b // Loop until both high values are the same
1202
1203 ldr r3, [r12, ACT_CPUDATAP] // Get current cpu data
1204 ldr r2, [r3, CPU_BASE_TIMEBASE_LOW] // Add in the offset to
1205 adds r0, r0, r2 // convert to
1206 ldr r2, [r3, CPU_BASE_TIMEBASE_HIGH] // mach_absolute_time
1207 adc r1, r1, r2 //
1208#else /* ! __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__ */
12091:
1210 ldr r2, [r3, CPU_TIMEBASE_HIGH] // Get the saved TBU value
1211 ldr r0, [r3, CPU_TIMEBASE_LOW] // Get the saved TBL value
1212 ldr r1, [r3, CPU_TIMEBASE_HIGH] // Get the saved TBU value
1213 cmp r1, r2 // Make sure TB has not rolled over
1214 bne 1b
1215#endif /* __ARM_TIME__ */
1216 bx lr // return
1217
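/*
 * Illustrative C equivalent of the high/low/high timebase read above: reread
 * the high word until it is stable so the 64-bit snapshot is consistent, then
 * add the per-cpu base offset (assumption: the accessor names are
 * hypothetical).
 *
 *	uint32_t hi, lo;
 *	do {
 *		hi = read_cntpct_hi();
 *		lo = read_cntpct_lo();
 *	} while (hi != read_cntpct_hi());	// retry if CNTPCT carried mid-read
 *	return (((uint64_t)hi << 32) | lo) + cpu_base_timebase;
 */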
1218
1219/*
1220 * uint32_t ml_get_decrementer(void)
1221 */
1222 .text
1223 .align 2
1224 .globl EXT(ml_get_decrementer)
1225LEXT(ml_get_decrementer)
1226 mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
1227 ldr r3, [r12, ACT_CPUDATAP] // Get current cpu data
1228 ldr r2, [r3, CPU_GET_DECREMENTER_FUNC] // Get get_decrementer_func
1229 cmp r2, #0
1230 bxne r2 // Call it if there is one
1231#if __ARM_TIME__
1232 mrc p15, 0, r0, c14, c3, 0 // Read the Decrementer (CNTV_TVAL)
1233#else
1234 ldr r0, [r3, CPU_DECREMENTER] // Get the saved dec value
1235#endif
1236 bx lr // return
1237
1238
1239/*
1240 * void ml_set_decrementer(uint32_t dec_value)
1241 */
1242 .text
1243 .align 2
1244 .globl EXT(ml_set_decrementer)
1245LEXT(ml_set_decrementer)
1246 mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
1247 ldr r3, [r12, ACT_CPUDATAP] // Get current cpu data
1248 ldr r2, [r3, CPU_SET_DECREMENTER_FUNC] // Get set_decrementer_func
1249 cmp r2, #0
1250 bxne r2 // Call it if there is one
1251#if __ARM_TIME__
1252 str r0, [r3, CPU_DECREMENTER] // Save the new dec value
1253 mcr p15, 0, r0, c14, c3, 0 // Write the Decrementer (CNTV_TVAL)
1254#else
1255 mrs r2, cpsr // Save current CPSR
1256 msr cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF) // Change mode to FIQ with FIQ/IRQ disabled.
1257 mov r12, r0 // Set the DEC value
1258 str r12, [r8, CPU_DECREMENTER] // Store DEC
1259 msr cpsr_c, r2 // Restore saved CPSR
1260#endif
1261 bx lr
1262
1263
1264/*
1265 * boolean_t ml_get_interrupts_enabled(void)
1266 */
1267 .text
1268 .align 2
1269 .globl EXT(ml_get_interrupts_enabled)
1270LEXT(ml_get_interrupts_enabled)
1271 mrs r2, cpsr
1272 mov r0, #1
1273 bic r0, r0, r2, lsr #PSR_IRQFb
1274 bx lr
1275
1276/*
1277 * Platform Specific Timebase & Decrementer Functions
1278 *
1279 */
1280
1281#if defined(ARM_BOARD_CLASS_S7002)
1282 .text
1283 .align 2
1284 .globl EXT(fleh_fiq_s7002)
1285LEXT(fleh_fiq_s7002)
1286 str r11, [r10, #PMGR_INTERVAL_TMR_CTL_OFFSET] // Clear the decrementer interrupt
1287 mvn r13, #0
1288 str r13, [r8, CPU_DECREMENTER]
1289 b EXT(fleh_dec)
1290
1291 .text
1292 .align 2
1293 .globl EXT(s7002_get_decrementer)
1294LEXT(s7002_get_decrementer)
1295 ldr ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
1296 add ip, ip, #PMGR_INTERVAL_TMR_OFFSET
1297 ldr r0, [ip] // Get the Decrementer
1298 bx lr
1299
1300 .text
1301 .align 2
1302 .globl EXT(s7002_set_decrementer)
1303LEXT(s7002_set_decrementer)
1304 str r0, [r3, CPU_DECREMENTER] // Save the new dec value
1305 ldr ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
1306 str r0, [ip, #PMGR_INTERVAL_TMR_OFFSET] // Store the new Decrementer
1307 bx lr
1308#endif /* defined(ARM_BOARD_CLASS_S7002) */
1309
1310#if defined(ARM_BOARD_CLASS_T8002)
1311 .text
1312 .align 2
1313 .globl EXT(fleh_fiq_t8002)
1314LEXT(fleh_fiq_t8002)
1315 mov r13, #kAICTmrIntStat
1316 str r11, [r10, r13] // Clear the decrementer interrupt
1317 mvn r13, #0
1318 str r13, [r8, CPU_DECREMENTER]
1319 b EXT(fleh_dec)
1320
1321 .text
1322 .align 2
1323 .globl EXT(t8002_get_decrementer)
1324LEXT(t8002_get_decrementer)
1325 ldr ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
1326 mov r0, #kAICTmrCnt
1327 add ip, ip, r0
1328 ldr r0, [ip] // Get the Decrementer
1329 bx lr
1330
1331 .text
1332 .align 2
1333 .globl EXT(t8002_set_decrementer)
1334LEXT(t8002_set_decrementer)
1335 str r0, [r3, CPU_DECREMENTER] // Save the new dec value
1336 ldr ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
1337 mov r5, #kAICTmrCnt
1338 str r0, [ip, r5] // Store the new Decrementer
1339 bx lr
1340#endif /* defined(ARM_BOARD_CLASS_T8002) */
1341
1342LOAD_ADDR_GEN_DEF(kernel_pmap_store)
1343
1344#include "globals_asm.h"
1345
1346/* vim: set ts=4: */