/*
 * Copyright (c) 2007-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <arm/pmap.h>
#include <sys/errno.h>
#include "assym.s"

	.align 2
	.globl EXT(machine_set_current_thread)
LEXT(machine_set_current_thread)
	mcr		p15, 0, r0, c13, c0, 4		// Write TPIDRPRW
	ldr		r1, [r0, TH_CTH_SELF]
	mrc		p15, 0, r2, c13, c0, 3		// Read TPIDRURO
	and		r2, r2, #3					// Extract cpu number
	orr		r1, r1, r2
	mcr		p15, 0, r1, c13, c0, 3		// Write TPIDRURO
	ldr		r1, [r0, TH_CTH_DATA]
	mcr		p15, 0, r1, c13, c0, 2		// Write TPIDRURW
	bx		lr
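
/*
 * For orientation, a hedged C-level sketch of what the routine above does
 * with the three thread-pointer registers. TH_CTH_SELF/TH_CTH_DATA are
 * assym offsets into the thread structure; the assumption that the low two
 * TPIDRURO bits carry the cpu number follows from the "and r2, r2, #3"
 * above, and the accessor names are hypothetical:
 *
 *	void machine_set_current_thread_sketch(struct thread *th) {
 *		write_tpidrprw((uint32_t)th);         // kernel's current-thread pointer
 *		uint32_t cpu = read_tpidruro() & 3;   // preserve the cpu number bits
 *		write_tpidruro(th->cth_self | cpu);   // user-visible thread self
 *		write_tpidrurw(th->cth_data);         // user-writable TLS word
 *	}
 */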

/*
 * void machine_idle(void)
 */
	.text
	.align 2
	.globl EXT(machine_idle)
LEXT(machine_idle)
	cpsid	if							// Disable FIQ IRQ
	mov		ip, lr
	bl		EXT(Idle_context)
	mov		lr, ip
	cpsie	if							// Enable FIQ IRQ
	bx		lr

/*
 * void cpu_idle_wfi(boolean_t wfi_fast):
 *	cpu_idle is the only function that should call this.
 */
	.text
	.align 2
	.globl EXT(cpu_idle_wfi)
LEXT(cpu_idle_wfi)
	mov		r1, #32
	mov		r2, #1200
	cmp		r0, #0
	beq		3f
	mov		r1, #1
	b		2f
	.align 5
1:
	add		r0, r0, #1
	mov		r1, r2
2:

/*
 * We export the address of the WFI instruction so that it can be patched; this will be
 * ugly from a debugging perspective.
 */

#if (__ARM_ARCH__ >= 7)
	dsb
	.globl EXT(wfi_inst)
LEXT(wfi_inst)
	wfi
#else
	mcr		p15, 0, r0, c7, c10, 4
	.globl EXT(wfi_inst)
LEXT(wfi_inst)
	mcr		p15, 0, r0, c7, c0, 4
#endif
3:
	subs	r1, r1, #1
	bne		3b
	nop
	nop
	nop
	nop
	nop
	cmp		r0, #0
	beq		1b
	bx		lr
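
/*
 * Hedged control-flow sketch of the routine above. The constants 32 and
 * 1200 are the delay-loop trip counts taken from the code, spin() stands
 * for the subs/bne countdown, and wfi_with_sync() stands for the barrier
 * plus the patchable WFI at wfi_inst; all three names are hypothetical:
 *
 *	void cpu_idle_wfi_sketch(boolean_t wfi_fast) {
 *		if (!wfi_fast) {
 *			spin(32);          // settle before sleeping
 *			wfi_with_sync();   // dsb; wfi
 *			spin(1200);        // settle after wakeup
 *		} else {
 *			wfi_with_sync();   // fast path: sleep immediately
 *			spin(1);
 *		}
 *	}
 */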

	.align 2
	.globl EXT(timer_grab)
LEXT(timer_grab)
0:
	ldr		r2, [r0, TIMER_HIGH]
	ldr		r3, [r0, TIMER_LOW]
#if __ARM_SMP__
	dmb		ish							// dmb ish
#endif
	ldr		r1, [r0, TIMER_HIGHCHK]
	cmp		r1, r2
	bne		0b
	mov		r0, r3
	bx		lr

	.align 2
	.globl EXT(timer_update)
LEXT(timer_update)
	str		r1, [r0, TIMER_HIGHCHK]
#if __ARM_SMP__
	dmb		ish							// dmb ish
#endif
	str		r2, [r0, TIMER_LOW]
#if __ARM_SMP__
	dmb		ish							// dmb ish
#endif
	str		r1, [r0, TIMER_HIGH]
	bx		lr
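
/*
 * The two routines above form a sequence-style consistent reader and a
 * matching ordered writer for a 64-bit timer that 32-bit ARM cannot load
 * atomically. A hedged C sketch (field names mirror the assym offsets;
 * dmb() stands for the barriers compiled in on __ARM_SMP__):
 *
 *	uint64_t timer_grab_sketch(struct timer *t) {
 *		uint32_t hi, lo;
 *		do {
 *			hi = t->high;
 *			lo = t->low;
 *			dmb();                     // order the reads on SMP
 *		} while (hi != t->high_check); // retry if a writer intervened
 *		return ((uint64_t)hi << 32) | lo;
 *	}
 *
 *	void timer_update_sketch(struct timer *t, uint32_t hi, uint32_t lo) {
 *		t->high_check = hi;            // publish the new high word first
 *		dmb();
 *		t->low = lo;
 *		dmb();
 *		t->high = hi;                  // readers see high == high_check
 *	}                                  // only when low is consistent
 */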

	.align 2
	.globl EXT(get_vfp_enabled)
LEXT(get_vfp_enabled)
#if __ARM_VFP__
	fmrx	r0, fpexc
	and		r1, r0, #FPEXC_EN			// Extract previous VFP enable state
	mov		r0, r1, LSR #FPEXC_EN_BIT	// Return 1 if enabled, 0 if disabled
#else
	mov		r0, #0						// return false
#endif
	bx		lr

/* This is no longer useful (but is exported, so this may require kext cleanup). */
	.align 2
	.globl EXT(enable_kernel_vfp_context)
LEXT(enable_kernel_vfp_context)
	bx		lr

/* uint32_t get_fpscr(void):
 *	Returns the current state of the FPSCR register.
 */
	.align 2
	.globl EXT(get_fpscr)
LEXT(get_fpscr)
#if __ARM_VFP__
	fmrx	r0, fpscr
#endif
	bx		lr

/* void set_fpscr(uint32_t value):
 *	Set the FPSCR register.
 */
	.align 2
	.globl EXT(set_fpscr)
LEXT(set_fpscr)
#if __ARM_VFP__
	fmxr	fpscr, r0
#else
	mov		r0, #0
#endif
	bx		lr

/*
 * void OSSynchronizeIO(void)
 */
	.text
	.align 2
	.globl EXT(OSSynchronizeIO)
LEXT(OSSynchronizeIO)
	.align 2
	dsb
	bx		lr

/*
 * void flush_mmu_tlb(void)
 *
 *	Flush all TLBs
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb)
LEXT(flush_mmu_tlb)
	mov		r0, #0
#if __ARM_SMP__
	mcr		p15, 0, r0, c8, c3, 0		// Invalidate Inner Shareable entire TLBs
#else
	mcr		p15, 0, r0, c8, c7, 0		// Invalidate entire TLB
#endif
	dsb		ish
	isb
	bx		lr

/*
 * void flush_core_tlb(void)
 *
 *	Flush core TLB
 */
	.text
	.align 2
	.globl EXT(flush_core_tlb)
LEXT(flush_core_tlb)
	mov		r0, #0
	mcr		p15, 0, r0, c8, c7, 0		// Invalidate entire TLB
	dsb		ish
	isb
	bx		lr

/*
 * void flush_mmu_tlb_entry(uint32_t)
 *
 *	Flush TLB entry
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb_entry)
LEXT(flush_mmu_tlb_entry)
#if __ARM_SMP__
	mcr		p15, 0, r0, c8, c3, 1		// Invalidate TLB Inner Shareable entry
#else
	mcr		p15, 0, r0, c8, c7, 1		// Invalidate TLB entry
#endif
	dsb		ish
	isb
	bx		lr

/*
 * void flush_mmu_tlb_entries(uint32_t, uint32_t)
 *
 *	Flush TLB entries
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb_entries)
LEXT(flush_mmu_tlb_entries)
1:
#if __ARM_SMP__
	mcr		p15, 0, r0, c8, c3, 1		// Invalidate TLB Inner Shareable entry
#else
	mcr		p15, 0, r0, c8, c7, 1		// Invalidate TLB entry
#endif
	add		r0, r0, ARM_PGBYTES			// Increment to the next page
	cmp		r0, r1						// Loop if current address < end address
	blt		1b
	dsb		ish							// Synchronize
	isb
	bx		lr


/*
 * void flush_mmu_tlb_mva_entries(uint32_t)
 *
 *	Flush TLB entries for mva
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb_mva_entries)
LEXT(flush_mmu_tlb_mva_entries)
#if __ARM_SMP__
	mcr		p15, 0, r0, c8, c3, 3		// Invalidate TLB Inner Shareable entries by mva
#else
	mcr		p15, 0, r0, c8, c7, 3		// Invalidate TLB entries by mva
#endif
	dsb		ish
	isb
	bx		lr

/*
 * void flush_mmu_tlb_asid(uint32_t)
 *
 *	Flush TLB entries for requested asid
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb_asid)
LEXT(flush_mmu_tlb_asid)
#if __ARM_SMP__
	mcr		p15, 0, r0, c8, c3, 2		// Invalidate TLB Inner Shareable entries by asid
#else
	mcr		p15, 0, r0, c8, c7, 2		// Invalidate TLB entries by asid
#endif
	dsb		ish
	isb
	bx		lr

/*
 * void flush_core_tlb_asid(uint32_t)
 *
 *	Flush TLB entries for core for requested asid
 */
	.text
	.align 2
	.globl EXT(flush_core_tlb_asid)
LEXT(flush_core_tlb_asid)
	mcr		p15, 0, r0, c8, c7, 2		// Invalidate TLB entries by asid
	dsb		ish
	isb
	bx		lr

/*
 * Set MMU Translation Table Base
 */
	.text
	.align 2
	.globl EXT(set_mmu_ttb)
LEXT(set_mmu_ttb)
	orr		r0, r0, #(TTBR_SETUP & 0xFF)	// Setup PTWs memory attribute
	orr		r0, r0, #(TTBR_SETUP & 0xFF00)	// Setup PTWs memory attribute
	mcr		p15, 0, r0, c2, c0, 0		// write r0 to translation table 0
	dsb		ish
	isb
	bx		lr

/*
 * Set MMU Translation Table Base Alternate
 */
	.text
	.align 2
	.globl EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
	orr		r0, r0, #(TTBR_SETUP & 0xFF)	// Setup PTWs memory attribute
	orr		r0, r0, #(TTBR_SETUP & 0xFF00)	// Setup PTWs memory attribute
	mcr		p15, 0, r0, c2, c0, 1		// write r0 to translation table 1
	dsb		ish
	isb
	bx		lr

/*
 * Get MMU Translation Table Base
 */
	.text
	.align 2
	.globl EXT(get_mmu_ttb)
LEXT(get_mmu_ttb)
	mrc		p15, 0, r0, c2, c0, 0		// translation table to r0
	isb
	bx		lr

/*
 * get auxiliary control register
 */
	.text
	.align 2
	.globl EXT(get_aux_control)
LEXT(get_aux_control)
	mrc		p15, 0, r0, c1, c0, 1		// read aux control into r0
	bx		lr							// return old bits in r0

/*
 * set auxiliary control register
 */
	.text
	.align 2
	.globl EXT(set_aux_control)
LEXT(set_aux_control)
	mcr		p15, 0, r0, c1, c0, 1		// write r0 back to aux control
	isb
	bx		lr


/*
 * get MMU control register
 */
	.text
	.align 2
	.globl EXT(get_mmu_control)
LEXT(get_mmu_control)
	mrc		p15, 0, r0, c1, c0, 0		// read mmu control into r0
	bx		lr							// return old bits in r0

/*
 * set MMU control register
 */
	.text
	.align 2
	.globl EXT(set_mmu_control)
LEXT(set_mmu_control)
	mcr		p15, 0, r0, c1, c0, 0		// write r0 back to mmu control
	isb
	bx		lr

/*
 * MMU kernel virtual to physical address translation
 */
	.text
	.align 2
	.globl EXT(mmu_kvtop)
LEXT(mmu_kvtop)
	mrs		r3, cpsr					// Read cpsr
	cpsid	if							// Disable FIQ IRQ
	mov		r1, r0
	mcr		p15, 0, r1, c7, c8, 0		// Write V2PCWPR
	isb
	mrc		p15, 0, r0, c7, c4, 0		// Read PAR
	ands	r2, r0, #0x1				// Test conversion aborted
	bne		mmu_kvtophys_fail
	ands	r2, r0, #0x2				// Test super section
	mvnne	r2, #0xFF000000
	moveq	r2, #0x000000FF
	orreq	r2, r2, #0x00000F00
	bics	r0, r0, r2					// Clear lower bits
	beq		mmu_kvtophys_fail
	and		r1, r1, r2
	orr		r0, r0, r1
	b		mmu_kvtophys_ret
mmu_kvtophys_fail:
	mov		r0, #0
mmu_kvtophys_ret:
	msr		cpsr, r3					// Restore cpsr
	bx		lr
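
/*
 * mmu_kvtop above (and mmu_uvtop / mmu_kvtop_wpreflight below) issues a
 * V2PCW* address-translation operation and then decodes the Physical
 * Address Register. A hedged C sketch of the PAR decode, with masks taken
 * from the instructions above: on a supersection only PA[31:24] is valid
 * (16 MB granule), otherwise PA[31:12] (4 KB granule):
 *
 *	uintptr_t par_to_pa_sketch(uint32_t par, uintptr_t va) {
 *		if (par & 1)
 *			return 0;                          // translation aborted
 *		uint32_t mask = (par & 2) ? 0x00FFFFFF // supersection
 *		                          : 0x00000FFF; // section or page
 *		uint32_t pa = par & ~mask;
 *		if (pa == 0)
 *			return 0;                          // sanity check, as above
 *		return pa | (va & mask);               // splice in low VA bits
 *	}
 */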

/*
 * MMU user virtual to physical address translation
 */
	.text
	.align 2
	.globl EXT(mmu_uvtop)
LEXT(mmu_uvtop)
	mrs		r3, cpsr					// Read cpsr
	cpsid	if							// Disable FIQ IRQ
	mov		r1, r0
	mcr		p15, 0, r1, c7, c8, 2		// Write V2PCWUR
	isb
	mrc		p15, 0, r0, c7, c4, 0		// Read PAR
	ands	r2, r0, #0x1				// Test conversion aborted
	bne		mmu_uvtophys_fail
	ands	r2, r0, #0x2				// Test super section
	mvnne	r2, #0xFF000000
	moveq	r2, #0x000000FF
	orreq	r2, r2, #0x00000F00
	bics	r0, r0, r2					// Clear lower bits
	beq		mmu_uvtophys_fail
	and		r1, r1, r2
	orr		r0, r0, r1
	b		mmu_uvtophys_ret
mmu_uvtophys_fail:
	mov		r0, #0
mmu_uvtophys_ret:
	msr		cpsr, r3					// Restore cpsr
	bx		lr

/*
 * MMU kernel virtual to physical address preflight write access
 */
	.text
	.align 2
	.globl EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
	mrs		r3, cpsr					// Read cpsr
	cpsid	if							// Disable FIQ IRQ
	mov		r1, r0
	mcr		p15, 0, r1, c7, c8, 1		// Write V2PCWPW
	isb
	mrc		p15, 0, r0, c7, c4, 0		// Read PAR
	ands	r2, r0, #0x1				// Test conversion aborted
	bne		mmu_kvtophys_wpreflight_fail
	ands	r2, r0, #0x2				// Test super section
	mvnne	r2, #0xFF000000
	moveq	r2, #0x000000FF
	orreq	r2, r2, #0x00000F00
	bics	r0, r0, r2					// Clear lower bits
	beq		mmu_kvtophys_wpreflight_fail	// Sanity check: a successful translation must leave nonzero high bits
	and		r1, r1, r2
	orr		r0, r0, r1
	b		mmu_kvtophys_wpreflight_ret
mmu_kvtophys_wpreflight_fail:
	mov		r0, #0
mmu_kvtophys_wpreflight_ret:
	msr		cpsr, r3					// Restore cpsr
	bx		lr

/*
 * set context id register
 */
	.text
	.align 2
	.globl EXT(set_context_id)
LEXT(set_context_id)
	mcr		p15, 0, r0, c13, c0, 1
	isb
	bx		lr

#define COPYIO_HEADER(rUser, kLabel)				\
	/* test for zero len */					;\
	cmp		r2, #0					;\
	moveq		r0, #0					;\
	bxeq		lr					;\
	/* test user_addr, user_addr+len to see if it's in kernel space */ ;\
	add		r12, rUser, r2				;\
	cmp		r12, KERNELBASE				;\
	bhs		kLabel					;\
	cmp		r12, rUser				;\
	bcc		kLabel
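
/*
 * COPYIO_HEADER in hedged C form: a zero-length copy succeeds immediately,
 * and a user range that reaches into the kernel half of the address space
 * (bhs: unsigned >=) or wraps around (bcc: unsigned <) is diverted to the
 * kernel-copy path named by kLabel:
 *
 *	if (len == 0)
 *		return 0;
 *	uint32_t end = user_addr + len;
 *	if (end >= KERNELBASE || end < user_addr)  // in kernel space, or wrapped
 *		goto kernel_copy_path;
 */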

#define COPYIO_VALIDATE(NAME, SIZE)				\
	/* branch around for small sizes */			;\
	cmp		r2, #(SIZE)				;\
	bls		L##NAME##_validate_done			;\
	/* call NAME_validate to check the arguments */		;\
	push		{r0, r1, r2, r7, lr}			;\
	add		r7, sp, #12				;\
	blx		EXT(NAME##_validate)			;\
	cmp		r0, #0					;\
	addne		sp, #12					;\
	popne		{r7, pc}				;\
	pop		{r0, r1, r2, r7, lr}			;\
L##NAME##_validate_done:

#define COPYIO_SET_RECOVER()					\
	/* set recovery address */				;\
	stmfd		sp!, { r4, r5, r6 }			;\
	adr		r3, copyio_error			;\
	mrc		p15, 0, r12, c13, c0, 4			;\
	ldr		r4, [r12, TH_RECOVER]			;\
	str		r3, [r12, TH_RECOVER]

#if __ARM_USER_PROTECT__
#define COPYIO_MAP_USER()					\
	/* disable interrupts to prevent expansion to 2GB at L1 ;\
	 * between loading ttep and storing it in ttbr0.*/	;\
	mrs		r5, cpsr				;\
	cpsid		if					;\
	ldr		r3, [r12, ACT_UPTW_TTB]			;\
	mcr		p15, 0, r3, c2, c0, 0			;\
	msr		cpsr, r5				;\
	ldr		r3, [r12, ACT_ASID]			;\
	mcr		p15, 0, r3, c13, c0, 1			;\
	isb
#else
#define COPYIO_MAP_USER()
#endif

#define COPYIO_HEADER_KERN()					;\
	/* test for zero len */					;\
	cmp		r2, #0					;\
	moveq		r0, #0					;\
	bxeq		lr

.macro COPYIO_BODY
	/* if len is less than 16 bytes, just do a simple copy */
	cmp		r2, #16
	blt		L$0_bytewise
	/* test for src and dest of the same word alignment */
	orr		r3, r0, r1
	tst		r3, #3
	bne		L$0_bytewise
L$0_wordwise:
	sub		r2, r2, #16
L$0_wordwise_loop:
	/* 16 bytes at a time */
	ldmia	r0!, { r3, r5, r6, r12 }
	stmia	r1!, { r3, r5, r6, r12 }
	subs	r2, r2, #16
	bge		L$0_wordwise_loop
	/* fixup the len and test for completion */
	adds	r2, r2, #16
	beq		L$0_noerror
L$0_bytewise:
	/* copy 2 bytes at a time */
	subs	r2, r2, #2
	ldrb	r3, [r0], #1
	ldrbpl	r12, [r0], #1
	strb	r3, [r1], #1
	strbpl	r12, [r1], #1
	bhi		L$0_bytewise
L$0_noerror:
	mov		r0, #0
.endmacro
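
/*
 * COPYIO_BODY's copy strategy, as a hedged C sketch: when source and
 * destination share word alignment and at least 16 bytes remain, move 16
 * bytes per iteration (the ldmia/stmia of four registers); short or
 * misaligned buffers and the tail go up to two bytes at a time. copy16()
 * is a hypothetical stand-in for the four-register block move:
 *
 *	if (len >= 16 && ((src | dst) & 3) == 0) {
 *		while (len >= 16) {
 *			copy16(dst, src);
 *			src += 16; dst += 16; len -= 16;
 *		}
 *	}
 *	while (len > 0) {                  // one or two bytes per pass
 *		*dst++ = *src++; len--;
 *		if (len) { *dst++ = *src++; len--; }
 *	}
 *	return 0;
 */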

#if __ARM_USER_PROTECT__
#define COPYIO_UNMAP_USER()					\
	mrc		p15, 0, r12, c13, c0, 4			;\
	ldr		r3, [r12, ACT_KPTW_TTB]			;\
	mcr		p15, 0, r3, c2, c0, 0			;\
	mov		r3, #0					;\
	mcr		p15, 0, r3, c13, c0, 1			;\
	isb
#else
#define COPYIO_UNMAP_USER()					\
	mrc		p15, 0, r12, c13, c0, 4
#endif

#define COPYIO_RESTORE_RECOVER()				\
	/* restore the recovery address */			;\
	str		r4, [r12, TH_RECOVER]			;\
	ldmfd		sp!, { r4, r5, r6 }

/*
 * int copyinstr(
 *	  const user_addr_t user_addr,
 *	  char *kernel_addr,
 *	  vm_size_t max,
 *	  vm_size_t *actual)
 */
	.text
	.align 2
	.globl EXT(copyinstr)
LEXT(copyinstr)
	stmfd	sp!, { r4, r5, r6 }

	mov		r6, r3
	add		r3, r0, r2					// user_addr + max
	cmp		r3, KERNELBASE				// Check KERNELBASE < user_addr + max
	bhs		copyinstr_param_error		// Drop out if it is
	cmp		r3, r0						// Check we're copying from user space
	bcc		copyinstr_param_error		// Drop out if we aren't
	adr		r3, copyinstr_error			// Get address for recover
	mrc		p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr		r4, [r12, TH_RECOVER]		// Save previous recovery address
	str		r3, [r12, TH_RECOVER]		// Install our recovery address
	COPYIO_MAP_USER()
	mov		r12, #0						// Number of bytes copied so far
	cmp		r2, #0
	beq		copyinstr_too_long
copyinstr_loop:
	ldrb	r3, [r0], #1				// Load a byte from the source (user)
	strb	r3, [r1], #1				// Store a byte to the destination (kernel)
	add		r12, r12, #1
	cmp		r3, #0
	beq		copyinstr_done
	cmp		r12, r2						// Room to copy more bytes?
	bne		copyinstr_loop
//
// Ran out of space in the destination buffer, so return ENAMETOOLONG.
//
copyinstr_too_long:
	mov		r3, #ENAMETOOLONG
copyinstr_done:
//
// When we get here, we have finished copying the string. We came here from
// either the "beq copyinstr_done" above, in which case r3 == 0 (which is also
// the function result for success), or falling through from copyinstr_too_long,
// in which case r3 == ENAMETOOLONG.
//
	str		r12, [r6]					// Save the count for actual
	mov		r0, r3						// Return error code from r3
copyinstr_exit:
	COPYIO_UNMAP_USER()
	str		r4, [r12, TH_RECOVER]
copyinstr_exit2:
	ldmfd	sp!, { r4, r5, r6 }
	bx		lr

copyinstr_error:
	/* set error, exit routine */
	mov		r0, #EFAULT
	b		copyinstr_exit

copyinstr_param_error:
	/* set error, exit routine */
	mov		r0, #EFAULT
	b		copyinstr_exit2
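
/*
 * copyinstr above, restated as hedged C: bytes are copied until a NUL or
 * until max is exhausted, the count (including any NUL) is stored through
 * actual, and 0 / ENAMETOOLONG is returned; a user-space fault unwinds
 * through the TH_RECOVER hook to return EFAULT instead:
 *
 *	int copyinstr_sketch(const char *uaddr, char *kaddr,
 *	                     size_t max, size_t *actual) {
 *		size_t n = 0;
 *		int err = ENAMETOOLONG;       // result if the loop runs out of room
 *		while (n < max) {
 *			char c = *uaddr++;        // a fault here unwinds via TH_RECOVER
 *			*kaddr++ = c;
 *			n++;
 *			if (c == '\0') { err = 0; break; }
 *		}
 *		*actual = n;
 *		return err;
 *	}
 */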

/*
 * int copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
 */
	.text
	.align 2
	.globl EXT(copyin)
LEXT(copyin)
	COPYIO_HEADER(r0,copyio_kernel)
	COPYIO_VALIDATE(copyin,4096)
	COPYIO_SET_RECOVER()
	COPYIO_MAP_USER()
	COPYIO_BODY copyin
	COPYIO_UNMAP_USER()
	COPYIO_RESTORE_RECOVER()
	bx		lr

/*
 * int copyout(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
 */
	.text
	.align 2
	.globl EXT(copyout)
LEXT(copyout)
	COPYIO_HEADER(r1,copyio_kernel)
	COPYIO_VALIDATE(copyout,4096)
	COPYIO_SET_RECOVER()
	COPYIO_MAP_USER()
	COPYIO_BODY copyout
	COPYIO_UNMAP_USER()
	COPYIO_RESTORE_RECOVER()
	bx		lr


/*
 * int copyin_word(const user_addr_t user_addr, uint64_t *kernel_addr, vm_size_t nbytes)
 */
	.text
	.align 2
	.globl EXT(copyin_word)
LEXT(copyin_word)
	cmp		r2, #4						// Test if size is 4 or 8
	cmpne	r2, #8
	bne		L_copyin_invalid
	sub		r3, r2, #1
	tst		r0, r3						// Test alignment of user address
	bne		L_copyin_invalid

	COPYIO_HEADER(r0,L_copyin_word_fault)
	COPYIO_SET_RECOVER()
	COPYIO_MAP_USER()

	mov		r3, #0						// Clear high register
	cmp		r2, #4						// If size is 4
	ldreq	r2, [r0]					// Load word from user
	ldrdne	r2, r3, [r0]				// Else load double word from user
	stm		r1, {r2, r3}				// Store to kernel_addr
	mov		r0, #0						// Success

	COPYIO_UNMAP_USER()
	COPYIO_RESTORE_RECOVER()
	bx		lr
L_copyin_invalid:
	mov		r0, #EINVAL
	bx		lr
L_copyin_word_fault:
	mov		r0, #EFAULT
	bx		lr
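
/*
 * copyin_word above, in hedged C form: only naturally aligned 4- or 8-byte
 * reads are accepted, and the result is widened to 64 bits. load32()/
 * load64() are hypothetical stand-ins for the faultable user loads:
 *
 *	int copyin_word_sketch(uint32_t uaddr, uint64_t *kaddr, size_t nbytes) {
 *		if (nbytes != 4 && nbytes != 8)
 *			return EINVAL;
 *		if (uaddr & (nbytes - 1))
 *			return EINVAL;             // misaligned user address
 *		// range check and fault recovery as in COPYIO_HEADER/copyio_error
 *		*kaddr = (nbytes == 4) ? (uint64_t)load32(uaddr) : load64(uaddr);
 *		return 0;
 *	}
 */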

copyio_error:
	mov		r0, #EFAULT
	COPYIO_UNMAP_USER()
	str		r4, [r12, TH_RECOVER]
	ldmfd	sp!, { r4, r5, r6 }
	bx		lr

/*
 * int copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
 */
	.text
	.align 2
	.globl EXT(copyin_kern)
LEXT(copyin_kern)
	COPYIO_HEADER_KERN()
	b		bypass_check

/*
 * int copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
 */
	.text
	.align 2
	.globl EXT(copyout_kern)
LEXT(copyout_kern)
	COPYIO_HEADER_KERN()
	b		bypass_check

copyio_kernel_error:
	mov		r0, #EFAULT
	bx		lr

copyio_kernel:
	/* if (current_thread()->map->pmap != kernel_pmap) return EFAULT */
	mrc		p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr		r3, [r12, ACT_MAP]
	ldr		r3, [r3, MAP_PMAP]
	LOAD_ADDR(ip, kernel_pmap_store)
	cmp		r3, ip
	bne		copyio_kernel_error

bypass_check:
	stmfd	sp!, { r5, r6 }
	COPYIO_BODY copyio_kernel
	ldmfd	sp!, { r5, r6 }
	bx		lr

/*
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr)
 *
 *	Safely copy eight bytes (the fixed top of an ARM frame) from
 *	either user or kernel memory.
 */
	.text
	.align 2
	.globl EXT(copyinframe)
LEXT(copyinframe)
	COPYIO_SET_RECOVER()
	COPYIO_MAP_USER()
	ldmia	r0, {r2, r3}
	stmia	r1, {r2, r3}
	b		Lcopyin_noerror

/*
 * uint32_t arm_debug_read_dscr(void)
 */
	.text
	.align 2
	.globl EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)
#if __ARM_DEBUG__ >= 6
	mrc		p14, 0, r0, c0, c1
#else
	mov		r0, #0
#endif
	bx		lr

/*
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 *	Set debug registers to match the current thread state
 *	(NULL to disable). Assume 6 breakpoints and 2
 *	watchpoints, since that has been the case in all cores
 *	thus far.
 */
	.text
	.align 2
	.globl EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
#if __ARM_DEBUG__ >= 6
	mrc		p15, 0, r1, c13, c0, 4		// Read TPIDRPRW
	ldr		r2, [r1, ACT_CPUDATAP]		// Get current cpu
	str		r0, [r2, CPU_USER_DEBUG]	// Set current user debug

	// Lock the debug registers
	movw	ip, #0xCE55
	movt	ip, #0xC5AC
	mcr		p14, 0, ip, c1, c0, 4

	// enable monitor mode (needed to set and use debug registers)
	mrc		p14, 0, ip, c0, c1, 0
	orr		ip, ip, #0x8000				// set MDBGen = 1
#if __ARM_DEBUG__ >= 7
	mcr		p14, 0, ip, c0, c2, 2
#else
	mcr		p14, 0, ip, c0, c1, 0
#endif
	// first turn off all breakpoints/watchpoints
	mov		r1, #0
	mcr		p14, 0, r1, c0, c0, 5		// BCR0
	mcr		p14, 0, r1, c0, c1, 5		// BCR1
	mcr		p14, 0, r1, c0, c2, 5		// BCR2
	mcr		p14, 0, r1, c0, c3, 5		// BCR3
	mcr		p14, 0, r1, c0, c4, 5		// BCR4
	mcr		p14, 0, r1, c0, c5, 5		// BCR5
	mcr		p14, 0, r1, c0, c0, 7		// WCR0
	mcr		p14, 0, r1, c0, c1, 7		// WCR1
	// if (debug_state == NULL) disable monitor mode and return;
	cmp		r0, #0
	biceq	ip, ip, #0x8000				// set MDBGen = 0
#if __ARM_DEBUG__ >= 7
	mcreq	p14, 0, ip, c0, c2, 2
#else
	mcreq	p14, 0, ip, c0, c1, 0
#endif
	bxeq	lr
	ldmia	r0!, {r1, r2, r3, ip}
	mcr		p14, 0, r1, c0, c0, 4		// BVR0
	mcr		p14, 0, r2, c0, c1, 4		// BVR1
	mcr		p14, 0, r3, c0, c2, 4		// BVR2
	mcr		p14, 0, ip, c0, c3, 4		// BVR3
	ldmia	r0!, {r1, r2}
	mcr		p14, 0, r1, c0, c4, 4		// BVR4
	mcr		p14, 0, r2, c0, c5, 4		// BVR5
	add		r0, r0, #40					// advance to bcr[0]
	ldmia	r0!, {r1, r2, r3, ip}
	mcr		p14, 0, r1, c0, c0, 5		// BCR0
	mcr		p14, 0, r2, c0, c1, 5		// BCR1
	mcr		p14, 0, r3, c0, c2, 5		// BCR2
	mcr		p14, 0, ip, c0, c3, 5		// BCR3
	ldmia	r0!, {r1, r2}
	mcr		p14, 0, r1, c0, c4, 5		// BCR4
	mcr		p14, 0, r2, c0, c5, 5		// BCR5
	add		r0, r0, #40					// advance to wvr[0]
	ldmia	r0!, {r1, r2}
	mcr		p14, 0, r1, c0, c0, 6		// WVR0
	mcr		p14, 0, r2, c0, c1, 6		// WVR1
	add		r0, r0, #56					// advance to wcr[0]
	ldmia	r0!, {r1, r2}
	mcr		p14, 0, r1, c0, c0, 7		// WCR0
	mcr		p14, 0, r2, c0, c1, 7		// WCR1

	// Unlock debug registers
	mov		ip, #0
	mcr		p14, 0, ip, c1, c0, 4
#endif
	bx		lr
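
/*
 * The pointer arithmetic above (six BVRs read, then "add r0, r0, #40" to
 * reach bcr[0]; two WVRs read, then "add r0, r0, #56" to reach wcr[0])
 * implies a debug-state layout along these lines. This is a hedged
 * inference, not a quote of the real arm_debug_state_t definition, which
 * lives in the thread-state headers:
 *
 *	struct arm_debug_state_sketch {
 *		uint32_t bvr[16];   // 6 used; 24 bytes read + 40 skipped = 64
 *		uint32_t bcr[16];   // same arithmetic
 *		uint32_t wvr[16];   // 2 used; 8 bytes read + 56 skipped = 64
 *		uint32_t wcr[16];
 *	};
 */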

/*
 * void fiq_context_init(boolean_t enable_fiq)
 */
	.text
	.align 2
	.globl EXT(fiq_context_init)
LEXT(fiq_context_init)
	mrs		r3, cpsr					// Save current CPSR
	cmp		r0, #0						// Test enable_fiq
	bicne	r3, r3, #PSR_FIQF			// Enable FIQ if not FALSE
	mrc		p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr		r2, [r12, ACT_CPUDATAP]		// Get current cpu data

#if __ARM_TIME__
	/* Despite the fact that we use the physical timebase
	 * register as the basis for time on our platforms, we
	 * end up using the virtual timer in order to manage
	 * deadlines. This is due to the fact that for our
	 * current platforms, the interrupt generated by the
	 * physical timer is not hooked up to anything, and is
	 * therefore dropped on the floor. Therefore, for
	 * timers to function they MUST be based on the virtual
	 * timer.
	 */

	mov		r0, #1						// Enable Timer
	mcr		p15, 0, r0, c14, c3, 1		// Write to CNTV_CTL

	/* Enable USER access to the physical timebase (PL0PCTEN).
	 * The rationale for providing access to the physical
	 * timebase being that the virtual timebase is broken for
	 * some platforms. Maintaining the offset ourselves isn't
	 * expensive, so mandate that the userspace implementation
	 * do timebase_phys+offset rather than trying to propagate
	 * all of the information about what works up to USER.
	 */
	mcr		p15, 0, r0, c14, c1, 0		// Set CNTKCTL.PL0PCTEN (CNTKCTL[0])

#else /* ! __ARM_TIME__ */
	msr		cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF)	// Change mode to FIQ with FIQ/IRQ disabled
	mov		r8, r2						// Load the BootCPUData address
	ldr		r9, [r2, CPU_GET_FIQ_HANDLER]		// Load fiq function address
	ldr		r10, [r2, CPU_TBD_HARDWARE_ADDR]	// Load the hardware address
	ldr		r11, [r2, CPU_TBD_HARDWARE_VAL]		// Load the hardware value
#endif /* __ARM_TIME__ */

	msr		cpsr_c, r3					// Restore saved CPSR
	bx		lr

/*
 * void reenable_async_aborts(void)
 */
	.text
	.align 2
	.globl EXT(reenable_async_aborts)
LEXT(reenable_async_aborts)
	cpsie	a							// Re-enable async aborts
	bx		lr

/*
 * uint64_t ml_get_timebase(void)
 */
	.text
	.align 2
	.globl EXT(ml_get_timebase)
LEXT(ml_get_timebase)
	mrc		p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr		r3, [r12, ACT_CPUDATAP]		// Get current cpu data
#if __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__
	isb									// Required by ARMV7C.b section B8.1.2, ARMv8 section D6.1.2.
1:
	mrrc	p15, 0, r3, r1, c14			// Read the Time Base (CNTPCT), high => r1
	mrrc	p15, 0, r0, r3, c14			// Read the Time Base (CNTPCT), low => r0
	mrrc	p15, 0, r3, r2, c14			// Read the Time Base (CNTPCT), high => r2
	cmp		r1, r2
	bne		1b							// Loop until both high values are the same

	ldr		r3, [r12, ACT_CPUDATAP]		// Get current cpu data
	ldr		r2, [r3, CPU_BASE_TIMEBASE_LOW]		// Add in the offset to
	adds	r0, r0, r2					// convert to
	ldr		r2, [r3, CPU_BASE_TIMEBASE_HIGH]	// mach_absolute_time
	adc		r1, r1, r2
#else /* ! __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__ */
1:
	ldr		r2, [r3, CPU_TIMEBASE_HIGH]	// Get the saved TBU value
	ldr		r0, [r3, CPU_TIMEBASE_LOW]	// Get the saved TBL value
	ldr		r1, [r3, CPU_TIMEBASE_HIGH]	// Get the saved TBU value
	cmp		r1, r2						// Make sure TB has not rolled over
	bne		1b
#endif /* __ARM_TIME__ */
	bx		lr							// return
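
/*
 * The __ARM_TIME__ path above does the classic high/low/high read to get a
 * consistent 64-bit CNTPCT sample, then rebases it to mach_absolute_time.
 * A hedged C sketch; cntpct_high()/cntpct_low() stand in for the mrrc
 * reads, and base_timebase for the CPU_BASE_TIMEBASE_LOW/HIGH pair:
 *
 *	uint64_t ml_get_timebase_sketch(cpu_data_t *cdp) {
 *		uint32_t hi, lo, hi2;
 *		isb();                            // required before reading CNTPCT
 *		do {
 *			hi  = cntpct_high();
 *			lo  = cntpct_low();
 *			hi2 = cntpct_high();
 *		} while (hi != hi2);              // retry if low carried into high
 *		uint64_t now = ((uint64_t)hi << 32) | lo;
 *		return now + cdp->base_timebase;  // offset to mach_absolute_time
 *	}
 */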

/*
 * uint32_t ml_get_decrementer(void)
 */
	.text
	.align 2
	.globl EXT(ml_get_decrementer)
LEXT(ml_get_decrementer)
	mrc		p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr		r3, [r12, ACT_CPUDATAP]		// Get current cpu data
	ldr		r2, [r3, CPU_GET_DECREMENTER_FUNC]	// Get get_decrementer_func
	cmp		r2, #0
	bxne	r2							// Call it if there is one
#if __ARM_TIME__
	mrc		p15, 0, r0, c14, c3, 0		// Read the Decrementer (CNTV_TVAL)
#else
	ldr		r0, [r3, CPU_DECREMENTER]	// Get the saved dec value
#endif
	bx		lr							// return


/*
 * void ml_set_decrementer(uint32_t dec_value)
 */
	.text
	.align 2
	.globl EXT(ml_set_decrementer)
LEXT(ml_set_decrementer)
	mrc		p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr		r3, [r12, ACT_CPUDATAP]		// Get current cpu data
	ldr		r2, [r3, CPU_SET_DECREMENTER_FUNC]	// Get set_decrementer_func
	cmp		r2, #0
	bxne	r2							// Call it if there is one
#if __ARM_TIME__
	str		r0, [r3, CPU_DECREMENTER]	// Save the new dec value
	mcr		p15, 0, r0, c14, c3, 0		// Write the Decrementer (CNTV_TVAL)
#else
	mrs		r2, cpsr					// Save current CPSR
	msr		cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF)	// Change mode to FIQ with FIQ/IRQ disabled.
	mov		r12, r0						// Set the DEC value
	str		r12, [r8, CPU_DECREMENTER]	// Store DEC
	msr		cpsr_c, r2					// Restore saved CPSR
#endif
	bx		lr


/*
 * boolean_t ml_get_interrupts_enabled(void)
 */
	.text
	.align 2
	.globl EXT(ml_get_interrupts_enabled)
LEXT(ml_get_interrupts_enabled)
	mrs		r2, cpsr
	mov		r0, #1
	bic		r0, r0, r2, lsr #PSR_IRQFb
	bx		lr

/*
 * Platform Specific Timebase & Decrementer Functions
 *
 */

#if defined(ARM_BOARD_CLASS_S7002)
	.text
	.align 2
	.globl EXT(fleh_fiq_s7002)
LEXT(fleh_fiq_s7002)
	str		r11, [r10, #PMGR_INTERVAL_TMR_CTL_OFFSET]	// Clear the decrementer interrupt
	mvn		r13, #0
	str		r13, [r8, CPU_DECREMENTER]
	b		EXT(fleh_dec)

	.text
	.align 2
	.globl EXT(s7002_get_decrementer)
LEXT(s7002_get_decrementer)
	ldr		ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	add		ip, ip, #PMGR_INTERVAL_TMR_OFFSET
	ldr		r0, [ip]					// Get the Decrementer
	bx		lr

	.text
	.align 2
	.globl EXT(s7002_set_decrementer)
LEXT(s7002_set_decrementer)
	str		r0, [r3, CPU_DECREMENTER]	// Save the new dec value
	ldr		ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	str		r0, [ip, #PMGR_INTERVAL_TMR_OFFSET]	// Store the new Decrementer
	bx		lr
#endif /* defined(ARM_BOARD_CLASS_S7002) */

#if defined(ARM_BOARD_CLASS_T8002)
	.text
	.align 2
	.globl EXT(fleh_fiq_t8002)
LEXT(fleh_fiq_t8002)
	mov		r13, #kAICTmrIntStat
	str		r11, [r10, r13]				// Clear the decrementer interrupt
	mvn		r13, #0
	str		r13, [r8, CPU_DECREMENTER]
	b		EXT(fleh_dec)

	.text
	.align 2
	.globl EXT(t8002_get_decrementer)
LEXT(t8002_get_decrementer)
	ldr		ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	mov		r0, #kAICTmrCnt
	add		ip, ip, r0
	ldr		r0, [ip]					// Get the Decrementer
	bx		lr

	.text
	.align 2
	.globl EXT(t8002_set_decrementer)
LEXT(t8002_set_decrementer)
	str		r0, [r3, CPU_DECREMENTER]	// Save the new dec value
	ldr		ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	mov		r5, #kAICTmrCnt
	str		r0, [ip, r5]				// Store the new Decrementer
	bx		lr
#endif /* defined(ARM_BOARD_CLASS_T8002) */

LOAD_ADDR_GEN_DEF(kernel_pmap_store)

#include "globals_asm.h"

/* vim: set ts=4: */