/*
 * Copyright (c) 2007-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <arm/pmap.h>
#include <sys/errno.h>
#include "assym.s"
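
/*
 * void machine_set_current_thread(thread_t thread)
 *
 * Set the current thread pointer (TPIDRPRW), publish the thread's cthread
 * pointer to userspace via TPIDRURO (preserving the cpu number kept in its
 * low two bits), and clear TPIDRURW.
 */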
    .align 2
    .globl EXT(machine_set_current_thread)
LEXT(machine_set_current_thread)
    mcr p15, 0, r0, c13, c0, 4 // Write TPIDRPRW
    ldr r1, [r0, TH_CTH_SELF]
    mrc p15, 0, r2, c13, c0, 3 // Read TPIDRURO
    and r2, r2, #3 // Extract cpu number
    orr r1, r1, r2 // Combine cthread pointer with cpu number
    mcr p15, 0, r1, c13, c0, 3 // Write TPIDRURO
    mov r1, #0
    mcr p15, 0, r1, c13, c0, 2 // Write TPIDRURW
    bx lr

/*
 * void machine_idle(void)
 */
    .text
    .align 2
    .globl EXT(machine_idle)
LEXT(machine_idle)
    cpsid if // Disable FIQ IRQ
    mov ip, lr
    bl EXT(Idle_context)
    mov lr, ip
    cpsie if // Enable FIQ IRQ
    bx lr

/*
 * void cpu_idle_wfi(boolean_t wfi_fast):
 * cpu_idle is the only function that should call this.
 */
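/*
 * Note on behavior (descriptive comment; the loop counts below are platform
 * tuning values): in the non-fast case this routine spins briefly before
 * executing WFI and for a longer fixed count after wakeup; wfi_fast skips
 * the pre-WFI delay and uses a minimal post-WFI delay.
 */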
    .text
    .align 2
    .globl EXT(cpu_idle_wfi)
LEXT(cpu_idle_wfi)
    mov r1, #32
    mov r2, #1200
    cmp r0, #0
    beq 3f
    mov r1, #1
    b 2f
    .align 5
1:
    add r0, r0, #1
    mov r1, r2
2:

/*
 * We export the address of the WFI instruction so that it can be patched; this will be
 * ugly from a debugging perspective.
 */

#if (__ARM_ARCH__ >= 7)
    dsb
    .globl EXT(wfi_inst)
LEXT(wfi_inst)
    wfi
#else
    mcr p15, 0, r0, c7, c10, 4 // Data Synchronization Barrier (CP15 encoding)
    .globl EXT(wfi_inst)
LEXT(wfi_inst)
    mcr p15, 0, r0, c7, c0, 4 // Wait For Interrupt (CP15 encoding)
#endif
3:
    subs r1, r1, #1
    bne 3b
    nop
    nop
    nop
    nop
    nop
    cmp r0, #0
    beq 1b
    bx lr
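
/*
 * uint64_t timer_grab(timer_t timer)
 *
 * Lock-free read of a 64-bit timer value: snapshot TIMER_HIGH, read
 * TIMER_LOW, then re-read the high word via TIMER_HIGHCHK and retry if it
 * changed, so a concurrent update cannot tear the two halves. The 64-bit
 * result is returned in r0/r1 per the AAPCS.
 */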
    .align 2
    .globl EXT(timer_grab)
LEXT(timer_grab)
0:
    ldr r2, [r0, TIMER_HIGH]
    ldr r3, [r0, TIMER_LOW]
#if __ARM_SMP__
    dmb ish // Order the high/low reads against the check below
#endif
    ldr r1, [r0, TIMER_HIGHCHK]
    cmp r1, r2
    bne 0b
    mov r0, r3
    bx lr
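
/*
 * void timer_advance_internal_32(timer_t timer, uint32_t high, uint32_t low)
 *
 * Writer side of the protocol used by timer_grab above: store the new high
 * word to TIMER_HIGHCHK first, then the low word, then TIMER_HIGH, with
 * barriers in between, so a racing reader observes a mismatch and retries
 * rather than returning a torn value. (Prototype shown for reference;
 * argument names are descriptive.)
 */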
    .align 2
    .globl EXT(timer_advance_internal_32)
LEXT(timer_advance_internal_32)
    str r1, [r0, TIMER_HIGHCHK]
#if __ARM_SMP__
    dmb ish // Order the check-word store before the low-word store
#endif
    str r2, [r0, TIMER_LOW]
#if __ARM_SMP__
    dmb ish // Order the low-word store before the high-word store
#endif
    str r1, [r0, TIMER_HIGH]
    bx lr

    .align 2
    .globl EXT(get_vfp_enabled)
LEXT(get_vfp_enabled)
#if __ARM_VFP__
    fmrx r0, fpexc
    and r1, r0, #FPEXC_EN // Extract VFP enable previous state
    mov r0, r1, LSR #FPEXC_EN_BIT // Return 1 if enabled, 0 if disabled
#else
    mov r0, #0 // return false
#endif
    bx lr

/* This is no longer useful (but is exported, so this may require kext cleanup). */
    .align 2
    .globl EXT(enable_kernel_vfp_context)
LEXT(enable_kernel_vfp_context)
    bx lr

/* uint32_t get_fpscr(void):
 * Returns the current state of the FPSCR register.
 */
    .align 2
    .globl EXT(get_fpscr)
LEXT(get_fpscr)
#if __ARM_VFP__
    fmrx r0, fpscr
#endif
    bx lr
    .align 2
    .globl EXT(set_fpscr)
/* void set_fpscr(uint32_t value):
 * Set the FPSCR register.
 */
LEXT(set_fpscr)
#if __ARM_VFP__
    fmxr fpscr, r0
#else
    mov r0, #0
#endif
    bx lr

/*
 * void OSSynchronizeIO(void)
 */
    .text
    .align 2
    .globl EXT(OSSynchronizeIO)
LEXT(OSSynchronizeIO)
    .align 2
    dsb
    bx lr
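
/*
 * SYNC_TLB_FLUSH: the dsb ish waits for previously issued TLB maintenance
 * operations to complete across the inner shareable domain; the isb then
 * discards any instructions speculatively fetched under the old
 * translations.
 */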
.macro SYNC_TLB_FLUSH
    dsb ish
    isb
.endmacro

/*
 * void sync_tlb_flush(void)
 *
 * Synchronize one or more prior TLB flush operations
 */
    .text
    .align 2
    .globl EXT(sync_tlb_flush)
LEXT(sync_tlb_flush)
    SYNC_TLB_FLUSH
    bx lr

.macro FLUSH_MMU_TLB
    mov r0, #0
#if __ARM_SMP__
    mcr p15, 0, r0, c8, c3, 0 // Invalidate Inner Shareable entire TLBs
#else
    mcr p15, 0, r0, c8, c7, 0 // Invalidate entire TLB
#endif
.endmacro

/*
 * void flush_mmu_tlb_async(void)
 *
 * Flush all TLBs, don't wait for completion
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb_async)
LEXT(flush_mmu_tlb_async)
    FLUSH_MMU_TLB
    bx lr

/*
 * void flush_mmu_tlb(void)
 *
 * Flush all TLBs
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb)
LEXT(flush_mmu_tlb)
    FLUSH_MMU_TLB
    SYNC_TLB_FLUSH
    bx lr

.macro FLUSH_CORE_TLB
    mov r0, #0
    mcr p15, 0, r0, c8, c7, 0 // Invalidate entire TLB
.endmacro

/*
 * void flush_core_tlb_async(void)
 *
 * Flush local core's TLB, don't wait for completion
 */
    .text
    .align 2
    .globl EXT(flush_core_tlb_async)
LEXT(flush_core_tlb_async)
    FLUSH_CORE_TLB
    bx lr

/*
 * void flush_core_tlb(void)
 *
 * Flush local core's TLB
 */
    .text
    .align 2
    .globl EXT(flush_core_tlb)
LEXT(flush_core_tlb)
    FLUSH_CORE_TLB
    SYNC_TLB_FLUSH
    bx lr

.macro FLUSH_MMU_TLB_ENTRY
#if __ARM_SMP__
    mcr p15, 0, r0, c8, c3, 1 // Invalidate TLB Inner Shareable entry
#else
    mcr p15, 0, r0, c8, c7, 1 // Invalidate TLB entry
#endif
.endmacro
/*
 * void flush_mmu_tlb_entry_async(uint32_t)
 *
 * Flush TLB entry, don't wait for completion
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb_entry_async)
LEXT(flush_mmu_tlb_entry_async)
    FLUSH_MMU_TLB_ENTRY
    bx lr

/*
 * void flush_mmu_tlb_entry(uint32_t)
 *
 * Flush TLB entry
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb_entry)
LEXT(flush_mmu_tlb_entry)
    FLUSH_MMU_TLB_ENTRY
    SYNC_TLB_FLUSH
    bx lr

.macro FLUSH_MMU_TLB_ENTRIES
1:
#if __ARM_SMP__
    mcr p15, 0, r0, c8, c3, 1 // Invalidate TLB Inner Shareable entry
#else
    mcr p15, 0, r0, c8, c7, 1 // Invalidate TLB entry
#endif
    add r0, r0, ARM_PGBYTES // Increment to the next page
    cmp r0, r1 // Loop if current address < end address
    blt 1b
.endmacro

/*
 * void flush_mmu_tlb_entries_async(uint32_t, uint32_t)
 *
 * Flush TLB entries for address range, don't wait for completion
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb_entries_async)
LEXT(flush_mmu_tlb_entries_async)
    FLUSH_MMU_TLB_ENTRIES
    bx lr

/*
 * void flush_mmu_tlb_entries(uint32_t, uint32_t)
 *
 * Flush TLB entries for address range
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb_entries)
LEXT(flush_mmu_tlb_entries)
    FLUSH_MMU_TLB_ENTRIES
    SYNC_TLB_FLUSH
    bx lr


.macro FLUSH_MMU_TLB_MVA_ENTRIES
#if __ARM_SMP__
    mcr p15, 0, r0, c8, c3, 3 // Invalidate TLB Inner Shareable entries by mva
#else
    mcr p15, 0, r0, c8, c7, 3 // Invalidate TLB entries by mva
#endif
.endmacro

/*
 * void flush_mmu_tlb_mva_entries_async(uint32_t)
 *
 * Flush TLB entries for mva, don't wait for completion
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb_mva_entries_async)
LEXT(flush_mmu_tlb_mva_entries_async)
    FLUSH_MMU_TLB_MVA_ENTRIES
    bx lr

/*
 * void flush_mmu_tlb_mva_entries(uint32_t)
 *
 * Flush TLB entries for mva
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb_mva_entries)
LEXT(flush_mmu_tlb_mva_entries)
    FLUSH_MMU_TLB_MVA_ENTRIES
    SYNC_TLB_FLUSH
    bx lr

.macro FLUSH_MMU_TLB_ASID
#if __ARM_SMP__
    mcr p15, 0, r0, c8, c3, 2 // Invalidate TLB Inner Shareable entries by asid
#else
    mcr p15, 0, r0, c8, c7, 2 // Invalidate TLB entries by asid
#endif
.endmacro

/*
 * void flush_mmu_tlb_asid_async(uint32_t)
 *
 * Flush TLB entries for asid, don't wait for completion
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb_asid_async)
LEXT(flush_mmu_tlb_asid_async)
    FLUSH_MMU_TLB_ASID
    bx lr

/*
 * void flush_mmu_tlb_asid(uint32_t)
 *
 * Flush TLB entries for asid
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb_asid)
LEXT(flush_mmu_tlb_asid)
    FLUSH_MMU_TLB_ASID
    SYNC_TLB_FLUSH
    bx lr

.macro FLUSH_CORE_TLB_ASID
    mcr p15, 0, r0, c8, c7, 2 // Invalidate TLB entries by asid
.endmacro

/*
 * void flush_core_tlb_asid_async(uint32_t)
 *
 * Flush local core TLB entries for asid, don't wait for completion
 */
    .text
    .align 2
    .globl EXT(flush_core_tlb_asid_async)
LEXT(flush_core_tlb_asid_async)
    FLUSH_CORE_TLB_ASID
    bx lr

/*
 * void flush_core_tlb_asid(uint32_t)
 *
 * Flush local core TLB entries for asid
 */
    .text
    .align 2
    .globl EXT(flush_core_tlb_asid)
LEXT(flush_core_tlb_asid)
    FLUSH_CORE_TLB_ASID
    SYNC_TLB_FLUSH
    bx lr

/*
 * Set MMU Translation Table Base
 */
    .text
    .align 2
    .globl EXT(set_mmu_ttb)
LEXT(set_mmu_ttb)
    orr r0, r0, #(TTBR_SETUP & 0xFF) // Setup PTWs memory attribute
    orr r0, r0, #(TTBR_SETUP & 0xFF00) // Setup PTWs memory attribute
    mcr p15, 0, r0, c2, c0, 0 // write r0 to translation table 0
    dsb ish
    isb
    bx lr

/*
 * Set MMU Translation Table Base Alternate
 */
    .text
    .align 2
    .globl EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
    orr r0, r0, #(TTBR_SETUP & 0xFF) // Setup PTWs memory attribute
    orr r0, r0, #(TTBR_SETUP & 0xFF00) // Setup PTWs memory attribute
    mcr p15, 0, r0, c2, c0, 1 // write r0 to translation table 1
    dsb ish
    isb
    bx lr

/*
 * Get MMU Translation Table Base
 */
    .text
    .align 2
    .globl EXT(get_mmu_ttb)
LEXT(get_mmu_ttb)
    mrc p15, 0, r0, c2, c0, 0 // read translation table to r0
    isb
    bx lr

/*
 * get auxiliary control register
 */
    .text
    .align 2
    .globl EXT(get_aux_control)
LEXT(get_aux_control)
    mrc p15, 0, r0, c1, c0, 1 // read aux control into r0
    bx lr // return old bits in r0

/*
 * set auxiliary control register
 */
    .text
    .align 2
    .globl EXT(set_aux_control)
LEXT(set_aux_control)
    mcr p15, 0, r0, c1, c0, 1 // write r0 back to aux control
    isb
    bx lr


/*
 * get MMU control register
 */
    .text
    .align 2
    .globl EXT(get_mmu_control)
LEXT(get_mmu_control)
    mrc p15, 0, r0, c1, c0, 0 // read mmu control into r0
    bx lr // return old bits in r0

/*
 * set MMU control register
 */
    .text
    .align 2
    .globl EXT(set_mmu_control)
LEXT(set_mmu_control)
    mcr p15, 0, r0, c1, c0, 0 // write r0 back to mmu control
    isb
    bx lr

/*
 * MMU kernel virtual to physical address translation
 */
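/*
 * The three mmu_*vtop routines below use the CP15 address translation
 * operations: the input VA is written to the translation op, the result is
 * read back from the PAR, and the physical page bits from the PAR are
 * merged with the page offset of the original VA. PAR bit 0 indicates an
 * aborted translation and bit 1 a supersection result, which changes how
 * many low bits must be masked.
 */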
    .text
    .align 2
    .globl EXT(mmu_kvtop)
LEXT(mmu_kvtop)
    mrs r3, cpsr // Read cpsr
    cpsid if // Disable FIQ IRQ
    mov r1, r0
    mcr p15, 0, r1, c7, c8, 0 // Write V2PCWPR
    isb
    mrc p15, 0, r0, c7, c4, 0 // Read PAR
    ands r2, r0, #0x1 // Test conversion aborted
    bne mmu_kvtophys_fail
    ands r2, r0, #0x2 // Test super section
    mvnne r2, #0xFF000000
    moveq r2, #0x000000FF
    orreq r2, r2, #0x00000F00
    bics r0, r0, r2 // Clear lower bits
    beq mmu_kvtophys_fail
    and r1, r1, r2
    orr r0, r0, r1
    b mmu_kvtophys_ret
mmu_kvtophys_fail:
    mov r0, #0
mmu_kvtophys_ret:
    msr cpsr, r3 // Restore cpsr
    bx lr

/*
 * MMU user virtual to physical address translation
 */
    .text
    .align 2
    .globl EXT(mmu_uvtop)
LEXT(mmu_uvtop)
    mrs r3, cpsr // Read cpsr
    cpsid if // Disable FIQ IRQ
    mov r1, r0
    mcr p15, 0, r1, c7, c8, 2 // Write V2PCWUR
    isb
    mrc p15, 0, r0, c7, c4, 0 // Read PAR
    ands r2, r0, #0x1 // Test conversion aborted
    bne mmu_uvtophys_fail
    ands r2, r0, #0x2 // Test super section
    mvnne r2, #0xFF000000
    moveq r2, #0x000000FF
    orreq r2, r2, #0x00000F00
    bics r0, r0, r2 // Clear lower bits
    beq mmu_uvtophys_fail
    and r1, r1, r2
    orr r0, r0, r1
    b mmu_uvtophys_ret
mmu_uvtophys_fail:
    mov r0, #0
mmu_uvtophys_ret:
    msr cpsr, r3 // Restore cpsr
    bx lr

/*
 * MMU kernel virtual to physical address preflight write access
 */
    .text
    .align 2
    .globl EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
    mrs r3, cpsr // Read cpsr
    cpsid if // Disable FIQ IRQ
    mov r1, r0
    mcr p15, 0, r1, c7, c8, 1 // Write V2PCWPW
    isb
    mrc p15, 0, r0, c7, c4, 0 // Read PAR
    ands r2, r0, #0x1 // Test conversion aborted
    bne mmu_kvtophys_wpreflight_fail
    ands r2, r0, #0x2 // Test super section
    mvnne r2, #0xFF000000
    moveq r2, #0x000000FF
    orreq r2, r2, #0x00000F00
    bics r0, r0, r2 // Clear lower bits
    beq mmu_kvtophys_wpreflight_fail // Fail if the remaining physical address is zero
    and r1, r1, r2
    orr r0, r0, r1
    b mmu_kvtophys_wpreflight_ret
mmu_kvtophys_wpreflight_fail:
    mov r0, #0
mmu_kvtophys_wpreflight_ret:
    msr cpsr, r3 // Restore cpsr
    bx lr

/*
 * set context id register
 */
    .text
    .align 2
    .globl EXT(set_context_id)
LEXT(set_context_id)
    mcr p15, 0, r0, c13, c0, 1
    isb
    bx lr

/*
 * arg0: prefix of the external validator function (copyin or copyout)
 * arg1: 0-based index of highest argument register that must be preserved
 */
.macro COPYIO_VALIDATE
    /* call NAME_validate to check the arguments */
    push {r0-r$1, r7, lr}
    add r7, sp, #(($1 + 1) * 4)
    blx EXT($0_validate)
    cmp r0, #0
    addne sp, #(($1 + 1) * 4)
    popne {r7, pc}
    pop {r0-r$1, r7, lr}
.endmacro
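
/*
 * The COPYIO_* helpers below use the per-thread recovery address
 * (TH_RECOVER): before touching user memory they point it at a local error
 * label so that a faulting access is redirected there by the abort handler
 * instead of panicking, and they restore the saved value on exit.
 */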
#define COPYIO_SET_RECOVER() \
    /* set recovery address */ ;\
    stmfd sp!, { r4, r5, r6 } ;\
    adr r3, copyio_error ;\
    mrc p15, 0, r12, c13, c0, 4 ;\
    ldr r4, [r12, TH_RECOVER] ;\
    str r3, [r12, TH_RECOVER]

#define COPYIO_TRY_KERNEL() \
    /* if (current_thread()->map->pmap == kernel_pmap) copyio_kernel() */ ;\
    mrc p15, 0, r12, c13, c0, 4 /* Read TPIDRPRW */ ;\
    ldr r3, [r12, ACT_MAP] ;\
    ldr r3, [r3, MAP_PMAP] ;\
    LOAD_ADDR(ip, kernel_pmap_store) ;\
    cmp r3, ip ;\
    beq copyio_kern_body

#if __ARM_USER_PROTECT__
#define COPYIO_MAP_USER() \
    /* disable interrupts to prevent expansion to 2GB at L1 ;\
     * between loading ttep and storing it in ttbr0. */ ;\
    mrs r5, cpsr ;\
    cpsid if ;\
    ldr r3, [r12, ACT_UPTW_TTB] ;\
    mcr p15, 0, r3, c2, c0, 0 ;\
    msr cpsr, r5 ;\
    ldr r3, [r12, ACT_ASID] ;\
    mcr p15, 0, r3, c13, c0, 1 ;\
    isb
#else
#define COPYIO_MAP_USER()
#endif

#define COPYIO_HEADER() ;\
    /* test for zero len */ ;\
    cmp r2, #0 ;\
    moveq r0, #0 ;\
    bxeq lr
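
/*
 * COPYIO_BODY takes the caller's name prefix as macro argument $0 and
 * folds it into its labels (L$0_bytewise, L$0_wordwise, ...), giving each
 * expansion a unique set of local labels.
 */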
.macro COPYIO_BODY
    /* if len is less than 16 bytes, just do a simple copy */
    cmp r2, #16
    blt L$0_bytewise
    /* test for src and dest of the same word alignment */
    orr r3, r0, r1
    tst r3, #3
    bne L$0_bytewise
L$0_wordwise:
    sub r2, r2, #16
L$0_wordwise_loop:
    /* 16 bytes at a time */
    ldmia r0!, { r3, r5, r6, r12 }
    stmia r1!, { r3, r5, r6, r12 }
    subs r2, r2, #16
    bge L$0_wordwise_loop
    /* fixup the len and test for completion */
    adds r2, r2, #16
    beq L$0_noerror
L$0_bytewise:
    /* copy 2 bytes at a time */
    subs r2, r2, #2
    ldrb r3, [r0], #1
    ldrbpl r12, [r0], #1
    strb r3, [r1], #1
    strbpl r12, [r1], #1
    bhi L$0_bytewise
L$0_noerror:
    mov r0, #0
.endmacro

#if __ARM_USER_PROTECT__
#define COPYIO_UNMAP_USER() \
    mrc p15, 0, r12, c13, c0, 4 ;\
    ldr r3, [r12, ACT_KPTW_TTB] ;\
    mcr p15, 0, r3, c2, c0, 0 ;\
    mov r3, #0 ;\
    mcr p15, 0, r3, c13, c0, 1 ;\
    isb
#else
#define COPYIO_UNMAP_USER() \
    mrc p15, 0, r12, c13, c0, 4
#endif

#define COPYIO_RESTORE_RECOVER() \
    /* restore the recovery address */ ;\
    str r4, [r12, TH_RECOVER] ;\
    ldmfd sp!, { r4, r5, r6 }

/*
 * int copyinstr(
 *     const user_addr_t user_addr,
 *     char *kernel_addr,
 *     vm_size_t max,
 *     vm_size_t *actual)
 */
    .text
    .align 2
    .globl EXT(copyinstr)
LEXT(copyinstr)
    cmp r2, #0
    moveq r0, #ENAMETOOLONG
    moveq r12, #0
    streq r12, [r3]
    bxeq lr
    COPYIO_VALIDATE copyin_user, 3
    stmfd sp!, { r4, r5, r6 }

    mov r6, r3
    adr r3, copyinstr_error // Get address for recover
    mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
    ldr r4, [r12, TH_RECOVER] // Save previous recovery address
    str r3, [r12, TH_RECOVER]
    COPYIO_MAP_USER()
    mov r12, #0 // Number of bytes copied so far
copyinstr_loop:
    ldrb r3, [r0], #1 // Load a byte from the source (user)
    strb r3, [r1], #1 // Store a byte to the destination (kernel)
    add r12, r12, #1
    cmp r3, #0
    beq copyinstr_done
    cmp r12, r2 // Room to copy more bytes?
    bne copyinstr_loop
//
// Ran out of space in the destination buffer, so return ENAMETOOLONG.
//
copyinstr_too_long:
    mov r3, #ENAMETOOLONG
copyinstr_done:
//
// When we get here, we have finished copying the string. We came here from
// either the "beq copyinstr_done" above, in which case r3 == 0 (which is also
// the function result for success), or falling through from copyinstr_too_long,
// in which case r3 == ENAMETOOLONG.
//
    str r12, [r6] // Save the count for actual
    mov r0, r3 // Return error code from r3
copyinstr_exit:
    COPYIO_UNMAP_USER()
    str r4, [r12, TH_RECOVER] // Restore the previous recovery address
    ldmfd sp!, { r4, r5, r6 }
    bx lr

copyinstr_error:
    /* set error, exit routine */
    mov r0, #EFAULT
    b copyinstr_exit

/*
 * int copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
 */
    .text
    .align 2
    .globl EXT(copyin)
LEXT(copyin)
    COPYIO_HEADER()
    COPYIO_VALIDATE copyin, 2
    COPYIO_TRY_KERNEL()
    COPYIO_SET_RECOVER()
    COPYIO_MAP_USER()
    COPYIO_BODY copyin
    COPYIO_UNMAP_USER()
    COPYIO_RESTORE_RECOVER()
    bx lr

/*
 * int copyout(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
 */
    .text
    .align 2
    .globl EXT(copyout)
LEXT(copyout)
    COPYIO_HEADER()
    COPYIO_VALIDATE copyout, 2
    COPYIO_TRY_KERNEL()
    COPYIO_SET_RECOVER()
    COPYIO_MAP_USER()
    COPYIO_BODY copyout
    COPYIO_UNMAP_USER()
    COPYIO_RESTORE_RECOVER()
    bx lr


/*
 * int copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
 *   r0: user_addr
 *   r1: kernel_addr
 */
    .text
    .align 2
    .globl EXT(copyin_atomic32)
LEXT(copyin_atomic32)
    tst r0, #3 // Test alignment of user address
    bne 2f

    mov r2, #4
    COPYIO_VALIDATE copyin_user, 1
    COPYIO_SET_RECOVER()
    COPYIO_MAP_USER()

    ldr r2, [r0] // Load word from user
    str r2, [r1] // Store to kernel_addr
    mov r0, #0 // Success

    COPYIO_UNMAP_USER()
    COPYIO_RESTORE_RECOVER()
    bx lr
2: // misaligned copyin
    mov r0, #EINVAL
    bx lr

/*
 * int copyin_atomic32_wait_if_equals(const char *src, uint32_t value)
 *   r0: user_addr
 *   r1: value
 */
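/*
 * The ldrex below also arms this CPU's exclusive monitor for the user
 * address; a store to a monitored location by another observer generates
 * an event, so the wfe taken when the values match wakes when the word
 * changes (the standard ARM compare-and-wait pattern).
 */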
    .text
    .align 2
    .globl EXT(copyin_atomic32_wait_if_equals)
LEXT(copyin_atomic32_wait_if_equals)
    tst r0, #3 // Test alignment of user address
    bne 2f

    mov r2, r0
    mov r3, #4
    COPYIO_VALIDATE copyio_user, 1 // validate user address (uses r2, r3)
    COPYIO_SET_RECOVER()
    COPYIO_MAP_USER()

    ldrex r2, [r0]
    cmp r2, r1
    movne r0, #ESTALE
    bne 1f
    mov r0, #0
    wfe
1:
    clrex

    COPYIO_UNMAP_USER()
    COPYIO_RESTORE_RECOVER()
    bx lr
2: // misaligned copyin
    mov r0, #EINVAL
    bx lr

/*
 * int copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
 *   r0: user_addr
 *   r1: kernel_addr
 */
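/*
 * On ARMv7 a plain 64-bit load is not guaranteed to be single-copy atomic;
 * the ldrexd/strexd pair below writes back the value it just read and
 * retries until the store-exclusive succeeds, which guarantees the two
 * words were read atomically.
 */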
    .text
    .align 2
    .globl EXT(copyin_atomic64)
LEXT(copyin_atomic64)
    tst r0, #7 // Test alignment of user address
    bne 2f

    mov r2, #8
    COPYIO_VALIDATE copyin_user, 1
    COPYIO_SET_RECOVER()
    COPYIO_MAP_USER()

1: // ldrex/strex retry loop
    ldrexd r2, r3, [r0] // Load double word from user
    strexd r5, r2, r3, [r0] // (the COPYIO_*() macros make r5 safe to use as a scratch register here)
    cmp r5, #0
    bne 1b
    stm r1, {r2, r3} // Store to kernel_addr
    mov r0, #0 // Success

    COPYIO_UNMAP_USER()
    COPYIO_RESTORE_RECOVER()
    bx lr
2: // misaligned copyin
    mov r0, #EINVAL
    bx lr


copyio_error:
    mov r0, #EFAULT
    COPYIO_UNMAP_USER()
    str r4, [r12, TH_RECOVER] // Restore the previous recovery address
    ldmfd sp!, { r4, r5, r6 }
    bx lr


/*
 * int copyout_atomic32(uint32_t value, user_addr_t user_addr)
 *   r0: value
 *   r1: user_addr
 */
    .text
    .align 2
    .globl EXT(copyout_atomic32)
LEXT(copyout_atomic32)
    tst r1, #3 // Test alignment of user address
    bne 2f

    mov r2, r1
    mov r3, #4
    COPYIO_VALIDATE copyio_user, 1 // validate user address (uses r2, r3)
    COPYIO_SET_RECOVER()
    COPYIO_MAP_USER()

    str r0, [r1] // Store word to user
    mov r0, #0 // Success

    COPYIO_UNMAP_USER()
    COPYIO_RESTORE_RECOVER()
    bx lr
2: // misaligned copyout
    mov r0, #EINVAL
    bx lr


/*
 * int copyout_atomic64(uint64_t value, user_addr_t user_addr)
 *   r0, r1: value
 *   r2: user_addr
 */
    .text
    .align 2
    .globl EXT(copyout_atomic64)
LEXT(copyout_atomic64)
    tst r2, #7 // Test alignment of user address
    bne 2f

    mov r3, #8
    COPYIO_VALIDATE copyio_user, 2 // validate user address (uses r2, r3)
    COPYIO_SET_RECOVER()
    COPYIO_MAP_USER()

1: // ldrex/strex retry loop
    ldrexd r4, r5, [r2]
    strexd r3, r0, r1, [r2] // Atomically store double word to user
    cmp r3, #0
    bne 1b

    mov r0, #0 // Success

    COPYIO_UNMAP_USER()
    COPYIO_RESTORE_RECOVER()
    bx lr
2: // misaligned copyout
    mov r0, #EINVAL
    bx lr


/*
 * int copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
 */
    .text
    .align 2
    .globl EXT(copyin_kern)
LEXT(copyin_kern)
    COPYIO_HEADER()
    b copyio_kern_body

/*
 * int copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
 */
    .text
    .align 2
    .globl EXT(copyout_kern)
LEXT(copyout_kern)
    COPYIO_HEADER()
    b copyio_kern_body

copyio_kern_body:
    stmfd sp!, { r5, r6 }
    COPYIO_BODY copyio_kernel
    ldmfd sp!, { r5, r6 }
    bx lr

/*
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr)
 *
 * Safely copy eight bytes (the fixed top of an ARM frame) from
 * either user or kernel memory.
 */
    .text
    .align 2
    .globl EXT(copyinframe)
LEXT(copyinframe)
    COPYIO_SET_RECOVER()
    COPYIO_MAP_USER()
    ldmia r0, {r2, r3}
    stmia r1, {r2, r3}
    b Lcopyin_noerror

/*
 * uint32_t arm_debug_read_dscr(void)
 */
    .text
    .align 2
    .globl EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)
#if __ARM_DEBUG__ >= 6
    mrc p14, 0, r0, c0, c1
#else
    mov r0, #0
#endif
    bx lr

/*
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 * Set debug registers to match the current thread state
 * (NULL to disable). Assume 6 breakpoints and 2
 * watchpoints, since that has been the case in all cores
 * thus far.
 */
    .text
    .align 2
    .globl EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
#if __ARM_DEBUG__ >= 6
    mrc p15, 0, r1, c13, c0, 4 // Read TPIDRPRW
    ldr r2, [r1, ACT_CPUDATAP] // Get current cpu
    str r0, [r2, CPU_USER_DEBUG] // Set current user debug

    // Unlock the debug registers: 0xC5ACCE55 is the architected
    // Lock Access Register key that permits writes to them
    movw ip, #0xCE55
    movt ip, #0xC5AC
    mcr p14, 0, ip, c1, c0, 4

    // enable monitor mode (needed to set and use debug registers)
    mrc p14, 0, ip, c0, c1, 0
    orr ip, ip, #0x8000 // set MDBGen = 1
#if __ARM_DEBUG__ >= 7
    mcr p14, 0, ip, c0, c2, 2
#else
    mcr p14, 0, ip, c0, c1, 0
#endif
    // first turn off all breakpoints/watchpoints
    mov r1, #0
    mcr p14, 0, r1, c0, c0, 5 // BCR0
    mcr p14, 0, r1, c0, c1, 5 // BCR1
    mcr p14, 0, r1, c0, c2, 5 // BCR2
    mcr p14, 0, r1, c0, c3, 5 // BCR3
    mcr p14, 0, r1, c0, c4, 5 // BCR4
    mcr p14, 0, r1, c0, c5, 5 // BCR5
    mcr p14, 0, r1, c0, c0, 7 // WCR0
    mcr p14, 0, r1, c0, c1, 7 // WCR1
    // if (debug_state == NULL) disable monitor mode and return;
    cmp r0, #0
    biceq ip, ip, #0x8000 // set MDBGen = 0
#if __ARM_DEBUG__ >= 7
    mcreq p14, 0, ip, c0, c2, 2
#else
    mcreq p14, 0, ip, c0, c1, 0
#endif
    bxeq lr
    ldmia r0!, {r1, r2, r3, ip}
    mcr p14, 0, r1, c0, c0, 4 // BVR0
    mcr p14, 0, r2, c0, c1, 4 // BVR1
    mcr p14, 0, r3, c0, c2, 4 // BVR2
    mcr p14, 0, ip, c0, c3, 4 // BVR3
    ldmia r0!, {r1, r2}
    mcr p14, 0, r1, c0, c4, 4 // BVR4
    mcr p14, 0, r2, c0, c5, 4 // BVR5
    add r0, r0, #40 // advance to bcr[0]
    ldmia r0!, {r1, r2, r3, ip}
    mcr p14, 0, r1, c0, c0, 5 // BCR0
    mcr p14, 0, r2, c0, c1, 5 // BCR1
    mcr p14, 0, r3, c0, c2, 5 // BCR2
    mcr p14, 0, ip, c0, c3, 5 // BCR3
    ldmia r0!, {r1, r2}
    mcr p14, 0, r1, c0, c4, 5 // BCR4
    mcr p14, 0, r2, c0, c5, 5 // BCR5
    add r0, r0, #40 // advance to wvr[0]
    ldmia r0!, {r1, r2}
    mcr p14, 0, r1, c0, c0, 6 // WVR0
    mcr p14, 0, r2, c0, c1, 6 // WVR1
    add r0, r0, #56 // advance to wcr[0]
    ldmia r0!, {r1, r2}
    mcr p14, 0, r1, c0, c0, 7 // WCR0
    mcr p14, 0, r2, c0, c1, 7 // WCR1

    // Relock the debug registers (any non-key value locks them)
    mov ip, #0
    mcr p14, 0, ip, c1, c0, 4
#endif
    bx lr

/*
 * void fiq_context_init(boolean_t enable_fiq)
 */
    .text
    .align 2
    .globl EXT(fiq_context_init)
LEXT(fiq_context_init)
    mrs r3, cpsr // Save current CPSR
    cmp r0, #0 // Test enable_fiq
    bicne r3, r3, #PSR_FIQF // Enable FIQ if not FALSE
    mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
    ldr r2, [r12, ACT_CPUDATAP] // Get current cpu data

#if __ARM_TIME__
    /* Despite the fact that we use the physical timebase
     * register as the basis for time on our platforms, we
     * end up using the virtual timer in order to manage
     * deadlines. This is due to the fact that for our
     * current platforms, the interrupt generated by the
     * physical timer is not hooked up to anything, and is
     * therefore dropped on the floor. Therefore, for
     * timers to function they MUST be based on the virtual
     * timer.
     */

    mov r0, #1 // Enable Timer
    mcr p15, 0, r0, c14, c3, 1 // Write to CNTV_CTL

    /* Enable USER access to the physical timebase (PL0PCTEN).
     * The rationale for providing access to the physical
     * timebase being that the virtual timebase is broken for
     * some platforms. Maintaining the offset ourselves isn't
     * expensive, so mandate that the userspace implementation
     * do timebase_phys+offset rather than trying to propagate
     * all of the information about what works up to USER.
     */
    mcr p15, 0, r0, c14, c1, 0 // Set CNTKCTL.PL0PCTEN (CNTKCTL[0])

#else /* ! __ARM_TIME__ */
    msr cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF) // Change mode to FIQ with FIQ/IRQ disabled
    mov r8, r2 // Load the BootCPUData address
    ldr r9, [r2, CPU_GET_FIQ_HANDLER] // Load fiq function address
    ldr r10, [r2, CPU_TBD_HARDWARE_ADDR] // Load the hardware address
    ldr r11, [r2, CPU_TBD_HARDWARE_VAL] // Load the hardware value
#endif /* __ARM_TIME__ */

    msr cpsr_c, r3 // Restore saved CPSR
    bx lr

/*
 * void reenable_async_aborts(void)
 */
    .text
    .align 2
    .globl EXT(reenable_async_aborts)
LEXT(reenable_async_aborts)
    cpsie a // Re-enable async aborts
    bx lr

/*
 * uint64_t ml_get_timebase(void)
 */
    .text
    .align 2
    .globl EXT(ml_get_timebase)
LEXT(ml_get_timebase)
    mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
    ldr r3, [r12, ACT_CPUDATAP] // Get current cpu data
#if __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__
    isb // Required by ARMv7 section B8.1.2, ARMv8 section D6.1.2.
1:
    mrrc p15, 0, r3, r1, c14 // Read the Time Base (CNTPCT), high => r1
    mrrc p15, 0, r0, r3, c14 // Read the Time Base (CNTPCT), low => r0
    mrrc p15, 0, r3, r2, c14 // Read the Time Base (CNTPCT), high => r2
    cmp r1, r2
    bne 1b // Loop until both high values are the same

    ldr r3, [r12, ACT_CPUDATAP] // Get current cpu data
    ldr r2, [r3, CPU_BASE_TIMEBASE_LOW] // Add in the offset to
    adds r0, r0, r2 // convert to
    ldr r2, [r3, CPU_BASE_TIMEBASE_HIGH] // mach_absolute_time
    adc r1, r1, r2
#else /* ! __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__ */
1:
    ldr r2, [r3, CPU_TIMEBASE_HIGH] // Get the saved TBU value
    ldr r0, [r3, CPU_TIMEBASE_LOW] // Get the saved TBL value
    ldr r1, [r3, CPU_TIMEBASE_HIGH] // Get the saved TBU value
    cmp r1, r2 // Make sure TB has not rolled over
    bne 1b
#endif /* __ARM_TIME__ */
    bx lr // return


/*
 * uint32_t ml_get_decrementer(void)
 */
    .text
    .align 2
    .globl EXT(ml_get_decrementer)
LEXT(ml_get_decrementer)
    mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
    ldr r3, [r12, ACT_CPUDATAP] // Get current cpu data
    ldr r2, [r3, CPU_GET_DECREMENTER_FUNC] // Get get_decrementer_func
    cmp r2, #0
    bxne r2 // Call it if there is one
#if __ARM_TIME__
    mrc p15, 0, r0, c14, c3, 0 // Read the Decrementer (CNTV_TVAL)
#else
    ldr r0, [r3, CPU_DECREMENTER] // Get the saved dec value
#endif
    bx lr // return


/*
 * void ml_set_decrementer(uint32_t dec_value)
 */
    .text
    .align 2
    .globl EXT(ml_set_decrementer)
LEXT(ml_set_decrementer)
    mrc p15, 0, r12, c13, c0, 4 // Read TPIDRPRW
    ldr r3, [r12, ACT_CPUDATAP] // Get current cpu data
    ldr r2, [r3, CPU_SET_DECREMENTER_FUNC] // Get set_decrementer_func
    cmp r2, #0
    bxne r2 // Call it if there is one
#if __ARM_TIME__
    str r0, [r3, CPU_DECREMENTER] // Save the new dec value
    mcr p15, 0, r0, c14, c3, 0 // Write the Decrementer (CNTV_TVAL)
#else
    mrs r2, cpsr // Save current CPSR
    msr cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF) // Change mode to FIQ with FIQ/IRQ disabled.
    mov r12, r0 // Set the DEC value
    str r12, [r8, CPU_DECREMENTER] // Store DEC
    msr cpsr_c, r2 // Restore saved CPSR
#endif
    bx lr


/*
 * boolean_t ml_get_interrupts_enabled(void)
 */
    .text
    .align 2
    .globl EXT(ml_get_interrupts_enabled)
LEXT(ml_get_interrupts_enabled)
    mrs r2, cpsr
    mov r0, #1
    bic r0, r0, r2, lsr #PSR_IRQFb
    bx lr

/*
 * Platform Specific Timebase & Decrementer Functions
 *
 */

#if defined(ARM_BOARD_CLASS_S7002)
    .text
    .align 2
    .globl EXT(fleh_fiq_s7002)
LEXT(fleh_fiq_s7002)
    str r11, [r10, #PMGR_INTERVAL_TMR_CTL_OFFSET] // Clear the decrementer interrupt
    mvn r13, #0
    str r13, [r8, CPU_DECREMENTER]
    b EXT(fleh_dec)

    .text
    .align 2
    .globl EXT(s7002_get_decrementer)
LEXT(s7002_get_decrementer)
    ldr ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
    add ip, ip, #PMGR_INTERVAL_TMR_OFFSET
    ldr r0, [ip] // Get the Decrementer
    bx lr

    .text
    .align 2
    .globl EXT(s7002_set_decrementer)
LEXT(s7002_set_decrementer)
    str r0, [r3, CPU_DECREMENTER] // Save the new dec value
    ldr ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
    str r0, [ip, #PMGR_INTERVAL_TMR_OFFSET] // Store the new Decrementer
    bx lr
#endif /* defined(ARM_BOARD_CLASS_S7002) */

#if defined(ARM_BOARD_CLASS_T8002)
    .text
    .align 2
    .globl EXT(fleh_fiq_t8002)
LEXT(fleh_fiq_t8002)
    mov r13, #kAICTmrIntStat
    str r11, [r10, r13] // Clear the decrementer interrupt
    mvn r13, #0
    str r13, [r8, CPU_DECREMENTER]
    b EXT(fleh_dec)

    .text
    .align 2
    .globl EXT(t8002_get_decrementer)
LEXT(t8002_get_decrementer)
    ldr ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
    mov r0, #kAICTmrCnt
    add ip, ip, r0
    ldr r0, [ip] // Get the Decrementer
    bx lr

    .text
    .align 2
    .globl EXT(t8002_set_decrementer)
LEXT(t8002_set_decrementer)
    str r0, [r3, CPU_DECREMENTER] // Save the new dec value
    ldr ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address
    mov r5, #kAICTmrCnt
    str r0, [ip, r5] // Store the new Decrementer
    bx lr
#endif /* defined(ARM_BOARD_CLASS_T8002) */

LOAD_ADDR_GEN_DEF(kernel_pmap_store)

#include "globals_asm.h"

/* vim: set ts=4: */