/*
 * Copyright (c) 2007-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <arm/pmap.h>
#include <sys/errno.h>
#include "assym.s"

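/*
 * void machine_set_current_thread(thread_t)
 *
 * (Summary of the code below.) Installs the given thread as current:
 * TPIDRPRW holds the kernel thread pointer, TPIDRURO exports cthread self
 * to user space while preserving the cpu number kept in its low two bits,
 * and TPIDRURW carries the user-writable TSD value.
 */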
	.align 2
	.globl EXT(machine_set_current_thread)
LEXT(machine_set_current_thread)
	mcr		p15, 0, r0, c13, c0, 4		// Write TPIDRPRW
	ldr		r1, [r0, TH_CTH_SELF]
	mrc		p15, 0, r2, c13, c0, 3		// Read TPIDRURO
	and		r2, r2, #3			// Extract cpu number
	orr		r1, r1, r2			// Combine cthread self with cpu number
	mcr		p15, 0, r1, c13, c0, 3		// Write TPIDRURO
	ldr		r1, [r0, TH_CTH_DATA]
	mcr		p15, 0, r1, c13, c0, 2		// Write TPIDRURW
	bx		lr

/*
 * void machine_idle(void)
 */
	.text
	.align 2
	.globl EXT(machine_idle)
LEXT(machine_idle)
	cpsid	if				// Disable FIQ IRQ
	mov		ip, lr
	bl		EXT(Idle_context)
	mov		lr, ip
	cpsie	if				// Enable FIQ IRQ
	bx		lr

/*
 * void cpu_idle_wfi(boolean_t wfi_fast):
 * cpu_idle is the only function that should call this.
 */
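/*
 * Rough flow, as read from the code below (not an original comment): with
 * wfi_fast, WFI executes immediately and the routine returns after a single
 * delay-loop iteration; otherwise it spins ~32 iterations, executes WFI,
 * then spins ~1200 iterations before returning.
 */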
	.text
	.align 2
	.globl EXT(cpu_idle_wfi)
LEXT(cpu_idle_wfi)
	mov		r1, #32
	mov		r2, #1200
	cmp		r0, #0
	beq		3f
	mov		r1, #1
	b		2f
	.align 5
1:
	add		r0, r0, #1
	mov		r1, r2
2:

/*
 * We export the address of the WFI instruction so that it can be patched; this will be
 * ugly from a debugging perspective.
 */

#if	(__ARM_ARCH__ >= 7)
	dsb
	.globl EXT(wfi_inst)
LEXT(wfi_inst)
	wfi
#else
	mcr		p15, 0, r0, c7, c10, 4
	.globl EXT(wfi_inst)
LEXT(wfi_inst)
	mcr		p15, 0, r0, c7, c0, 4
#endif
3:
	subs	r1, r1, #1
	bne		3b
	nop
	nop
	nop
	nop
	nop
	cmp		r0, #0
	beq		1b
	bx		lr

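/*
 * timer_grab and timer_advance_internal_32 cooperate in a seqlock-like
 * protocol built from three 32-bit words. Roughly, in C (a sketch; the
 * field offsets come from assym.s):
 *
 *	uint64_t timer_grab(timer_t t) {
 *		uint32_t hi, lo;
 *		do {
 *			hi = t->high;
 *			lo = t->low;
 *		} while (hi != t->high_chk);	// writer stores high_chk first, high last
 *		return ((uint64_t)hi << 32) | lo;	// returned in r1:r0
 *	}
 */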
	.align 2
	.globl EXT(timer_grab)
LEXT(timer_grab)
0:
	ldr		r2, [r0, TIMER_HIGH]
	ldr		r3, [r0, TIMER_LOW]
#if	__ARM_SMP__
	dmb		ish				// Order the HIGH/LOW reads before HIGHCHK
#endif
	ldr		r1, [r0, TIMER_HIGHCHK]
	cmp		r1, r2
	bne		0b
	mov		r0, r3
	bx		lr

	.align 2
	.globl EXT(timer_advance_internal_32)
LEXT(timer_advance_internal_32)
	str		r1, [r0, TIMER_HIGHCHK]
#if	__ARM_SMP__
	dmb		ish				// Order HIGHCHK before LOW
#endif
	str		r2, [r0, TIMER_LOW]
#if	__ARM_SMP__
	dmb		ish				// Order LOW before HIGH
#endif
	str		r1, [r0, TIMER_HIGH]
	bx		lr

	.align 2
	.globl EXT(get_vfp_enabled)
LEXT(get_vfp_enabled)
#if	__ARM_VFP__
	fmrx	r0, fpexc
	and		r1, r0, #FPEXC_EN		// Extract vfp enable previous state
	mov		r0, r1, LSR #FPEXC_EN_BIT	// Return 1 if enabled, 0 if disabled
#else
	mov		r0, #0				// return false
#endif
	bx		lr

/* This is no longer useful (but is exported, so this may require kext cleanup). */
	.align 2
	.globl EXT(enable_kernel_vfp_context)
LEXT(enable_kernel_vfp_context)
	bx		lr

/* uint32_t get_fpscr(void):
 * Returns the current state of the FPSCR register.
 */
	.align 2
	.globl EXT(get_fpscr)
LEXT(get_fpscr)
#if	__ARM_VFP__
	fmrx	r0, fpscr
#endif
	bx		lr

	.align 2
	.globl EXT(set_fpscr)
/* void set_fpscr(uint32_t value):
 * Set the FPSCR register.
 */
LEXT(set_fpscr)
#if	__ARM_VFP__
	fmxr	fpscr, r0
#else
	mov		r0, #0
#endif
	bx		lr

/*
 * void OSSynchronizeIO(void)
 */
	.text
	.align 2
	.globl EXT(OSSynchronizeIO)
LEXT(OSSynchronizeIO)
	.align 2
	dsb
	bx		lr

.macro SYNC_TLB_FLUSH
	dsb		ish
	isb
.endmacro

/*
 * void sync_tlb_flush(void)
 *
 * Synchronize one or more prior TLB flush operations
 */
	.text
	.align 2
	.globl EXT(sync_tlb_flush)
LEXT(sync_tlb_flush)
	SYNC_TLB_FLUSH
	bx		lr

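/*
 * Note (summarizing the pattern below): each TLB maintenance routine comes
 * in two flavors. The *_async variant only issues the invalidate, so callers
 * can batch several and then call sync_tlb_flush once; the synchronous
 * variant appends SYNC_TLB_FLUSH (dsb ish + isb) so the invalidate is
 * complete before returning.
 */
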
.macro FLUSH_MMU_TLB
	mov		r0, #0
#if	__ARM_SMP__
	mcr		p15, 0, r0, c8, c3, 0		// Invalidate Inner Shareable entire TLBs
#else
	mcr		p15, 0, r0, c8, c7, 0		// Invalidate entire TLB
#endif
.endmacro

/*
 * void flush_mmu_tlb_async(void)
 *
 * Flush all TLBs, don't wait for completion
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb_async)
LEXT(flush_mmu_tlb_async)
	FLUSH_MMU_TLB
	bx		lr

/*
 * void flush_mmu_tlb(void)
 *
 * Flush all TLBs
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb)
LEXT(flush_mmu_tlb)
	FLUSH_MMU_TLB
	SYNC_TLB_FLUSH
	bx		lr

.macro FLUSH_CORE_TLB
	mov		r0, #0
	mcr		p15, 0, r0, c8, c7, 0		// Invalidate entire TLB
.endmacro

/*
 * void flush_core_tlb_async(void)
 *
 * Flush local core's TLB, don't wait for completion
 */
	.text
	.align 2
	.globl EXT(flush_core_tlb_async)
LEXT(flush_core_tlb_async)
	FLUSH_CORE_TLB
	bx		lr

/*
 * void flush_core_tlb(void)
 *
 * Flush local core's TLB
 */
	.text
	.align 2
	.globl EXT(flush_core_tlb)
LEXT(flush_core_tlb)
	FLUSH_CORE_TLB
	SYNC_TLB_FLUSH
	bx		lr

.macro FLUSH_MMU_TLB_ENTRY
#if	__ARM_SMP__
	mcr		p15, 0, r0, c8, c3, 1		// Invalidate TLB Inner Shareable entry
#else
	mcr		p15, 0, r0, c8, c7, 1		// Invalidate TLB entry
#endif
.endmacro
/*
 * void flush_mmu_tlb_entry_async(uint32_t)
 *
 * Flush TLB entry, don't wait for completion
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb_entry_async)
LEXT(flush_mmu_tlb_entry_async)
	FLUSH_MMU_TLB_ENTRY
	bx		lr

/*
 * void flush_mmu_tlb_entry(uint32_t)
 *
 * Flush TLB entry
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb_entry)
LEXT(flush_mmu_tlb_entry)
	FLUSH_MMU_TLB_ENTRY
	SYNC_TLB_FLUSH
	bx		lr

.macro FLUSH_MMU_TLB_ENTRIES
1:
#if	__ARM_SMP__
	mcr		p15, 0, r0, c8, c3, 1		// Invalidate TLB Inner Shareable entry
#else
	mcr		p15, 0, r0, c8, c7, 1		// Invalidate TLB entry
#endif
	add		r0, r0, ARM_PGBYTES		// Increment to the next page
	cmp		r0, r1				// Loop if current address < end address
	blt		1b
.endmacro

/*
 * void flush_mmu_tlb_entries_async(uint32_t, uint32_t)
 *
 * Flush TLB entries for address range, don't wait for completion
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb_entries_async)
LEXT(flush_mmu_tlb_entries_async)
	FLUSH_MMU_TLB_ENTRIES
	bx		lr

/*
 * void flush_mmu_tlb_entries(uint32_t, uint32_t)
 *
 * Flush TLB entries for address range
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb_entries)
LEXT(flush_mmu_tlb_entries)
	FLUSH_MMU_TLB_ENTRIES
	SYNC_TLB_FLUSH
	bx		lr


.macro FLUSH_MMU_TLB_MVA_ENTRIES
#if	__ARM_SMP__
	mcr		p15, 0, r0, c8, c3, 3		// Invalidate TLB Inner Shareable entries by mva
#else
	mcr		p15, 0, r0, c8, c7, 3		// Invalidate TLB entries by mva
#endif
.endmacro

/*
 * void flush_mmu_tlb_mva_entries_async(uint32_t)
 *
 * Flush TLB entries for mva, don't wait for completion
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb_mva_entries_async)
LEXT(flush_mmu_tlb_mva_entries_async)
	FLUSH_MMU_TLB_MVA_ENTRIES
	bx		lr

/*
 * void flush_mmu_tlb_mva_entries(uint32_t)
 *
 * Flush TLB entries for mva
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb_mva_entries)
LEXT(flush_mmu_tlb_mva_entries)
	FLUSH_MMU_TLB_MVA_ENTRIES
	SYNC_TLB_FLUSH
	bx		lr

.macro FLUSH_MMU_TLB_ASID
#if	__ARM_SMP__
	mcr		p15, 0, r0, c8, c3, 2		// Invalidate TLB Inner Shareable entries by asid
#else
	mcr		p15, 0, r0, c8, c7, 2		// Invalidate TLB entries by asid
#endif
.endmacro

/*
 * void flush_mmu_tlb_asid_async(uint32_t)
 *
 * Flush TLB entries for asid, don't wait for completion
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb_asid_async)
LEXT(flush_mmu_tlb_asid_async)
	FLUSH_MMU_TLB_ASID
	bx		lr

/*
 * void flush_mmu_tlb_asid(uint32_t)
 *
 * Flush TLB entries for asid
 */
	.text
	.align 2
	.globl EXT(flush_mmu_tlb_asid)
LEXT(flush_mmu_tlb_asid)
	FLUSH_MMU_TLB_ASID
	SYNC_TLB_FLUSH
	bx		lr

.macro FLUSH_CORE_TLB_ASID
	mcr		p15, 0, r0, c8, c7, 2		// Invalidate TLB entries by asid
.endmacro

/*
 * void flush_core_tlb_asid_async(uint32_t)
 *
 * Flush local core TLB entries for asid, don't wait for completion
 */
	.text
	.align 2
	.globl EXT(flush_core_tlb_asid_async)
LEXT(flush_core_tlb_asid_async)
	FLUSH_CORE_TLB_ASID
	bx		lr

/*
 * void flush_core_tlb_asid(uint32_t)
 *
 * Flush local core TLB entries for asid
 */
	.text
	.align 2
	.globl EXT(flush_core_tlb_asid)
LEXT(flush_core_tlb_asid)
	FLUSH_CORE_TLB_ASID
	SYNC_TLB_FLUSH
	bx		lr

/*
 * Set MMU Translation Table Base
 */
	.text
	.align 2
	.globl EXT(set_mmu_ttb)
LEXT(set_mmu_ttb)
	orr		r0, r0, #(TTBR_SETUP & 0xFF)	// Setup PTWs memory attribute
	orr		r0, r0, #(TTBR_SETUP & 0xFF00)	// Setup PTWs memory attribute
	mcr		p15, 0, r0, c2, c0, 0		// write r0 to translation table 0
	dsb		ish
	isb
	bx		lr

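/*
 * Note on the paired TTBR_SETUP ORRs above and below: an ARM data-processing
 * immediate is an 8-bit value with an even rotation, so the 16-bit attribute
 * mask is folded in as two separately encodable ORRs rather than one.
 */
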
/*
 * Set MMU Translation Table Base Alternate
 */
	.text
	.align 2
	.globl EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
	orr		r0, r0, #(TTBR_SETUP & 0xFF)	// Setup PTWs memory attribute
	orr		r0, r0, #(TTBR_SETUP & 0xFF00)	// Setup PTWs memory attribute
	mcr		p15, 0, r0, c2, c0, 1		// write r0 to translation table 1
	dsb		ish
	isb
	bx		lr

/*
 * Get MMU Translation Table Base
 */
	.text
	.align 2
	.globl EXT(get_mmu_ttb)
LEXT(get_mmu_ttb)
	mrc		p15, 0, r0, c2, c0, 0		// translation table to r0
	isb
	bx		lr

/*
 * get MMU Auxiliary Control register
 */
	.text
	.align 2
	.globl EXT(get_aux_control)
LEXT(get_aux_control)
	mrc		p15, 0, r0, c1, c0, 1		// read aux control into r0
	bx		lr				// return old bits in r0

/*
 * set MMU Auxiliary Control register
 */
	.text
	.align 2
	.globl EXT(set_aux_control)
LEXT(set_aux_control)
	mcr		p15, 0, r0, c1, c0, 1		// write r0 back to aux control
	isb
	bx		lr


/*
 * get MMU control register
 */
	.text
	.align 2
	.globl EXT(get_mmu_control)
LEXT(get_mmu_control)
	mrc		p15, 0, r0, c1, c0, 0		// read mmu control into r0
	bx		lr				// return old bits in r0

/*
 * set MMU control register
 */
	.text
	.align 2
	.globl EXT(set_mmu_control)
LEXT(set_mmu_control)
	mcr		p15, 0, r0, c1, c0, 0		// write r0 back to mmu control
	isb
	bx		lr

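/*
 * The three translation routines below share a pattern (summarized from the
 * code): write the virtual address to an ATS1C* operation, read the Physical
 * Address Register, and decode it. PAR bit 0 set means the translation
 * aborted; bit 1 set means a 16MB supersection (keep the low 24 bits of the
 * VA), otherwise a section or page mapping (keep the low 12 bits). On
 * failure they return 0.
 */
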
/*
 * MMU kernel virtual to physical address translation
 */
	.text
	.align 2
	.globl EXT(mmu_kvtop)
LEXT(mmu_kvtop)
	mrs		r3, cpsr			// Read cpsr
	cpsid	if				// Disable FIQ IRQ
	mov		r1, r0
	mcr		p15, 0, r1, c7, c8, 0		// Write V2PCWPR
	isb
	mrc		p15, 0, r0, c7, c4, 0		// Read PAR
	ands	r2, r0, #0x1			// Test conversion aborted
	bne		mmu_kvtophys_fail
	ands	r2, r0, #0x2			// Test super section
	mvnne	r2, #0xFF000000
	moveq	r2, #0x000000FF
	orreq	r2, r2, #0x00000F00
	bics	r0, r0, r2			// Clear lower bits
	beq		mmu_kvtophys_fail
	and		r1, r1, r2
	orr		r0, r0, r1
	b		mmu_kvtophys_ret
mmu_kvtophys_fail:
	mov		r0, #0
mmu_kvtophys_ret:
	msr		cpsr, r3			// Restore cpsr
	bx		lr

/*
 * MMU user virtual to physical address translation
 */
	.text
	.align 2
	.globl EXT(mmu_uvtop)
LEXT(mmu_uvtop)
	mrs		r3, cpsr			// Read cpsr
	cpsid	if				// Disable FIQ IRQ
	mov		r1, r0
	mcr		p15, 0, r1, c7, c8, 2		// Write V2PCWUR
	isb
	mrc		p15, 0, r0, c7, c4, 0		// Read PAR
	ands	r2, r0, #0x1			// Test conversion aborted
	bne		mmu_uvtophys_fail
	ands	r2, r0, #0x2			// Test super section
	mvnne	r2, #0xFF000000
	moveq	r2, #0x000000FF
	orreq	r2, r2, #0x00000F00
	bics	r0, r0, r2			// Clear lower bits
	beq		mmu_uvtophys_fail
	and		r1, r1, r2
	orr		r0, r0, r1
	b		mmu_uvtophys_ret
mmu_uvtophys_fail:
	mov		r0, #0
mmu_uvtophys_ret:
	msr		cpsr, r3			// Restore cpsr
	bx		lr

/*
 * MMU kernel virtual to physical address preflight write access
 */
	.text
	.align 2
	.globl EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
	mrs		r3, cpsr			// Read cpsr
	cpsid	if				// Disable FIQ IRQ
	mov		r1, r0
	mcr		p15, 0, r1, c7, c8, 1		// Write V2PCWPW
	isb
	mrc		p15, 0, r0, c7, c4, 0		// Read PAR
	ands	r2, r0, #0x1			// Test conversion aborted
	bne		mmu_kvtophys_wpreflight_fail
	ands	r2, r0, #0x2			// Test super section
	mvnne	r2, #0xFF000000
	moveq	r2, #0x000000FF
	orreq	r2, r2, #0x00000F00
	bics	r0, r0, r2			// Clear lower bits
	beq		mmu_kvtophys_wpreflight_fail	// Sanity check: successful access must deliver zero low bits
	and		r1, r1, r2
	orr		r0, r0, r1
	b		mmu_kvtophys_wpreflight_ret
mmu_kvtophys_wpreflight_fail:
	mov		r0, #0
mmu_kvtophys_wpreflight_ret:
	msr		cpsr, r3			// Restore cpsr
	bx		lr

/*
 * set context id register
 */
	.text
	.align 2
	.globl EXT(set_context_id)
LEXT(set_context_id)
	mcr		p15, 0, r0, c13, c0, 1
	isb
	bx		lr

#define COPYIO_VALIDATE(NAME) \
	/* call NAME_validate to check the arguments */ ;\
	push		{r0, r1, r2, r7, lr} ;\
	add		r7, sp, #12 ;\
	blx		EXT(NAME##_validate) ;\
	cmp		r0, #0 ;\
	addne		sp, #12 ;\
	popne		{r7, pc} ;\
	pop		{r0, r1, r2, r7, lr}

#define COPYIO_SET_RECOVER() \
	/* set recovery address */ ;\
	stmfd		sp!, { r4, r5, r6 } ;\
	adr		r3, copyio_error ;\
	mrc		p15, 0, r12, c13, c0, 4 ;\
	ldr		r4, [r12, TH_RECOVER] ;\
	str		r3, [r12, TH_RECOVER]

#define COPYIO_TRY_KERNEL() \
	/* if (current_thread()->map->pmap == kernel_pmap) copyio_kernel() */ ;\
	mrc		p15, 0, r12, c13, c0, 4		// Read TPIDRPRW ;\
	ldr		r3, [r12, ACT_MAP] ;\
	ldr		r3, [r3, MAP_PMAP] ;\
	LOAD_ADDR(ip, kernel_pmap_store) ;\
	cmp		r3, ip ;\
	beq		copyio_kern_body

#if __ARM_USER_PROTECT__
#define	COPYIO_MAP_USER() \
	/* disable interrupts to prevent expansion to 2GB at L1 ;\
	 * between loading ttep and storing it in ttbr0.*/ ;\
	mrs		r5, cpsr ;\
	cpsid		if ;\
	ldr		r3, [r12, ACT_UPTW_TTB] ;\
	mcr		p15, 0, r3, c2, c0, 0 ;\
	msr		cpsr, r5 ;\
	ldr		r3, [r12, ACT_ASID] ;\
	mcr		p15, 0, r3, c13, c0, 1 ;\
	isb
#else
#define	COPYIO_MAP_USER()
#endif

#define COPYIO_HEADER() ;\
	/* test for zero len */ ;\
	cmp		r2, #0 ;\
	moveq		r0, #0 ;\
	bxeq		lr

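/*
 * Taken together (a summary of the macros above, not an original comment),
 * copyin/copyout run: COPYIO_HEADER (trivial zero-length success),
 * COPYIO_VALIDATE (argument check), COPYIO_TRY_KERNEL (divert to the
 * kernel-map fast path), COPYIO_SET_RECOVER (arm the fault-recovery
 * handler), then COPYIO_MAP_USER / COPYIO_BODY / COPYIO_UNMAP_USER and
 * recovery teardown.
 */
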
.macro COPYIO_BODY
	/* if len is less than 16 bytes, just do a simple copy */
	cmp		r2, #16
	blt		L$0_bytewise
	/* test for src and dest of the same word alignment */
	orr		r3, r0, r1
	tst		r3, #3
	bne		L$0_bytewise
L$0_wordwise:
	sub		r2, r2, #16
L$0_wordwise_loop:
	/* 16 bytes at a time */
	ldmia	r0!, { r3, r5, r6, r12 }
	stmia	r1!, { r3, r5, r6, r12 }
	subs	r2, r2, #16
	bge		L$0_wordwise_loop
	/* fixup the len and test for completion */
	adds	r2, r2, #16
	beq		L$0_noerror
L$0_bytewise:
	/* copy 2 bytes at a time */
	subs	r2, r2, #2
	ldrb	r3, [r0], #1
	ldrbpl	r12, [r0], #1
	strb	r3, [r1], #1
	strbpl	r12, [r1], #1
	bhi		L$0_bytewise
L$0_noerror:
	mov		r0, #0
.endmacro

#if __ARM_USER_PROTECT__
#define	COPYIO_UNMAP_USER() \
	mrc		p15, 0, r12, c13, c0, 4 ;\
	ldr		r3, [r12, ACT_KPTW_TTB] ;\
	mcr		p15, 0, r3, c2, c0, 0 ;\
	mov		r3, #0 ;\
	mcr		p15, 0, r3, c13, c0, 1 ;\
	isb
#else
#define	COPYIO_UNMAP_USER() \
	mrc		p15, 0, r12, c13, c0, 4
#endif

#define COPYIO_RESTORE_RECOVER() \
	/* restore the recovery address */ ;\
	str		r4, [r12, TH_RECOVER] ;\
	ldmfd		sp!, { r4, r5, r6 }

/*
 * int copyinstr(
 *	  const user_addr_t user_addr,
 *	  char *kernel_addr,
 *	  vm_size_t max,
 *	  vm_size_t *actual)
 */
	.text
	.align 2
	.globl EXT(copyinstr)
LEXT(copyinstr)
	cmp		r2, #0
	moveq		r0, #ENAMETOOLONG
	moveq		r12, #0
	streq		r12, [r3]
	bxeq		lr
	COPYIO_VALIDATE(copyin)
	stmfd	sp!, { r4, r5, r6 }

	mov		r6, r3
	adr		r3, copyinstr_error		// Get address for recover
	mrc		p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr		r4, [r12, TH_RECOVER]
	str		r3, [r12, TH_RECOVER]
	COPYIO_MAP_USER()
	mov		r12, #0				// Number of bytes copied so far
copyinstr_loop:
	ldrb		r3, [r0], #1			// Load a byte from the source (user)
	strb		r3, [r1], #1			// Store a byte to the destination (kernel)
	add		r12, r12, #1
	cmp		r3, #0
	beq		copyinstr_done
	cmp		r12, r2				// Room to copy more bytes?
	bne		copyinstr_loop
//
// Ran out of space in the destination buffer, so return ENAMETOOLONG.
//
copyinstr_too_long:
	mov		r3, #ENAMETOOLONG
copyinstr_done:
//
// When we get here, we have finished copying the string.  We came here from
// either the "beq copyinstr_done" above, in which case r3 == 0 (which is also
// the function result for success), or falling through from copyinstr_too_long,
// in which case r3 == ENAMETOOLONG.
//
	str		r12, [r6]			// Save the count for actual
	mov		r0, r3				// Return error code from r3
copyinstr_exit:
	COPYIO_UNMAP_USER()
	str		r4, [r12, TH_RECOVER]
	ldmfd	sp!, { r4, r5, r6 }
	bx		lr

copyinstr_error:
	/* set error, exit routine */
	mov		r0, #EFAULT
	b		copyinstr_exit

/*
 * int copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
 */
	.text
	.align 2
	.globl EXT(copyin)
LEXT(copyin)
	COPYIO_HEADER()
	COPYIO_VALIDATE(copyin)
	COPYIO_TRY_KERNEL()
	COPYIO_SET_RECOVER()
	COPYIO_MAP_USER()
	COPYIO_BODY copyin
	COPYIO_UNMAP_USER()
	COPYIO_RESTORE_RECOVER()
	bx		lr

/*
 * int copyout(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
 */
	.text
	.align 2
	.globl EXT(copyout)
LEXT(copyout)
	COPYIO_HEADER()
	COPYIO_VALIDATE(copyout)
	COPYIO_TRY_KERNEL()
	COPYIO_SET_RECOVER()
	COPYIO_MAP_USER()
	COPYIO_BODY copyout
	COPYIO_UNMAP_USER()
	COPYIO_RESTORE_RECOVER()
	bx		lr


/*
 * int copyin_word(const user_addr_t user_addr, uint64_t *kernel_addr, vm_size_t nbytes)
 */
	.text
	.align 2
	.globl EXT(copyin_word)
LEXT(copyin_word)
	cmp		r2, #4			// Test if size is 4 or 8
	cmpne		r2, #8
	bne		L_copyin_invalid
	sub		r3, r2, #1
	tst		r0, r3			// Test alignment of user address
	bne		L_copyin_invalid

	COPYIO_VALIDATE(copyin)
	COPYIO_SET_RECOVER()
	COPYIO_MAP_USER()

	mov		r3, #0			// Clear high register
	cmp		r2, #4			// If size is 4
	ldreq		r2, [r0]		// Load word from user
	ldrdne		r2, r3, [r0]		// Else Load double word from user
	stm		r1, {r2, r3}		// Store to kernel_addr
	mov		r0, #0			// Success

	COPYIO_UNMAP_USER()
	COPYIO_RESTORE_RECOVER()
	bx		lr
L_copyin_invalid:
	mov		r0, #EINVAL
	bx		lr


copyio_error:
	mov		r0, #EFAULT
	COPYIO_UNMAP_USER()
	str		r4, [r12, TH_RECOVER]
	ldmfd	sp!, { r4, r5, r6 }
	bx		lr

/*
 * int copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
 */
	.text
	.align 2
	.globl EXT(copyin_kern)
LEXT(copyin_kern)
	COPYIO_HEADER()
	b		copyio_kern_body

/*
 * int copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
 */
	.text
	.align 2
	.globl EXT(copyout_kern)
LEXT(copyout_kern)
	COPYIO_HEADER()
	b		copyio_kern_body

copyio_kern_body:
	stmfd	sp!, { r5, r6 }
	COPYIO_BODY copyio_kernel
	ldmfd	sp!, { r5, r6 }
	bx		lr

/*
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr)
 *
 * Safely copy eight bytes (the fixed top of an ARM frame) from
 * either user or kernel memory.
 */
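/*
 * Note: Lcopyin_noerror below is the L$0_noerror label produced by copyin's
 * "COPYIO_BODY copyin" expansion above, so this routine shares copyin's
 * success path (r0 = 0, unmap user, restore recovery, return).
 */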
	.text
	.align 2
	.globl EXT(copyinframe)
LEXT(copyinframe)
	COPYIO_SET_RECOVER()
	COPYIO_MAP_USER()
	ldmia	r0, {r2, r3}
	stmia	r1, {r2, r3}
	b		Lcopyin_noerror

/*
 * uint32_t arm_debug_read_dscr(void)
 */
	.text
	.align 2
	.globl EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)
#if __ARM_DEBUG__ >= 6
	mrc		p14, 0, r0, c0, c1
#else
	mov		r0, #0
#endif
	bx		lr

/*
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 * Set debug registers to match the current thread state
 * (NULL to disable).  Assume 6 breakpoints and 2
 * watchpoints, since that has been the case in all cores
 * thus far.
 */
	.text
	.align 2
	.globl EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
#if __ARM_DEBUG__ >= 6
	mrc		p15, 0, r1, c13, c0, 4		// Read TPIDRPRW
	ldr		r2, [r1, ACT_CPUDATAP]		// Get current cpu
	str		r0, [r2, CPU_USER_DEBUG]	// Set current user debug

	// Lock the debug registers
	movw	ip, #0xCE55
	movt	ip, #0xC5AC
	mcr		p14, 0, ip, c1, c0, 4

	// enable monitor mode (needed to set and use debug registers)
	mrc		p14, 0, ip, c0, c1, 0
	orr		ip, ip, #0x8000			// set MDBGen = 1
#if __ARM_DEBUG__ >= 7
	mcr		p14, 0, ip, c0, c2, 2
#else
	mcr		p14, 0, ip, c0, c1, 0
#endif
	// first turn off all breakpoints/watchpoints
	mov		r1, #0
	mcr		p14, 0, r1, c0, c0, 5		// BCR0
	mcr		p14, 0, r1, c0, c1, 5		// BCR1
	mcr		p14, 0, r1, c0, c2, 5		// BCR2
	mcr		p14, 0, r1, c0, c3, 5		// BCR3
	mcr		p14, 0, r1, c0, c4, 5		// BCR4
	mcr		p14, 0, r1, c0, c5, 5		// BCR5
	mcr		p14, 0, r1, c0, c0, 7		// WCR0
	mcr		p14, 0, r1, c0, c1, 7		// WCR1
	// if (debug_state == NULL) disable monitor mode and return;
	cmp		r0, #0
	biceq	ip, ip, #0x8000			// set MDBGen = 0
#if __ARM_DEBUG__ >= 7
	mcreq	p14, 0, ip, c0, c2, 2
#else
	mcreq	p14, 0, ip, c0, c1, 0
#endif
	bxeq	lr
	ldmia	r0!, {r1, r2, r3, ip}
	mcr		p14, 0, r1, c0, c0, 4		// BVR0
	mcr		p14, 0, r2, c0, c1, 4		// BVR1
	mcr		p14, 0, r3, c0, c2, 4		// BVR2
	mcr		p14, 0, ip, c0, c3, 4		// BVR3
	ldmia	r0!, {r1, r2}
	mcr		p14, 0, r1, c0, c4, 4		// BVR4
	mcr		p14, 0, r2, c0, c5, 4		// BVR5
	add		r0, r0, #40			// advance to bcr[0]
	ldmia	r0!, {r1, r2, r3, ip}
	mcr		p14, 0, r1, c0, c0, 5		// BCR0
	mcr		p14, 0, r2, c0, c1, 5		// BCR1
	mcr		p14, 0, r3, c0, c2, 5		// BCR2
	mcr		p14, 0, ip, c0, c3, 5		// BCR3
	ldmia	r0!, {r1, r2}
	mcr		p14, 0, r1, c0, c4, 5		// BCR4
	mcr		p14, 0, r2, c0, c5, 5		// BCR5
	add		r0, r0, #40			// advance to wvr[0]
	ldmia	r0!, {r1, r2}
	mcr		p14, 0, r1, c0, c0, 6		// WVR0
	mcr		p14, 0, r2, c0, c1, 6		// WVR1
	add		r0, r0, #56			// advance to wcr[0]
	ldmia	r0!, {r1, r2}
	mcr		p14, 0, r1, c0, c0, 7		// WCR0
	mcr		p14, 0, r2, c0, c1, 7		// WCR1

	// Unlock debug registers
	mov		ip, #0
	mcr		p14, 0, ip, c1, c0, 4
#endif
	bx		lr

/*
 * void fiq_context_init(boolean_t enable_fiq)
 */
	.text
	.align 2
	.globl EXT(fiq_context_init)
LEXT(fiq_context_init)
	mrs		r3, cpsr			// Save current CPSR
	cmp		r0, #0				// Test enable_fiq
	bicne	r3, r3, #PSR_FIQF		// Enable FIQ if not FALSE
	mrc		p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr		r2, [r12, ACT_CPUDATAP]		// Get current cpu data

#if __ARM_TIME__
	/* Despite the fact that we use the physical timebase
	 * register as the basis for time on our platforms, we
	 * end up using the virtual timer in order to manage
	 * deadlines.  This is due to the fact that for our
	 * current platforms, the interrupt generated by the
	 * physical timer is not hooked up to anything, and is
	 * therefore dropped on the floor.  Therefore, for
	 * timers to function they MUST be based on the virtual
	 * timer.
	 */

	mov		r0, #1				// Enable Timer
	mcr		p15, 0, r0, c14, c3, 1		// Write to CNTV_CTL

	/* Enable USER access to the physical timebase (PL0PCTEN).
	 * The rationale for providing access to the physical
	 * timebase being that the virtual timebase is broken for
	 * some platforms.  Maintaining the offset ourselves isn't
	 * expensive, so mandate that the userspace implementation
	 * do timebase_phys + offset rather than trying to propagate
	 * all of the information about what works up to USER.
	 */
	mcr		p15, 0, r0, c14, c1, 0		// Set CNTKCTL.PL0PCTEN (CNTKCTL[0])

#else /* ! __ARM_TIME__ */
	msr		cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF)	// Change mode to FIQ with FIQ/IRQ disabled
	mov		r8, r2				// Load the BootCPUData address
	ldr		r9, [r2, CPU_GET_FIQ_HANDLER]	// Load fiq function address
	ldr		r10, [r2, CPU_TBD_HARDWARE_ADDR] // Load the hardware address
	ldr		r11, [r2, CPU_TBD_HARDWARE_VAL]	// Load the hardware value
#endif /* __ARM_TIME__ */

	msr		cpsr_c, r3			// Restore saved CPSR
	bx		lr

/*
 * void reenable_async_aborts(void)
 */
	.text
	.align 2
	.globl EXT(reenable_async_aborts)
LEXT(reenable_async_aborts)
	cpsie	a				// Re-enable async aborts
	bx		lr

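/*
 * ml_get_timebase reads a 64-bit counter with 32-bit loads. In rough C
 * (a sketch of the retry idiom used in both branches below):
 *
 *	do {
 *		hi = tb_high; lo = tb_low; hi2 = tb_high;
 *	} while (hi != hi2);		// retry if the high word rolled over
 *	return ((uint64_t)hi << 32) | lo;
 */
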
/*
 * uint64_t ml_get_timebase(void)
 */
	.text
	.align 2
	.globl EXT(ml_get_timebase)
LEXT(ml_get_timebase)
	mrc		p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr		r3, [r12, ACT_CPUDATAP]		// Get current cpu data
#if __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__
	isb						// Required by ARMV7C.b section B8.1.2, ARMv8 section D6.1.2.
1:
	mrrc	p15, 0, r3, r1, c14		// Read the Time Base (CNTPCT), high => r1
	mrrc	p15, 0, r0, r3, c14		// Read the Time Base (CNTPCT), low => r0
	mrrc	p15, 0, r3, r2, c14		// Read the Time Base (CNTPCT), high => r2
	cmp		r1, r2
	bne		1b				// Loop until both high values are the same

	ldr		r3, [r12, ACT_CPUDATAP]		// Get current cpu data
	ldr		r2, [r3, CPU_BASE_TIMEBASE_LOW]	// Add in the offset to
	adds	r0, r0, r2			// convert to
	ldr		r2, [r3, CPU_BASE_TIMEBASE_HIGH] // mach_absolute_time
	adc		r1, r1, r2			//
#else /* ! __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__ */
1:
	ldr		r2, [r3, CPU_TIMEBASE_HIGH]	// Get the saved TBU value
	ldr		r0, [r3, CPU_TIMEBASE_LOW]	// Get the saved TBL value
	ldr		r1, [r3, CPU_TIMEBASE_HIGH]	// Get the saved TBU value
	cmp		r1, r2				// Make sure TB has not rolled over
	bne		1b
#endif /* __ARM_TIME__ */
	bx		lr				// return


/*
 * uint32_t ml_get_decrementer(void)
 */
	.text
	.align 2
	.globl EXT(ml_get_decrementer)
LEXT(ml_get_decrementer)
	mrc		p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr		r3, [r12, ACT_CPUDATAP]		// Get current cpu data
	ldr		r2, [r3, CPU_GET_DECREMENTER_FUNC]	// Get get_decrementer_func
	cmp		r2, #0
	bxne	r2				// Call it if there is one
#if __ARM_TIME__
	mrc		p15, 0, r0, c14, c3, 0		// Read the Decrementer (CNTV_TVAL)
#else
	ldr		r0, [r3, CPU_DECREMENTER]	// Get the saved dec value
#endif
	bx		lr				// return


/*
 * void ml_set_decrementer(uint32_t dec_value)
 */
	.text
	.align 2
	.globl EXT(ml_set_decrementer)
LEXT(ml_set_decrementer)
	mrc		p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr		r3, [r12, ACT_CPUDATAP]		// Get current cpu data
	ldr		r2, [r3, CPU_SET_DECREMENTER_FUNC]	// Get set_decrementer_func
	cmp		r2, #0
	bxne	r2				// Call it if there is one
#if __ARM_TIME__
	str		r0, [r3, CPU_DECREMENTER]	// Save the new dec value
	mcr		p15, 0, r0, c14, c3, 0		// Write the Decrementer (CNTV_TVAL)
#else
	mrs		r2, cpsr			// Save current CPSR
	msr		cpsr_c, #(PSR_FIQ_MODE|PSR_FIQF|PSR_IRQF)	// Change mode to FIQ with FIQ/IRQ disabled.
	mov		r12, r0				// Set the DEC value
	str		r12, [r8, CPU_DECREMENTER]	// Store DEC
	msr		cpsr_c, r2			// Restore saved CPSR
#endif
	bx		lr


/*
 * boolean_t ml_get_interrupts_enabled(void)
 */
	.text
	.align 2
	.globl EXT(ml_get_interrupts_enabled)
LEXT(ml_get_interrupts_enabled)
	mrs		r2, cpsr
	mov		r0, #1
	bic		r0, r0, r2, lsr #PSR_IRQFb
	bx		lr
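/*
 * (The BIC above computes 1 & ~(CPSR >> PSR_IRQFb), so the result is 1
 * exactly when the IRQ-disable bit is clear, i.e. interrupts are enabled.)
 */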

/*
 * Platform Specific Timebase & Decrementer Functions
 *
 */

#if defined(ARM_BOARD_CLASS_S7002)
	.text
	.align 2
	.globl EXT(fleh_fiq_s7002)
LEXT(fleh_fiq_s7002)
	str		r11, [r10, #PMGR_INTERVAL_TMR_CTL_OFFSET]	// Clear the decrementer interrupt
	mvn		r13, #0
	str		r13, [r8, CPU_DECREMENTER]
	b		EXT(fleh_dec)

	.text
	.align 2
	.globl EXT(s7002_get_decrementer)
LEXT(s7002_get_decrementer)
	ldr		ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	add		ip, ip, #PMGR_INTERVAL_TMR_OFFSET
	ldr		r0, [ip]			// Get the Decrementer
	bx		lr

	.text
	.align 2
	.globl EXT(s7002_set_decrementer)
LEXT(s7002_set_decrementer)
	str		r0, [r3, CPU_DECREMENTER]	// Save the new dec value
	ldr		ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	str		r0, [ip, #PMGR_INTERVAL_TMR_OFFSET]	// Store the new Decrementer
	bx		lr
#endif /* defined(ARM_BOARD_CLASS_S7002) */

#if defined(ARM_BOARD_CLASS_T8002)
	.text
	.align 2
	.globl EXT(fleh_fiq_t8002)
LEXT(fleh_fiq_t8002)
	mov		r13, #kAICTmrIntStat
	str		r11, [r10, r13]			// Clear the decrementer interrupt
	mvn		r13, #0
	str		r13, [r8, CPU_DECREMENTER]
	b		EXT(fleh_dec)

	.text
	.align 2
	.globl EXT(t8002_get_decrementer)
LEXT(t8002_get_decrementer)
	ldr		ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	mov		r0, #kAICTmrCnt
	add		ip, ip, r0
	ldr		r0, [ip]			// Get the Decrementer
	bx		lr

	.text
	.align 2
	.globl EXT(t8002_set_decrementer)
LEXT(t8002_set_decrementer)
	str		r0, [r3, CPU_DECREMENTER]	// Save the new dec value
	ldr		ip, [r3, CPU_TBD_HARDWARE_ADDR]	// Get the hardware address
	mov		r5, #kAICTmrCnt
	str		r0, [ip, r5]			// Store the new Decrementer
	bx		lr
#endif /* defined(ARM_BOARD_CLASS_T8002) */

LOAD_ADDR_GEN_DEF(kernel_pmap_store)

#include	"globals_asm.h"

/* vim: set ts=4: */