/*
 * Copyright (c) 2007-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/asm.h>
#include <arm/proc_reg.h>
#include <mach_kdp.h>
#include "assym.s"
#include "caches_macros.s"

    .text
    .align 12

    .align 2
    .globl EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
    // r0 set to BootArgs phys address
    // r1 set to cpu data phys address
    LOAD_ADDR(lr, arm_init_idle_cpu)
    b       L_start_cpu_0

    .globl EXT(start_cpu)
LEXT(start_cpu)
    // r0 set to BootArgs phys address
    // r1 set to cpu data phys address
    LOAD_ADDR(lr, arm_init_cpu)
    b       L_start_cpu_0
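
    // Both entry points preload lr with the virtual address of their C-level
    // init routine (resume: arm_init_idle_cpu, cold start: arm_init_cpu) and
    // then share L_start_cpu_0; the final "bx lr" in join_start_1 lands in
    // that routine once the MMU is on and we are running virtual.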

L_start_cpu_0:
    cpsid   if                                  // Disable IRQ FIQ

    // Turn on L1 I-Cache, Branch prediction early
    mcr     p15, 0, r11, c7, c5, 0              // invalidate the icache
    isb                                         // before moving on
    mrc     p15, 0, r11, c1, c0, 0              // read mmu control into r11
    orr     r11, r11, #(SCTLR_ICACHE | SCTLR_PREDIC)    // enable i-cache, b-prediction
    mcr     p15, 0, r11, c1, c0, 0              // set mmu control
    dsb                                         // ensure mmu settings are in place
    isb                                         // before moving on
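
    // CP15 decode for the block above: c7,c5,0 is ICIALLU (invalidate all
    // I-cache to the point of unification) and c1,c0,0 is SCTLR. The dsb/isb
    // pair guarantees the SCTLR write has taken effect before any further
    // instruction fetch.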

    // Get the kernel's phys & virt addr, and size from BootArgs
    ldr     r8, [r0, BA_PHYS_BASE]              // Get the phys base in r8
    ldr     r9, [r0, BA_VIRT_BASE]              // Get the virt base in r9
    ldr     r10, [r0, BA_MEM_SIZE]              // Get the mem size in r10

    // Set the base of the translation table into the MMU
    ldr     r4, [r0, BA_TOP_OF_KERNEL_DATA]     // Get the top of kernel data
    orr     r5, r4, #(TTBR_SETUP & 0x00FF)      // Setup PTWs memory attribute
    orr     r5, r5, #(TTBR_SETUP & 0xFF00)      // Setup PTWs memory attribute
    mcr     p15, 0, r5, c2, c0, 0               // write kernel to translation table base 0
    mcr     p15, 0, r5, c2, c0, 1               // also to translation table base 1
    mov     r5, #TTBCR_N_SETUP                  // identify the split between 0 and 1
    mcr     p15, 0, r5, c2, c0, 2               // and set up the translation control reg
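    // c2,c0,{0,1,2} are TTBR0, TTBR1 and TTBCR. The low bits OR'd into the
    // TTBR value (TTBR_SETUP) select the memory attributes used for hardware
    // page table walks; TTBCR.N picks where the TTBR0/TTBR1 split falls in
    // the virtual address space.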
    ldr     r2, [r1, CPU_NUMBER_GS]             // Get cpu number
    mcr     p15, 0, r2, c13, c0, 3              // Write TPIDRURO
    ldr     sp, [r1, CPU_INTSTACK_TOP]          // Get interrupt stack top
    sub     sp, sp, SS_SIZE                     // Set stack pointer
    sub     r0, r1, r8                          // Convert to virtual address
    add     r0, r0, r9
    b       join_start

    .align 2
    .globl EXT(_start)
LEXT(_start)
    // r0 has the boot-args pointer
    // r1 set to zero
    mov     r1, #0
    LOAD_ADDR(lr, arm_init)
    cpsid   if                                  // Disable IRQ FIQ

    // Turn on L1 I-Cache, Branch prediction early
    mcr     p15, 0, r11, c7, c5, 0              // invalidate the icache
    isb                                         // before moving on
    mrc     p15, 0, r11, c1, c0, 0              // read mmu control into r11
    orr     r11, r11, #(SCTLR_ICACHE | SCTLR_PREDIC)    // enable i-cache, b-prediction
    mcr     p15, 0, r11, c1, c0, 0              // set mmu control
    dsb                                         // ensure mmu settings are in place
    isb                                         // before moving on

    // Get the kernel's phys & virt addr, and size from boot_args.
    ldr     r8, [r0, BA_PHYS_BASE]              // Get the phys base in r8
    ldr     r9, [r0, BA_VIRT_BASE]              // Get the virt base in r9
    ldr     r10, [r0, BA_MEM_SIZE]              // Get the mem size in r10

#define LOAD_PHYS_ADDR(reg, label) \
    LOAD_ADDR(reg, label); \
    sub reg, reg, r9; \
    add reg, reg, r8
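
    // LOAD_PHYS_ADDR resolves a kernel symbol to its physical location by
    // undoing the virtual slide: phys = virt - virt_base (r9) + phys_base (r8).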

    // Take this opportunity to patch the targets for the exception vectors
    LOAD_ADDR(r4, fleh_reset)
    LOAD_PHYS_ADDR(r5, ExceptionVectorsTable)
    str     r4, [r5]
    LOAD_ADDR(r4, fleh_undef)
    add     r5, #4
    str     r4, [r5]
    LOAD_ADDR(r4, fleh_swi)
    add     r5, #4
    str     r4, [r5]
    LOAD_ADDR(r4, fleh_prefabt)
    add     r5, #4
    str     r4, [r5]
    LOAD_ADDR(r4, fleh_dataabt)
    add     r5, #4
    str     r4, [r5]
    LOAD_ADDR(r4, fleh_addrexc)
    add     r5, #4
    str     r4, [r5]
    LOAD_ADDR(r4, fleh_irq)
    add     r5, #4
    str     r4, [r5]
    LOAD_ADDR(r4, fleh_decirq)
    add     r5, #4
    str     r4, [r5]
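
    // The eight word-sized slots of ExceptionVectorsTable are written in the
    // architectural vector order: reset, undefined, SVC/SWI, prefetch abort,
    // data abort, address exception (the legacy reserved slot), IRQ, with
    // fleh_decirq occupying the final (FIQ) slot.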

    // arm_init_tramp is sensitive, so for the moment, take the opportunity to store the
    // virtual address locally, so that we don't run into issues retrieving it later.
    // This is a pretty miserable solution, but it should be enough for the moment
    LOAD_ADDR(r4, arm_init_tramp)
    adr     r5, arm_init_tramp_addr
    str     r4, [r5]

#undef LOAD_PHYS_ADDR

    // Set the base of the translation table into the MMU
    ldr     r4, [r0, BA_TOP_OF_KERNEL_DATA]     // Get the top of kernel data
    orr     r5, r4, #(TTBR_SETUP & 0x00FF)      // Setup PTWs memory attribute
    orr     r5, r5, #(TTBR_SETUP & 0xFF00)      // Setup PTWs memory attribute
    mcr     p15, 0, r5, c2, c0, 0               // write kernel to translation table base 0
    mcr     p15, 0, r5, c2, c0, 1               // also to translation table base 1
    mov     r5, #TTBCR_N_SETUP                  // identify the split between 0 and 1
    mcr     p15, 0, r5, c2, c0, 2               // and set up the translation control reg

    // Mark the entries invalid in the 4 page trampoline translation table
    // Mark the entries invalid in the 4 page CPU translation table
    // Mark the entries invalid in the one page table for the final 1MB (if used)
    // Mark the entries invalid in the one page table for HIGH_EXC_VECTORS
    mov     r5, r4                              // local copy of base
    mov     r11, #ARM_TTE_TYPE_FAULT            // invalid entry template
    mov     r2, PGBYTES >> 2                    // number of ttes/page
    add     r2, r2, r2, LSL #2                  // 8 tte pages + 2 pte pages to clear. Multiply by 5...
    mov     r2, r2, LSL #1                      // ...then multiply by 2
invalidate_tte:
    str     r11, [r5]                           // store the invalid tte
    add     r5, r5, #4                          // increment tte pointer
    subs    r2, r2, #1                          // decrement count
    bne     invalidate_tte
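
    // Loop count check: (PGBYTES >> 2) entries per page, times 5, times 2,
    // gives ten pages of entries in all, matching the 4 + 4 + 1 + 1 pages
    // listed above.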

    // create default section tte template
    mov     r6, #ARM_TTE_TYPE_BLOCK             // use block mapping entries
    mov     r7, #(ARM_TTE_BLOCK_ATTRINDX(CACHE_ATTRINDX_DEFAULT) & 0xFF)
    orr     r7, r7, #(ARM_TTE_BLOCK_ATTRINDX(CACHE_ATTRINDX_DEFAULT) & 0xFF00)
    orr     r7, r7, #(ARM_TTE_BLOCK_ATTRINDX(CACHE_ATTRINDX_DEFAULT) & 0xF0000)
    orr     r6, r6, r7                          // with default cache attrs
    mov     r7, #ARM_TTE_BLOCK_AP(AP_RWNA)      // Set kernel rw, user no access
    orr     r7, r7, #(ARM_TTE_BLOCK_AP(AP_RWNA) & 0xFF00)
    orr     r7, r7, #(ARM_TTE_BLOCK_AP(AP_RWNA) & 0xF0000)
    orr     r6, r6, r7                          // Set RWNA protection

    orr     r6, r6, #ARM_TTE_BLOCK_AF           // Set access flag
    orr     r6, r6, #ARM_TTE_BLOCK_SH           // Set shareability
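
    // The byte-at-a-time orr sequences above exist because ARM data-processing
    // immediates are limited to an 8-bit value rotated by an even amount, so
    // wider constants must be assembled in 8-bit chunks.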

    // Set up the V=P mapping for the 1MB section around the current pc
    lsr     r7, pc, #ARM_TT_L1_SHIFT            // Extract tte index for pc addr
    add     r5, r4, r7, LSL #2                  // convert tte index to tte pointer
    lsl     r7, r7, #ARM_TT_L1_SHIFT            // Truncate pc to 1MB aligned addr
    orr     r11, r7, r6                         // make tte entry value
    str     r11, [r5]                           // store tte
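
    // This identity (V=P) section keeps the current pc executable at the
    // moment the MMU is switched on in join_start; we only leave it behind
    // once we branch to a virtual address.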

    // Set up the virtual mapping for the kernel using 1MB direct section TTE entries
    mov     r7, r8                              // Save original phys base
    add     r5, r4, r9, LSR #ARM_TT_L1_SHIFT-2  // convert vaddr to tte pointer
    mov     r3, #ARM_TT_L1_SIZE                 // set 1MB boundary

mapveqp:
    cmp     r3, r10                             // Check if less than 1MB remains
    bgt     mapveqpL2                           // If so, a coarse entry is required

    orr     r11, r7, r6                         // make tte entry value
    str     r11, [r5], #4                       // store tte and move to next
    add     r7, r7, #ARM_TT_L1_SIZE             // move to next phys addr
    subs    r10, r10, #ARM_TT_L1_SIZE           // subtract tte size
    bne     mapveqp
    b       doneveqp                            // end is 1MB aligned, and we're done

mapveqpL2:
    // The end is not 1MB aligned, so steal a page and set up L2 entries within

    // Coarse entry first
    add     r6, r4, PGBYTES * 8                 // add L2 offset
    mov     r11, r6

    orr     r6, #ARM_TTE_TYPE_TABLE             // coarse entry

    str     r6, [r5]                            // store coarse tte entry

    // Fill in the L2 entries
    mov     r5, r11

    // create pte template
    mov     r2, #ARM_PTE_TYPE                   // default pte type
    orr     r2, r2, #(ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT) & 0xff)  // with default cache attrs
    orr     r2, r2, #(ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT) & 0xff00)
    orr     r2, r2, #(ARM_PTE_AP(AP_RWNA) & 0xff)       // set RWNA protection
    orr     r2, r2, #(ARM_PTE_AP(AP_RWNA) & 0xff00)
    orr     r2, r2, #ARM_PTE_AF                 // Set access
    orr     r2, r2, #ARM_PTE_SH                 // Set shareability

storepte:
    orr     r11, r7, r2                         // make pte entry value
    str     r11, [r5], #4                       // store pte and move to next
    add     r7, r7, PGBYTES                     // move to next phys addr
    subs    r10, r10, PGBYTES                   // subtract pte size
    bne     storepte
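
    // Summary of this path: when the memory size is not 1MB aligned, the tail
    // is mapped with 4KB page entries through a coarse L2 table placed 8
    // pages above the boot translation table base (the PGBYTES * 8 offset
    // above).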

doneveqp:
    // Insert page table page for high address exception vectors into translation table
    mov     r5, #0xff000000                     // part of virt HIGH_EXC_VECTORS (HACK!)
    orr     r5, r5, #0x00ff0000                 // rest of virt HIGH_EXC_VECTORS (HACK!)
    mov     r5, r5, LSR #ARM_TT_L1_SHIFT        // convert virt addr to index
    add     r5, r4, r5, LSL #2                  // convert to tte pointer

    add     r6, r4, PGBYTES * 9                 // get page table base (past 4 + 4 + 1 tte/pte pages)
    add     r6, r6, #0xc00                      // adjust to last 1MB section
    mov     r7, #(ARM_TTE_TABLE_MASK & 0xFFFF)  // ARM_TTE_TABLE_MASK low halfword
    movt    r7, #(ARM_TTE_TABLE_MASK >> 16)     // ARM_TTE_TABLE_MASK top halfword
    and     r11, r6, r7                         // apply mask
    orr     r11, r11, #ARM_TTE_TYPE_TABLE       // mark it as a coarse page table
    str     r11, [r5]                           // store tte entry for page table

    // create pte template
    mov     r2, #ARM_PTE_TYPE                   // pte type
    orr     r2, r2, #(ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT) & 0x00ff)    // default cache attrs
    orr     r2, r2, #(ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT) & 0xff00)
    orr     r2, r2, #(ARM_PTE_AP(AP_RWNA) & 0x00ff)     // set RWNA protection
    orr     r2, r2, #(ARM_PTE_AP(AP_RWNA) & 0xff00)
    orr     r2, r2, #ARM_PTE_AF                 // Set access
    orr     r2, r2, #ARM_PTE_SH                 // Set shareability

    // Now initialize the page table entry for the exception vectors
    mov     r5, #0xff000000                     // part of HIGH_EXC_VECTORS
    orr     r5, r5, #0x00ff0000                 // rest of HIGH_EXC_VECTORS
    mov     r7, #(ARM_TT_L2_INDEX_MASK & 0xFFFF)        // ARM_TT_L2_INDEX_MASK low halfword
    movt    r7, #(ARM_TT_L2_INDEX_MASK >> 16)   // ARM_TT_L2_INDEX_MASK top halfword
    and     r5, r5, r7                          // mask for getting index
    mov     r5, r5, LSR #ARM_TT_L2_SHIFT        // get page table index
    add     r5, r6, r5, LSL #2                  // convert to pte pointer

    LOAD_ADDR(r11, ExceptionVectorsBase)        // get address of vectors addr
    sub     r11, r11, r9                        // convert to physical address
    add     r11, r11, r8

    mov     r7, #(ARM_PTE_PAGE_MASK & 0xFFFF)   // ARM_PTE_PAGE_MASK low halfword
    movt    r7, #(ARM_PTE_PAGE_MASK >> 16)      // ARM_PTE_PAGE_MASK top halfword
    and     r11, r11, r7                        // insert masked address into pte
    orr     r11, r11, r2                        // add template bits
    str     r11, [r5]                           // store pte by base and index
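
    // This maps the page holding ExceptionVectorsBase at HIGH_EXC_VECTORS
    // (0xFFFF0000) so that exception entry works once SCTLR_HIGHVEC is set
    // in join_start below.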

    // clean the dcache
    mov     r11, #0
    GET_CACHE_CONFIG r11, r2, r3, r4
    mov     r11, #0
cleanflushway:
cleanflushline:
    mcr     p15, 0, r11, c7, c14, 2             // cleanflush dcache line by way/set
    add     r11, r11, r2                        // increment set index
    tst     r11, r3                             // look for overflow
    beq     cleanflushline
    bic     r11, r11, r3                        // clear set overflow
    adds    r11, r11, r4                        // increment way
    bcc     cleanflushway                       // loop
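
    // Set/way loop mechanics: c7,c14,2 is DCCISW (clean & invalidate data
    // cache line by set/way). As used here, GET_CACHE_CONFIG (from
    // caches_macros.s) is assumed to yield the set increment in r2, the
    // set-field mask in r3, and the way increment in r4, positioned so that
    // way overflow sets the carry flag and ends the loop.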
    HAS_L2_CACHE r11
    cmp     r11, #0
    beq     invall2skipl2dcache
    // Clean & invalidate L2 cache
    mov     r11, #1
    GET_CACHE_CONFIG r11, r2, r3, r4
    mov     r11, #2                             // select cache level 2 (set/way level field is level-1, bits [3:1])
invall2flushway:
invall2flushline:
    mcr     p15, 0, r11, c7, c14, 2             // clean & invalidate dcache line by way/set
    add     r11, r11, r2                        // increment set index
    tst     r11, r3                             // look for overflow
    beq     invall2flushline
    bic     r11, r11, r3                        // clear set overflow
    adds    r11, r11, r4                        // increment way
    bcc     invall2flushway                     // loop
invall2skipl2dcache:
    mov     r11, #0
    mcr     p15, 0, r11, c13, c0, 3             // Write TPIDRURO
    LOAD_ADDR(sp, intstack_top)                 // Get interrupt stack top
    sub     sp, sp, SS_SIZE                     // Set stack pointer
    sub     r0, r0, r8                          // Convert to virtual address
    add     r0, r0, r9

join_start:
    // kernel page table is set up
    // lr set to return handler function virtual address
    // r0 set to return handler argument virtual address
    // sp set to interrupt context stack virtual address

    // Cpu specific configuration

#ifdef ARMA7
#if __ARMA7_SMP__
    mrc     p15, 0, r11, c1, c0, 1
    orr     r11, r11, #(1<<6)                   // SMP
    mcr     p15, 0, r11, c1, c0, 1
    isb
#endif
#endif
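
    // Bit 6 of the auxiliary control register (c1,c0,1) is the Cortex-A SMP
    // bit; it must be set before the caches are enabled so this core takes
    // part in coherency.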

    mrs     r11, cpsr                           // Get cpsr
    bic     r11, #0x100                         // Clear the A bit to allow async aborts
    msr     cpsr_x, r11                         // Update cpsr

    mov     r11, #0
    mcr     p15, 0, r11, c8, c7, 0              // invalidate all TLB entries
    mcr     p15, 0, r11, c7, c5, 0              // invalidate the icache

    // set DACR
    mov     r11, #(ARM_DAC_SETUP & 0xFFFF)      // ARM_DAC_SETUP low halfword
    movt    r11, #(ARM_DAC_SETUP >> 16)         // ARM_DAC_SETUP top halfword
    mcr     p15, 0, r11, c3, c0, 0              // write to dac register

    // Set PRRR
    mov     r11, #(PRRR_SETUP & 0xFFFF)         // PRRR_SETUP low halfword
    movt    r11, #(PRRR_SETUP >> 16)            // PRRR_SETUP top halfword
    mcr     p15, 0, r11, c10, c2, 0             // write to PRRR register

    // Set NMRR
    mov     r11, #(NMRR_SETUP & 0xFFFF)         // NMRR_SETUP low halfword
    movt    r11, #(NMRR_SETUP >> 16)            // NMRR_SETUP top halfword
    mcr     p15, 0, r11, c10, c2, 1             // write to NMRR register
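
    // DACR (c3,c0,0) sets the domain access permissions; PRRR and NMRR
    // (c10,c2,{0,1}) define the memory types used for TEX remap, which the
    // AttrIndx fields in the TTE/PTE templates above refer to once SCTLR_TRE
    // is enabled below.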

    // set SCTLR
    mrc     p15, 0, r11, c1, c0, 0              // read system control

    bic     r11, r11, #SCTLR_ALIGN              // force off alignment exceptions
    mov     r7, #(SCTLR_AFE | SCTLR_TRE)        // Access flag, TEX remap
    orr     r7, r7, #(SCTLR_HIGHVEC | SCTLR_ICACHE | SCTLR_PREDIC)
    orr     r7, r7, #(SCTLR_DCACHE | SCTLR_ENABLE)      // D-cache on, MMU on
#if (__ARM_ENABLE_SWAP__ == 1)
    orr     r7, r7, #SCTLR_SW                   // SWP/SWPB Enable
#endif
    orr     r11, r11, r7                        // or in the default settings
    mcr     p15, 0, r11, c1, c0, 0              // set mmu control

    dsb                                         // ensure mmu settings are in place
    isb                                         // before moving on
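
    // SCTLR_ENABLE turns the MMU on here; the pc is still a physical address,
    // covered by the V=P section installed earlier, so execution continues
    // seamlessly until we branch to a virtual address below.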

#if __ARM_VFP__
    // Initialize the VFP coprocessors.
    mrc     p15, 0, r2, c1, c0, 2               // read coprocessor control register
    mov     r3, #15                             // 0xF
    orr     r2, r2, r3, LSL #20                 // enable 10 and 11
    mcr     p15, 0, r2, c1, c0, 2               // write coprocessor control register
    isb
#endif /* __ARM_VFP__ */
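
    // c1,c0,2 is CPACR; setting bits [23:20] grants full access to
    // coprocessors 10 and 11, the VFP/NEON register file.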

    // Running virtual. Prepare to call init code
    cmp     r1, #0                              // Test if invoked from _start (r1 == 0)
    beq     join_start_1                        // If so, skip the trampoline
    ldr     r7, arm_init_tramp_addr             // Load trampoline address
    bx      r7                                  // Branch to virtual trampoline address

    // Loading the virtual address for arm_init_tramp is a rather ugly
    // problem. There is probably a better solution, but for the moment,
    // patch the address in locally so that loading it is trivial
arm_init_tramp_addr:
    .long   0
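
    // arm_init_tramp below is entered at a virtual pc (via the bx r7 above),
    // so it can safely retarget TTBR0/TTBR1 at the kernel's translation
    // tables, 4 pages past the boot tables, and discard the stale V=P TLB
    // entries.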
    .globl EXT(arm_init_tramp)
LEXT(arm_init_tramp)
    mrc     p15, 0, r5, c2, c0, 0               // Read translation table base 0
    add     r5, r5, PGBYTES * 4                 // get kernel page table base (past 4 boot tte pages)
    mcr     p15, 0, r5, c2, c0, 0               // write kernel to translation table base 0
    mcr     p15, 0, r5, c2, c0, 1               // also to translation table base 1
    isb
    mov     r5, #0
    mcr     p15, 0, r5, c8, c7, 0               // Flush all TLB entries
    dsb                                         // ensure mmu settings are in place
    isb                                         // before moving on

join_start_1:
#if __ARM_VFP__
    // Enable VFP for the bootstrap thread context.
    // VFP is enabled for the arm_init path as we may
    // execute VFP code before we can handle an undef.
    fmrx    r2, fpexc                           // get fpexc
    orr     r2, #FPEXC_EN                       // set the enable bit
    fmxr    fpexc, r2                           // set fpexc
    mov     r2, #FPSCR_DEFAULT                  // set default fpscr
    fmxr    fpscr, r2                           // set fpscr
#endif /* __ARM_VFP__ */

    mov     r7, #0                              // Set stack frame 0
    bx      lr

LOAD_ADDR_GEN_DEF(arm_init)
LOAD_ADDR_GEN_DEF(arm_init_cpu)
LOAD_ADDR_GEN_DEF(arm_init_idle_cpu)
LOAD_ADDR_GEN_DEF(arm_init_tramp)
LOAD_ADDR_GEN_DEF(fleh_reset)
LOAD_ADDR_GEN_DEF(ExceptionVectorsTable)
LOAD_ADDR_GEN_DEF(fleh_undef)
LOAD_ADDR_GEN_DEF(fleh_swi)
LOAD_ADDR_GEN_DEF(fleh_prefabt)
LOAD_ADDR_GEN_DEF(fleh_dataabt)
LOAD_ADDR_GEN_DEF(fleh_addrexc)
LOAD_ADDR_GEN_DEF(fleh_irq)
LOAD_ADDR_GEN_DEF(fleh_decirq)

#include "globals_asm.h"

/* vim: set ts=4: */