/*
 * Copyright (C) 2009 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#include "ARMAssembler.h"

namespace JSC {

// Patching helpers

void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
{
    ARMWord* ldr = reinterpret_cast<ARMWord*>(loadAddr);
    ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
    ARMWord index = (*ldr & 0xfff) >> 1;

    ASSERT(diff >= 1);
    if (diff >= 2 || index > 0) {
        diff = (diff + index - 2) * sizeof(ARMWord);
        ASSERT(diff <= 0xfff);
        *ldr = (*ldr & ~0xfff) | diff;
    } else
        *ldr = (*ldr & ~(0xfff | ARMAssembler::DataTransferUp)) | sizeof(ARMWord);
}
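
// Illustrative note (not from the original source): until the pool is
// flushed, the placeholder LDR keeps its constant's pool slot index in the
// low 12 bits (shifted left by one), which is what the shift above recovers.
// A literal load reads from PC + offset, and on ARM the PC is two words past
// the instruction, hence the "- 2": with the pool starting 4 words after the
// LDR and slot index 1, the patched byte offset is
// (4 + 1 - 2) * sizeof(ARMWord) = 12. The else branch covers the one slot
// that lands a word behind PC, encoded as a downward transfer of 4 bytes.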

// Handle immediates

ARMWord ARMAssembler::getOp2(ARMWord imm)
{
    int rol;

    if (imm <= 0xff)
        return Op2Immediate | imm;

    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol = 8;
    } else {
        imm = (imm << 24) | (imm >> 8);
        rol = 0;
    }

    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol += 4;
    }

    if ((imm & 0xf0000000) == 0) {
        imm <<= 4;
        rol += 2;
    }

    if ((imm & 0xc0000000) == 0) {
        imm <<= 2;
        rol += 1;
    }

    if ((imm & 0x00ffffff) == 0)
        return Op2Immediate | (imm >> 24) | (rol << 8);

    return InvalidImmediate;
}
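
// Worked example (illustrative; not from the original source): for
// getOp2(0x3fc0) the shifts above move the value up by 18 bits in total
// (8 + 8 + 2), ending with imm == 0xff000000 and rol == 13 (8 + 4 + 1).
// The function returns Op2Immediate | 0xff | (13 << 8): the byte 0xff
// rotated right by 2 * 13 == 26 bits, which is 0xff << 6 == 0x3fc0.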

int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
{
    // Step 1: Search for a run of eight zero bits on an even bit boundary
    // and rotate it down to the least significant byte, so the remaining
    // bits can be split into two 8-bit rotated immediates.
    ARMWord mask;
    ARMWord imm1;
    ARMWord imm2;
    int rol;

    mask = 0xff000000;
    rol = 8;
    while (1) {
        if ((imm & mask) == 0) {
            imm = (imm << rol) | (imm >> (32 - rol));
            rol = 4 + (rol >> 1);
            break;
        }
        rol += 2;
        mask >>= 2;
        if (mask & 0x3) {
            // Rotate left by 8 and retry in the wrapped-around positions.
            imm = (imm << 8) | (imm >> 24);
            mask = 0xff00;
            rol = 24;
            while (1) {
                if ((imm & mask) == 0) {
                    imm = (imm << rol) | (imm >> (32 - rol));
                    rol = (rol >> 1) - 8;
                    break;
                }
                rol += 2;
                mask >>= 2;
                if (mask & 0x3)
                    return 0;
            }
            break;
        }
    }

    ASSERT((imm & 0xff) == 0);

    if ((imm & 0xff000000) == 0) {
        imm1 = Op2Immediate | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
        imm2 = Op2Immediate | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
    } else if (imm & 0xc0000000) {
        imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xff000000) == 0) {
            imm <<= 8;
            rol += 4;
        }

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    } else {
        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    }

    if (positive) {
        mov(reg, imm1);
        orr(reg, reg, imm2);
    } else {
        mvn(reg, imm1);
        bic(reg, reg, imm2);
    }

    return 1;
}
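
// Worked example (illustrative; not from the original source):
// genInt(reg, 0xffff, true) rotates the zero byte to the bottom
// (imm becomes 0x00ffff00, rol == 8) and splits the rest into
//     mov reg, #0xff00    (encoded as 0xff rotated right by 24)
//     orr reg, reg, #0xff
// With positive == false the same split is emitted as mvn/bic instead.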

ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
{
    ARMWord tmp;

    // Try to encode it with a single instruction.
    tmp = getOp2(imm);
    if (tmp != InvalidImmediate)
        return tmp;

    tmp = getOp2(~imm);
    if (tmp != InvalidImmediate) {
        if (invert)
            return tmp | Op2InvertedImmediate;
        mvn(tmpReg, tmp);
        return tmpReg;
    }

    return encodeComplexImm(imm, tmpReg);
}
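
// Usage note (illustrative; not from the original source): for
// getImm(0xffffff00, tmpReg, false), the value itself is not a valid
// operand-2 immediate but its complement 0xff is, so mvn tmpReg, #0xff is
// emitted and the register is returned. With invert == true the caller gets
// Op2InvertedImmediate | getOp2(0xff) back and is expected to switch to the
// inverting form of the instruction (e.g. BIC instead of AND) itself.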

void ARMAssembler::moveImm(ARMWord imm, int dest)
{
    ARMWord tmp;

    // Try to encode it with a single instruction.
    tmp = getOp2(imm);
    if (tmp != InvalidImmediate) {
        mov(dest, tmp);
        return;
    }

    tmp = getOp2(~imm);
    if (tmp != InvalidImmediate) {
        mvn(dest, tmp);
        return;
    }

    encodeComplexImm(imm, dest);
}
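
// Illustrative note (not from the original source): moveImm(0xffffff00, dest)
// emits a single mvn dest, #0xff, while a value such as 0x12345678 fails both
// getOp2 probes and falls through to encodeComplexImm below.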

ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
{
#if WTF_ARM_ARCH_AT_LEAST(7)
    ARMWord tmp = getImm16Op2(imm);
    if (tmp != InvalidImmediate) {
        movw(dest, tmp);
        return dest;
    }
    movw(dest, getImm16Op2(imm & 0xffff));
    movt(dest, getImm16Op2(imm >> 16));
    return dest;
#else
    // Try to encode it with two instructions.
    if (genInt(dest, imm, true))
        return dest;
    if (genInt(dest, ~imm, false))
        return dest;

    ldrImmediate(dest, imm);
    return dest;
#endif
}
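
// Worked example (illustrative; not from the original source): on ARMv7,
// encodeComplexImm(0x12345678, dest) emits
//     movw dest, #0x5678
//     movt dest, #0x1234
// On earlier architectures the same value defeats genInt in both polarities
// and is therefore loaded from the constant pool via ldrImmediate.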

// Memory load/store helpers

void ARMAssembler::dataTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, int32_t offset)
{
    if (offset >= 0) {
        if (offset <= 0xfff)
            dtrUp(transferType, srcDst, base, offset);
        else if (offset <= 0xfffff) {
            add(ARMRegisters::S0, base, Op2Immediate | (offset >> 12) | (10 << 8));
            dtrUp(transferType, srcDst, ARMRegisters::S0, (offset & 0xfff));
        } else {
            moveImm(offset, ARMRegisters::S0);
            dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
        }
    } else {
        if (offset >= -0xfff)
            dtrDown(transferType, srcDst, base, -offset);
        else if (offset >= -0xfffff) {
            sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 12) | (10 << 8));
            dtrDown(transferType, srcDst, ARMRegisters::S0, (-offset & 0xfff));
        } else {
            moveImm(offset, ARMRegisters::S0);
            dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
        }
    }
}
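
// Worked example (illustrative; not from the original source): a transfer
// with offset 0x12345 takes the middle branch. The operand-2 immediate
// (0x12345 >> 12) | (10 << 8) encodes 0x12 rotated right by 20 bits,
// i.e. 0x12000, so the emitted sequence is effectively
//     add s0, base, #0x12000
//     ldr/str srcDst, [s0, #0x345]
// where s0 stands for the scratch register ARMRegisters::S0.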

void ARMAssembler::baseIndexTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
{
    ASSERT(scale >= 0 && scale <= 3);
    ARMWord op2 = lsl(index, scale);

    if (!offset) {
        dtrUpRegister(transferType, srcDst, base, op2);
        return;
    }

    if (offset <= 0xfffff && offset >= -0xfffff) {
        add(ARMRegisters::S0, base, op2);
        dataTransfer32(transferType, srcDst, ARMRegisters::S0, offset);
        return;
    }

    moveImm(offset, ARMRegisters::S0);
    add(ARMRegisters::S0, ARMRegisters::S0, op2);
    dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
}
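
// Illustrative note (not from the original source): for base + (index << 2)
// with a small offset such as 4, the offset fits the +/-0xfffff window, so
// this reduces to
//     add s0, base, index, lsl #2
//     ldr/str srcDst, [s0, #4]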

void ARMAssembler::dataTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, int32_t offset)
{
    if (offset >= 0) {
        if (offset <= 0xff)
            halfDtrUp(transferType, srcDst, base, getOp2Half(offset));
        else if (offset <= 0xffff) {
            add(ARMRegisters::S0, base, Op2Immediate | (offset >> 8) | (12 << 8));
            halfDtrUp(transferType, srcDst, ARMRegisters::S0, getOp2Half(offset & 0xff));
        } else {
            moveImm(offset, ARMRegisters::S0);
            halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
        }
    } else {
        if (offset >= -0xff)
            halfDtrDown(transferType, srcDst, base, getOp2Half(-offset));
        else if (offset >= -0xffff) {
            sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 8) | (12 << 8));
            halfDtrDown(transferType, srcDst, ARMRegisters::S0, getOp2Half(-offset & 0xff));
        } else {
            moveImm(offset, ARMRegisters::S0);
            halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
        }
    }
}
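
// Illustrative note (not from the original source): half-word transfers only
// carry an 8-bit immediate, so the split happens at a byte boundary here.
// For offset 0x1234, (0x1234 >> 8) | (12 << 8) encodes 0x12 rotated right by
// 24 bits, i.e. 0x1200, giving add s0, base, #0x1200 followed by a half-word
// access at [s0, #0x34].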

void ARMAssembler::baseIndexTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
{
    if (!scale && !offset) {
        halfDtrUpRegister(transferType, srcDst, base, index);
        return;
    }

    ARMWord op2 = lsl(index, scale);

    if (offset <= 0xffff && offset >= -0xffff) {
        add(ARMRegisters::S0, base, op2);
        dataTransfer16(transferType, srcDst, ARMRegisters::S0, offset);
        return;
    }

    moveImm(offset, ARMRegisters::S0);
    add(ARMRegisters::S0, ARMRegisters::S0, op2);
    halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
}

void ARMAssembler::dataTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, int32_t offset)
{
    // VFP cannot directly access memory that is not four-byte-aligned.
    if (!(offset & 0x3)) {
        if (offset <= 0x3ff && offset >= 0) {
            doubleDtrUp(transferType, srcDst, base, offset >> 2);
            return;
        }
        if (offset <= 0x3ffff && offset >= 0) {
            add(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
            doubleDtrUp(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
            return;
        }
        offset = -offset;

        if (offset <= 0x3ff && offset >= 0) {
            doubleDtrDown(transferType, srcDst, base, offset >> 2);
            return;
        }
        if (offset <= 0x3ffff && offset >= 0) {
            sub(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
            doubleDtrDown(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
            return;
        }
        offset = -offset;
    }

    moveImm(offset, ARMRegisters::S0);
    add(ARMRegisters::S0, ARMRegisters::S0, base);
    doubleDtrUp(transferType, srcDst, ARMRegisters::S0, 0);
}
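
// Illustrative note (not from the original source): the VFP immediate offset
// is an 8-bit word count, so aligned offsets up to 0x3fc bytes are reachable
// directly. For offset 0x1404, (0x1404 >> 10) | (11 << 8) encodes 5 rotated
// right by 22 bits, i.e. 0x1400, giving
//     add s0, base, #0x1400
//     vldr/vstr srcDst, [s0, #4]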

void ARMAssembler::baseIndexTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
{
    add(ARMRegisters::S1, base, lsl(index, scale));
    dataTransferFloat(transferType, srcDst, ARMRegisters::S1, offset);
}

PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort)
{
    // 64-bit alignment is required for the next constant pool and for the JIT code as well.
    m_buffer.flushWithoutBarrier(true);
    if (!m_buffer.isAligned(8))
        bkpt(0);

    RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(vm, ownerUID, effort);
    char* data = reinterpret_cast<char*>(result->start());

    for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
        // The least significant bit is set if the constant must be placed in the constant pool.
        int pos = (iter->m_offset) & (~0x1);
        ARMWord* ldrAddr = reinterpret_cast_ptr<ARMWord*>(data + pos);
        ARMWord* addr = getLdrImmAddress(ldrAddr);
        if (*addr != InvalidBranchTarget) {
            if (!(iter->m_offset & 1)) {
                intptr_t difference = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetchOffset);

                if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
                    *ldrAddr = B | getConditionalField(*ldrAddr) | (difference & BranchOffsetMask);
                    continue;
                }
            }
            *addr = reinterpret_cast<ARMWord>(data + *addr);
        }
    }

    return result;
}
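
// Illustrative note (not from the original source): a jump is recorded as a
// constant-pool load (e.g. ldr pc, [pc, #offset]). When the final target
// turns out to be close enough, the load is rewritten above into a direct
//     b target
// using the B instruction's signed 24-bit word offset (roughly +/-32MB);
// otherwise the pool slot is patched with the absolute target address.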

#if OS(LINUX) && COMPILER(RVCT)

__asm void ARMAssembler::cacheFlush(void* code, size_t size)
{
    ARM
    push {r7}
    add r1, r1, r0      ; r1 = end address (start + size)
    mov r7, #0xf0000    ; __ARM_NR_cacheflush is __ARM_NR_BASE (0xf0000) + 2
    add r7, r7, #0x2
    mov r2, #0x0        ; flags must be zero
    svc #0x0            ; invoke the cacheflush system call
    pop {r7}
    bx lr
}

#endif

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)