/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/cpu_capabilities.h>
#include <platfunc.h>

/*
 * The bcopy/memcpy loops, tuned for Pentium-M class processors with SSE2
 * and 64-byte cache lines, such as Core and Core 2.
 *
 * The following #defines are tightly coupled to the u-architecture:
 */

#define kShort      80          // too short to bother with SSE (must be >=80)
#define kVeryLong   (500*1024)  // large enough for non-temporal stores (must be >= 8192)
#define kBigChunk   (256*1024)  // outer loop chunk size for kVeryLong sized operands
#define kFastUCode  (16*1024)   // cutoff for microcode fastpath for "rep/movsl"

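// In outline, the length of the operand selects the strategy (for the
// forward, non-overlapping case):
//
//   len <= kShort              simple doubleword/byte loops (Lshort)
//   kShort < len < kVeryLong   64-byte SSE2 loops; if source and dest are both
//                              16-byte aligned and the length is at least
//                              kFastUCode, the microcoded "rep/movsl" path
//                              is used instead
//   len >= kVeryLong           page-at-a-time chunks: touch (prefetch) the
//                              source, then stream it out with non-temporal
//                              stores (LVeryLong)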

// void bcopy(const void *src, void *dst, size_t len);

PLATFUNC_FUNCTION_START(bcopy, sse2, 32, 5)
        pushl %ebp // set up a frame for backtraces
        movl %esp,%ebp
        pushl %esi
        pushl %edi
        movl 8(%ebp),%esi // get source ptr
        movl 12(%ebp),%edi // get dest ptr
        jmp Ljoin

//
// void *memcpy(void *dst, const void *src, size_t len);
// void *memmove(void *dst, const void *src, size_t len);
//

PLATFUNC_FUNCTION_START(memcpy, sse2, 32, 0) // void *memcpy(void *dst, const void *src, size_t len)
PLATFUNC_FUNCTION_START(memmove, sse2, 32, 0) // void *memmove(void *dst, const void *src, size_t len)
Lmemcpy_sse2:
        pushl %ebp // set up a frame for backtraces
        movl %esp,%ebp
        pushl %esi
        pushl %edi
        movl 8(%ebp),%edi // get dest ptr
        movl 12(%ebp),%esi // get source ptr

Ljoin: // here from bcopy() with esi and edi loaded
        movl 16(%ebp),%ecx // get length
        movl %edi,%edx
        subl %esi,%edx // (dest - source)
        cmpl %ecx,%edx // must move in reverse if (dest - source) < length
        jb LReverseIsland
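
// Note: the test above is unsigned, so the reverse path is taken exactly when
// the destination lies within [source, source + length), the one case where a
// forward copy would overwrite source bytes before reading them.  When the
// destination is below the source, the subtraction wraps to a huge value and
// such (possibly overlapping) moves still go forward, which is safe.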
Lrejoin: // here from very-long-operand copies
        cmpl $(kShort),%ecx // long enough to bother with SSE?
        ja LNotShort // yes

// Handle short forward copies. As the most common case, this is the fall-through path.
// ecx = length (<= kShort)
// esi = source ptr
// edi = dest ptr

Lshort:
        movl %ecx,%edx // copy length
        shrl $2,%ecx // get #doublewords
        jz LLeftovers
2: // loop copying doublewords
        movl (%esi),%eax
        addl $4,%esi
        movl %eax,(%edi)
        addl $4,%edi
        dec %ecx
        jnz 2b
LLeftovers: // handle leftover bytes (0..3) in last word
        andl $3,%edx // any leftover bytes?
        jz 5f
4: // loop copying bytes
        movb (%esi),%al
        inc %esi
        movb %al,(%edi)
        inc %edi
        dec %edx
        jnz 4b
5:
        movl 8(%ebp),%eax // get return value (dst ptr) for memcpy/memmove
        popl %edi
        popl %esi
        popl %ebp
        ret


LReverseIsland: // keep the "jb" above a short branch...
        jmp LReverse // ...because reverse moves are uncommon


// Handle forward moves that are long enough to justify use of SSE2.
// First, 16-byte align the destination.
// ecx = length (> kShort)
// esi = source ptr
// edi = dest ptr

LNotShort:
        cmpl $(kVeryLong),%ecx // long enough to justify heavyweight loops?
        movl %edi,%edx // copy destination
        jae LVeryLong // use very-long-operand path
        negl %edx
        andl $15,%edx // get #bytes to align destination
        jz LDestAligned // already aligned
        subl %edx,%ecx // decrement length
1: // loop copying 1..15 bytes
        movb (%esi),%al
        inc %esi
        movb %al,(%edi)
        inc %edi
        dec %edx
        jnz 1b

// Destination is now aligned. Prepare for forward loops over 64-byte chunks.
// Since kShort>=80 and we've moved at most 15 bytes already, there is at least one chunk.

LDestAligned:
        movl %ecx,%edx // copy length
        movl %ecx,%eax // twice
        andl $63,%ecx // get remaining bytes for Lshort
        andl $-64,%edx // get number of bytes we will copy in inner loop
        addl %edx,%esi // point to 1st byte not copied
        addl %edx,%edi
        negl %edx // now generate offset to 1st byte to be copied
        testl $15,%esi // is source aligned too?
        jnz LUnalignedLoop // no
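// At this point edx is the negated byte count for the inner loop: esi/edi
// already point past the last 64-byte chunk, so (%esi,%edx) and (%edi,%edx)
// address the first byte to copy, and the loops below just count edx up to 0.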


        cmpl $(kFastUCode),%eax // long enough for the fastpath in microcode?
        jb LAlignedLoop // no, use SSE
        cld // we'll move forward
        movl %eax,%ecx // copy length again
        shrl $2,%ecx // compute #doublewords to move
        addl %edx,%esi // restore ptrs to 1st byte of source and dest
        addl %edx,%edi
        rep // the u-code will optimize this
        movsl
        movl %eax,%edx // original length
        jmp LLeftovers // handle 0..3 leftover bytes


// Forward aligned loop for medium length operands (kShort < n < kVeryLong).

        .align 4,0x90 // 16-byte align inner loops
LAlignedLoop: // loop over 64-byte chunks
        movdqa (%esi,%edx),%xmm0
        movdqa 16(%esi,%edx),%xmm1
        movdqa 32(%esi,%edx),%xmm2
        movdqa 48(%esi,%edx),%xmm3

        movdqa %xmm0,(%edi,%edx)
        movdqa %xmm1,16(%edi,%edx)
        movdqa %xmm2,32(%edi,%edx)
        movdqa %xmm3,48(%edi,%edx)

        addl $64,%edx
        jnz LAlignedLoop

        jmp Lshort // copy remaining 0..63 bytes and done


// Forward unaligned loop for medium length operands (kShort < n < kVeryLong).
// Note that LDDQU==MOVDQU on these machines, ie we don't care when we cross
// source cache lines.

        .align 4,0x90 // 16-byte align inner loops
LUnalignedLoop: // loop over 64-byte chunks
        movdqu (%esi,%edx),%xmm0 // the loads are unaligned
        movdqu 16(%esi,%edx),%xmm1
        movdqu 32(%esi,%edx),%xmm2
        movdqu 48(%esi,%edx),%xmm3

        movdqa %xmm0,(%edi,%edx) // we can use aligned stores
        movdqa %xmm1,16(%edi,%edx)
        movdqa %xmm2,32(%edi,%edx)
        movdqa %xmm3,48(%edi,%edx)

        addl $64,%edx
        jnz LUnalignedLoop

        jmp Lshort // copy remaining 0..63 bytes and done


// Very long forward moves. These are at least several pages, so we loop over big
// chunks of memory (kBigChunk in size.) We first prefetch the chunk, and then copy
// it using non-temporal stores. Hopefully all the reads occur in the prefetch loop,
// so the copy loop reads from L2 and writes directly to memory (with write combining.)
// This minimizes bus turnaround and maintains good DRAM page locality.
// Note that for this scheme to work, kVeryLong must be a large fraction of L2 cache
// size. Otherwise, it is counter-productive to bypass L2 on the stores.
// ecx = length (>= kVeryLong bytes)
// edi = dest (not necessarily aligned)
// esi = source

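// In rough C-like pseudocode (a sketch of the strategy, not literal code;
// "load16"/"store_nt" stand in for the 16-byte SSE2 loads and movntdq):
//
//      while (len >= 4096) {
//          chunk = min(len, kBigChunk) & ~4095;        // whole pages only
//          for (i = 0; i < chunk; i += 64)             // pass 1: touch one byte per
//              (void) src[i];                          //   cache line (see LTouchLoop)
//          for (i = 0; i < chunk; i += 16)             // pass 2: stream chunk to memory
//              store_nt(dst + i, load16(src + i));     //   with non-temporal stores
//          src += chunk; dst += chunk; len -= chunk;
//      }
//      sfence();                                       // then finish the tail via Lrejoin
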
LVeryLong:
        pushl %ebx // we'll need to use this
        movl %edi,%ebx // copy dest ptr
        negl %ebx
        andl $63,%ebx // get #bytes to cache line align destination
        jz LBigChunkLoop // already aligned

// Cache line align destination, so non-temporal stores in copy loops work right.

        pushl %ecx // save total length remaining
        pushl %ebx // arg3 - #bytes to align destination (1..63)
        pushl %esi // arg2 - source
        pushl %edi // arg1 - dest
        call Lmemcpy_sse2 // align the destination
        movl 12(%esp),%ecx // recover total length (saved just above the three argument words)
        addl $16,%esp
        addl %ebx,%esi // adjust ptrs and lengths past copy
        addl %ebx,%edi
        subl %ebx,%ecx

// Loop over big chunks.
// ecx = length remaining (>= 4096)
// edi = dest (64-byte aligned)
// esi = source (may be unaligned)

LBigChunkLoop:
        movl $(kBigChunk),%edx // assume we can do a full chunk
        cmpl %edx,%ecx // do we have a full chunk left to do?
        cmovbl %ecx,%edx // if not, only move what we have left
        andl $-4096,%edx // we work in page multiples
        xor %eax,%eax // initialize chunk offset
        jmp LTouchLoop

// Because the source may be unaligned, we use byte loads to touch.
// ecx = length remaining (including this chunk)
// edi = ptr to start of dest chunk
// esi = ptr to start of source chunk
// edx = chunk length (multiples of pages)
// ebx = scratch reg used to read a byte of each cache line
// eax = chunk offset

        .align 4,0x90 // 16-byte align inner loops
LTouchLoop:
        movzb (%esi,%eax),%ebx // touch line 0, 2, 4, or 6 of page
        movzb 1*64(%esi,%eax),%ebx // touch line 1, 3, 5, or 7
        movzb 8*64(%esi,%eax),%ebx // touch line 8, 10, 12, or 14
        movzb 9*64(%esi,%eax),%ebx // etc

        movzb 16*64(%esi,%eax),%ebx
        movzb 17*64(%esi,%eax),%ebx
        movzb 24*64(%esi,%eax),%ebx
        movzb 25*64(%esi,%eax),%ebx

        movzb 32*64(%esi,%eax),%ebx
        movzb 33*64(%esi,%eax),%ebx
        movzb 40*64(%esi,%eax),%ebx
        movzb 41*64(%esi,%eax),%ebx

        movzb 48*64(%esi,%eax),%ebx
        movzb 49*64(%esi,%eax),%ebx
        movzb 56*64(%esi,%eax),%ebx
        movzb 57*64(%esi,%eax),%ebx

        subl $-128,%eax // next slice of page (adding 128 w 8-bit immediate)
        testl $512,%eax // done with this page?
        jz LTouchLoop // no, next of four slices
        addl $(4096-512),%eax // move on to next page
        cmpl %eax,%edx // done with this chunk?
        jnz LTouchLoop // no, do next page
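// (Each pass of LTouchLoop above reads one byte from 16 of the page's 64
// cache lines; %eax advances by 128 per pass, so four passes touch every
// line of the page exactly once.  The interleaved order presumably helps
// keep several cache-line misses in flight at once.)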

// The chunk has been pre-fetched, now copy it using non-temporal stores.
// There are two copy loops, depending on whether the source is 16-byte aligned
// or not.

        addl %edx,%esi // increment ptrs by chunk length
        addl %edx,%edi
        subl %edx,%ecx // adjust remaining length
        negl %edx // prepare loop index (counts up to 0)
        testl $15,%esi // is source 16-byte aligned?
        jnz LVeryLongUnaligned // source is not aligned
        jmp LVeryLongAligned

        .align 4,0x90 // 16-byte align inner loops
LVeryLongAligned: // aligned loop over 128-bytes
        movdqa (%esi,%edx),%xmm0
        movdqa 16(%esi,%edx),%xmm1
        movdqa 32(%esi,%edx),%xmm2
        movdqa 48(%esi,%edx),%xmm3
        movdqa 64(%esi,%edx),%xmm4
        movdqa 80(%esi,%edx),%xmm5
        movdqa 96(%esi,%edx),%xmm6
        movdqa 112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl $-128,%edx // add 128 with an 8-bit immediate
        jnz LVeryLongAligned
        jmp LVeryLongChunkEnd

        .align 4,0x90 // 16-byte align inner loops
LVeryLongUnaligned: // unaligned loop over 128-bytes
        movdqu (%esi,%edx),%xmm0
        movdqu 16(%esi,%edx),%xmm1
        movdqu 32(%esi,%edx),%xmm2
        movdqu 48(%esi,%edx),%xmm3
        movdqu 64(%esi,%edx),%xmm4
        movdqu 80(%esi,%edx),%xmm5
        movdqu 96(%esi,%edx),%xmm6
        movdqu 112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl $-128,%edx // add 128 with an 8-bit immediate
        jnz LVeryLongUnaligned

LVeryLongChunkEnd:
        cmpl $4096,%ecx // at least another page to go?
        jae LBigChunkLoop // yes

        sfence // required by non-temporal stores
        popl %ebx
        jmp Lrejoin // handle remaining (0..4095) bytes


// Reverse moves.
// ecx = length
// esi = source ptr
// edi = dest ptr

LReverse:
        addl %ecx,%esi // point to end of strings
        addl %ecx,%edi
        cmpl $(kShort),%ecx // long enough to bother with SSE?
        ja LReverseNotShort // yes

// Handle reverse short copies.
// ecx = length
// esi = one byte past end of source
// edi = one byte past end of dest

LReverseShort:
        movl %ecx,%edx // copy length
        shrl $2,%ecx // #doublewords
        jz 3f
1:
        subl $4,%esi
        movl (%esi),%eax
        subl $4,%edi
        movl %eax,(%edi)
        dec %ecx
        jnz 1b
3:
        andl $3,%edx // bytes?
        jz 5f
4:
        dec %esi
        movb (%esi),%al
        dec %edi
        movb %al,(%edi)
        dec %edx
        jnz 4b
5:
        movl 8(%ebp),%eax // get return value (dst ptr) for memcpy/memmove
        popl %edi
        popl %esi
        popl %ebp
        ret

// Handle a reverse move long enough to justify using SSE.
// ecx = length
// esi = one byte past end of source
// edi = one byte past end of dest

LReverseNotShort:
        movl %edi,%edx // copy destination
        andl $15,%edx // get #bytes to align destination
        je LReverseDestAligned // already aligned
        subl %edx,%ecx // adjust length
1: // loop copying 1..15 bytes
        dec %esi
        movb (%esi),%al
        dec %edi
        movb %al,(%edi)
        dec %edx
        jnz 1b

// Destination is now aligned. Prepare for reverse loops.

LReverseDestAligned:
        movl %ecx,%edx // copy length
        andl $63,%ecx // get remaining bytes for Lshort
        andl $-64,%edx // get number of bytes we will copy in inner loop
        subl %edx,%esi // point to endpoint of copy
        subl %edx,%edi
        testl $15,%esi // is source aligned too?
        jnz LReverseUnalignedLoop // no
        jmp LReverseAlignedLoop // use aligned loop
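// In the reverse loops below edx counts down from the inner-loop byte count
// to zero; since esi/edi were backed up by that amount, the negative
// displacements address the 64 bytes just below (%esi,%edx) and (%edi,%edx).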

        .align 4,0x90 // 16-byte align inner loops
LReverseAlignedLoop: // loop over 64-byte chunks
        movdqa -16(%esi,%edx),%xmm0
        movdqa -32(%esi,%edx),%xmm1
        movdqa -48(%esi,%edx),%xmm2
        movdqa -64(%esi,%edx),%xmm3

        movdqa %xmm0,-16(%edi,%edx)
        movdqa %xmm1,-32(%edi,%edx)
        movdqa %xmm2,-48(%edi,%edx)
        movdqa %xmm3,-64(%edi,%edx)

        subl $64,%edx
        jne LReverseAlignedLoop

        jmp LReverseShort // copy remaining 0..63 bytes and done


// Reverse, unaligned loop. LDDQU==MOVDQU on these machines.

        .align 4,0x90 // 16-byte align inner loops
LReverseUnalignedLoop: // loop over 64-byte chunks
        movdqu -16(%esi,%edx),%xmm0
        movdqu -32(%esi,%edx),%xmm1
        movdqu -48(%esi,%edx),%xmm2
        movdqu -64(%esi,%edx),%xmm3

        movdqa %xmm0,-16(%edi,%edx)
        movdqa %xmm1,-32(%edi,%edx)
        movdqa %xmm2,-48(%edi,%edx)
        movdqa %xmm3,-64(%edi,%edx)

        subl $64,%edx
        jne LReverseUnalignedLoop

        jmp LReverseShort // copy remaining 0..63 bytes and done

PLATFUNC_DESCRIPTOR(bcopy,sse2,kHasSSE2|kCache64,kHasSupplementalSSE3)
PLATFUNC_DESCRIPTOR(memcpy,sse2,kHasSSE2|kCache64,kHasSupplementalSSE3)
PLATFUNC_DESCRIPTOR(memmove,sse2,kHasSSE2|kCache64,kHasSupplementalSSE3)