/*
 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>

/*
 * The bcopy/memcpy loops, tuned for Pentium-M class processors with SSE3
 * and 64-byte cache lines, such as Core and Core 2.
 *
 * The following #defines are tightly coupled to the u-architecture:
 */

#define kShort      80          // too short to bother with SSE (must be >=80)
#define kVeryLong   (500*1024)  // large enough for non-temporal stores (must be >= 8192)
#define kBigChunk   (256*1024)  // outer loop chunk size for kVeryLong sized operands
#define kFastUCode  (16*1024)   // cutoff for microcode fastpath for "rep/movsl"
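
// For orientation, the copy strategy these thresholds select can be sketched in C
// (illustrative pseudocode only; the helper names are hypothetical, not labels in
// this file):
//
//      void *copy(void *dst, const void *src, size_t len) {
//          if ((uintptr_t)dst - (uintptr_t)src < len)
//              return copy_in_reverse(dst, src, len);        // destination overlaps source
//          if (len <= kShort)
//              return copy_words_then_bytes(dst, src, len);  // SSE setup not worth it
//          if (len >= kVeryLong)
//              return copy_nontemporal(dst, src, len);       // prefetch + movntdq, bypass caches
//          return copy_sse_64byte_chunks(dst, src, len);     // medium: 64 bytes per iteration
//      }
//
// (kFastUCode additionally selects a "rep/movsl" fastpath inside the medium path when
// source and destination are mutually 16-byte aligned.)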

// void bcopy(const void *src, void *dst, size_t len);

Lbcopy_sse3:                            // void bcopy(const void *src, void *dst, size_t len)
        pushl   %ebp                    // set up a frame for backtraces
        movl    8(%ebp),%esi            // get source ptr
        movl    12(%ebp),%edi           // get dest ptr

// void *memcpy(void *dst, const void *src, size_t len);
// void *memmove(void *dst, const void *src, size_t len);
//
// NB: These need to be 32 bytes from bcopy():

Lmemcpy:                                // void *memcpy(void *dst, const void *src, size_t len)
Lmemmove:                               // void *memmove(void *dst, const void *src, size_t len)
        pushl   %ebp                    // set up a frame for backtraces
        movl    8(%ebp),%edi            // get dest ptr
        movl    12(%ebp),%esi           // get source ptr

Ljoin:                                  // here from bcopy() with esi and edi loaded
        movl    16(%ebp),%ecx           // get length
        movl    %edi,%edx               // copy dest ptr
        subl    %esi,%edx               // (dest - source)
        cmpl    %ecx,%edx               // must move in reverse if (dest - source) < length
        jb      LReverseIsland
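
// Note: the single unsigned compare above covers both overlap cases. In C terms
// (illustrative only): when (uintptr_t)dst - (uintptr_t)src < len, the destination
// starts inside the source buffer and the copy must run backwards (the branch to
// LReverseIsland); when dst <= src the subtraction wraps to a large unsigned value,
// so the forward path falls through.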

Lrejoin:                                // here from very-long-operand copies
        cmpl    $(kShort),%ecx          // long enough to bother with SSE?

// Handle short forward copies. As the most common case, this is the fall-through path.
//      ecx = length (<= kShort)

Lshort:
        movl    %ecx,%edx               // copy length
        shrl    $2,%ecx                 // get #doublewords
2:                                      // loop copying doublewords
LLeftovers:                             // handle leftover bytes (0..3) in last word
        andl    $3,%edx                 // any leftover bytes?
4:                                      // loop copying bytes
        movl    8(%ebp),%eax            // get return value (dst ptr) for memcpy/memmove
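
// In outline, the short path is the classic word-then-byte copy (illustrative C,
// not code from this file):
//      uint32_t *d = dst; const uint32_t *s = src;
//      for (size_t i = len >> 2; i != 0; i--) *d++ = *s++;          // doublewords
//      for (size_t i = 0; i < (len & 3); i++)                       // 0..3 leftover bytes
//          ((char *)d)[i] = ((const char *)s)[i];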

LReverseIsland:                         // keep the "jb" above a short branch...
        jmp     LReverse                // ...because reverse moves are uncommon

// Handle forward moves that are long enough to justify use of SSE3.
// First, 16-byte align the destination.
//      ecx = length (> kShort)

        cmpl    $(kVeryLong),%ecx       // long enough to justify heavyweight loops?
        movl    %edi,%edx               // copy destination
        jae     LVeryLong               // use very-long-operand path
        andl    $15,%edx                // get #bytes to align destination
        jz      LDestAligned            // already aligned
        subl    %edx,%ecx               // decrement length
1:                                      // loop copying 1..15 bytes

// Destination is now aligned. Prepare for forward loops over 64-byte chunks.
// Since kShort>=80 and we've moved at most 15 bytes already, there is at least one chunk.

LDestAligned:
        movl    %ecx,%edx               // copy length
        movl    %ecx,%eax               // twice
        andl    $63,%ecx                // get remaining bytes for Lshort
        andl    $-64,%edx               // get number of bytes we will copy in inner loop
        addl    %edx,%esi               // point to 1st byte not copied
        negl    %edx                    // now generate offset to 1st byte to be copied
        testl   $15,%esi                // is source aligned too?
        jnz     LUnalignedLoop          // no
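
// The inner loops below run a negative offset up toward zero: the pointers are
// advanced past the bytes the loop will copy and edx holds minus that byte count,
// so "(reg,%edx)" addresses the next 64 bytes and the loop ends when edx reaches 0.
// Roughly (illustrative C, not code from this file):
//      src += chunk_bytes; dst += chunk_bytes;
//      for (long off = -(long)chunk_bytes; off != 0; off += 64)
//          copy_64_bytes(dst + off, src + off);             // hypothetical helper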

        cmpl    $(kFastUCode),%eax      // long enough for the fastpath in microcode?
        jb      LAlignedLoop            // no, use SSE
        cld                             // we'll move forward
        movl    %eax,%ecx               // copy length again
        shrl    $2,%ecx                 // compute #words to move
        addl    %edx,%esi               // restore ptrs to 1st byte of source and dest
        rep                             // the u-code will optimize this
        movsl
        movl    %eax,%edx               // original length
        jmp     LLeftovers              // handle 0..3 leftover bytes
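
// Note: once the operand reaches kFastUCode bytes and source and destination are
// mutually 16-byte aligned, the copy is handed to the microcoded "rep/movsl" above,
// i.e. len/4 doubleword moves plus 0..3 leftover bytes.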

// Forward aligned loop for medium length operands (kShort < n < kVeryLong).

        .align  4,0x90                  // 16-byte align inner loops
LAlignedLoop:                           // loop over 64-byte chunks
        movdqa  (%esi,%edx),%xmm0
        movdqa  16(%esi,%edx),%xmm1
        movdqa  32(%esi,%edx),%xmm2
        movdqa  48(%esi,%edx),%xmm3

        movdqa  %xmm0,(%edi,%edx)
        movdqa  %xmm1,16(%edi,%edx)
        movdqa  %xmm2,32(%edi,%edx)
        movdqa  %xmm3,48(%edi,%edx)

        jmp     Lshort                  // copy remaining 0..63 bytes and done

// Forward unaligned loop for medium length operands (kShort < n < kVeryLong).
// Note that LDDQU==MOVDQU on these machines, ie we don't care when we cross
// source cache lines.

        .align  4,0x90                  // 16-byte align inner loops
LUnalignedLoop:                         // loop over 64-byte chunks
        movdqu  (%esi,%edx),%xmm0       // the loads are unaligned
        movdqu  16(%esi,%edx),%xmm1
        movdqu  32(%esi,%edx),%xmm2
        movdqu  48(%esi,%edx),%xmm3

        movdqa  %xmm0,(%edi,%edx)       // we can use aligned stores
        movdqa  %xmm1,16(%edi,%edx)
        movdqa  %xmm2,32(%edi,%edx)
        movdqa  %xmm3,48(%edi,%edx)

        jmp     Lshort                  // copy remaining 0..63 bytes and done

// Very long forward moves. These are at least several pages, so we loop over big
// chunks of memory (kBigChunk in size). We first prefetch the chunk, and then copy
// it using non-temporal stores. Hopefully all the reads occur in the prefetch loop,
// so the copy loop reads from L2 and writes directly to memory (with write combining).
// This minimizes bus turnaround and maintains good DRAM page locality.
// Note that for this scheme to work, kVeryLong must be a large fraction of L2 cache
// size. Otherwise, it is counter-productive to bypass L2 on the stores.
//      ecx = length (>= kVeryLong bytes)
//      edi = dest (aligned)
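
// A rough C rendering of the scheme described above, using <emmintrin.h> intrinsics
// (illustrative only; the pointer names and loop structure are assumptions, not code
// from this file; dst is assumed 16-byte aligned as the real code guarantees):
//      for (size_t done = 0; done < big_len; done += chunk) {          // chunk <= kBigChunk
//          for (size_t o = 0; o < chunk; o += 64)                      // 1) touch each cache line
//              (void)((volatile const char *)src)[done + o];           //    to pull the chunk into L2
//          for (size_t o = 0; o < chunk; o += 16)                      // 2) stream it back out,
//              _mm_stream_si128((__m128i *)(dst + done + o),           //    bypassing the caches
//                               _mm_loadu_si128((const __m128i *)(src + done + o)));
//      }
//      _mm_sfence();                                                   // order the non-temporal stores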

LVeryLong:
        pushl   %ebx                    // we'll need to use this
        movl    %edi,%ebx               // copy dest ptr
        andl    $63,%ebx                // get #bytes to cache line align destination
        jz      LBigChunkLoop           // already aligned

// Cache line align destination, so the non-temporal stores in the copy loops are
// aligned and fill whole lines.

        pushl   %ecx                    // save total length remaining
        pushl   %ebx                    // arg3 - #bytes to align destination (1..63)
        pushl   %esi                    // arg2 - source
        pushl   %edi                    // arg1 - dest
        call    Lmemcpy                 // align the destination
        movl    12(%esp),%ecx           // recover total length
        addl    %ebx,%esi               // adjust ptrs and lengths past copy

// Loop over big chunks.
//      ecx = length remaining (>= 4096)
//      edi = dest (64-byte aligned)
//      esi = source (may be unaligned)

LBigChunkLoop:
        movl    $(kBigChunk),%edx       // assume we can do a full chunk
        cmpl    %edx,%ecx               // do we have a full chunk left to do?
        cmovbl  %ecx,%edx               // if not, only move what we have left
        andl    $-4096,%edx             // we work in page multiples
        xor     %eax,%eax               // initialize chunk offset

// Because the source may be unaligned, we use byte loads to touch.
//      ecx = length remaining (including this chunk)
//      edi = ptr to start of dest chunk
//      esi = ptr to start of source chunk
//      edx = chunk length (multiples of pages)
//      ebx = scratch reg used to read a byte of each cache line
//      eax = chunk offset
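
// Each pass below reads one byte from 16 cache lines spread across the page (lines
// k, k+1, k+8, k+9, ..., k+56, k+57), and four passes (k = 0, 2, 4, 6) cover all 64
// lines of a 4KB page. An illustrative C equivalent (not code from this file; "touch"
// is a hypothetical helper that performs a discarded byte load):
//      for (int k = 0; k < 8; k += 2)                 // four interleaved passes
//          for (int g = 0; g < 64; g += 8) {          // eight groups of eight lines
//              touch(page + (g + k) * 64);
//              touch(page + (g + k + 1) * 64);
//          }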

        .align  4,0x90                  // 16-byte align inner loops
LTouchLoop:
        movzb   (%esi,%eax),%ebx        // touch line 0, 2, 4, or 6 of page
        movzb   1*64(%esi,%eax),%ebx    // touch line 1, 3, 5, or 7
        movzb   8*64(%esi,%eax),%ebx    // touch line 8, 10, 12, or 14
        movzb   9*64(%esi,%eax),%ebx    // etc

        movzb   16*64(%esi,%eax),%ebx
        movzb   17*64(%esi,%eax),%ebx
        movzb   24*64(%esi,%eax),%ebx
        movzb   25*64(%esi,%eax),%ebx

        movzb   32*64(%esi,%eax),%ebx
        movzb   33*64(%esi,%eax),%ebx
        movzb   40*64(%esi,%eax),%ebx
        movzb   41*64(%esi,%eax),%ebx

        movzb   48*64(%esi,%eax),%ebx
        movzb   49*64(%esi,%eax),%ebx
        movzb   56*64(%esi,%eax),%ebx
        movzb   57*64(%esi,%eax),%ebx

        subl    $-128,%eax              // next slice of page (adding 128 with an 8-bit immediate)
        testl   $512,%eax               // done with this page?
        jz      LTouchLoop              // no, next of four slices
        addl    $(4096-512),%eax        // move on to next page
        cmpl    %eax,%edx               // done with this chunk?
        jnz     LTouchLoop              // no, do next page

// The chunk has been pre-fetched, now copy it using non-temporal stores.
// There are two copy loops, depending on whether the source is 16-byte aligned.

        addl    %edx,%esi               // increment ptrs by chunk length
        subl    %edx,%ecx               // adjust remaining length
        negl    %edx                    // prepare loop index (counts up to 0)
        testl   $15,%esi                // is source 16-byte aligned?
        jnz     LVeryLongUnaligned      // source is not aligned

        .align  4,0x90                  // 16-byte align inner loops
LVeryLongAligned:                       // aligned loop over 128-bytes
        movdqa  (%esi,%edx),%xmm0
        movdqa  16(%esi,%edx),%xmm1
        movdqa  32(%esi,%edx),%xmm2
        movdqa  48(%esi,%edx),%xmm3
        movdqa  64(%esi,%edx),%xmm4
        movdqa  80(%esi,%edx),%xmm5
        movdqa  96(%esi,%edx),%xmm6
        movdqa  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jmp     LVeryLongChunkEnd

        .align  4,0x90                  // 16-byte align inner loops
LVeryLongUnaligned:                     // unaligned loop over 128-bytes
        movdqu  (%esi,%edx),%xmm0
        movdqu  16(%esi,%edx),%xmm1
        movdqu  32(%esi,%edx),%xmm2
        movdqu  48(%esi,%edx),%xmm3
        movdqu  64(%esi,%edx),%xmm4
        movdqu  80(%esi,%edx),%xmm5
        movdqu  96(%esi,%edx),%xmm6
        movdqu  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jnz     LVeryLongUnaligned

LVeryLongChunkEnd:
        cmpl    $4096,%ecx              // at least another page to go?
        jae     LBigChunkLoop           // yes

        sfence                          // required by non-temporal stores
        jmp     Lrejoin                 // handle remaining (0..4095) bytes

// Handle reverse moves.

LReverse:
        addl    %ecx,%esi               // point to end of strings
        cmpl    $(kShort),%ecx          // long enough to bother with SSE?
        ja      LReverseNotShort        // yes

// Handle reverse short copies.
//      esi = one byte past end of source
//      edi = one byte past end of dest
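
// Conceptually the reverse path is a copy that runs from the ends of the buffers
// toward their starts, so an overlapping destination never overwrites bytes that
// have not been read yet (illustrative C, not code from this file):
//      const char *s = (const char *)src + len;
//      char *d = (char *)dst + len;
//      while (len--) *--d = *--s;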

LReverseShort:
        movl    %ecx,%edx               // copy length
        shrl    $2,%ecx                 // #words
        andl    $3,%edx                 // bytes?
        movl    8(%ebp),%eax            // get return value (dst ptr) for memcpy/memmove

// Handle a reverse move long enough to justify using SSE.
//      esi = one byte past end of source
//      edi = one byte past end of dest

LReverseNotShort:
        movl    %edi,%edx               // copy destination
        andl    $15,%edx                // get #bytes to align destination
        je      LReverseDestAligned     // already aligned
        subl    %edx,%ecx               // adjust length
1:                                      // loop copying 1..15 bytes

// Destination is now aligned. Prepare for reverse loops.

LReverseDestAligned:
        movl    %ecx,%edx               // copy length
        andl    $63,%ecx                // get remaining bytes for LReverseShort
        andl    $-64,%edx               // get number of bytes we will copy in inner loop
        subl    %edx,%esi               // point to endpoint of copy
        testl   $15,%esi                // is source aligned too?
        jnz     LReverseUnalignedLoop   // no
        jmp     LReverseAlignedLoop     // use aligned loop

        .align  4,0x90                  // 16-byte align inner loops
LReverseAlignedLoop:                    // loop over 64-byte chunks
        movdqa  -16(%esi,%edx),%xmm0
        movdqa  -32(%esi,%edx),%xmm1
        movdqa  -48(%esi,%edx),%xmm2
        movdqa  -64(%esi,%edx),%xmm3

        movdqa  %xmm0,-16(%edi,%edx)
        movdqa  %xmm1,-32(%edi,%edx)
        movdqa  %xmm2,-48(%edi,%edx)
        movdqa  %xmm3,-64(%edi,%edx)

        jne     LReverseAlignedLoop

        jmp     LReverseShort           // copy remaining 0..63 bytes and done

// Reverse, unaligned loop. LDDQU==MOVDQU on these machines.

        .align  4,0x90                  // 16-byte align inner loops
LReverseUnalignedLoop:                  // loop over 64-byte chunks
        movdqu  -16(%esi,%edx),%xmm0
        movdqu  -32(%esi,%edx),%xmm1
        movdqu  -48(%esi,%edx),%xmm2
        movdqu  -64(%esi,%edx),%xmm3

        movdqa  %xmm0,-16(%edi,%edx)
        movdqa  %xmm1,-32(%edi,%edx)
        movdqa  %xmm2,-48(%edi,%edx)
        movdqa  %xmm3,-64(%edi,%edx)

        jne     LReverseUnalignedLoop

        jmp     LReverseShort           // copy remaining 0..63 bytes and done
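
// The descriptor below publishes this routine as a _COMM_PAGE_BCOPY implementation.
// By the usual COMMPAGE_DESCRIPTOR convention, the third argument names capability
// bits the CPU must have (SSE2 and 64-byte cache lines) and the fourth names bits it
// must not have (Supplemental SSE3, presumably served by a separate variant).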

        COMMPAGE_DESCRIPTOR(bcopy_sse3,_COMM_PAGE_BCOPY,kHasSSE2+kCache64,kHasSupplementalSSE3)