/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
/*
 * The bcopy/memcpy loops, tuned for Pentium-M class processors with SSE2
 * and 64-byte cache lines, such as Core and Core 2.
 *
 * The following #defines are tightly coupled to the u-architecture:
 */

#define kShort      80          // too short to bother with SSE (must be >=80)
#define kVeryLong   (500*1024)  // large enough for non-temporal stores (must be >= 8192)
#define kBigChunk   (256*1024)  // outer loop chunk size for kVeryLong sized operands
#define kFastUCode  (16*1024)   // cutoff for microcode fastpath for "rep/movsl"
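
// For orientation, a rough C sketch of how these thresholds pick a copy
// strategy (an illustrative reading of the code below, not part of the
// commpage; the helper names are hypothetical and are sketched in later
// comments).  Wrapped in #if 0 so it is never assembled.
#if 0
#include <stddef.h>
#include <stdint.h>

void copy_short_forward(void *dst, const void *src, size_t len);
void copy_medium_sse(void *dst, const void *src, size_t len);
void copy_with_rep_movsl(void *dst, const void *src, size_t len);
void copy_very_long(void *dst, const void *src, size_t len);

void *memcpy_sketch(void *dst, const void *src, size_t len)
{
    /* (The reverse-move decision for overlapping buffers is sketched
     * separately after the Ljoin compare, further down.) */
    if (len <= kShort)
        copy_short_forward(dst, src, len);      /* simple dword/byte loops */
    else if (len >= kVeryLong)
        copy_very_long(dst, src, len);          /* prefetch + movntdq      */
    else if (len >= kFastUCode &&
             ((((uintptr_t)dst - (uintptr_t)src) & 15) == 0))
        copy_with_rep_movsl(dst, src, len);     /* "rep/movsl" microcode,
                                                   once dst is 16B aligned */
    else
        copy_medium_sse(dst, src, len);         /* 64-byte movdqa/movdqu   */
    return dst;
}
#endif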
// void bcopy(const void *src, void *dst, size_t len);

COMMPAGE_FUNCTION_START(bcopy_sse2, 32, 5)
        pushl   %ebp                    // set up a frame for backtraces
        movl    8(%ebp),%esi            // get source ptr
        movl    12(%ebp),%edi           // get dest ptr
// void *memcpy(void *dst, const void *src, size_t len);
// void *memmove(void *dst, const void *src, size_t len);
//
// NB: These need to be 32 bytes from bcopy():

Lmemcpy:                                // void *memcpy(void *dst, const void *src, size_t len)
Lmemmove:                               // void *memmove(void *dst, const void *src, size_t len)
        pushl   %ebp                    // set up a frame for backtraces
        movl    8(%ebp),%edi            // get dest ptr
        movl    12(%ebp),%esi           // get source ptr

Ljoin:                                  // here from bcopy() with esi and edi loaded
        movl    16(%ebp),%ecx           // get length
        subl    %esi,%edx               // (dest - source)
        cmpl    %ecx,%edx               // must move in reverse if (dest - source) < length
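
// The single unsigned compare above is the whole memmove overlap test.  A C
// sketch of the same idea (illustrative only, never assembled):
#if 0
#include <stdint.h>
#include <stddef.h>

/* A reverse (descending) copy is needed only when the destination starts
 * inside the source region, i.e. src < dst < src + len.  Because the
 * subtraction is unsigned, dst < src wraps around to a huge value that
 * compares >= len, so that case correctly takes the forward path. */
static int must_copy_in_reverse(const void *dst, const void *src, size_t len)
{
    return ((uintptr_t)dst - (uintptr_t)src) < (uintptr_t)len;
}
#endif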
Lrejoin:                                // here from very-long-operand copies
        cmpl    $(kShort),%ecx          // long enough to bother with SSE?

// Handle short forward copies. As the most common case, this is the fall-through path.
//      ecx = length (<= kShort)

        movl    %ecx,%edx               // copy length
        shrl    $2,%ecx                 // get #doublewords

2:                                      // loop copying doublewords

LLeftovers:                             // handle leftover bytes (0..3) in last word
        andl    $3,%edx                 // any leftover bytes?

4:                                      // loop copying bytes

        movl    8(%ebp),%eax            // get return value (dst ptr) for memcpy/memmove
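
// A C sketch of these short-copy loops (illustrative only, never assembled;
// like the assembly, it relies on x86 tolerating unaligned 32-bit accesses):
#if 0
#include <stdint.h>
#include <stddef.h>

/* Copy len (<= kShort) bytes forward: doublewords first, then the 0..3
 * leftover bytes, mirroring the "2:" and "4:" loops above. */
static void copy_short_forward(void *dst, const void *src, size_t len)
{
    uint32_t       *d = (uint32_t *)dst;
    const uint32_t *s = (const uint32_t *)src;

    for (size_t words = len >> 2; words != 0; words--)  /* #doublewords */
        *d++ = *s++;

    uint8_t        *db = (uint8_t *)d;
    const uint8_t  *sb = (const uint8_t *)s;
    for (size_t left = len & 3; left != 0; left--)      /* 0..3 bytes   */
        *db++ = *sb++;
}
#endif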
LReverseIsland:                         // keep the "jb" above a short branch...
        jmp     LReverse                // ...because reverse moves are uncommon
// Handle forward moves that are long enough to justify use of SSE2.
// First, 16-byte align the destination.
//      ecx = length (> kShort)

        cmpl    $(kVeryLong),%ecx       // long enough to justify heavyweight loops?
        movl    %edi,%edx               // copy destination
        jae     LVeryLong               // use very-long-operand path
        andl    $15,%edx                // get #bytes to align destination
        jz      LDestAligned            // already aligned
        subl    %edx,%ecx               // decrement length
1:                                      // loop copying 1..15 bytes
// Destination is now aligned. Prepare for forward loops over 64-byte chunks.
// Since kShort>=80 and we've moved at most 15 bytes already, there is at least one chunk.

        movl    %ecx,%edx               // copy length
        movl    %ecx,%eax               // twice
        andl    $63,%ecx                // get remaining bytes for Lshort
        andl    $-64,%edx               // get number of bytes we will copy in inner loop
        addl    %edx,%esi               // point to 1st byte not copied
        negl    %edx                    // now generate offset to 1st byte to be copied
        testl   $15,%esi                // is source aligned too?
        jnz     LUnalignedLoop          // no

        cmpl    $(kFastUCode),%eax      // long enough for the fastpath in microcode?
        jb      LAlignedLoop            // no, use SSE
        cld                             // we'll move forward
        movl    %eax,%ecx               // copy length again
        shrl    $2,%ecx                 // compute #words to move
        addl    %edx,%esi               // restore ptrs to 1st byte of source and dest
        rep                             // the u-code will optimize this
        movl    %eax,%edx               // original length
        jmp     LLeftovers              // handle 0..3 leftover bytes
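
// A C sketch of this "rep/movsl" fastpath (GCC/Clang inline asm; illustrative
// only, never assembled here):
#if 0
#include <stdint.h>
#include <stddef.h>

/* Let the microcode move the bulk: "rep movsl" copies len/4 doublewords
 * forward, then a byte loop finishes the 0..3 leftovers, as LLeftovers
 * does above. */
static void copy_with_rep_movsl(void *dst, const void *src, size_t len)
{
    size_t words = len >> 2;

    __asm__ volatile ("cld\n\t"
                      "rep movsl"
                      : "+D" (dst), "+S" (src), "+c" (words)
                      :
                      : "memory", "cc");

    /* dst/src were advanced by the string move; copy the tail bytes */
    uint8_t       *d = (uint8_t *)dst;
    const uint8_t *s = (const uint8_t *)src;
    for (size_t left = len & 3; left != 0; left--)
        *d++ = *s++;
}
#endif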
// Forward aligned loop for medium length operands (kShort < n < kVeryLong).

        .align  4,0x90                  // 16-byte align inner loops
LAlignedLoop:                           // loop over 64-byte chunks
        movdqa  (%esi,%edx),%xmm0
        movdqa  16(%esi,%edx),%xmm1
        movdqa  32(%esi,%edx),%xmm2
        movdqa  48(%esi,%edx),%xmm3

        movdqa  %xmm0,(%edi,%edx)
        movdqa  %xmm1,16(%edi,%edx)
        movdqa  %xmm2,32(%edi,%edx)
        movdqa  %xmm3,48(%edi,%edx)

        jmp     Lshort                  // copy remaining 0..63 bytes and done
// Forward unaligned loop for medium length operands (kShort < n < kVeryLong).
// Note that LDDQU==MOVDQU on these machines, i.e. we don't care when we cross
// source cache lines.

        .align  4,0x90                  // 16-byte align inner loops
LUnalignedLoop:                         // loop over 64-byte chunks
        movdqu  (%esi,%edx),%xmm0       // the loads are unaligned
        movdqu  16(%esi,%edx),%xmm1
        movdqu  32(%esi,%edx),%xmm2
        movdqu  48(%esi,%edx),%xmm3

        movdqa  %xmm0,(%edi,%edx)       // we can use aligned stores
        movdqa  %xmm1,16(%edi,%edx)
        movdqa  %xmm2,32(%edi,%edx)
        movdqa  %xmm3,48(%edi,%edx)

        jmp     Lshort                  // copy remaining 0..63 bytes and done
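
// A C/intrinsics sketch of this medium-length forward path (illustrative
// only, never assembled; assumes kShort < len < kVeryLong):
#if 0
#include <emmintrin.h>          /* SSE2 intrinsics */
#include <stdint.h>
#include <stddef.h>

/* First copy 1..15 bytes so the destination becomes 16-byte aligned, then
 * move 64 bytes per iteration, using unaligned loads only when the source
 * is not 16-byte aligned (the stores are always aligned), then finish the
 * remaining 0..63 bytes with a byte loop. */
static void copy_medium_sse(void *dst, const void *src, size_t len)
{
    uint8_t       *d = (uint8_t *)dst;
    const uint8_t *s = (const uint8_t *)src;

    size_t head = (16 - ((uintptr_t)d & 15)) & 15;      /* bytes to align dst */
    for (size_t i = 0; i < head; i++)
        *d++ = *s++;
    len -= head;

    int src_aligned = (((uintptr_t)s & 15) == 0);
    for (size_t chunks = len >> 6; chunks != 0; chunks--, s += 64, d += 64) {
        __m128i x0, x1, x2, x3;
        if (src_aligned) {
            x0 = _mm_load_si128((const __m128i *)(s +  0));
            x1 = _mm_load_si128((const __m128i *)(s + 16));
            x2 = _mm_load_si128((const __m128i *)(s + 32));
            x3 = _mm_load_si128((const __m128i *)(s + 48));
        } else {
            x0 = _mm_loadu_si128((const __m128i *)(s +  0));
            x1 = _mm_loadu_si128((const __m128i *)(s + 16));
            x2 = _mm_loadu_si128((const __m128i *)(s + 32));
            x3 = _mm_loadu_si128((const __m128i *)(s + 48));
        }
        _mm_store_si128((__m128i *)(d +  0), x0);       /* dst is aligned     */
        _mm_store_si128((__m128i *)(d + 16), x1);
        _mm_store_si128((__m128i *)(d + 32), x2);
        _mm_store_si128((__m128i *)(d + 48), x3);
    }

    for (size_t left = len & 63; left != 0; left--)     /* remaining 0..63    */
        *d++ = *s++;
}
#endif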
// Very long forward moves. These are at least several pages, so we loop over big
// chunks of memory (kBigChunk in size). We first prefetch the chunk, and then copy
// it using non-temporal stores. Hopefully all the reads occur in the prefetch loop,
// so the copy loop reads from L2 and writes directly to memory (with write combining).
// This minimizes bus turnaround and maintains good DRAM page locality.
// Note that for this scheme to work, kVeryLong must be a large fraction of L2 cache
// size. Otherwise, it is counter-productive to bypass L2 on the stores.
//      ecx = length (>= kVeryLong bytes)
//      edi = dest (aligned)
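
// A C skeleton of this very-long scheme (illustrative only, never assembled;
// touch_chunk() and stream_chunk() are hypothetical helpers sketched after
// the touch loop and the non-temporal copy loops below):
#if 0
#include <emmintrin.h>
#include <stdint.h>
#include <stddef.h>

void touch_chunk(const void *src, size_t chunk);             /* prefetch pass */
void stream_chunk(void *dst, const void *src, size_t chunk); /* movntdq pass  */

/* Assumes dst has already been 64-byte aligned (the code below reuses
 * Lmemcpy for that) and len >= kVeryLong.  Each kBigChunk piece is read
 * once to pull it into L2, then written with non-temporal stores. */
static void copy_very_long(void *dst, const void *src, size_t len)
{
    uint8_t       *d = (uint8_t *)dst;
    const uint8_t *s = (const uint8_t *)src;

    while (len >= 4096) {
        size_t chunk = (len < kBigChunk ? len : kBigChunk) & ~(size_t)4095;
        touch_chunk(s, chunk);          /* pull the chunk into L2           */
        stream_chunk(d, s, chunk);      /* stream it out, bypassing caches  */
        s += chunk;  d += chunk;  len -= chunk;
    }
    _mm_sfence();                       /* order the non-temporal stores    */
    /* the final 0..4095 bytes go back through the normal path (Lrejoin)    */
}
#endif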
        pushl   %ebx                    // we'll need to use this
        movl    %edi,%ebx               // copy dest ptr
        andl    $63,%ebx                // get #bytes to cache line align destination
        jz      LBigChunkLoop           // already aligned
// Cache line align destination, so non-temporal stores in the copy loops work right.

        pushl   %ecx                    // save total length remaining
        pushl   %ebx                    // arg3 - #bytes to align destination (1..63)
        pushl   %esi                    // arg2 - source
        pushl   %edi                    // arg1 - dest
        call    Lmemcpy                 // align the destination
        movl    12(%esp),%ecx           // recover total length
        addl    %ebx,%esi               // adjust ptrs and lengths past copy
// Loop over big chunks.
//      ecx = length remaining (>= 4096)
//      edi = dest (64-byte aligned)
//      esi = source (may be unaligned)

        movl    $(kBigChunk),%edx       // assume we can do a full chunk
        cmpl    %edx,%ecx               // do we have a full chunk left to do?
        cmovbl  %ecx,%edx               // if not, only move what we have left
        andl    $-4096,%edx             // we work in page multiples
        xor     %eax,%eax               // initialize chunk offset
// Because the source may be unaligned, we use byte loads to touch.
//      ecx = length remaining (including this chunk)
//      edi = ptr to start of dest chunk
//      esi = ptr to start of source chunk
//      edx = chunk length (multiples of pages)
//      ebx = scratch reg used to read a byte of each cache line
//      eax = chunk offset
        .align  4,0x90                  // 16-byte align inner loops
        movzb   (%esi,%eax),%ebx        // touch line 0, 2, 4, or 6 of page
        movzb   1*64(%esi,%eax),%ebx    // touch line 1, 3, 5, or 7
        movzb   8*64(%esi,%eax),%ebx    // touch line 8, 10, 12, or 14
        movzb   9*64(%esi,%eax),%ebx    // etc

        movzb   16*64(%esi,%eax),%ebx
        movzb   17*64(%esi,%eax),%ebx
        movzb   24*64(%esi,%eax),%ebx
        movzb   25*64(%esi,%eax),%ebx

        movzb   32*64(%esi,%eax),%ebx
        movzb   33*64(%esi,%eax),%ebx
        movzb   40*64(%esi,%eax),%ebx
        movzb   41*64(%esi,%eax),%ebx

        movzb   48*64(%esi,%eax),%ebx
        movzb   49*64(%esi,%eax),%ebx
        movzb   56*64(%esi,%eax),%ebx
        movzb   57*64(%esi,%eax),%ebx

        subl    $-128,%eax              // next slice of page (adding 128 with an 8-bit immediate)
        testl   $512,%eax               // done with this page?
        jz      LTouchLoop              // no, next of four slices
        addl    $(4096-512),%eax        // move on to next page
        cmpl    %eax,%edx               // done with this chunk?
        jnz     LTouchLoop              // no, do next page
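
// A C sketch of this touch (prefetch) pass (illustrative only, never
// assembled; the assembly interleaves its reads across each 4KB page in
// 128-byte steps over four 512-byte slices to keep several cache misses in
// flight, whereas this sketch only shows the net effect):
#if 0
#include <stdint.h>
#include <stddef.h>

/* Read one byte of every 64-byte cache line in the chunk so the lines land
 * in L2 before the copy pass; the volatile sink keeps the loads alive. */
static void touch_chunk(const void *src, size_t chunk)
{
    const uint8_t   *s = (const uint8_t *)src;
    volatile uint8_t sink;

    for (size_t off = 0; off < chunk; off += 64)
        sink = s[off];
    (void)sink;
}
#endif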
// The chunk has been pre-fetched, now copy it using non-temporal stores.
// There are two copy loops, depending on whether the source is 16-byte aligned or not.

        addl    %edx,%esi               // increment ptrs by chunk length
        subl    %edx,%ecx               // adjust remaining length
        negl    %edx                    // prepare loop index (counts up to 0)
        testl   $15,%esi                // is source 16-byte aligned?
        jnz     LVeryLongUnaligned      // source is not aligned
        .align  4,0x90                  // 16-byte align inner loops
LVeryLongAligned:                       // aligned loop over 128-bytes
        movdqa  (%esi,%edx),%xmm0
        movdqa  16(%esi,%edx),%xmm1
        movdqa  32(%esi,%edx),%xmm2
        movdqa  48(%esi,%edx),%xmm3
        movdqa  64(%esi,%edx),%xmm4
        movdqa  80(%esi,%edx),%xmm5
        movdqa  96(%esi,%edx),%xmm6
        movdqa  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jmp     LVeryLongChunkEnd
        .align  4,0x90                  // 16-byte align inner loops
LVeryLongUnaligned:                     // unaligned loop over 128-bytes
        movdqu  (%esi,%edx),%xmm0
        movdqu  16(%esi,%edx),%xmm1
        movdqu  32(%esi,%edx),%xmm2
        movdqu  48(%esi,%edx),%xmm3
        movdqu  64(%esi,%edx),%xmm4
        movdqu  80(%esi,%edx),%xmm5
        movdqu  96(%esi,%edx),%xmm6
        movdqu  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jnz     LVeryLongUnaligned

        cmpl    $4096,%ecx              // at least another page to go?
        jae     LBigChunkLoop           // yes

        sfence                          // required by non-temporal stores
        jmp     Lrejoin                 // handle remaining (0..4095) bytes
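
// A C/intrinsics sketch of one non-temporal copy pass (illustrative only,
// never assembled; the caller issues the sfence once all chunks are done,
// as the code above does):
#if 0
#include <emmintrin.h>
#include <stdint.h>
#include <stddef.h>

/* Copy one prefetched chunk: 128 bytes per iteration, unaligned loads when
 * the source is not 16-byte aligned, and _mm_stream_si128 (movntdq) stores
 * that bypass the caches.  chunk is a multiple of 4096 and dst is 64-byte
 * aligned, matching the assembly. */
static void stream_chunk(void *dst, const void *src, size_t chunk)
{
    uint8_t       *d = (uint8_t *)dst;
    const uint8_t *s = (const uint8_t *)src;
    int src_aligned = (((uintptr_t)s & 15) == 0);

    for (size_t off = 0; off < chunk; off += 128) {
        __m128i x[8];
        for (int i = 0; i < 8; i++) {
            const __m128i *p = (const __m128i *)(s + off + 16*i);
            x[i] = src_aligned ? _mm_load_si128(p) : _mm_loadu_si128(p);
        }
        for (int i = 0; i < 8; i++)
            _mm_stream_si128((__m128i *)(d + off + 16*i), x[i]);
    }
}
#endif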
        addl    %ecx,%esi               // point to end of strings
        cmpl    $(kShort),%ecx          // long enough to bother with SSE?
        ja      LReverseNotShort        // yes

// Handle reverse short copies.
//      esi = one byte past end of source
//      edi = one byte past end of dest

        movl    %ecx,%edx               // copy length
        shrl    $2,%ecx                 // #words

        andl    $3,%edx                 // bytes?

        movl    8(%ebp),%eax            // get return value (dst ptr) for memcpy/memmove
// Handle a reverse move long enough to justify using SSE.
//      esi = one byte past end of source
//      edi = one byte past end of dest

        movl    %edi,%edx               // copy destination
        andl    $15,%edx                // get #bytes to align destination
        je      LReverseDestAligned     // already aligned
        subl    %edx,%ecx               // adjust length
1:                                      // loop copying 1..15 bytes

// Destination is now aligned. Prepare for reverse loops.

        movl    %ecx,%edx               // copy length
        andl    $63,%ecx                // get remaining bytes for Lshort
        andl    $-64,%edx               // get number of bytes we will copy in inner loop
        subl    %edx,%esi               // point to endpoint of copy
        testl   $15,%esi                // is source aligned too?
        jnz     LReverseUnalignedLoop   // no
        jmp     LReverseAlignedLoop     // use aligned loop
        .align  4,0x90                  // 16-byte align inner loops
LReverseAlignedLoop:                    // loop over 64-byte chunks
        movdqa  -16(%esi,%edx),%xmm0
        movdqa  -32(%esi,%edx),%xmm1
        movdqa  -48(%esi,%edx),%xmm2
        movdqa  -64(%esi,%edx),%xmm3

        movdqa  %xmm0,-16(%edi,%edx)
        movdqa  %xmm1,-32(%edi,%edx)
        movdqa  %xmm2,-48(%edi,%edx)
        movdqa  %xmm3,-64(%edi,%edx)

        jne     LReverseAlignedLoop

        jmp     LReverseShort           // copy remaining 0..63 bytes and done
// Reverse, unaligned loop. LDDQU==MOVDQU on these machines.

        .align  4,0x90                  // 16-byte align inner loops
LReverseUnalignedLoop:                  // loop over 64-byte chunks
        movdqu  -16(%esi,%edx),%xmm0
        movdqu  -32(%esi,%edx),%xmm1
        movdqu  -48(%esi,%edx),%xmm2
        movdqu  -64(%esi,%edx),%xmm3

        movdqa  %xmm0,-16(%edi,%edx)
        movdqa  %xmm1,-32(%edi,%edx)
        movdqa  %xmm2,-48(%edi,%edx)
        movdqa  %xmm3,-64(%edi,%edx)

        jne     LReverseUnalignedLoop

        jmp     LReverseShort           // copy remaining 0..63 bytes and done
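
// A C/intrinsics sketch of this reverse (descending) path (illustrative only,
// never assembled; both pointers start one byte past the end, as in the
// assembly, and only the SSE shape is shown):
#if 0
#include <emmintrin.h>
#include <stdint.h>
#include <stddef.h>

/* Align the end of the destination to 16 bytes, step backwards 64 bytes at
 * a time (unaligned loads only if the source end is not 16-byte aligned),
 * then finish the remaining 0..63 bytes with a byte loop. */
static void copy_reverse(void *dst_end, const void *src_end, size_t len)
{
    uint8_t       *d = (uint8_t *)dst_end;
    const uint8_t *s = (const uint8_t *)src_end;

    size_t head = (uintptr_t)d & 15;                    /* 0..15 tail bytes   */
    if (head > len)
        head = len;
    for (size_t i = 0; i < head; i++)
        *--d = *--s;
    len -= head;

    int src_aligned = (((uintptr_t)s & 15) == 0);
    for (size_t chunks = len >> 6; chunks != 0; chunks--) {
        s -= 64;  d -= 64;
        __m128i x0, x1, x2, x3;
        if (src_aligned) {
            x0 = _mm_load_si128((const __m128i *)(s + 48));
            x1 = _mm_load_si128((const __m128i *)(s + 32));
            x2 = _mm_load_si128((const __m128i *)(s + 16));
            x3 = _mm_load_si128((const __m128i *)(s +  0));
        } else {
            x0 = _mm_loadu_si128((const __m128i *)(s + 48));
            x1 = _mm_loadu_si128((const __m128i *)(s + 32));
            x2 = _mm_loadu_si128((const __m128i *)(s + 16));
            x3 = _mm_loadu_si128((const __m128i *)(s +  0));
        }
        _mm_store_si128((__m128i *)(d + 48), x0);       /* dst end is aligned */
        _mm_store_si128((__m128i *)(d + 32), x1);
        _mm_store_si128((__m128i *)(d + 16), x2);
        _mm_store_si128((__m128i *)(d +  0), x3);
    }

    for (size_t left = len & 63; left != 0; left--)     /* remaining 0..63    */
        *--d = *--s;
}
#endif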
        COMMPAGE_DESCRIPTOR(bcopy_sse2,_COMM_PAGE_BCOPY,kHasSSE2+kCache64,kHasSupplementalSSE3)