/*
 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
/*
 * The bcopy/memcpy loops, tuned for Pentium-M class processors with SSE3
 * and 64-byte cache lines, such as Core and Core 2.
 *
 * The following #defines are tightly coupled to the u-architecture:
 */

#define kShort      80          // too short to bother with SSE (must be >= 80)
#define kVeryLong   (500*1024)  // large enough for non-temporal stores (must be >= 8192)
#define kBigChunk   (256*1024)  // outer loop chunk size for kVeryLong sized operands
#define kFastUCode  (16*1024)   // cutoff for microcode fastpath for "rep/movsl"
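// For orientation, the thresholds above route a forward copy roughly as in
// this C sketch (hypothetical helper names; the real dispatch is the code below):
//
//      if (len <= kShort)                      // small: plain dword/byte loops
//              copy_words_and_bytes(dst, src, len);
//      else if (len < kVeryLong)               // medium: 64-byte SSE chunks, or
//              copy_chunks_sse(dst, src, len); //  "rep/movsl" when src and dst are
//                                              //  16-byte aligned and len >= kFastUCode
//      else                                    // huge: prefetch each chunk, then copy
//              copy_chunks_nontemporal(dst, src, len);   //  it with non-temporal stores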
// void bcopy(const void *src, void *dst, size_t len);

Lbcopy_sse3:                            // void bcopy(const void *src, void *dst, size_t len)
        pushl   %ebp                    // set up a frame for backtraces
        movl    8(%ebp),%esi            // get source ptr
        movl    12(%ebp),%edi           // get dest ptr
// void *memcpy(void *dst, const void *src, size_t len);
// void *memmove(void *dst, const void *src, size_t len);
//
// NB: These need to be 32 bytes from bcopy():

Lmemcpy:                                // void *memcpy(void *dst, const void *src, size_t len)
Lmemmove:                               // void *memmove(void *dst, const void *src, size_t len)
        pushl   %ebp                    // set up a frame for backtraces
        movl    8(%ebp),%edi            // get dest ptr
        movl    12(%ebp),%esi           // get source ptr

Ljoin:                                  // here from bcopy() with esi and edi loaded
        movl    16(%ebp),%ecx           // get length
        movl    %edi,%edx               // copy dest ptr
        subl    %esi,%edx               // (dest - source)
        cmpl    %ecx,%edx               // must move in reverse if (dest - source) < length
        jb      LReverseIsland
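// The single unsigned compare above decides the copy direction; in C it is
// roughly (a sketch, not the actual source):
//
//      if ((uintptr_t)dst - (uintptr_t)src < len)
//              copy_backwards(dst, src, len);  // dst lies inside [src, src+len)
//      else
//              copy_forwards(dst, src, len);   // no destructive overlap
//
// When dst < src the subtraction wraps to a huge unsigned value, so the
// forward path is taken, which is always safe in that case.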
Lrejoin:                                // here from very-long-operand copies
        cmpl    $(kShort),%ecx          // long enough to bother with SSE?

// Handle short forward copies.  As the most common case, this is the fall-through path.
//      ecx = length (<= kShort)
Lshort:
        movl    %ecx,%edx               // copy length
        shrl    $2,%ecx                 // get #doublewords
2:                                      // loop copying doublewords
LLeftovers:                             // handle leftover bytes (0..3) in last word
        andl    $3,%edx                 // any leftover bytes?
4:                                      // loop copying bytes
        movl    8(%ebp),%eax            // get return value (dst ptr) for memcpy/memmove
LReverseIsland:                         // keep the "jb" above a short branch...
        jmp     LReverse                // ...because reverse moves are uncommon
// Handle forward moves that are long enough to justify use of SSE3.
// First, 16-byte align the destination.
//      ecx = length (> kShort)

        cmpl    $(kVeryLong),%ecx       // long enough to justify heavyweight loops?
        movl    %edi,%edx               // copy destination
        jae     LVeryLong               // use very-long-operand path
        andl    $15,%edx                // get #bytes to align destination
        jz      LDestAligned            // already aligned
        subl    %edx,%ecx               // decrement length
1:                                      // loop copying 1..15 bytes
// Destination is now aligned.  Prepare for forward loops over 64-byte chunks.
// Since kShort >= 80 and we have moved at most 15 bytes already, there is at least one chunk.

        movl    %ecx,%edx               // copy length
        movl    %ecx,%eax               // twice
        andl    $63,%ecx                // get remaining bytes for Lshort
        andl    $-64,%edx               // get number of bytes we will copy in inner loop
        addl    %edx,%esi               // point to 1st byte not copied
        negl    %edx                    // now generate offset to 1st byte to be copied
        testl   $15,%esi                // is source aligned too?
        jnz     LUnalignedLoop          // no
        cmpl    $(kFastUCode),%eax      // long enough for the fastpath in microcode?
        jb      LAlignedLoop            // no, use SSE
        cld                             // we'll move forward
        movl    %eax,%ecx               // copy length again
        shrl    $2,%ecx                 // compute #words to move
        addl    %edx,%esi               // restore ptrs to 1st byte of source and dest
        addl    %edx,%edi
        rep                             // the u-code will optimize this
        movsl
        movl    %eax,%edx               // original length
        jmp     LLeftovers              // handle 0..3 leftover bytes
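// The same fastpath in C with GCC-style inline assembly (a sketch under the
// same assumptions: destination and source 16-byte aligned, len >= kFastUCode):
//
//      size_t nwords = len / 4;
//      __asm__ volatile ("cld; rep movsl"
//                        : "+S" (src), "+D" (dst), "+c" (nwords)
//                        : : "memory");
//      /* then copy the 0..3 leftover bytes one at a time */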
// Forward aligned loop for medium length operands (kShort < n < kVeryLong).

        .align  4,0x90                  // 16-byte align inner loops
LAlignedLoop:                           // loop over 64-byte chunks
        movdqa  (%esi,%edx),%xmm0
        movdqa  16(%esi,%edx),%xmm1
        movdqa  32(%esi,%edx),%xmm2
        movdqa  48(%esi,%edx),%xmm3

        movdqa  %xmm0,(%edi,%edx)
        movdqa  %xmm1,16(%edi,%edx)
        movdqa  %xmm2,32(%edi,%edx)
        movdqa  %xmm3,48(%edi,%edx)
        jmp     Lshort                  // copy remaining 0..63 bytes and done
// Forward unaligned loop for medium length operands (kShort < n < kVeryLong).
// Note that LDDQU==MOVDQU on these machines, i.e. we don't care when we cross
// source cache lines.
        .align  4,0x90                  // 16-byte align inner loops
LUnalignedLoop:                         // loop over 64-byte chunks
        movdqu  (%esi,%edx),%xmm0       // the loads are unaligned
        movdqu  16(%esi,%edx),%xmm1
        movdqu  32(%esi,%edx),%xmm2
        movdqu  48(%esi,%edx),%xmm3

        movdqa  %xmm0,(%edi,%edx)       // we can use aligned stores
        movdqa  %xmm1,16(%edi,%edx)
        movdqa  %xmm2,32(%edi,%edx)
        movdqa  %xmm3,48(%edi,%edx)

        jmp     Lshort                  // copy remaining 0..63 bytes and done
// Very long forward moves.  These are at least several pages, so we loop over big
// chunks of memory (kBigChunk in size).  We first prefetch the chunk, and then copy
// it using non-temporal stores.  Hopefully all the reads occur in the prefetch loop,
// so the copy loop reads from L2 and writes directly to memory (with write combining).
// This minimizes bus turnaround and maintains good DRAM page locality.
// Note that for this scheme to work, kVeryLong must be a large fraction of L2 cache
// size.  Otherwise, it is counter-productive to bypass L2 on the stores.
//      ecx = length (>= kVeryLong bytes)
//      edi = dest (aligned)
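// In C with SSE2 intrinsics, the per-chunk strategy is roughly the following
// (a compressed sketch, not the code below; the real touch loop reads one
// byte per cache line in an interleaved order, see LTouchLoop):
//
//      #include <emmintrin.h>
//      while (len >= 4096) {
//          size_t chunk = (len < kBigChunk ? len : kBigChunk) & ~(size_t)4095;
//          for (size_t i = 0; i < chunk; i += 64)          // pass 1: pull chunk into L2
//              (void)((volatile const char *)src)[i];
//          for (size_t i = 0; i < chunk; i += 16)          // pass 2: stream to memory,
//              _mm_stream_si128((__m128i *)(dst + i),      //  bypassing the caches
//                               _mm_loadu_si128((const __m128i *)(src + i)));
//          src += chunk; dst += chunk; len -= chunk;
//      }
//      _mm_sfence();                                       // order the non-temporal stores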
LVeryLong:
        pushl   %ebx                    // we'll need to use this
        movl    %edi,%ebx               // copy dest ptr
        andl    $63,%ebx                // get #bytes to cache line align destination
        jz      LBigChunkLoop           // already aligned

// Cache line align destination, so the non-temporal stores in the copy loops work right.

        pushl   %ecx                    // save total length remaining
        pushl   %ebx                    // arg3 - #bytes to align destination (1..63)
        pushl   %esi                    // arg2 - source
        pushl   %edi                    // arg1 - dest
        call    Lmemcpy                 // align the destination
        movl    12(%esp),%ecx           // recover total length
        addl    %ebx,%esi               // adjust ptrs and lengths past copy
        addl    %ebx,%edi
        subl    %ebx,%ecx
// Loop over big chunks.
//      ecx = length remaining (>= 4096)
//      edi = dest (64-byte aligned)
//      esi = source (may be unaligned)

LBigChunkLoop:
        movl    $(kBigChunk),%edx       // assume we can do a full chunk
        cmpl    %edx,%ecx               // do we have a full chunk left to do?
        cmovbl  %ecx,%edx               // if not, only move what we have left
        andl    $-4096,%edx             // we work in page multiples
        xorl    %eax,%eax               // initialize chunk offset
// Because the source may be unaligned, we use byte loads to touch.
//      ecx = length remaining (including this chunk)
//      edi = ptr to start of dest chunk
//      esi = ptr to start of source chunk
//      edx = chunk length (multiples of pages)
//      ebx = scratch reg used to read a byte of each cache line
//      eax = chunk offset
        .align  4,0x90                  // 16-byte align inner loops
LTouchLoop:
        movzb   (%esi,%eax),%ebx        // touch line 0, 2, 4, or 6 of page
        movzb   1*64(%esi,%eax),%ebx    // touch line 1, 3, 5, or 7
        movzb   8*64(%esi,%eax),%ebx    // touch line 8, 10, 12, or 14
        movzb   9*64(%esi,%eax),%ebx    // etc

        movzb   16*64(%esi,%eax),%ebx
        movzb   17*64(%esi,%eax),%ebx
        movzb   24*64(%esi,%eax),%ebx
        movzb   25*64(%esi,%eax),%ebx

        movzb   32*64(%esi,%eax),%ebx
        movzb   33*64(%esi,%eax),%ebx
        movzb   40*64(%esi,%eax),%ebx
        movzb   41*64(%esi,%eax),%ebx

        movzb   48*64(%esi,%eax),%ebx
        movzb   49*64(%esi,%eax),%ebx
        movzb   56*64(%esi,%eax),%ebx
        movzb   57*64(%esi,%eax),%ebx

        subl    $-128,%eax              // next slice of page (adding 128 with an 8-bit immediate)
        testl   $512,%eax               // done with this page?
        jz      LTouchLoop              // no, next of four slices
        addl    $(4096-512),%eax        // move on to next page
        cmpl    %eax,%edx               // done with this chunk?
        jnz     LTouchLoop              // no, do next page
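// The touch order above, expressed in C (a sketch; assumes 64-byte lines and
// 4 KB pages).  Each 128-byte slice of a page is visited in 512-byte strides,
// so consecutive loads land in different parts of the page:
//
//      volatile const unsigned char *s = src;
//      for (size_t page = 0; page < chunk; page += 4096)
//          for (size_t slice = 0; slice < 512; slice += 128)
//              for (size_t group = 0; group < 4096; group += 512) {
//                  (void)s[page + slice + group];          // touch one line
//                  (void)s[page + slice + group + 64];     // and its neighbor
//              }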
// The chunk has been pre-fetched, now copy it using non-temporal stores.
// There are two copy loops, depending on whether the source is 16-byte aligned or not.

        addl    %edx,%esi               // increment ptrs by chunk length
        addl    %edx,%edi
        subl    %edx,%ecx               // adjust remaining length
        negl    %edx                    // prepare loop index (counts up to 0)
        testl   $15,%esi                // is source 16-byte aligned?
        jnz     LVeryLongUnaligned      // source is not aligned
        .align  4,0x90                  // 16-byte align inner loops
LVeryLongAligned:                       // aligned loop over 128-byte blocks
        movdqa  (%esi,%edx),%xmm0
        movdqa  16(%esi,%edx),%xmm1
        movdqa  32(%esi,%edx),%xmm2
        movdqa  48(%esi,%edx),%xmm3
        movdqa  64(%esi,%edx),%xmm4
        movdqa  80(%esi,%edx),%xmm5
        movdqa  96(%esi,%edx),%xmm6
        movdqa  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jnz     LVeryLongAligned
        jmp     LVeryLongChunkEnd
        .align  4,0x90                  // 16-byte align inner loops
LVeryLongUnaligned:                     // unaligned loop over 128-byte blocks
        movdqu  (%esi,%edx),%xmm0
        movdqu  16(%esi,%edx),%xmm1
        movdqu  32(%esi,%edx),%xmm2
        movdqu  48(%esi,%edx),%xmm3
        movdqu  64(%esi,%edx),%xmm4
        movdqu  80(%esi,%edx),%xmm5
        movdqu  96(%esi,%edx),%xmm6
        movdqu  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jnz     LVeryLongUnaligned
LVeryLongChunkEnd:
        cmpl    $4096,%ecx              // at least another page to go?
        jae     LBigChunkLoop           // yes

        sfence                          // required by non-temporal stores
        popl    %ebx
        jmp     Lrejoin                 // handle remaining (0..4095) bytes
LReverse:
        addl    %ecx,%esi               // point to end of strings
        addl    %ecx,%edi
        cmpl    $(kShort),%ecx          // long enough to bother with SSE?
        ja      LReverseNotShort        // yes

// Handle reverse short copies.
//      esi = one byte past end of source
//      edi = one byte past end of dest
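// In C terms a reverse copy amounts to the following (a sketch; the code
// below moves words, and for longer operands whole SSE blocks, not single bytes):
//
//      unsigned char *d = (unsigned char *)dst + len;          // one past the end
//      const unsigned char *s = (const unsigned char *)src + len;
//      while (len--)
//              *--d = *--s;                                    // walk backwards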
LReverseShort:
        movl    %ecx,%edx               // copy length
        shrl    $2,%ecx                 // #words
        andl    $3,%edx                 // bytes?
        movl    8(%ebp),%eax            // get return value (dst ptr) for memcpy/memmove
// Handle a reverse move long enough to justify using SSE.
//      esi = one byte past end of source
//      edi = one byte past end of dest

LReverseNotShort:
        movl    %edi,%edx               // copy destination
        andl    $15,%edx                // get #bytes to align destination
        je      LReverseDestAligned     // already aligned
        subl    %edx,%ecx               // adjust length
1:                                      // loop copying 1..15 bytes

// Destination is now aligned.  Prepare for reverse loops.
LReverseDestAligned:
        movl    %ecx,%edx               // copy length
        andl    $63,%ecx                // get remaining bytes for LReverseShort
        andl    $-64,%edx               // get number of bytes we will copy in inner loop
        subl    %edx,%esi               // point to endpoint of copy
        testl   $15,%esi                // is source aligned too?
        jnz     LReverseUnalignedLoop   // no
        jmp     LReverseAlignedLoop     // use aligned loop
        .align  4,0x90                  // 16-byte align inner loops
LReverseAlignedLoop:                    // loop over 64-byte chunks
        movdqa  -16(%esi,%edx),%xmm0
        movdqa  -32(%esi,%edx),%xmm1
        movdqa  -48(%esi,%edx),%xmm2
        movdqa  -64(%esi,%edx),%xmm3

        movdqa  %xmm0,-16(%edi,%edx)
        movdqa  %xmm1,-32(%edi,%edx)
        movdqa  %xmm2,-48(%edi,%edx)
        movdqa  %xmm3,-64(%edi,%edx)

        subl    $64,%edx
        jne     LReverseAlignedLoop

        jmp     LReverseShort           // copy remaining 0..63 bytes and done
// Reverse, unaligned loop.  LDDQU==MOVDQU on these machines.

        .align  4,0x90                  // 16-byte align inner loops
LReverseUnalignedLoop:                  // loop over 64-byte chunks
        movdqu  -16(%esi,%edx),%xmm0
        movdqu  -32(%esi,%edx),%xmm1
        movdqu  -48(%esi,%edx),%xmm2
        movdqu  -64(%esi,%edx),%xmm3

        movdqa  %xmm0,-16(%edi,%edx)
        movdqa  %xmm1,-32(%edi,%edx)
        movdqa  %xmm2,-48(%edi,%edx)
        movdqa  %xmm3,-64(%edi,%edx)

        subl    $64,%edx
        jne     LReverseUnalignedLoop

        jmp     LReverseShort           // copy remaining 0..63 bytes and done
        COMMPAGE_DESCRIPTOR(bcopy_sse3,_COMM_PAGE_BCOPY,kHasSSE2+kCache64,kHasSupplementalSSE3)