/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>

/*
 * The bcopy/memcpy loops, tuned for Pentium-M class processors with SSE2
 * and 64-byte cache lines, such as Core and Core 2.
 *
 * The following #defines are tightly coupled to the u-architecture:
 */

#define kShort      80          // too short to bother with SSE (must be >=80)
#define kVeryLong   (500*1024)  // large enough for non-temporal stores (must be >= 8192)
#define kBigChunk   (256*1024)  // outer loop chunk size for kVeryLong sized operands
#define kFastUCode  (16*1024)   // cutoff for microcode fastpath for "rep/movsl"

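/*
 * Road map of the length-based dispatch implemented below, as a rough C model.
 * Illustrative only, never assembled; copy_reverse/copy_short/copy_very_long/
 * copy_medium are hypothetical stand-ins for the LReverse, Lshort, LVeryLong,
 * and LAlignedLoop/LUnalignedLoop paths that follow.
 *
 *      void bcopy_model(const unsigned char *src, unsigned char *dst, unsigned len) {
 *              if ((unsigned)(dst - src) < len)        // dst starts inside [src, src+len)
 *                      copy_reverse(src, dst, len);    // must copy descending (LReverse)
 *              else if (len <= kShort)
 *                      copy_short(src, dst, len);      // doubleword + byte loop (Lshort)
 *              else if (len >= kVeryLong)
 *                      copy_very_long(src, dst, len);  // prefetch + non-temporal stores (LVeryLong)
 *              else
 *                      copy_medium(src, dst, len);     // 16-byte align dest, then either the
 *                                                      // "rep/movsl" fastpath (both operands
 *                                                      // aligned and len >= kFastUCode) or the
 *                                                      // 64-byte SSE2 loops
 *      }
 */
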
// void bcopy(const void *src, void *dst, size_t len);

COMMPAGE_FUNCTION_START(bcopy_sse2, 32, 5)
        pushl   %ebp                    // set up a frame for backtraces
        movl    %esp,%ebp
        pushl   %esi
        pushl   %edi
        movl    8(%ebp),%esi            // get source ptr
        movl    12(%ebp),%edi           // get dest ptr
        jmp     Ljoin

//
// void *memcpy(void *dst, const void *src, size_t len);
// void *memmove(void *dst, const void *src, size_t len);
//
// NB: These need to be 32 bytes from bcopy():
//

        .align  5, 0x90
Lmemcpy:                                // void *memcpy(void *dst, const void *src, size_t len)
Lmemmove:                               // void *memmove(void *dst, const void *src, size_t len)
        pushl   %ebp                    // set up a frame for backtraces
        movl    %esp,%ebp
        pushl   %esi
        pushl   %edi
        movl    8(%ebp),%edi            // get dest ptr
        movl    12(%ebp),%esi           // get source ptr

Ljoin:                                  // here from bcopy() with esi and edi loaded
        movl    16(%ebp),%ecx           // get length
        movl    %edi,%edx
        subl    %esi,%edx               // (dest - source)
        cmpl    %ecx,%edx               // must move in reverse if (dest - source) < length
        jb      LReverseIsland
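/*
 * Note on the compare and branch just above: the subtraction wraps modulo 2^32,
 * so a single unsigned "jb" catches exactly the case where the destination
 * starts inside [source, source+length), i.e. (unsigned)(dst - src) < len in C.
 * A destination below the source wraps to a huge value and correctly stays on
 * the forward path.
 */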
Lrejoin:                                // here from very-long-operand copies
        cmpl    $(kShort),%ecx          // long enough to bother with SSE?
        ja      LNotShort               // yes

// Handle short forward copies.  As the most common case, this is the fall-through path.
//      ecx = length (<= kShort)
//      esi = source ptr
//      edi = dest ptr

Lshort:
        movl    %ecx,%edx               // copy length
        shrl    $2,%ecx                 // get #doublewords
        jz      LLeftovers
2:                                      // loop copying doublewords
        movl    (%esi),%eax
        addl    $4,%esi
        movl    %eax,(%edi)
        addl    $4,%edi
        dec     %ecx
        jnz     2b
LLeftovers:                             // handle leftover bytes (0..3) in last word
        andl    $3,%edx                 // any leftover bytes?
        jz      5f
4:                                      // loop copying bytes
        movb    (%esi),%al
        inc     %esi
        movb    %al,(%edi)
        inc     %edi
        dec     %edx
        jnz     4b
5:
        movl    8(%ebp),%eax            // get return value (dst ptr) for memcpy/memmove
        popl    %edi
        popl    %esi
        popl    %ebp
        ret

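/*
 * Equivalent C for the short forward path above (illustrative only): copy as
 * many 4-byte doublewords as possible, then the 0..3 leftover bytes.  The
 * pointer casts mirror the asm's reliance on x86 tolerating unaligned loads;
 * strict C would use memcpy for the 4-byte moves.
 *
 *      static void copy_short(const unsigned char *src, unsigned char *dst, unsigned len) {
 *              for (unsigned n = len >> 2; n != 0; n--) {      // whole doublewords
 *                      *(unsigned *)dst = *(const unsigned *)src;
 *                      src += 4;  dst += 4;
 *              }
 *              for (unsigned n = len & 3; n != 0; n--)         // 0..3 leftover bytes
 *                      *dst++ = *src++;
 *      }
 */
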
LReverseIsland:                         // keep the "jb" above a short branch...
        jmp     LReverse                // ...because reverse moves are uncommon

// Handle forward moves that are long enough to justify use of SSE2.
// First, 16-byte align the destination.
//      ecx = length (> kShort)
//      esi = source ptr
//      edi = dest ptr

LNotShort:
        cmpl    $(kVeryLong),%ecx       // long enough to justify heavyweight loops?
        movl    %edi,%edx               // copy destination
        jae     LVeryLong               // use very-long-operand path
        negl    %edx
        andl    $15,%edx                // get #bytes to align destination
        jz      LDestAligned            // already aligned
        subl    %edx,%ecx               // decrement length
1:                                      // loop copying 1..15 bytes
        movb    (%esi),%al
        inc     %esi
        movb    %al,(%edi)
        inc     %edi
        dec     %edx
        jnz     1b

// Destination is now aligned.  Prepare for forward loops over 64-byte chunks.
// Since kShort>=80 and we've moved at most 15 bytes already, there is at least one chunk.

LDestAligned:
        movl    %ecx,%edx               // copy length
        movl    %ecx,%eax               // twice
        andl    $63,%ecx                // get remaining bytes for Lshort
        andl    $-64,%edx               // get number of bytes we will copy in inner loop
        addl    %edx,%esi               // point to 1st byte not copied
        addl    %edx,%edi
        negl    %edx                    // now generate offset to 1st byte to be copied
        testl   $15,%esi                // is source aligned too?
        jnz     LUnalignedLoop          // no

        cmpl    $(kFastUCode),%eax      // long enough for the fastpath in microcode?
        jb      LAlignedLoop            // no, use SSE
        cld                             // we'll move forward
        movl    %eax,%ecx               // copy length again
        shrl    $2,%ecx                 // compute #words to move
        addl    %edx,%esi               // restore ptrs to 1st byte of source and dest
        addl    %edx,%edi
        rep                             // the u-code will optimize this
        movsl
        movl    %eax,%edx               // original length
        jmp     LLeftovers              // handle 0..3 leftover bytes

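/*
 * Loop-indexing idiom used by all of the 64-byte loops in this file: the
 * pointers are advanced past the region up front and the index register holds
 * the negated chunk count, so a single "addl $64" (or "subl $-128") both
 * advances the index and sets the flags that end the loop at zero, leaving the
 * pointers already at the tail.  Roughly, in C (illustrative only):
 *
 *      unsigned chunk = len & ~63u;            // bytes handled by the inner loop
 *      src += chunk;  dst += chunk;            // point just past the last chunk
 *      for (int i = -(int)chunk; i != 0; i += 64)
 *              copy_64_bytes(dst + i, src + i);        // hypothetical helper
 *      // src/dst now point at the 0..63 byte tail handled by Lshort
 */
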
// Forward aligned loop for medium length operands (kShort < n < kVeryLong).

        .align  4,0x90                  // 16-byte align inner loops
LAlignedLoop:                           // loop over 64-byte chunks
        movdqa  (%esi,%edx),%xmm0
        movdqa  16(%esi,%edx),%xmm1
        movdqa  32(%esi,%edx),%xmm2
        movdqa  48(%esi,%edx),%xmm3

        movdqa  %xmm0,(%edi,%edx)
        movdqa  %xmm1,16(%edi,%edx)
        movdqa  %xmm2,32(%edi,%edx)
        movdqa  %xmm3,48(%edi,%edx)

        addl    $64,%edx
        jnz     LAlignedLoop

        jmp     Lshort                  // copy remaining 0..63 bytes and done

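/*
 * Intrinsics model of one 64-byte iteration of the loop above (illustrative
 * only; requires <emmintrin.h> and 16-byte aligned src/dst, as guaranteed
 * above).  The unaligned variant below is identical except that the four
 * loads become _mm_loadu_si128; the stores stay aligned because the
 * destination was 16-byte aligned first.
 *
 *      static void copy_64_aligned(unsigned char *dst, const unsigned char *src) {
 *              __m128i x0 = _mm_load_si128((const __m128i *)(src +  0));
 *              __m128i x1 = _mm_load_si128((const __m128i *)(src + 16));
 *              __m128i x2 = _mm_load_si128((const __m128i *)(src + 32));
 *              __m128i x3 = _mm_load_si128((const __m128i *)(src + 48));
 *              _mm_store_si128((__m128i *)(dst +  0), x0);
 *              _mm_store_si128((__m128i *)(dst + 16), x1);
 *              _mm_store_si128((__m128i *)(dst + 32), x2);
 *              _mm_store_si128((__m128i *)(dst + 48), x3);
 *      }
 */
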
// Forward unaligned loop for medium length operands (kShort < n < kVeryLong).
// Note that LDDQU==MOVDQU on these machines, i.e. we don't care when we cross
// source cache lines.

        .align  4,0x90                  // 16-byte align inner loops
LUnalignedLoop:                         // loop over 64-byte chunks
        movdqu  (%esi,%edx),%xmm0       // the loads are unaligned
        movdqu  16(%esi,%edx),%xmm1
        movdqu  32(%esi,%edx),%xmm2
        movdqu  48(%esi,%edx),%xmm3

        movdqa  %xmm0,(%edi,%edx)       // we can use aligned stores
        movdqa  %xmm1,16(%edi,%edx)
        movdqa  %xmm2,32(%edi,%edx)
        movdqa  %xmm3,48(%edi,%edx)

        addl    $64,%edx
        jnz     LUnalignedLoop

        jmp     Lshort                  // copy remaining 0..63 bytes and done

// Very long forward moves.  These are at least several pages, so we loop over big
// chunks of memory (kBigChunk in size.)  We first prefetch the chunk, and then copy
// it using non-temporal stores.  Hopefully all the reads occur in the prefetch loop,
// so the copy loop reads from L2 and writes directly to memory (with write combining.)
// This minimizes bus turnaround and maintains good DRAM page locality.
// Note that for this scheme to work, kVeryLong must be a large fraction of L2 cache
// size.  Otherwise, it is counter-productive to bypass L2 on the stores.
//      ecx = length (>= kVeryLong bytes)
//      edi = dest (may be unaligned)
//      esi = source

LVeryLong:
        pushl   %ebx                    // we'll need to use this
        movl    %edi,%ebx               // copy dest ptr
        negl    %ebx
        andl    $63,%ebx                // get #bytes to cache line align destination
        jz      LBigChunkLoop           // already aligned

// Cache line align destination, so the non-temporal stores in the copy loops work right.

        pushl   %ecx                    // save total length remaining
        pushl   %ebx                    // arg3 - #bytes to align destination (1..63)
        pushl   %esi                    // arg2 - source
        pushl   %edi                    // arg1 - dest
        call    Lmemcpy                 // align the destination
        movl    12(%esp),%ecx           // recover total length
        addl    $16,%esp
        addl    %ebx,%esi               // adjust ptrs and lengths past copy
        addl    %ebx,%edi
        subl    %ebx,%ecx

// Loop over big chunks.
//      ecx = length remaining (>= 4096)
//      edi = dest (64-byte aligned)
//      esi = source (may be unaligned)

LBigChunkLoop:
        movl    $(kBigChunk),%edx       // assume we can do a full chunk
        cmpl    %edx,%ecx               // do we have a full chunk left to do?
        cmovbl  %ecx,%edx               // if not, only move what we have left
        andl    $-4096,%edx             // we work in page multiples
        xor     %eax,%eax               // initialize chunk offset
        jmp     LTouchLoop

// Because the source may be unaligned, we use byte loads to touch.
//      ecx = length remaining (including this chunk)
//      edi = ptr to start of dest chunk
//      esi = ptr to start of source chunk
//      edx = chunk length (multiples of pages)
//      ebx = scratch reg used to read a byte of each cache line
//      eax = chunk offset

        .align  4,0x90                  // 16-byte align inner loops
LTouchLoop:
        movzb   (%esi,%eax),%ebx        // touch line 0, 2, 4, or 6 of page
        movzb   1*64(%esi,%eax),%ebx    // touch line 1, 3, 5, or 7
        movzb   8*64(%esi,%eax),%ebx    // touch line 8, 10, 12, or 14
        movzb   9*64(%esi,%eax),%ebx    // etc

        movzb   16*64(%esi,%eax),%ebx
        movzb   17*64(%esi,%eax),%ebx
        movzb   24*64(%esi,%eax),%ebx
        movzb   25*64(%esi,%eax),%ebx

        movzb   32*64(%esi,%eax),%ebx
        movzb   33*64(%esi,%eax),%ebx
        movzb   40*64(%esi,%eax),%ebx
        movzb   41*64(%esi,%eax),%ebx

        movzb   48*64(%esi,%eax),%ebx
        movzb   49*64(%esi,%eax),%ebx
        movzb   56*64(%esi,%eax),%ebx
        movzb   57*64(%esi,%eax),%ebx

        subl    $-128,%eax              // next slice of page (adding 128 with an 8-bit immediate)
        testl   $512,%eax               // done with this page?
        jz      LTouchLoop              // no, next of four slices
        addl    $(4096-512),%eax        // move on to next page
        cmpl    %eax,%edx               // done with this chunk?
        jnz     LTouchLoop              // no, do next page

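/*
 * C model of the touch loop above (illustrative only): one byte load per
 * 64-byte cache line is enough to pull the whole line into cache, and the
 * 0,1,8,9,... line ordering presumably spreads the reads across the page to
 * keep several misses in flight.  A simpler equivalent:
 *
 *      static void touch_chunk(const volatile unsigned char *src, unsigned chunk_len) {
 *              for (unsigned off = 0; off < chunk_len; off += 64)
 *                      (void)src[off];         // volatile read defeats dead-load elimination
 *      }
 */
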
// The chunk has been pre-fetched, now copy it using non-temporal stores.
// There are two copy loops, depending on whether the source is 16-byte aligned
// or not.

        addl    %edx,%esi               // increment ptrs by chunk length
        addl    %edx,%edi
        subl    %edx,%ecx               // adjust remaining length
        negl    %edx                    // prepare loop index (counts up to 0)
        testl   $15,%esi                // is source 16-byte aligned?
        jnz     LVeryLongUnaligned      // source is not aligned
        jmp     LVeryLongAligned

        .align  4,0x90                  // 16-byte align inner loops
LVeryLongAligned:                       // aligned loop over 128 bytes
        movdqa  (%esi,%edx),%xmm0
        movdqa  16(%esi,%edx),%xmm1
        movdqa  32(%esi,%edx),%xmm2
        movdqa  48(%esi,%edx),%xmm3
        movdqa  64(%esi,%edx),%xmm4
        movdqa  80(%esi,%edx),%xmm5
        movdqa  96(%esi,%edx),%xmm6
        movdqa  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jnz     LVeryLongAligned
        jmp     LVeryLongChunkEnd

        .align  4,0x90                  // 16-byte align inner loops
LVeryLongUnaligned:                     // unaligned loop over 128 bytes
        movdqu  (%esi,%edx),%xmm0
        movdqu  16(%esi,%edx),%xmm1
        movdqu  32(%esi,%edx),%xmm2
        movdqu  48(%esi,%edx),%xmm3
        movdqu  64(%esi,%edx),%xmm4
        movdqu  80(%esi,%edx),%xmm5
        movdqu  96(%esi,%edx),%xmm6
        movdqu  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jnz     LVeryLongUnaligned

LVeryLongChunkEnd:
        cmpl    $4096,%ecx              // at least another page to go?
        jae     LBigChunkLoop           // yes

        sfence                          // required by non-temporal stores
        popl    %ebx
        jmp     Lrejoin                 // handle remaining (0..4095) bytes

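/*
 * Intrinsics model of the non-temporal copy loops above (illustrative only;
 * requires <emmintrin.h> if compiled).  movntdq writes around the cache via
 * write combining, which is why the destination was cache-line aligned earlier
 * and why the sfence above must order the weakly-ordered streaming stores
 * before the routine returns.  Assumes len is a multiple of 16 and dst is
 * 16-byte aligned, as guaranteed above; the real code issues sfence once,
 * after all chunks.
 *
 *      static void stream_copy(unsigned char *dst, const unsigned char *src, unsigned len) {
 *              for (unsigned off = 0; off < len; off += 16) {
 *                      __m128i x = _mm_loadu_si128((const __m128i *)(src + off));
 *                      _mm_stream_si128((__m128i *)(dst + off), x);    // non-temporal store
 *              }
 *              _mm_sfence();           // make the streaming stores globally ordered
 *      }
 */
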
// Reverse moves.
//      ecx = length
//      esi = source ptr
//      edi = dest ptr

LReverse:
        addl    %ecx,%esi               // point to end of strings
        addl    %ecx,%edi
        cmpl    $(kShort),%ecx          // long enough to bother with SSE?
        ja      LReverseNotShort        // yes

// Handle reverse short copies.
//      ecx = length
//      esi = one byte past end of source
//      edi = one byte past end of dest

LReverseShort:
        movl    %ecx,%edx               // copy length
        shrl    $2,%ecx                 // #words
        jz      3f
1:
        subl    $4,%esi
        movl    (%esi),%eax
        subl    $4,%edi
        movl    %eax,(%edi)
        dec     %ecx
        jnz     1b
3:
        andl    $3,%edx                 // bytes?
        jz      5f
4:
        dec     %esi
        movb    (%esi),%al
        dec     %edi
        movb    %al,(%edi)
        dec     %edx
        jnz     4b
5:
        movl    8(%ebp),%eax            // get return value (dst ptr) for memcpy/memmove
        popl    %edi
        popl    %esi
        popl    %ebp
        ret

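/*
 * C model of the reverse short path above (illustrative only): both pointers
 * start one byte past the end and are pre-decremented, so the same
 * doubleword-then-bytes split as the forward Lshort path applies, just walking
 * downward.  As in the forward model, the casts mirror the asm's unaligned
 * 32-bit accesses.
 *
 *      static void copy_reverse_short(const unsigned char *src_end, unsigned char *dst_end, unsigned len) {
 *              for (unsigned n = len >> 2; n != 0; n--) {
 *                      src_end -= 4;  dst_end -= 4;
 *                      *(unsigned *)dst_end = *(const unsigned *)src_end;
 *              }
 *              for (unsigned n = len & 3; n != 0; n--)
 *                      *--dst_end = *--src_end;
 *      }
 */
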
// Handle a reverse move long enough to justify using SSE.
//      ecx = length
//      esi = one byte past end of source
//      edi = one byte past end of dest

LReverseNotShort:
        movl    %edi,%edx               // copy destination
        andl    $15,%edx                // get #bytes to align destination
        je      LReverseDestAligned     // already aligned
        subl    %edx,%ecx               // adjust length
1:                                      // loop copying 1..15 bytes
        dec     %esi
        movb    (%esi),%al
        dec     %edi
        movb    %al,(%edi)
        dec     %edx
        jnz     1b

// Destination is now aligned.  Prepare for reverse loops.

LReverseDestAligned:
        movl    %ecx,%edx               // copy length
        andl    $63,%ecx                // get remaining bytes for LReverseShort
        andl    $-64,%edx               // get number of bytes we will copy in inner loop
        subl    %edx,%esi               // point to endpoint of copy
        subl    %edx,%edi
        testl   $15,%esi                // is source aligned too?
        jnz     LReverseUnalignedLoop   // no
        jmp     LReverseAlignedLoop     // use aligned loop

        .align  4,0x90                  // 16-byte align inner loops
LReverseAlignedLoop:                    // loop over 64-byte chunks
        movdqa  -16(%esi,%edx),%xmm0
        movdqa  -32(%esi,%edx),%xmm1
        movdqa  -48(%esi,%edx),%xmm2
        movdqa  -64(%esi,%edx),%xmm3

        movdqa  %xmm0,-16(%edi,%edx)
        movdqa  %xmm1,-32(%edi,%edx)
        movdqa  %xmm2,-48(%edi,%edx)
        movdqa  %xmm3,-64(%edi,%edx)

        subl    $64,%edx
        jne     LReverseAlignedLoop

        jmp     LReverseShort           // copy remaining 0..63 bytes and done


// Reverse, unaligned loop.  LDDQU==MOVDQU on these machines.

        .align  4,0x90                  // 16-byte align inner loops
LReverseUnalignedLoop:                  // loop over 64-byte chunks
        movdqu  -16(%esi,%edx),%xmm0
        movdqu  -32(%esi,%edx),%xmm1
        movdqu  -48(%esi,%edx),%xmm2
        movdqu  -64(%esi,%edx),%xmm3

        movdqa  %xmm0,-16(%edi,%edx)
        movdqa  %xmm1,-32(%edi,%edx)
        movdqa  %xmm2,-48(%edi,%edx)
        movdqa  %xmm3,-64(%edi,%edx)

        subl    $64,%edx
        jne     LReverseUnalignedLoop

        jmp     LReverseShort           // copy remaining 0..63 bytes and done

COMMPAGE_DESCRIPTOR(bcopy_sse2,_COMM_PAGE_BCOPY,kHasSSE2+kCache64,kHasSupplementalSSE3)