/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/cpu_capabilities.h>
#include <platfunc.h>

/*
 * The bcopy/memcpy loops, tuned for Pentium-M class processors with SSE2
 * and 64-byte cache lines, such as Core and Core 2.
 *
 * The following #defines are tightly coupled to the u-architecture:
 */

#define kShort      80          // too short to bother with SSE (must be >=80)
#define kVeryLong   (500*1024)  // large enough for non-temporal stores (must be >= 8192)
#define kBigChunk   (256*1024)  // outer loop chunk size for kVeryLong sized operands
#define kFastUCode  (16*1024)   // cutoff for microcode fastpath for "rep/movsl"


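//
// Summary of the length-based dispatch implemented below (derived from the
// code, not part of the original commentary):
//      len <= kShort:              plain doubleword/byte loops (Lshort)
//      kShort < len < kVeryLong:   64-byte SSE2 loops (aligned or unaligned loads);
//                                  if src and dst are both 16-byte aligned and
//                                  len >= kFastUCode, use the "rep/movsl" fastpath
//      len >= kVeryLong:           per-chunk byte-touch prefetch + non-temporal stores
//
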
// void bcopy(const void *src, void *dst, size_t len);

PLATFUNC_FUNCTION_START(bcopy, sse2, 32, 5)
        pushl   %ebp                    // set up a frame for backtraces
        movl    %esp,%ebp
        pushl   %esi
        pushl   %edi
        movl    8(%ebp),%esi            // get source ptr
        movl    12(%ebp),%edi           // get dest ptr
        jmp     Ljoin

//
// void *memcpy(void *dst, const void *src, size_t len);
// void *memmove(void *dst, const void *src, size_t len);
//

PLATFUNC_FUNCTION_START(memcpy, sse2, 32, 0)    // void *memcpy(void *dst, const void *src, size_t len)
PLATFUNC_FUNCTION_START(memmove, sse2, 32, 0)   // void *memmove(void *dst, const void *src, size_t len)
Lmemcpy_sse2:
        pushl   %ebp                    // set up a frame for backtraces
        movl    %esp,%ebp
        pushl   %esi
        pushl   %edi
        movl    8(%ebp),%edi            // get dest ptr
        movl    12(%ebp),%esi           // get source ptr

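// The forward/reverse decision below uses a single unsigned compare:
// (dest - source) wraps to a huge unsigned value when dest < source, so only
// a destination that starts inside the source buffer takes the (uncommon)
// reverse path.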
Ljoin:                                  // here from bcopy() with esi and edi loaded
        movl    16(%ebp),%ecx           // get length
        movl    %edi,%edx
        subl    %esi,%edx               // (dest - source)
        cmpl    %ecx,%edx               // must move in reverse if (dest - source) < length
        jb      LReverseIsland
Lrejoin:                                // here from very-long-operand copies
        cmpl    $(kShort),%ecx          // long enough to bother with SSE?
        ja      LNotShort               // yes

// Handle short forward copies.  As the most common case, this is the fall-through path.
//      ecx = length (<= kShort)
//      esi = source ptr
//      edi = dest ptr

Lshort:
        movl    %ecx,%edx               // copy length
        shrl    $2,%ecx                 // get #doublewords
        jz      LLeftovers
2:                                      // loop copying doublewords
        movl    (%esi),%eax
        addl    $4,%esi
        movl    %eax,(%edi)
        addl    $4,%edi
        dec     %ecx
        jnz     2b
LLeftovers:                             // handle leftover bytes (0..3) in last word
        andl    $3,%edx                 // any leftover bytes?
        jz      5f
4:                                      // loop copying bytes
        movb    (%esi),%al
        inc     %esi
        movb    %al,(%edi)
        inc     %edi
        dec     %edx
        jnz     4b
5:
        movl    8(%ebp),%eax            // get return value (dst ptr) for memcpy/memmove
        popl    %edi
        popl    %esi
        popl    %ebp
        ret


LReverseIsland:                         // keep the "jb" above a short branch...
        jmp     LReverse                // ...because reverse moves are uncommon


// Handle forward moves that are long enough to justify use of SSE.
// First, 16-byte align the destination.
//      ecx = length (> kShort)
//      esi = source ptr
//      edi = dest ptr

LNotShort:
        cmpl    $(kVeryLong),%ecx       // long enough to justify heavyweight loops?
        movl    %edi,%edx               // copy destination
        jae     LVeryLong               // use very-long-operand path
        negl    %edx
        andl    $15,%edx                // get #bytes to align destination
        jz      LDestAligned            // already aligned
        subl    %edx,%ecx               // decrement length
1:                                      // loop copying 1..15 bytes
        movb    (%esi),%al
        inc     %esi
        movb    %al,(%edi)
        inc     %edi
        dec     %edx
        jnz     1b

// Destination is now aligned.  Prepare for forward loops over 64-byte chunks.
// Since kShort>=80 and we've moved at most 15 bytes already, there is at least one chunk.
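// The copy loops below run with a negative index: esi and edi are advanced
// past the last byte to be copied and edx counts up from -(#bytes) to zero,
// so the add-and-test at the bottom of each loop both advances the index and
// detects completion.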

LDestAligned:
        movl    %ecx,%edx               // copy length
        movl    %ecx,%eax               // twice
        andl    $63,%ecx                // get remaining bytes for Lshort
        andl    $-64,%edx               // get number of bytes we will copy in inner loop
        addl    %edx,%esi               // point to 1st byte not copied
        addl    %edx,%edi
        negl    %edx                    // now generate offset to 1st byte to be copied
        testl   $15,%esi                // is source aligned too?
        jnz     LUnalignedLoop          // no
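
// Both source and dest are now 16-byte aligned.  For operands of at least
// kFastUCode bytes, take the "rep/movsl" fast-string microcode path instead
// of the SSE loop: restore the pointers, move whole doublewords, then finish
// the 0..3 leftover bytes in LLeftovers.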
        cmpl    $(kFastUCode),%eax      // long enough for the fastpath in microcode?
        jb      LAlignedLoop            // no, use SSE
        cld                             // we'll move forward
        movl    %eax,%ecx               // copy length again
        shrl    $2,%ecx                 // compute #words to move
        addl    %edx,%esi               // restore ptrs to 1st byte of source and dest
        addl    %edx,%edi
        rep                             // the u-code will optimize this
        movsl
        movl    %eax,%edx               // original length
        jmp     LLeftovers              // handle 0..3 leftover bytes


// Forward aligned loop for medium length operands (kShort < n < kVeryLong).

        .align  4,0x90                  // 16-byte align inner loops
LAlignedLoop:                           // loop over 64-byte chunks
        movdqa  (%esi,%edx),%xmm0
        movdqa  16(%esi,%edx),%xmm1
        movdqa  32(%esi,%edx),%xmm2
        movdqa  48(%esi,%edx),%xmm3

        movdqa  %xmm0,(%edi,%edx)
        movdqa  %xmm1,16(%edi,%edx)
        movdqa  %xmm2,32(%edi,%edx)
        movdqa  %xmm3,48(%edi,%edx)

        addl    $64,%edx
        jnz     LAlignedLoop

        jmp     Lshort                  // copy remaining 0..63 bytes and done


// Forward unaligned loop for medium length operands (kShort < n < kVeryLong).
// Note that LDDQU==MOVDQU on these machines, ie we don't care when we cross
// source cache lines.

        .align  4,0x90                  // 16-byte align inner loops
LUnalignedLoop:                         // loop over 64-byte chunks
        movdqu  (%esi,%edx),%xmm0       // the loads are unaligned
        movdqu  16(%esi,%edx),%xmm1
        movdqu  32(%esi,%edx),%xmm2
        movdqu  48(%esi,%edx),%xmm3

        movdqa  %xmm0,(%edi,%edx)       // we can use aligned stores
        movdqa  %xmm1,16(%edi,%edx)
        movdqa  %xmm2,32(%edi,%edx)
        movdqa  %xmm3,48(%edi,%edx)

        addl    $64,%edx
        jnz     LUnalignedLoop

        jmp     Lshort                  // copy remaining 0..63 bytes and done


// Very long forward moves.  These are at least several pages, so we loop over big
// chunks of memory (kBigChunk in size.)  We first prefetch the chunk, and then copy
// it using non-temporal stores.  Hopefully all the reads occur in the prefetch loop,
// so the copy loop reads from L2 and writes directly to memory (with write combining.)
// This minimizes bus turnaround and maintains good DRAM page locality.
// Note that for this scheme to work, kVeryLong must be a large fraction of L2 cache
// size.  Otherwise, it is counter-productive to bypass L2 on the stores.
//      ecx = length (>= kVeryLong bytes)
//      edi = dest (aligned)
//      esi = source

LVeryLong:
        pushl   %ebx                    // we'll need to use this
        movl    %edi,%ebx               // copy dest ptr
        negl    %ebx
        andl    $63,%ebx                // get #bytes to cache line align destination
        jz      LBigChunkLoop           // already aligned

// Cache line align destination, so the non-temporal stores in the copy loops work right.
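// The alignment prefix (1..63 bytes) is copied by calling back into
// Lmemcpy_sse2; since that is a short copy it clobbers only eax, ecx and edx,
// so esi and edi survive the call and are then stepped past the prefix.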

        pushl   %ecx                    // save total length remaining
        pushl   %ebx                    // arg3 - #bytes to align destination (1..63)
        pushl   %esi                    // arg2 - source
        pushl   %edi                    // arg1 - dest
        call    Lmemcpy_sse2            // align the destination
        movl    12(%esp),%ecx           // recover total length
        addl    $16,%esp
        addl    %ebx,%esi               // adjust ptrs and lengths past copy
        addl    %ebx,%edi
        subl    %ebx,%ecx

// Loop over big chunks.
//      ecx = length remaining (>= 4096)
//      edi = dest (64-byte aligned)
//      esi = source (may be unaligned)

LBigChunkLoop:
        movl    $(kBigChunk),%edx       // assume we can do a full chunk
        cmpl    %edx,%ecx               // do we have a full chunk left to do?
        cmovbl  %ecx,%edx               // if not, only move what we have left
        andl    $-4096,%edx             // we work in page multiples
        xor     %eax,%eax               // initialize chunk offset
        jmp     LTouchLoop

// Because the source may be unaligned, we use byte loads to touch.
//      ecx = length remaining (including this chunk)
//      edi = ptr to start of dest chunk
//      esi = ptr to start of source chunk
//      edx = chunk length (multiples of pages)
//      ebx = scratch reg used to read a byte of each cache line
//      eax = chunk offset
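//
// Each pass of LTouchLoop reads one byte from 16 different cache lines; four
// passes (eax advancing by 128 each time) touch all 64 lines of a 4KB page
// exactly once, pulling the page into the cache ahead of the copy loop.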

        .align  4,0x90                  // 16-byte align inner loops
LTouchLoop:
        movzb   (%esi,%eax),%ebx        // touch line 0, 2, 4, or 6 of page
        movzb   1*64(%esi,%eax),%ebx    // touch line 1, 3, 5, or 7
        movzb   8*64(%esi,%eax),%ebx    // touch line 8, 10, 12, or 14
        movzb   9*64(%esi,%eax),%ebx    // etc

        movzb   16*64(%esi,%eax),%ebx
        movzb   17*64(%esi,%eax),%ebx
        movzb   24*64(%esi,%eax),%ebx
        movzb   25*64(%esi,%eax),%ebx

        movzb   32*64(%esi,%eax),%ebx
        movzb   33*64(%esi,%eax),%ebx
        movzb   40*64(%esi,%eax),%ebx
        movzb   41*64(%esi,%eax),%ebx

        movzb   48*64(%esi,%eax),%ebx
        movzb   49*64(%esi,%eax),%ebx
        movzb   56*64(%esi,%eax),%ebx
        movzb   57*64(%esi,%eax),%ebx

        subl    $-128,%eax              // next slice of page (adding 128 w 8-bit immediate)
        testl   $512,%eax               // done with this page?
        jz      LTouchLoop              // no, next of four slices
        addl    $(4096-512),%eax        // move on to next page
        cmpl    %eax,%edx               // done with this chunk?
        jnz     LTouchLoop              // no, do next page

// The chunk has been pre-fetched, now copy it using non-temporal stores.
// There are two copy loops, depending on whether the source is 16-byte aligned
// or not.
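//
// The movntdq stores below bypass the caches and are weakly ordered, which is
// why an sfence is issued before leaving the very-long path.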

        addl    %edx,%esi               // increment ptrs by chunk length
        addl    %edx,%edi
        subl    %edx,%ecx               // adjust remaining length
        negl    %edx                    // prepare loop index (counts up to 0)
        testl   $15,%esi                // is source 16-byte aligned?
        jnz     LVeryLongUnaligned      // source is not aligned
        jmp     LVeryLongAligned

        .align  4,0x90                  // 16-byte align inner loops
LVeryLongAligned:                       // aligned loop over 128-bytes
        movdqa  (%esi,%edx),%xmm0
        movdqa  16(%esi,%edx),%xmm1
        movdqa  32(%esi,%edx),%xmm2
        movdqa  48(%esi,%edx),%xmm3
        movdqa  64(%esi,%edx),%xmm4
        movdqa  80(%esi,%edx),%xmm5
        movdqa  96(%esi,%edx),%xmm6
        movdqa  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jnz     LVeryLongAligned
        jmp     LVeryLongChunkEnd

        .align  4,0x90                  // 16-byte align inner loops
LVeryLongUnaligned:                     // unaligned loop over 128-bytes
        movdqu  (%esi,%edx),%xmm0
        movdqu  16(%esi,%edx),%xmm1
        movdqu  32(%esi,%edx),%xmm2
        movdqu  48(%esi,%edx),%xmm3
        movdqu  64(%esi,%edx),%xmm4
        movdqu  80(%esi,%edx),%xmm5
        movdqu  96(%esi,%edx),%xmm6
        movdqu  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jnz     LVeryLongUnaligned

LVeryLongChunkEnd:
        cmpl    $4096,%ecx              // at least another page to go?
        jae     LBigChunkLoop           // yes

        sfence                          // required by non-temporal stores
        popl    %ebx
        jmp     Lrejoin                 // handle remaining (0..4095) bytes


// Reverse moves.
//      ecx = length
//      esi = source ptr
//      edi = dest ptr
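//
// We only reach this path when (dest - source) < length (unsigned), i.e. the
// destination begins inside the source buffer, so copying backwards (high
// addresses first) is what preserves memmove/bcopy overlap semantics.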

LReverse:
        addl    %ecx,%esi               // point to end of strings
        addl    %ecx,%edi
        cmpl    $(kShort),%ecx          // long enough to bother with SSE?
        ja      LReverseNotShort        // yes

// Handle reverse short copies.
//      ecx = length
//      esi = one byte past end of source
//      edi = one byte past end of dest

LReverseShort:
        movl    %ecx,%edx               // copy length
        shrl    $2,%ecx                 // #words
        jz      3f
1:
        subl    $4,%esi
        movl    (%esi),%eax
        subl    $4,%edi
        movl    %eax,(%edi)
        dec     %ecx
        jnz     1b
3:
        andl    $3,%edx                 // bytes?
        jz      5f
4:
        dec     %esi
        movb    (%esi),%al
        dec     %edi
        movb    %al,(%edi)
        dec     %edx
        jnz     4b
5:
        movl    8(%ebp),%eax            // get return value (dst ptr) for memcpy/memmove
        popl    %edi
        popl    %esi
        popl    %ebp
        ret

// Handle a reverse move long enough to justify using SSE.
//      ecx = length
//      esi = one byte past end of source
//      edi = one byte past end of dest

LReverseNotShort:
        movl    %edi,%edx               // copy destination
        andl    $15,%edx                // get #bytes to align destination
        je      LReverseDestAligned     // already aligned
        subl    %edx,%ecx               // adjust length
1:                                      // loop copying 1..15 bytes
        dec     %esi
        movb    (%esi),%al
        dec     %edi
        movb    %al,(%edi)
        dec     %edx
        jnz     1b

// Destination is now aligned.  Prepare for reverse loops.

LReverseDestAligned:
        movl    %ecx,%edx               // copy length
        andl    $63,%ecx                // get remaining bytes for LReverseShort
        andl    $-64,%edx               // get number of bytes we will copy in inner loop
        subl    %edx,%esi               // point to endpoint of copy
        subl    %edx,%edi
        testl   $15,%esi                // is source aligned too?
        jnz     LReverseUnalignedLoop   // no
        jmp     LReverseAlignedLoop     // use aligned loop

        .align  4,0x90                  // 16-byte align inner loops
LReverseAlignedLoop:                    // loop over 64-byte chunks
        movdqa  -16(%esi,%edx),%xmm0
        movdqa  -32(%esi,%edx),%xmm1
        movdqa  -48(%esi,%edx),%xmm2
        movdqa  -64(%esi,%edx),%xmm3

        movdqa  %xmm0,-16(%edi,%edx)
        movdqa  %xmm1,-32(%edi,%edx)
        movdqa  %xmm2,-48(%edi,%edx)
        movdqa  %xmm3,-64(%edi,%edx)

        subl    $64,%edx
        jne     LReverseAlignedLoop

        jmp     LReverseShort           // copy remaining 0..63 bytes and done


// Reverse, unaligned loop.  LDDQU==MOVDQU on these machines.

        .align  4,0x90                  // 16-byte align inner loops
LReverseUnalignedLoop:                  // loop over 64-byte chunks
        movdqu  -16(%esi,%edx),%xmm0
        movdqu  -32(%esi,%edx),%xmm1
        movdqu  -48(%esi,%edx),%xmm2
        movdqu  -64(%esi,%edx),%xmm3

        movdqa  %xmm0,-16(%edi,%edx)
        movdqa  %xmm1,-32(%edi,%edx)
        movdqa  %xmm2,-48(%edi,%edx)
        movdqa  %xmm3,-64(%edi,%edx)

        subl    $64,%edx
        jne     LReverseUnalignedLoop

        jmp     LReverseShort           // copy remaining 0..63 bytes and done

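// Register these variants with the platfunc dispatcher.  As used here, the
// last two arguments appear to be the required and forbidden capability bits:
// this code is selected on CPUs that have SSE2 and 64-byte cache lines but
// lack Supplemental SSE3 (presumably covered by a separate tuned variant).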
PLATFUNC_DESCRIPTOR(bcopy,sse2,kHasSSE2|kCache64,kHasSupplementalSSE3)
PLATFUNC_DESCRIPTOR(memcpy,sse2,kHasSSE2|kCache64,kHasSupplementalSSE3)
PLATFUNC_DESCRIPTOR(memmove,sse2,kHasSSE2|kCache64,kHasSupplementalSSE3)