/*
 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>

/*
 * The bcopy/memcpy loops, tuned for Pentium-M class processors with SSE3
 * and 64-byte cache lines, such as Core and Core 2.
 *
 * The following #defines are tightly coupled to the u-architecture:
 */

#define kShort          80              // too short to bother with SSE (must be >=80)
#define kVeryLong       (500*1024)      // large enough for non-temporal stores (must be >= 8192)
#define kBigChunk       (256*1024)      // outer loop chunk size for kVeryLong sized operands
#define kFastUCode      (16*1024)       // cutoff for microcode fastpath for "rep/movsl"

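// As a rough orientation, the dispatch below corresponds to this C-style sketch
// (illustrative only; the helper names are invented, not part of the commpage):
//
//      void copy(void *dst, const void *src, size_t len) {
//          if ((uintptr_t)dst - (uintptr_t)src < len)      // dest overlaps tail of source
//              copy_reverse(dst, src, len);                // high-to-low, see LReverse
//          else if (len <= kShort)
//              copy_short_forward(dst, src, len);          // 4-byte/1-byte loops, Lshort
//          else if (len < kVeryLong)
//              copy_sse_forward(dst, src, len);            // 64-byte SSE chunks (or rep/movsl)
//          else
//              copy_nontemporal(dst, src, len);            // prefetch + movntdq, LVeryLong
//      }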

// void bcopy(const void *src, void *dst, size_t len);

        .text
        .align  5, 0x90
Lbcopy_sse3:                            // void bcopy(const void *src, void *dst, size_t len)
        pushl   %ebp                    // set up a frame for backtraces
        movl    %esp,%ebp
        pushl   %esi
        pushl   %edi
        movl    8(%ebp),%esi            // get source ptr
        movl    12(%ebp),%edi           // get dest ptr
        jmp     Ljoin

//
// void *memcpy(void *dst, const void *src, size_t len);
// void *memmove(void *dst, const void *src, size_t len);
//
// NB: These need to be 32 bytes from bcopy():
//

        .align  5, 0x90
Lmemcpy:                                // void *memcpy(void *dst, const void *src, size_t len)
Lmemmove:                               // void *memmove(void *dst, const void *src, size_t len)
        pushl   %ebp                    // set up a frame for backtraces
        movl    %esp,%ebp
        pushl   %esi
        pushl   %edi
        movl    8(%ebp),%edi            // get dest ptr
        movl    12(%ebp),%esi           // get source ptr

Ljoin:                                  // here from bcopy() with esi and edi loaded
        movl    16(%ebp),%ecx           // get length
        movl    %edi,%edx
        subl    %esi,%edx               // (dest - source)
        cmpl    %ecx,%edx               // must move in reverse if (dest - source) < length
        jb      LReverseIsland
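// Note that the unsigned compare just above also routes non-overlapping dst < src
// cases to the forward path, since (dest - source) then wraps to a very large value.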
Lrejoin:                                // here from very-long-operand copies
        cmpl    $(kShort),%ecx          // long enough to bother with SSE?
        ja      LNotShort               // yes

// Handle short forward copies.  As the most common case, this is the fall-through path.
//      ecx = length (<= kShort)
//      esi = source ptr
//      edi = dest ptr

Lshort:
        movl    %ecx,%edx               // copy length
        shrl    $2,%ecx                 // get #doublewords
        jz      LLeftovers
2:                                      // loop copying doublewords
        movl    (%esi),%eax
        addl    $4,%esi
        movl    %eax,(%edi)
        addl    $4,%edi
        dec     %ecx
        jnz     2b
LLeftovers:                             // handle leftover bytes (0..3) in last word
        andl    $3,%edx                 // any leftover bytes?
        jz      5f
4:                                      // loop copying bytes
        movb    (%esi),%al
        inc     %esi
        movb    %al,(%edi)
        inc     %edi
        dec     %edx
        jnz     4b
5:
        movl    8(%ebp),%eax            // get return value (dst ptr) for memcpy/memmove
        popl    %edi
        popl    %esi
        popl    %ebp
        ret


LReverseIsland:                         // keep the "jb" above a short branch...
        jmp     LReverse                // ...because reverse moves are uncommon


// Handle forward moves that are long enough to justify use of SSE3.
// First, 16-byte align the destination.
//      ecx = length (> kShort)
//      esi = source ptr
//      edi = dest ptr

LNotShort:
        cmpl    $(kVeryLong),%ecx       // long enough to justify heavyweight loops?
        movl    %edi,%edx               // copy destination
        jae     LVeryLong               // use very-long-operand path
        negl    %edx
        andl    $15,%edx                // get #bytes to align destination
        jz      LDestAligned            // already aligned
        subl    %edx,%ecx               // decrement length
1:                                      // loop copying 1..15 bytes
        movb    (%esi),%al
        inc     %esi
        movb    %al,(%edi)
        inc     %edi
        dec     %edx
        jnz     1b

// Destination is now aligned.  Prepare for forward loops over 64-byte chunks.
// Since kShort>=80 and we've moved at most 15 bytes already, there is at least one chunk.
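// The chunk loops below run %edx from -(chunk bytes) up toward zero, with %esi and
// %edi already advanced past the chunk, so a single "addl $64 / jnz" both steps the
// index and tests for completion.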

LDestAligned:
        movl    %ecx,%edx               // copy length
        movl    %ecx,%eax               // twice
        andl    $63,%ecx                // get remaining bytes for Lshort
        andl    $-64,%edx               // get number of bytes we will copy in inner loop
        addl    %edx,%esi               // point to 1st byte not copied
        addl    %edx,%edi
        negl    %edx                    // now generate offset to 1st byte to be copied
        testl   $15,%esi                // is source aligned too?
        jnz     LUnalignedLoop          // no

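// Source and dest are now both 16-byte aligned.  If the operand is at least
// kFastUCode bytes, let the microcode-optimized "rep/movsl" do the work;
// otherwise fall into the SSE loop below.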
        cmpl    $(kFastUCode),%eax      // long enough for the fastpath in microcode?
        jb      LAlignedLoop            // no, use SSE
        cld                             // we'll move forward
        movl    %eax,%ecx               // copy length again
        shrl    $2,%ecx                 // compute #words to move
        addl    %edx,%esi               // restore ptrs to 1st byte of source and dest
        addl    %edx,%edi
        rep                             // the u-code will optimize this
        movsl
        movl    %eax,%edx               // original length
        jmp     LLeftovers              // handle 0..3 leftover bytes


// Forward aligned loop for medium length operands (kShort < n < kVeryLong).

        .align  4,0x90                  // 16-byte align inner loops
LAlignedLoop:                           // loop over 64-byte chunks
        movdqa  (%esi,%edx),%xmm0
        movdqa  16(%esi,%edx),%xmm1
        movdqa  32(%esi,%edx),%xmm2
        movdqa  48(%esi,%edx),%xmm3

        movdqa  %xmm0,(%edi,%edx)
        movdqa  %xmm1,16(%edi,%edx)
        movdqa  %xmm2,32(%edi,%edx)
        movdqa  %xmm3,48(%edi,%edx)

        addl    $64,%edx
        jnz     LAlignedLoop

        jmp     Lshort                  // copy remaining 0..63 bytes and done


// Forward unaligned loop for medium length operands (kShort < n < kVeryLong).
// Note that LDDQU==MOVDQU on these machines, ie we don't care when we cross
// source cache lines.

        .align  4,0x90                  // 16-byte align inner loops
LUnalignedLoop:                         // loop over 64-byte chunks
        movdqu  (%esi,%edx),%xmm0       // the loads are unaligned
        movdqu  16(%esi,%edx),%xmm1
        movdqu  32(%esi,%edx),%xmm2
        movdqu  48(%esi,%edx),%xmm3

        movdqa  %xmm0,(%edi,%edx)       // we can use aligned stores
        movdqa  %xmm1,16(%edi,%edx)
        movdqa  %xmm2,32(%edi,%edx)
        movdqa  %xmm3,48(%edi,%edx)

        addl    $64,%edx
        jnz     LUnalignedLoop

        jmp     Lshort                  // copy remaining 0..63 bytes and done


// Very long forward moves.  These are at least several pages, so we loop over big
// chunks of memory (kBigChunk in size.)  We first prefetch the chunk, and then copy
// it using non-temporal stores.  Hopefully all the reads occur in the prefetch loop,
// so the copy loop reads from L2 and writes directly to memory (with write combining.)
// This minimizes bus turnaround and maintains good DRAM page locality.
// Note that for this scheme to work, kVeryLong must be a large fraction of L2 cache
// size.  Otherwise, it is counter-productive to bypass L2 on the stores.
//      ecx = length (>= kVeryLong bytes)
//      edi = dest (aligned)
//      esi = source

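// Per chunk, the logic amounts to this C-style sketch (illustrative only; the
// helper names are invented):
//
//      while (len >= 4096) {
//          size_t chunk = min(len, kBigChunk) & ~(size_t)4095;  // whole pages only
//          touch_each_cache_line(src, chunk);          // pull the chunk into cache
//          copy_with_movntdq(dst, src, chunk);         // stream it to memory
//          src += chunk;  dst += chunk;  len -= chunk;
//      }
//      sfence();                                       // order the non-temporal stores
//      copy_forward(dst, src, len);                    // 0..4095 bytes, back at Lrejoin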
LVeryLong:
        pushl   %ebx                    // we'll need to use this
        movl    %edi,%ebx               // copy dest ptr
        negl    %ebx
        andl    $63,%ebx                // get #bytes to cache line align destination
        jz      LBigChunkLoop           // already aligned

// Cache line align destination, so the non-temporal stores in the copy loops work right.

        pushl   %ecx                    // save total length remaining
        pushl   %ebx                    // arg3 - #bytes to align destination (1..63)
        pushl   %esi                    // arg2 - source
        pushl   %edi                    // arg1 - dest
        call    Lmemcpy                 // align the destination
        movl    12(%esp),%ecx           // recover total length
        addl    $16,%esp
        addl    %ebx,%esi               // adjust ptrs and lengths past copy
        addl    %ebx,%edi
        subl    %ebx,%ecx

// Loop over big chunks.
//      ecx = length remaining (>= 4096)
//      edi = dest (64-byte aligned)
//      esi = source (may be unaligned)

LBigChunkLoop:
        movl    $(kBigChunk),%edx       // assume we can do a full chunk
        cmpl    %edx,%ecx               // do we have a full chunk left to do?
        cmovbl  %ecx,%edx               // if not, only move what we have left
        andl    $-4096,%edx             // we work in page multiples
        xor     %eax,%eax               // initialize chunk offset
        jmp     LTouchLoop

// Because the source may be unaligned, we use byte loads to touch.
//      ecx = length remaining (including this chunk)
//      edi = ptr to start of dest chunk
//      esi = ptr to start of source chunk
//      edx = chunk length (multiples of pages)
//      ebx = scratch reg used to read a byte of each cache line
//      eax = chunk offset
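// Each pass of the loop touches 16 of the 64 lines in the current page; %eax
// advances by 128 per pass, so four passes cover every line before we move on
// to the next page.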

        .align  4,0x90                  // 16-byte align inner loops
LTouchLoop:
        movzb   (%esi,%eax),%ebx        // touch line 0, 2, 4, or 6 of page
        movzb   1*64(%esi,%eax),%ebx    // touch line 1, 3, 5, or 7
        movzb   8*64(%esi,%eax),%ebx    // touch line 8, 10, 12, or 14
        movzb   9*64(%esi,%eax),%ebx    // etc

        movzb   16*64(%esi,%eax),%ebx
        movzb   17*64(%esi,%eax),%ebx
        movzb   24*64(%esi,%eax),%ebx
        movzb   25*64(%esi,%eax),%ebx

        movzb   32*64(%esi,%eax),%ebx
        movzb   33*64(%esi,%eax),%ebx
        movzb   40*64(%esi,%eax),%ebx
        movzb   41*64(%esi,%eax),%ebx

        movzb   48*64(%esi,%eax),%ebx
        movzb   49*64(%esi,%eax),%ebx
        movzb   56*64(%esi,%eax),%ebx
        movzb   57*64(%esi,%eax),%ebx

        subl    $-128,%eax              // next slice of page (adding 128 w 8-bit immediate)
        testl   $512,%eax               // done with this page?
        jz      LTouchLoop              // no, next of four slices
        addl    $(4096-512),%eax        // move on to next page
        cmpl    %eax,%edx               // done with this chunk?
        jnz     LTouchLoop              // no, do next page

// The chunk has been pre-fetched, now copy it using non-temporal stores.
// There are two copy loops, depending on whether the source is 16-byte aligned
// or not.

        addl    %edx,%esi               // increment ptrs by chunk length
        addl    %edx,%edi
        subl    %edx,%ecx               // adjust remaining length
        negl    %edx                    // prepare loop index (counts up to 0)
        testl   $15,%esi                // is source 16-byte aligned?
        jnz     LVeryLongUnaligned      // source is not aligned
        jmp     LVeryLongAligned

        .align  4,0x90                  // 16-byte align inner loops
LVeryLongAligned:                       // aligned loop over 128-bytes
        movdqa  (%esi,%edx),%xmm0
        movdqa  16(%esi,%edx),%xmm1
        movdqa  32(%esi,%edx),%xmm2
        movdqa  48(%esi,%edx),%xmm3
        movdqa  64(%esi,%edx),%xmm4
        movdqa  80(%esi,%edx),%xmm5
        movdqa  96(%esi,%edx),%xmm6
        movdqa  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jnz     LVeryLongAligned
        jmp     LVeryLongChunkEnd

        .align  4,0x90                  // 16-byte align inner loops
LVeryLongUnaligned:                     // unaligned loop over 128-bytes
        movdqu  (%esi,%edx),%xmm0
        movdqu  16(%esi,%edx),%xmm1
        movdqu  32(%esi,%edx),%xmm2
        movdqu  48(%esi,%edx),%xmm3
        movdqu  64(%esi,%edx),%xmm4
        movdqu  80(%esi,%edx),%xmm5
        movdqu  96(%esi,%edx),%xmm6
        movdqu  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jnz     LVeryLongUnaligned

LVeryLongChunkEnd:
        cmpl    $4096,%ecx              // at least another page to go?
        jae     LBigChunkLoop           // yes

        sfence                          // required by non-temporal stores
        popl    %ebx
        jmp     Lrejoin                 // handle remaining (0..4095) bytes


// Reverse moves.
//      ecx = length
//      esi = source ptr
//      edi = dest ptr

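// We only get here when the destination overlaps the source at a higher address,
// so the copy must run from high addresses down to low.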
LReverse:
        addl    %ecx,%esi               // point to end of strings
        addl    %ecx,%edi
        cmpl    $(kShort),%ecx          // long enough to bother with SSE?
        ja      LReverseNotShort        // yes

// Handle reverse short copies.
//      ecx = length
//      esi = one byte past end of source
//      edi = one byte past end of dest

LReverseShort:
        movl    %ecx,%edx               // copy length
        shrl    $2,%ecx                 // #words
        jz      3f
1:
        subl    $4,%esi
        movl    (%esi),%eax
        subl    $4,%edi
        movl    %eax,(%edi)
        dec     %ecx
        jnz     1b
3:
        andl    $3,%edx                 // bytes?
        jz      5f
4:
        dec     %esi
        movb    (%esi),%al
        dec     %edi
        movb    %al,(%edi)
        dec     %edx
        jnz     4b
5:
        movl    8(%ebp),%eax            // get return value (dst ptr) for memcpy/memmove
        popl    %edi
        popl    %esi
        popl    %ebp
        ret

// Handle a reverse move long enough to justify using SSE.
//      ecx = length
//      esi = one byte past end of source
//      edi = one byte past end of dest

LReverseNotShort:
        movl    %edi,%edx               // copy destination
        andl    $15,%edx                // get #bytes to align destination
        je      LReverseDestAligned     // already aligned
        subl    %edx,%ecx               // adjust length
1:                                      // loop copying 1..15 bytes
        dec     %esi
        movb    (%esi),%al
        dec     %edi
        movb    %al,(%edi)
        dec     %edx
        jnz     1b

// Destination is now aligned.  Prepare for reverse loops.

LReverseDestAligned:
        movl    %ecx,%edx               // copy length
        andl    $63,%ecx                // get remaining bytes for LReverseShort
        andl    $-64,%edx               // get number of bytes we will copy in inner loop
        subl    %edx,%esi               // point to endpoint of copy
        subl    %edx,%edi
        testl   $15,%esi                // is source aligned too?
        jnz     LReverseUnalignedLoop   // no
        jmp     LReverseAlignedLoop     // use aligned loop

        .align  4,0x90                  // 16-byte align inner loops
LReverseAlignedLoop:                    // loop over 64-byte chunks
        movdqa  -16(%esi,%edx),%xmm0
        movdqa  -32(%esi,%edx),%xmm1
        movdqa  -48(%esi,%edx),%xmm2
        movdqa  -64(%esi,%edx),%xmm3

        movdqa  %xmm0,-16(%edi,%edx)
        movdqa  %xmm1,-32(%edi,%edx)
        movdqa  %xmm2,-48(%edi,%edx)
        movdqa  %xmm3,-64(%edi,%edx)

        subl    $64,%edx
        jne     LReverseAlignedLoop

        jmp     LReverseShort           // copy remaining 0..63 bytes and done


// Reverse, unaligned loop.  LDDQU==MOVDQU on these machines.

        .align  4,0x90                  // 16-byte align inner loops
LReverseUnalignedLoop:                  // loop over 64-byte chunks
        movdqu  -16(%esi,%edx),%xmm0
        movdqu  -32(%esi,%edx),%xmm1
        movdqu  -48(%esi,%edx),%xmm2
        movdqu  -64(%esi,%edx),%xmm3

        movdqa  %xmm0,-16(%edi,%edx)
        movdqa  %xmm1,-32(%edi,%edx)
        movdqa  %xmm2,-48(%edi,%edx)
        movdqa  %xmm3,-64(%edi,%edx)

        subl    $64,%edx
        jne     LReverseUnalignedLoop

        jmp     LReverseShort           // copy remaining 0..63 bytes and done


        COMMPAGE_DESCRIPTOR(bcopy_sse3,_COMM_PAGE_BCOPY,kHasSSE2+kCache64,kHasSupplementalSSE3)