/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/cpu_capabilities.h>
#include <platfunc.h>


/*
 * The bcopy/memcpy loops for very long operands, tuned for Pentium-M
 * class processors with Supplemental SSE3 and 64-byte cache lines.
 *
 * The following #defines are tightly coupled to the u-architecture:
 */

#define kBigChunk   (256*1024)          // outer loop chunk size for kVeryLong sized operands
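// (kBigChunk is 64 pages of 4096 bytes; the chunk loop below rounds its work unit
// down to a whole number of pages, so each outer iteration handles 1..64 pages.)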

// Very long forward moves.  These are at least several pages, so we loop over big
// chunks of memory (kBigChunk in size).  We first prefetch the chunk, and then copy
// it using non-temporal stores.  Hopefully all the reads occur in the prefetch loop,
// so the copy loop reads from L2 and writes directly to memory (with write combining).
// This minimizes bus turnaround and maintains good DRAM page locality.
// Note that for this scheme to work, kVeryLong must be a large fraction of L2 cache
// size.  Otherwise, it is counter-productive to bypass L2 on the stores.
//
// We are called from the platfunc bcopy loops when they encounter very long
// operands, with the standard ABI:
//
//      void longcopy(void *dest, const void *sou, size_t len)

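// For orientation, the code below is roughly the following C-like sketch (comments
// only, not part of the build; the helper names touch_one_byte_per_cache_line and
// copy_with_non_temporal_stores are illustrative, not real routines):
//
//      align = (-(uintptr_t)dest) & 63;
//      if (align) memcpy(dest, sou, align);            // cache-line align dest
//      dest += align;  sou += align;  len -= align;
//      while (len >= 4096) {
//          chunk = min(len, kBigChunk) & ~4095;        // whole pages only
//          touch_one_byte_per_cache_line(sou, chunk);  // pull source into the caches
//          copy_with_non_temporal_stores(dest, sou, chunk);
//          dest += chunk;  sou += chunk;  len -= chunk;
//      }
//      sfence();
//      if (len) memcpy(dest, sou, len);                // trailing 0..4095 bytes
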
        .text
        .private_extern _longcopy

        .align  5
_longcopy:
        pushl   %ebp                    // set up a frame for backtraces
        movl    %esp,%ebp
        pushl   %esi
        pushl   %edi
        pushl   %ebx                    // we'll need to use this too
        movl    8(%ebp),%edi            // get dest ptr
        movl    12(%ebp),%esi           // get source ptr
        movl    16(%ebp),%ecx           // get length
        movl    %edi,%ebx               // copy dest ptr
        negl    %ebx
        andl    $63,%ebx                // get #bytes to cache line align destination
        jz      LBigChunkLoop           // already aligned

// Cache line align destination, so non-temporal stores in copy loops work right.
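// (With a 64-byte-aligned destination, the movntdq loops below always write whole
// cache lines, which lets the write-combining buffers drain as full lines; the
// alignment prologue is at most 63 bytes, so an ordinary memcpy handles it.)
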
        pushl   %ebx                    // arg3 - #bytes to align destination (1..63)
        pushl   %esi                    // arg2 - source
        pushl   %edi                    // arg1 - dest
        call    _memcpy                 // align the destination
        addl    $12,%esp
        movl    8(%ebp),%edi            // recover dest ptr
        movl    12(%ebp),%esi           // recover source ptr
        movl    16(%ebp),%ecx           // recover length
        addl    %ebx,%esi               // adjust ptrs and lengths past copy
        addl    %ebx,%edi
        subl    %ebx,%ecx

// Loop over big chunks.
//  ecx = length remaining (>= 4096)
//  edi = dest (64-byte aligned)
//  esi = source (may be unaligned)

LBigChunkLoop:
        movl    $(kBigChunk),%edx       // assume we can do a full chunk
        cmpl    %edx,%ecx               // do we have a full chunk left to do?
        cmovbl  %ecx,%edx               // if not, only move what we have left
        andl    $-4096,%edx             // we work in page multiples
        xorl    %eax,%eax               // initialize chunk offset
        jmp     LTouchLoop

// Touch in the next chunk.  We try to keep the prefetch unit in "kick-start" mode,
// by touching two adjacent cache lines every 8 lines of each page, in four slices.
// Because the source may be unaligned, we use byte loads to touch.
//  ecx = length remaining (including this chunk)
//  edi = ptr to start of dest chunk
//  esi = ptr to start of source chunk
//  edx = chunk length (multiples of pages)
//  ebx = scratch reg used to read a byte of each cache line
//  eax = chunk offset
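// (Arithmetic: each pass below reads one byte from 16 different cache lines, in
// pairs of adjacent lines spaced 8 lines apart; eax advances by 128 per pass, so
// four passes cover all 64 lines of a 4KB page before eax jumps to the next page.)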

        .align  4,0x90                  // 16-byte align inner loops
LTouchLoop:
        movzb   (%esi,%eax),%ebx        // touch line 0, 2, 4, or 6 of page
        movzb   1*64(%esi,%eax),%ebx    // touch line 1, 3, 5, or 7
        movzb   8*64(%esi,%eax),%ebx    // touch line 8, 10, 12, or 14
        movzb   9*64(%esi,%eax),%ebx    // etc

        movzb   16*64(%esi,%eax),%ebx
        movzb   17*64(%esi,%eax),%ebx
        movzb   24*64(%esi,%eax),%ebx
        movzb   25*64(%esi,%eax),%ebx

        movzb   32*64(%esi,%eax),%ebx
        movzb   33*64(%esi,%eax),%ebx
        movzb   40*64(%esi,%eax),%ebx
        movzb   41*64(%esi,%eax),%ebx

        movzb   48*64(%esi,%eax),%ebx
        movzb   49*64(%esi,%eax),%ebx
        movzb   56*64(%esi,%eax),%ebx
        movzb   57*64(%esi,%eax),%ebx

        subl    $-128,%eax              // next slice of page (adding 128 w 8-bit immediate)
        testl   $512,%eax               // done with this page?
        jz      LTouchLoop              // no, next of four slices
        addl    $(4096-512),%eax        // move on to next page
        cmpl    %eax,%edx               // done with this chunk?
        jnz     LTouchLoop              // no, do next page

// The chunk has been pre-fetched; now copy it using non-temporal stores.
// There are two copy loops, depending on whether the source is 16-byte aligned
// or not.

        addl    %edx,%esi               // increment ptrs by chunk length
        addl    %edx,%edi
        subl    %edx,%ecx               // adjust remaining length
        negl    %edx                    // prepare loop index (counts up to 0)
        testl   $15,%esi                // is source 16-byte aligned?
        jnz     LVeryLongUnaligned      // source is not aligned
        jmp     LVeryLongAligned

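// In both copy loops below, edx holds the negated count of bytes left in the
// chunk, so (%esi,%edx) and (%edi,%edx) address the next 128-byte block; the
// "subl $-128,%edx" at the bottom of each loop counts edx up toward zero.
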
        .align  4,0x90                  // 16-byte align inner loops
LVeryLongAligned:                       // aligned loop over 128 bytes
        movdqa  (%esi,%edx),%xmm0
        movdqa  16(%esi,%edx),%xmm1
        movdqa  32(%esi,%edx),%xmm2
        movdqa  48(%esi,%edx),%xmm3
        movdqa  64(%esi,%edx),%xmm4
        movdqa  80(%esi,%edx),%xmm5
        movdqa  96(%esi,%edx),%xmm6
        movdqa  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jnz     LVeryLongAligned
        jmp     LVeryLongChunkEnd

        .align  4,0x90                  // 16-byte align inner loops
LVeryLongUnaligned:                     // unaligned loop over 128 bytes
        movdqu  (%esi,%edx),%xmm0
        movdqu  16(%esi,%edx),%xmm1
        movdqu  32(%esi,%edx),%xmm2
        movdqu  48(%esi,%edx),%xmm3
        movdqu  64(%esi,%edx),%xmm4
        movdqu  80(%esi,%edx),%xmm5
        movdqu  96(%esi,%edx),%xmm6
        movdqu  112(%esi,%edx),%xmm7

        movntdq %xmm0,(%edi,%edx)
        movntdq %xmm1,16(%edi,%edx)
        movntdq %xmm2,32(%edi,%edx)
        movntdq %xmm3,48(%edi,%edx)
        movntdq %xmm4,64(%edi,%edx)
        movntdq %xmm5,80(%edi,%edx)
        movntdq %xmm6,96(%edi,%edx)
        movntdq %xmm7,112(%edi,%edx)

        subl    $-128,%edx              // add 128 with an 8-bit immediate
        jnz     LVeryLongUnaligned

LVeryLongChunkEnd:
        cmpl    $4096,%ecx              // at least another page to go?
        jae     LBigChunkLoop           // yes

// Done.  Call memcpy() again to handle the 0..4095 bytes at the end.

        sfence                          // required by the weakly-ordered non-temporal stores
        testl   %ecx,%ecx               // anything left to copy?
        jz      1f
        pushl   %ecx                    // arg3 - remaining length (1..4095)
        pushl   %esi                    // arg2 - source
        pushl   %edi                    // arg1 - dest
        call    _memcpy                 // copy the trailing bytes
        addl    $12,%esp                // pop off arguments
1:
        popl    %ebx
        popl    %edi
        popl    %esi
        popl    %ebp
        ret