/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>


/*
 * The bcopy/memcpy loops for very long operands, tuned for 64-bit
 * Pentium-M class processors with SSE4 and 64-byte cache lines.
 * This is the 64-bit version.
 *
 * The following #defines are tightly coupled to the u-architecture:
 */

#define kBigChunk   (256*1024)          // outer loop chunk size for kVeryLong sized operands


// Very long forward moves. These are at least several pages, so we loop over big
// chunks of memory (kBigChunk in size.) We first prefetch the chunk, and then copy
// it using non-temporal stores. Hopefully all the reads occur in the prefetch loop,
// so the copy loop reads from L2 and writes directly to memory (with write combining.)
// This minimizes bus turnaround and maintains good DRAM page locality.
// Note that for this scheme to work, kVeryLong must be a large fraction of L2 cache
// size. Otherwise, it is counter-productive to bypass L2 on the stores.
//
// We are called from the commpage bcopy loops when they encounter very long
// operands, with the standard ABI:
//      rdi = dest ptr
//      rsi = source ptr
//      rdx = length (>= 8kb, probably much bigger)

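// For orientation only (an editor's rough C sketch of the scheme described above;
// commpage_memcpy(), touch_every_cache_line(), copy_with_non_temporal_stores(), and
// min() are illustrative placeholders, not real symbols):
//
//      align = (-(uintptr_t)dest) & 63;                          // bytes to cache-line align dest
//      commpage_memcpy(dest, source, align);                     // advances dest/source
//      len -= align;
//      do {
//          size_t chunk = min(len, kBigChunk) & ~(size_t)4095;   // whole pages only
//          touch_every_cache_line(source, chunk);                // prefetch via byte loads (LTouchLoop)
//          copy_with_non_temporal_stores(dest, source, chunk);   // movdqa/movdqu + movntdq
//          dest += chunk; source += chunk; len -= chunk;
//      } while (len >= 4096);
//      sfence();                                                 // order the movntdq stores
//      commpage_memcpy(dest, source, len);                       // trailing 0..4095 bytes
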
        .text
        .code64
        .align  5, 0x90
Llongcopy_sse4_64:                      // void longcopy(void *dest, const void *source, size_t len)
        pushq   %rbp                    // set up a frame for backtraces
        movq    %rsp,%rbp
        movl    %edi,%eax               // copy dest ptr
        negl    %eax
        andl    $63,%eax                // get #bytes to cache line align destination
        jz      LBigChunkLoop           // already aligned

// Cache line align the destination, so the non-temporal stores in the copy loops work right.
// The call back into the commpage memcpy returns with the source and dest ptrs properly updated.

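// (Editor's note, illustrative only: the byte count computed above is (-dest) & 63, which
// equals (64 - (dest % 64)) % 64, so after the memcpy call below the destination is
// 64-byte aligned and rdx holds the length still to be copied.)
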
        subq    %rax,%rdx               // get length remaining after dest is aligned
        pushq   %rdx                    // save length remaining
        movl    %eax,%edx               // #bytes to copy to align destination
        movq    $_COMM_PAGE_32_TO_64(_COMM_PAGE_MEMCPY),%rax
        call    *%rax
        popq    %rdx                    // recover adjusted length

// Loop over big chunks.
//      rdx = length remaining (>= 4096)
//      rdi = dest (64-byte aligned)
//      rsi = source (may be unaligned)

LBigChunkLoop:
        movl    $(kBigChunk),%r8d       // assume we can do a full chunk
        cmpq    %r8,%rdx                // do we have a full chunk left to do?
        cmovbl  %edx,%r8d               // if not, only move what we have left
        andl    $-4096,%r8d             // we work in page multiples
        xorl    %eax,%eax               // initialize chunk offset
        jmp     LTouchLoop

// Touch in the next chunk. We try to keep the prefetch unit in "kick-start" mode,
// by touching two adjacent cache lines every 8 lines of each page, in four slices.
// Because the source may be unaligned, we use byte loads to touch.
//      rdx = length remaining (including this chunk)
//      rdi = ptr to start of dest chunk
//      rsi = ptr to start of source chunk
//      r8d = chunk length (multiples of pages, less than 2**32)
//      ecx = scratch reg used to read a byte of each cache line
//      eax = chunk offset

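// Editor's sketch (illustrative only, not part of the original source): unrolled, the touch
// loop below visits each 4 KB page of the chunk in four 128-byte slices, byte-reading two
// adjacent cache lines out of every group of eight, roughly as if:
//
//      for (off = 0; off != chunk; off += 4096)                  // each page of the chunk
//          for (slice = 0; slice < 512; slice += 128)            // four slices per page
//              for (line = 0; line < 64; line += 8) {            // eight groups of eight lines
//                  touch(source + off + slice + line*64);        // touch() = discarded byte load
//                  touch(source + off + slice + (line + 1)*64);
//              }
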
        .align  4,0x90                  // 16-byte align inner loops
LTouchLoop:
        movzb   (%rsi,%rax),%ecx        // touch line 0, 2, 4, or 6 of page
        movzb   1*64(%rsi,%rax),%ecx    // touch line 1, 3, 5, or 7
        movzb   8*64(%rsi,%rax),%ecx    // touch line 8, 10, 12, or 14
        movzb   9*64(%rsi,%rax),%ecx    // etc

        movzb   16*64(%rsi,%rax),%ecx
        movzb   17*64(%rsi,%rax),%ecx
        movzb   24*64(%rsi,%rax),%ecx
        movzb   25*64(%rsi,%rax),%ecx

        movzb   32*64(%rsi,%rax),%ecx
        movzb   33*64(%rsi,%rax),%ecx
        movzb   40*64(%rsi,%rax),%ecx
        movzb   41*64(%rsi,%rax),%ecx

        movzb   48*64(%rsi,%rax),%ecx
        movzb   49*64(%rsi,%rax),%ecx
        movzb   56*64(%rsi,%rax),%ecx
        movzb   57*64(%rsi,%rax),%ecx

        subl    $-128,%eax              // next slice of page (adding 128 w 8-bit immediate)
        testl   $512,%eax               // done with this page?
        jz      LTouchLoop              // no, next of four slices
        addl    $(4096-512),%eax        // move on to next page
        cmpl    %eax,%r8d               // done with this chunk?
        jnz     LTouchLoop              // no, do next page

// The chunk has been pre-fetched, now copy it using non-temporal stores.
// There are two copy loops, depending on whether the source is 16-byte aligned
// or not.

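// Editor's sketch (illustrative only, not part of the original source): each 16-byte block
// handled by the loops below is roughly the SSE2-intrinsics equivalent of
//
//      __m128i x = aligned ? _mm_load_si128((const __m128i *)(source + i))     // movdqa
//                          : _mm_loadu_si128((const __m128i *)(source + i));   // movdqu
//      _mm_stream_si128((__m128i *)(dest + i), x);                             // movntdq
//
// repeated for eight blocks (128 bytes) per pass, with i counting up from -chunk to 0.
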
        movl    %r8d,%ecx               // copy chunk size to a reg that doesn't use REX prefix
        addq    %rcx,%rsi               // increment ptrs by chunk length
        addq    %rcx,%rdi
        subq    %rcx,%rdx               // adjust remaining length
        negq    %rcx                    // prepare loop index (counts up to 0)
        testl   $15,%esi                // is source 16-byte aligned?
        jnz     LVeryLongUnaligned      // no
        jmp     LVeryLongAligned

        .align  4,0x90                  // 16-byte align inner loops
LVeryLongAligned:                       // aligned loop over 128 bytes
        movdqa  (%rsi,%rcx),%xmm0
        movdqa  16(%rsi,%rcx),%xmm1
        movdqa  32(%rsi,%rcx),%xmm2
        movdqa  48(%rsi,%rcx),%xmm3
        movdqa  64(%rsi,%rcx),%xmm4
        movdqa  80(%rsi,%rcx),%xmm5
        movdqa  96(%rsi,%rcx),%xmm6
        movdqa  112(%rsi,%rcx),%xmm7

        movntdq %xmm0,(%rdi,%rcx)
        movntdq %xmm1,16(%rdi,%rcx)
        movntdq %xmm2,32(%rdi,%rcx)
        movntdq %xmm3,48(%rdi,%rcx)
        movntdq %xmm4,64(%rdi,%rcx)
        movntdq %xmm5,80(%rdi,%rcx)
        movntdq %xmm6,96(%rdi,%rcx)
        movntdq %xmm7,112(%rdi,%rcx)

        subq    $-128,%rcx              // add 128 with an 8-bit immediate
        jnz     LVeryLongAligned
        jmp     LVeryLongChunkEnd

        .align  4,0x90                  // 16-byte align inner loops
LVeryLongUnaligned:                     // unaligned loop over 128 bytes
        movdqu  (%rsi,%rcx),%xmm0
        movdqu  16(%rsi,%rcx),%xmm1
        movdqu  32(%rsi,%rcx),%xmm2
        movdqu  48(%rsi,%rcx),%xmm3
        movdqu  64(%rsi,%rcx),%xmm4
        movdqu  80(%rsi,%rcx),%xmm5
        movdqu  96(%rsi,%rcx),%xmm6
        movdqu  112(%rsi,%rcx),%xmm7

        movntdq %xmm0,(%rdi,%rcx)
        movntdq %xmm1,16(%rdi,%rcx)
        movntdq %xmm2,32(%rdi,%rcx)
        movntdq %xmm3,48(%rdi,%rcx)
        movntdq %xmm4,64(%rdi,%rcx)
        movntdq %xmm5,80(%rdi,%rcx)
        movntdq %xmm6,96(%rdi,%rcx)
        movntdq %xmm7,112(%rdi,%rcx)

        subq    $-128,%rcx              // add 128 with an 8-bit immediate
        jnz     LVeryLongUnaligned

LVeryLongChunkEnd:
        cmpq    $4096,%rdx              // at least another page to go?
        jae     LBigChunkLoop           // yes

// Done. Call memcpy() again to handle the 0-4095 bytes at the end.
// We still have the args in the right registers:
//      rdi = destination ptr
//      rsi = source ptr
//      rdx = length remaining (0..4095)

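// (Editor's note: movntdq stores are weakly ordered with respect to other stores, so the
// sfence below is what guarantees they become globally visible before any later stores,
// including those done by the trailing memcpy call and by the caller.)
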
        sfence                          // required by non-temporal stores
        testl   %edx,%edx               // anything left to copy?
        jz      1f
        movq    $_COMM_PAGE_32_TO_64(_COMM_PAGE_MEMCPY),%rax
        call    *%rax
1:
        popq    %rbp                    // restore frame ptr
        ret

        /* always match for now, as commpage_stuff_routine() will panic if no match */
        COMMPAGE_DESCRIPTOR(longcopy_sse4_64, _COMM_PAGE_LONGCOPY, 0, 0)