/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>


/*
 * The bcopy/memcpy loops for very long operands, tuned for 64-bit
 * Pentium-M class processors with SSE4 and 64-byte cache lines.
 * This is the 64-bit version.
 *
 * The following #defines are tightly coupled to the u-architecture:
 */

#define kBigChunk   (256*1024)          // outer loop chunk size for kVeryLong sized operands


// Very long forward moves.  These are at least several pages, so we loop over big
// chunks of memory (kBigChunk in size.)  We first prefetch the chunk, and then copy
// it using non-temporal stores.  Hopefully all the reads occur in the prefetch loop,
// so the copy loop reads from L2 and writes directly to memory (with write combining.)
// This minimizes bus turnaround and maintains good DRAM page locality.
// Note that for this scheme to work, kVeryLong must be a large fraction of L2 cache
// size.  Otherwise, it is counter-productive to bypass L2 on the stores.
//
// We are called from the commpage bcopy loops when they encounter very long
// operands, with the standard ABI:
//      rdi = dest ptr
//      rsi = source ptr
//      rdx = length (>= 8kb, probably much bigger)

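// For orientation, the structure of this routine corresponds roughly to the C-level
// sketch below.  (Illustrative only: the helper names are inventions of this comment;
// the head and tail copies actually go through the commpage memcpy via
// _COMM_PAGE_32_TO_64(_COMM_PAGE_MEMCPY), and the two copy loops are written out inline.)
//
//      void longcopy_sketch(unsigned char *dst, const unsigned char *src, size_t len) {
//          size_t head = (size_t)(-(uintptr_t)dst) & 63;       // bytes to 64-byte align dst
//          memcpy(dst, src, head);                             // small ordinary copy
//          dst += head;  src += head;  len -= head;
//          while (len >= 4096) {
//              size_t chunk = (len < kBigChunk ? len : kBigChunk) & ~(size_t)4095;
//              touch_chunk(src, chunk);                        // prefetch loop (LTouchLoop)
//              copy_chunk_nt(dst, src, chunk);                 // non-temporal copy loops
//              dst += chunk;  src += chunk;  len -= chunk;
//          }
//          _mm_sfence();                                       // order the non-temporal stores
//          if (len != 0) memcpy(dst, src, len);                // 0..4095 byte tail
//      }
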
        .text
        .code64
        .align  5, 0x90
Llongcopy_sse4_64:                      // void longcopy(void *dest, const void *source, size_t len)
        pushq   %rbp                    // set up a frame for backtraces
        movq    %rsp,%rbp
        movl    %edi,%eax               // copy dest ptr
        negl    %eax
        andl    $63,%eax                // get #bytes to cache line align destination
        jz      LBigChunkLoop           // already aligned

// Cache line align destination, so the non-temporal stores in the copy loops work right.
// The recursive call returns with the source and dest ptrs properly updated.

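// Worked example (illustrative): if the low six bits of the dest ptr are 0x2d (45),
// the negl/andl sequence above yields (-45) & 63 = 19, so the memcpy call below
// copies 19 bytes and the destination becomes 64-byte aligned (45 + 19 = 64).
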
        subq    %rax,%rdx               // get length remaining after dest is aligned
        pushq   %rdx                    // save length remaining
        movl    %eax,%edx               // #bytes to copy to align destination
        movq    $_COMM_PAGE_32_TO_64(_COMM_PAGE_MEMCPY),%rax
        call    *%rax
        popq    %rdx                    // recover adjusted length

// Loop over big chunks.
//      rdx = length remaining (>= 4096)
//      rdi = dest (64-byte aligned)
//      rsi = source (may be unaligned)

LBigChunkLoop:
        movl    $(kBigChunk),%r8d       // assume we can do a full chunk
        cmpq    %r8,%rdx                // do we have a full chunk left to do?
        cmovbl  %edx,%r8d               // if not, only move what we have left
        andl    $-4096,%r8d             // we work in page multiples
        xorl    %eax,%eax               // initialize chunk offset
        jmp     LTouchLoop

// Touch in the next chunk.  We try to keep the prefetch unit in "kick-start" mode,
// by touching two adjacent cache lines every 8 lines of each page, in four slices.
// Because the source may be unaligned, we use byte loads to touch.
//      rdx = length remaining (including this chunk)
//      rdi = ptr to start of dest chunk
//      rsi = ptr to start of source chunk
//      r8d = chunk length (multiples of pages, less than 2**32)
//      ecx = scratch reg used to read a byte of each cache line
//      eax = chunk offset

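// The pattern described above, implemented by LTouchLoop below, corresponds to this
// stand-alone C sketch (illustrative only; the function name and the volatile sink
// are inventions of this comment, not xnu code):
//
//      static void touch_chunk(const unsigned char *src, size_t chunk) {
//          volatile unsigned char sink = 0;
//          for (size_t page = 0; page < chunk; page += 4096)
//              for (size_t slice = 0; slice < 512; slice += 128)           // four slices per page
//                  for (size_t group = 0; group < 4096; group += 8*64) {   // every 8 cache lines
//                      sink = src[page + slice + group];                   // two adjacent lines...
//                      sink = src[page + slice + group + 64];              // ...per group
//                  }
//          (void)sink;
//      }
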
        .align  4,0x90                  // 16-byte align inner loops
LTouchLoop:
        movzb   (%rsi,%rax),%ecx        // touch line 0, 2, 4, or 6 of page
        movzb   1*64(%rsi,%rax),%ecx    // touch line 1, 3, 5, or 7
        movzb   8*64(%rsi,%rax),%ecx    // touch line 8, 10, 12, or 14
        movzb   9*64(%rsi,%rax),%ecx    // etc

        movzb   16*64(%rsi,%rax),%ecx
        movzb   17*64(%rsi,%rax),%ecx
        movzb   24*64(%rsi,%rax),%ecx
        movzb   25*64(%rsi,%rax),%ecx

        movzb   32*64(%rsi,%rax),%ecx
        movzb   33*64(%rsi,%rax),%ecx
        movzb   40*64(%rsi,%rax),%ecx
        movzb   41*64(%rsi,%rax),%ecx

        movzb   48*64(%rsi,%rax),%ecx
        movzb   49*64(%rsi,%rax),%ecx
        movzb   56*64(%rsi,%rax),%ecx
        movzb   57*64(%rsi,%rax),%ecx

        subl    $-128,%eax              // next slice of page (adding 128 with an 8-bit immediate)
        testl   $512,%eax               // done with this page?
        jz      LTouchLoop              // no, next of four slices
        addl    $(4096-512),%eax        // move on to next page
        cmpl    %eax,%r8d               // done with this chunk?
        jnz     LTouchLoop              // no, do next page

// The chunk has been prefetched; now copy it using non-temporal stores.
// There are two copy loops, depending on whether the source is 16-byte aligned
// or not.

        movl    %r8d,%ecx               // copy chunk size to a reg that doesn't use a REX prefix
        addq    %rcx,%rsi               // increment ptrs by chunk length
        addq    %rcx,%rdi
        subq    %rcx,%rdx               // adjust remaining length
        negq    %rcx                    // prepare loop index (counts up to 0)
        testl   $15,%esi                // is source 16-byte aligned?
        jnz     LVeryLongUnaligned      // no
        jmp     LVeryLongAligned

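// Both loops below use a negative index: the pointers already point one chunk past the
// data, and rcx counts up from -chunk to zero in 128-byte steps.  A C/intrinsics sketch
// of the unaligned variant (illustrative only; the real loops are unrolled to 128 bytes
// per iteration, and this file emits movdqu/movntdq directly rather than via intrinsics):
//
//      #include <emmintrin.h>
//      static void copy_chunk_nt(unsigned char *dst, const unsigned char *src, size_t chunk) {
//          dst += chunk;  src += chunk;                        // point past the chunk, like rdi/rsi
//          for (long i = -(long)chunk; i != 0; i += 16) {
//              __m128i v = _mm_loadu_si128((const __m128i *)(src + i));    // movdqu
//              _mm_stream_si128((__m128i *)(dst + i), v);                   // movntdq; dst is 16-byte aligned
//          }
//      }
//
// The sfence that movntdq requires is issued once, at the very end of this routine.
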
        .align  4,0x90                  // 16-byte align inner loops
LVeryLongAligned:                       // aligned loop over 128-byte blocks
        movdqa  (%rsi,%rcx),%xmm0
        movdqa  16(%rsi,%rcx),%xmm1
        movdqa  32(%rsi,%rcx),%xmm2
        movdqa  48(%rsi,%rcx),%xmm3
        movdqa  64(%rsi,%rcx),%xmm4
        movdqa  80(%rsi,%rcx),%xmm5
        movdqa  96(%rsi,%rcx),%xmm6
        movdqa  112(%rsi,%rcx),%xmm7

        movntdq %xmm0,(%rdi,%rcx)
        movntdq %xmm1,16(%rdi,%rcx)
        movntdq %xmm2,32(%rdi,%rcx)
        movntdq %xmm3,48(%rdi,%rcx)
        movntdq %xmm4,64(%rdi,%rcx)
        movntdq %xmm5,80(%rdi,%rcx)
        movntdq %xmm6,96(%rdi,%rcx)
        movntdq %xmm7,112(%rdi,%rcx)

        subq    $-128,%rcx              // add 128 with an 8-bit immediate
        jnz     LVeryLongAligned
        jmp     LVeryLongChunkEnd

        .align  4,0x90                  // 16-byte align inner loops
LVeryLongUnaligned:                     // unaligned loop over 128-byte blocks
        movdqu  (%rsi,%rcx),%xmm0
        movdqu  16(%rsi,%rcx),%xmm1
        movdqu  32(%rsi,%rcx),%xmm2
        movdqu  48(%rsi,%rcx),%xmm3
        movdqu  64(%rsi,%rcx),%xmm4
        movdqu  80(%rsi,%rcx),%xmm5
        movdqu  96(%rsi,%rcx),%xmm6
        movdqu  112(%rsi,%rcx),%xmm7

        movntdq %xmm0,(%rdi,%rcx)
        movntdq %xmm1,16(%rdi,%rcx)
        movntdq %xmm2,32(%rdi,%rcx)
        movntdq %xmm3,48(%rdi,%rcx)
        movntdq %xmm4,64(%rdi,%rcx)
        movntdq %xmm5,80(%rdi,%rcx)
        movntdq %xmm6,96(%rdi,%rcx)
        movntdq %xmm7,112(%rdi,%rcx)

        subq    $-128,%rcx              // add 128 with an 8-bit immediate
        jnz     LVeryLongUnaligned

LVeryLongChunkEnd:
        cmpq    $4096,%rdx              // at least another page to go?
        jae     LBigChunkLoop           // yes

// Done.  Call memcpy() again to handle the 0..4095 bytes at the end.
// We still have the args in the right registers:
//      rdi = destination ptr
//      rsi = source ptr
//      rdx = length remaining (0..4095)

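// (movntdq stores are weakly ordered; the sfence below makes them globally visible
// before any later stores, including those of the tail memcpy call and of our caller.)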
        sfence                          // required by non-temporal stores
        testl   %edx,%edx               // anything left to copy?
        jz      1f
        movq    $_COMM_PAGE_32_TO_64(_COMM_PAGE_MEMCPY),%rax
        call    *%rax
1:
        popq    %rbp                    // restore frame ptr
        ret

        /* always match for now, as commpage_stuff_routine() will panic if no match */
        COMMPAGE_DESCRIPTOR(longcopy_sse4_64, _COMM_PAGE_LONGCOPY, 0, 0)