/*
 * Copyright (c) 2008 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>

/*
 * Bzero, tuned for processors with SSE4.2 and 64-byte cache lines,
 * i.e. Nehalem.  We don't actually use SSE4.2, but rather use it to
 * identify Nehalem.
 *
 * We do not use nontemporal operations, but use MOVDQA in preference
 * to REP/STOS.
 *
 * This routine is also used for memset(p,0,n), which is a common case
 * since gcc sometimes silently maps bzero() into memset().  As a
 * result, we always load the original ptr into %eax before returning.
 */

#define kShort	80			// too short to bother with SSE (must be >=80)

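/*
 * For orientation, a rough C model of the strategy implemented below
 * (this sketch is ours, not part of xnu; 80 mirrors kShort, and the
 * dword inner loop stands in for the four MOVDQA stores per chunk):
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static void *sketch_bzero(void *p, size_t n)
 *	{
 *		unsigned char *d = p;
 *		if (n > 80) {				// long enough for SSE
 *			while ((uintptr_t)d & 15) {	// align dst to 16 bytes
 *				*d++ = 0;
 *				n--;
 *			}
 *			for (; n >= 64; n -= 64) {	// 64-byte cache lines
 *				uint32_t *w = (uint32_t *)d;
 *				for (int i = 0; i < 16; i++)
 *					w[i] = 0;
 *				d += 64;
 *			}
 *		}
 *		while (n--)				// short operands and tails
 *			*d++ = 0;
 *		return p;	// memset(p,0,n) callers expect p back
 *	}
 */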

COMMPAGE_FUNCTION_START(bzero_sse42, 32, 5)
	pushl	%ebp			// set up a frame for backtraces
	movl	%esp,%ebp
	pushl	%edi
	movl	8(%ebp),%edi		// get ptr
	movl	12(%ebp),%edx		// get length

	xorl	%eax,%eax		// set fill data to 0
	cmpl	$(kShort),%edx		// long enough for SSE?
	jg	LNotShort		// yes

// Here for short operands or the end of long ones.
//	%edx = length
//	%edi = ptr
//	%eax = zero

Lshort:
	cmpl	$12,%edx		// long enough to word align?
	jge	3f			// yes
	test	%edx,%edx		// length==0?
	jz	6f
1:
	movb	%al,(%edi)		// zero a byte
	inc	%edi
	dec	%edx
	jnz	1b
	jmp	6f
2:
	movb	%al,(%edi)		// zero a byte
	inc	%edi
	dec	%edx
3:
	test	$3,%edi			// is ptr doubleword aligned?
	jnz	2b			// no
	movl	%edx,%ecx		// copy length
	shrl	$2,%edx			// #doublewords to store
4:
	movl	%eax,(%edi)		// zero an aligned doubleword
	addl	$4,%edi
	dec	%edx
	jnz	4b
	andl	$3,%ecx			// mask down to #bytes at end (0..3)
	jz	6f			// none
5:
	movb	%al,(%edi)		// zero a byte
	inc	%edi
	dec	%ecx
	jnz	5b
6:
	movl	8(%ebp),%eax		// get return value in case this was a call of memset()
	popl	%edi
	popl	%ebp
	ret

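/*
 * Rough C shape of the Lshort path above (ours, illustrative only):
 * operands of 12 bytes or more get byte-aligned to a 4-byte boundary
 * so the bulk can be stored as doublewords, then 0..3 tail bytes.
 *
 *	#include <stdint.h>
 *
 *	static void sketch_short(unsigned char *d, unsigned n)
 *	{
 *		if (n >= 12) {
 *			while ((uintptr_t)d & 3) {	// align to 4
 *				*d++ = 0;
 *				n--;
 *			}
 *			for (unsigned i = n >> 2; i != 0; i--) {
 *				*(uint32_t *)d = 0;	// aligned doubleword
 *				d += 4;
 *			}
 *			n &= 3;				// trailing bytes
 *		}
 *		while (n--)
 *			*d++ = 0;
 *	}
 */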
// We will be using SSE, so align ptr.
//	%edx = length
//	%edi = ptr
//	%eax = zero

LNotShort:
	testl	$3,%edi			// 4-byte aligned?
	jz	2f			// yes
	movb	%al,(%edi)		// zero another byte
	incl	%edi
	decl	%edx
	jmp	LNotShort
1:					// zero doublewords until 16-byte aligned
	movl	%eax,(%edi)
	addl	$4,%edi
	subl	$4,%edx
2:
	testl	$15,%edi		// 16-byte aligned?
	jnz	1b			// no

// Destination is now 16-byte aligned.  Prepare to loop over 64-byte chunks.
//	%edx = length
//	%edi = ptr
//	%eax = zero

LDestAligned:
	movl	%edx,%ecx
	andl	$63,%edx		// mask down to residual length (0..63)
	andl	$-64,%ecx		// get #bytes we will zero in this loop
	pxor	%xmm0,%xmm0		// zero an SSE register
	addl	%ecx,%edi		// increment ptr by length to move
	negl	%ecx			// negate length to move
	jmp	1f

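/*
 * Illustrative note (ours, not from xnu): the setup above uses the
 * classic negative-index idiom.  The pointer is advanced past the
 * region and the byte count negated, so stores index as (ptr,count)
 * while the count climbs toward zero and ADDL's zero flag ends the
 * loop with no separate compare.  In rough C, with store16 standing
 * in for one aligned 16-byte MOVDQA store:
 *
 *	d += chunks;				// addl %ecx,%edi
 *	ptrdiff_t i = -(ptrdiff_t)chunks;	// negl %ecx
 *	do {
 *		store16(d + i);			// movdqa %xmm0,(%edi,%ecx)
 *		store16(d + i + 16);
 *		store16(d + i + 32);
 *		store16(d + i + 48);
 *		i += 64;			// addl $64,%ecx
 *	} while (i != 0);			// jne 1b
 *
 * chunks is always a nonzero multiple of 64 here: at least 81 bytes
 * arrived (the kShort compare is jg) and alignment consumed at most
 * 15, so at least 66 remain and the do/while cannot underrun.
 */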
// Loop over 64-byte chunks, storing into cache.

	.align	4,0x90			// keep inner loops 16-byte aligned
1:
	movdqa	%xmm0,(%edi,%ecx)
	movdqa	%xmm0,16(%edi,%ecx)
	movdqa	%xmm0,32(%edi,%ecx)
	movdqa	%xmm0,48(%edi,%ecx)
	addl	$64,%ecx
	jne	1b

	jmp	Lshort

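/*
 * The same loop rendered with SSE2 intrinsics, for reference (ours,
 * illustrative; MOVDQA on aligned memory is _mm_store_si128):
 *
 *	#include <stddef.h>
 *	#include <emmintrin.h>
 *
 *	static void sketch_zero64(unsigned char *d, size_t len)
 *	{
 *		// d must be 16-byte aligned, len a nonzero multiple of 64
 *		__m128i z = _mm_setzero_si128();	// pxor %xmm0,%xmm0
 *		for (size_t i = 0; i < len; i += 64) {
 *			_mm_store_si128((__m128i *)(d + i),      z);
 *			_mm_store_si128((__m128i *)(d + i + 16), z);
 *			_mm_store_si128((__m128i *)(d + i + 32), z);
 *			_mm_store_si128((__m128i *)(d + i + 48), z);
 *		}
 *	}
 */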

COMMPAGE_DESCRIPTOR(bzero_sse42,_COMM_PAGE_BZERO,kHasSSE4_2,0)
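// Note (ours): the descriptor above registers this variant for the
// commpage.  The kernel publishes it at _COMM_PAGE_BZERO only on CPUs
// whose capability bits include kHasSSE4_2; the trailing 0 means no
// capability bit disqualifies it.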