/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/cpu_capabilities.h>
#include <platfunc.h>

/*
 * Bzero, tuned for Pentium-M class processors with SSE2
 * and 64-byte cache lines.
 *
 * This routine is also used for memset(p,0,n), which is a common case
 * since gcc sometimes silently maps bzero() into memset(). As a result,
 * we always load the original ptr into %eax before returning.
 */
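
/*
 * Orientation only, not part of the original source: a hedged C sketch
 * of the strategy below, with SSE2 intrinsics standing in for the
 * hand-scheduled movdqa/movntdq loops.  The name bzero_sketch and the
 * byte-at-a-time tail are illustrative; kShort and kVeryLong are the
 * constants defined just below.
 *
 *	#include <emmintrin.h>
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	void *bzero_sketch(void *b, size_t len) {
 *		char *p = b;
 *		if (len > kShort) {
 *			while ((uintptr_t)p & 15) {		// 16-byte align ptr
 *				*p++ = 0;
 *				len--;
 *			}
 *			size_t chunk = len & ~(size_t)63;	// whole 64-byte chunks
 *			len &= 63;				// residual 0..63 bytes
 *			__m128i z = _mm_setzero_si128();
 *			for (size_t i = 0; i < chunk; i += 16) {
 *				if (chunk >= kVeryLong)
 *					_mm_stream_si128((__m128i *)(p + i), z); // movntdq
 *				else
 *					_mm_store_si128((__m128i *)(p + i), z);  // movdqa
 *			}
 *			if (chunk >= kVeryLong)
 *				_mm_sfence();			// order non-temporal stores
 *			p += chunk;
 *		}
 *		while (len--)					// Lshort: trailing bytes
 *			*p++ = 0;
 *		return b;					// memset() compatibility
 *	}
 */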
#define kShort		80		// too short to bother with SSE (must be >= 80)
#define kVeryLong	(1024*1024)	// large enough to justify non-temporal stores

// void bzero(void *b, size_t len);

PLATFUNC_FUNCTION_START(bzero, sse2, 32, 5)
	pushl	%ebp			// set up a frame for backtraces
	movl	%esp,%ebp
	pushl	%edi
	movl	8(%ebp),%edi		// get ptr
	movl	12(%ebp),%edx		// get length

	xorl	%eax,%eax		// set fill data to 0
	cmpl	$(kShort),%edx		// long enough for SSE?
	jg	LNotShort		// yes

// Here for short operands or the end of long ones.
//	%edx = length
//	%edi = ptr
//	%eax = zero

Lshort:
	cmpl	$16,%edx		// long enough to doubleword align?
	jge	3f			// yes
	test	%edx,%edx		// length==0?
	jz	6f
1:
	movb	%al,(%edi)		// zero a byte
	inc	%edi
	dec	%edx
	jnz	1b
	jmp	6f
2:
	movb	%al,(%edi)		// zero a byte
	inc	%edi
	dec	%edx
3:
	test	$3,%edi			// is ptr doubleword aligned?
	jnz	2b			// no
	movl	%edx,%ecx		// copy length
	shrl	$2,%edx			// #doublewords to store
4:
	movl	%eax,(%edi)		// zero an aligned doubleword
	addl	$4,%edi
	dec	%edx
	jnz	4b
	andl	$3,%ecx			// mask down to #bytes at end (0..3)
	jz	6f			// none
5:
	movb	%al,(%edi)		// zero a byte
	inc	%edi
	dec	%ecx
	jnz	5b
6:
	movl	8(%ebp),%eax		// get return value in case this was a call of memset()
	popl	%edi
	popl	%ebp
	ret
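
/*
 * Worked example (illustrative; not in the original source): a call
 * bzero(p, 30) with p == 0x1002 reaches Lshort with %edx == 30, takes
 * the jge to 3:, stores 2 bytes via 2:/3: to doubleword-align the ptr
 * (p -> 0x1004, %edx -> 28), stores 28 >> 2 == 7 aligned doublewords
 * at 4:, and has 28 & 3 == 0 trailing bytes, for 2 + 28 == 30 in all.
 */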


// We will be using SSE, so align ptr.

LNotShort:
	movl	%edi,%ecx
	negl	%ecx
	andl	$15,%ecx		// mask down to #bytes to 16-byte align
	jz	LDestAligned		// already aligned
	subl	%ecx,%edx		// decrement length
0:					// loop storing bytes to align the ptr
	movb	%al,(%edi)		// pack in a byte
	inc	%edi
	dec	%ecx
	jnz	0b

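/*
 * Illustrative note (not in the original source): negl/andl computes
 * (-ptr) & 15, the byte count up to the next 16-byte boundary.  For
 * example, with %edi == 0x1003, -0x1003 & 15 == 13, and 0x1003 + 13
 * == 0x1010, which is 16-byte aligned.  A C equivalent of the pair:
 *
 *	size_t pad = (size_t)(-(uintptr_t)p) & 15;	// bytes to next 16-byte boundary
 */
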
// Destination is now 16-byte aligned.  Prepare to loop over 64-byte chunks.
//	%edx = length
//	%edi = ptr
//	%eax = zero

LDestAligned:
	movl	%edx,%ecx
	andl	$63,%edx		// mask down to residual length (0..63)
	andl	$-64,%ecx		// get #bytes we will zero in this loop
	pxor	%xmm0,%xmm0		// zero an SSE register
	addl	%ecx,%edi		// increment ptr by length to move
	cmpl	$(kVeryLong),%ecx	// long enough to justify non-temporal stores?
	jae	LVeryLong		// yes
	negl	%ecx			// negate length to move
	jmp	1f

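/*
 * Loop-indexing note (illustrative; not in the original source): %edi
 * was advanced past the block and %ecx negated, so (%edi,%ecx) starts
 * at the first chunk and the addl $64 walks %ecx up toward zero,
 * letting the flags from addl drive the jne with no separate compare.
 * Roughly, in C:
 *
 *	for (long i = -(long)chunk; i != 0; i += 64)
 *		store 64 zero bytes at end + i;		// end == p + chunk
 */
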
// Loop over 64-byte chunks, storing into cache.

	.align	4,0x90			// keep inner loops 16-byte aligned
1:
	movdqa	%xmm0,(%edi,%ecx)
	movdqa	%xmm0,16(%edi,%ecx)
	movdqa	%xmm0,32(%edi,%ecx)
	movdqa	%xmm0,48(%edi,%ecx)
	addl	$64,%ecx
	jne	1b

	jmp	Lshort

// Very long operands: use non-temporal stores to bypass cache.

LVeryLong:
	negl	%ecx			// negate length to move
	jmp	1f

	.align	4,0x90			// keep inner loops 16-byte aligned
1:
	movntdq	%xmm0,(%edi,%ecx)
	movntdq	%xmm0,16(%edi,%ecx)
	movntdq	%xmm0,32(%edi,%ecx)
	movntdq	%xmm0,48(%edi,%ecx)
	addl	$64,%ecx
	jne	1b

	sfence				// required by non-temporal stores
	jmp	Lshort
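
/*
 * Background note (illustrative; not in the original source): movntdq
 * writes are weakly ordered and bypass the cache, so the sfence is what
 * guarantees they are globally visible before the routine returns.  The
 * intrinsic equivalent of one store plus the fence is roughly:
 *
 *	_mm_stream_si128((__m128i *)p, _mm_setzero_si128());	// movntdq
 *	_mm_sfence();						// sfence
 */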

// This variant requires SSE2 and is excluded when SSE4.2 is present
// (a separate SSE4.2 variant covers newer processors).
PLATFUNC_DESCRIPTOR(bzero,sse2,kHasSSE2,kHasSSE4_2)