/*
 * Copyright (c) 2008 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
/*
 * Bzero, tuned for processors with SSE4.2 and 64-byte cache lines, i.e. Nehalem.
 * We don't actually use SSE4.2; we use its presence to identify Nehalem.
 *
 * We do not use nontemporal operations; we use MOVDQA in preference to REP/STOS.
 *
 * This routine is also used for memset(p,0,n), which is a common case
 * since gcc sometimes silently maps bzero() into memset(). As a result,
 * we always load the original ptr into %eax before returning.
 */
#define kShort		80		// too short to bother with SSE (must be >= 80)
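// (Presumably the >= 80 floor guarantees the SSE path useful work: aligning the
//  ptr can consume up to 15 one-byte/doubleword stores, and 80 - 15 = 65 still
//  leaves at least one full 64-byte chunk for the MOVDQA loop below.)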
	COMMPAGE_FUNCTION_START(bzero_sse42, 32, 5)
	pushl	%ebp			// set up a frame for backtraces
	movl	%esp,%ebp
	pushl	%edi			// preserve caller's %edi
	movl	8(%ebp),%edi		// get ptr
	movl	12(%ebp),%edx		// get length

	xorl	%eax,%eax		// set fill data to 0
	cmpl	$(kShort),%edx		// long enough for SSE?
	jg	LNotShort		// yes
// Here for short operands or the end of long ones.
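//      %edx = length (<= kShort, or the 0..63 residual of a long operand)
//      %edi = ptr
//      %eax = zero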
Lshort:
	cmpl	$12,%edx		// long enough to word align?
	jge	3f			// yes
	test	%edx,%edx		// length==0?
	jz	6f
1:
	movb	%al,(%edi)		// zero a byte
	inc	%edi
	dec	%edx
	jnz	1b
	jmp	6f
2:
	movb	%al,(%edi)		// zero a byte
	inc	%edi
	dec	%edx
3:
	test	$3,%edi			// is ptr doubleword aligned?
	jnz	2b			// no
	movl	%edx,%ecx		// copy length
	shrl	$2,%edx			// #doublewords to store
4:
	movl	%eax,(%edi)		// zero an aligned doubleword
	addl	$4,%edi
	dec	%edx
	jnz	4b
	andl	$3,%ecx			// mask down to #bytes at end (0..3)
	jz	6f			// none
5:
	movb	%al,(%edi)		// zero a byte
	inc	%edi
	dec	%ecx
	jnz	5b
6:
	movl	8(%ebp),%eax		// get return value in case this was a call of memset()
	popl	%edi
	popl	%ebp
	ret

// We will be using SSE, so align ptr.
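//      %edx = length (> kShort)
//      %edi = ptr
//      %eax = zero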
LNotShort:
	testl	$3,%edi			// 4-byte aligned?
	jz	2f			// yes
	movb	%al,(%edi)		// zero another byte
	incl	%edi
	decl	%edx
	jmp	LNotShort
1:					// zero doublewords until 16-byte aligned
	movl	%eax,(%edi)
	addl	$4,%edi
	subl	$4,%edx
2:
	testl	$15,%edi		// 16-byte aligned?
	jnz	1b			// no

// Destination is now 16-byte aligned. Prepare to loop over 64-byte chunks.
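//      %edx = length remaining
//      %edi = ptr (16-byte aligned)
//      %eax = zero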
LDestAligned:
	movl	%edx,%ecx		// copy length
	andl	$63,%edx		// mask down to residual length (0..63)
	andl	$-64,%ecx		// get #bytes we will zero in this loop
	pxor	%xmm0,%xmm0		// zero an SSE register
	addl	%ecx,%edi		// increment ptr by length to move
	negl	%ecx			// negate length to move
	jmp	1f

// Loop over 64-byte chunks, storing into cache.
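// Each iteration issues four 16-byte MOVDQA stores, i.e. one full 64-byte
// cache line, while %ecx counts up from -(#bytes) toward zero.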
	.align	4,0x90			// keep inner loops 16-byte aligned
1:
	movdqa	%xmm0,(%edi,%ecx)
	movdqa	%xmm0,16(%edi,%ecx)
	movdqa	%xmm0,32(%edi,%ecx)
	movdqa	%xmm0,48(%edi,%ecx)
	addl	$64,%ecx
	jnz	1b

	jmp	Lshort			// zero the 0..63 residual bytes and return

	COMMPAGE_DESCRIPTOR(bzero_sse42,_COMM_PAGE_BZERO,kHasSSE4_2,0)
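// The descriptor publishes this variant for the _COMM_PAGE_BZERO slot: at boot
// the kernel installs it only on CPUs whose capability bits include kHasSSE4_2
// (required bits = kHasSSE4_2, forbidden bits = 0).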