/*
 * Copyright (c) 2008 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>

/*
 * Bzero, tuned for processors with SSE4.2 and 64-byte cache lines, i.e. Nehalem.
 * We don't actually use SSE4.2, but rather use it to identify Nehalem.
 * This is the 64-bit version.
 *
 * We do not use nontemporal operations, but use MOVDQA in preference to REP/STOS.
 *
 * This routine is also used for memset(p,0,n), which is a common case
 * since gcc sometimes silently maps bzero() into memset(). As a result,
 * we always load the original ptr into %rax before returning.
 */

#define kShort          80              // too short to bother with SSE (must be >=80)

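/*
 * Illustrative only: a rough C-level sketch of the flow implemented below.
 * The names here (bzero_sketch, chunk) are hypothetical and appear nowhere
 * else in this file, and alignment is simplified to byte stores where the
 * assembly aligns with byte and then doubleword stores.
 *
 *      #include <stddef.h>
 *      #include <stdint.h>
 *
 *      void bzero_sketch(void *b, size_t len)
 *      {
 *              unsigned char *p = b;
 *
 *              if (len > 80) {                         // the kShort threshold above
 *                      while ((uintptr_t)p & 15) {     // align destination to 16 bytes
 *                              *p++ = 0;
 *                              len--;
 *                      }
 *                      for (size_t chunk = len & ~(size_t)63; chunk != 0; chunk -= 64) {
 *                              for (int i = 0; i < 64; i++)    // four MOVDQA stores per chunk below
 *                                      p[i] = 0;
 *                              p += 64;
 *                      }
 *                      len &= 63;                      // residue handled by the short path
 *              }
 *              while (len--)                           // short operands and trailing bytes
 *                      *p++ = 0;
 *      }
 */
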
// void bzero(void *b, size_t len);

COMMPAGE_FUNCTION_START(bzero_sse42_64, 64, 5)
        pushq   %rbp                    // set up a frame for backtraces
        movq    %rsp,%rbp
        xorl    %eax,%eax               // set fill data to 0
        movq    %rdi,%r11               // save original ptr as return value
        cmpq    $(kShort),%rsi          // long enough for SSE?
        jg      LNotShort               // yes

// Here for short operands or the end of long ones.
//      %esi = length (<= kShort)
//      %rdi = ptr
//      %eax = zero

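// The short path stores single bytes until the ptr is doubleword aligned
// (only attempted when at least 12 bytes remain), then stores aligned
// doublewords, and finishes with the 0..3 trailing bytes.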
Lshort:
        cmpl    $12,%esi                // long enough to word align?
        jge     3f                      // yes
        test    %esi,%esi               // length==0?
        jz      6f
1:
        movb    %al,(%rdi)              // zero a byte
        incq    %rdi
        decl    %esi
        jnz     1b
        jmp     6f
2:
        movb    %al,(%rdi)              // zero a byte
        incq    %rdi
        decl    %esi
3:
        testl   $3,%edi                 // is ptr doubleword aligned?
        jnz     2b                      // no
        movl    %esi,%ecx               // copy length
        shrl    $2,%esi                 // #doublewords to store
4:
        movl    %eax,(%rdi)             // zero an aligned doubleword
        addq    $4,%rdi
        decl    %esi
        jnz     4b
        andl    $3,%ecx                 // mask down to #bytes at end (0..3)
        jz      6f                      // none
5:
        movb    %al,(%rdi)              // zero a byte
        incq    %rdi
        decl    %ecx
        jnz     5b
6:
        movq    %r11,%rax               // set return value in case this was a call of memset()
        popq    %rbp
        ret


// We will be using SSE, so align ptr.
//      %rsi = length (> kShort)
//      %rdi = ptr
//      %eax = zero

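// Alignment is done in two phases: single bytes until the ptr is doubleword
// aligned, then aligned doublewords until it is 16-byte aligned.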
LNotShort:
        testl   $3,%edi                 // 4-byte aligned?
        jz      2f                      // yes
        movb    %al,(%rdi)              // zero another byte
        incq    %rdi
        decq    %rsi
        jmp     LNotShort
1:                                      // zero doublewords until 16-byte aligned
        movl    %eax,(%rdi)
        addq    $4,%rdi
        subq    $4,%rsi
2:
        testl   $15,%edi                // 16-byte aligned?
        jnz     1b                      // no

// Destination is now 16-byte aligned. Prepare to loop over 64-byte chunks.
//      %rsi = length (> (kShort-15))
//      %rdi = ptr (aligned)
//      %eax = zero

LDestAligned:
        movq    %rsi,%rcx
        andl    $63,%esi                // mask down to residual length (0..63)
        andq    $-64,%rcx               // get #bytes we will zero in this loop
        pxor    %xmm0,%xmm0             // zero an SSE register
        addq    %rcx,%rdi               // increment ptr by length to move
        negq    %rcx                    // negate length to move
        jmp     1f

// Loop over 64-byte chunks, storing into cache.
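// The offsets are negative and count up toward zero, so the ADDQ at the
// bottom of the loop both advances the index and sets the flags tested
// by JNE, avoiding a separate compare.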

        .align  4,0x90                  // keep inner loops 16-byte aligned
1:
        movdqa  %xmm0,(%rdi,%rcx)
        movdqa  %xmm0,16(%rdi,%rcx)
        movdqa  %xmm0,32(%rdi,%rcx)
        movdqa  %xmm0,48(%rdi,%rcx)
        addq    $64,%rcx
        jne     1b

        jmp     Lshort                  // store the 0..63 residual bytes and return


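// Register this variant with the commpage; it is installed at
// _COMM_PAGE_BZERO on processors that report the kHasSSE4_2 capability.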
        COMMPAGE_DESCRIPTOR(bzero_sse42_64,_COMM_PAGE_BZERO,kHasSSE4_2,0)