/*
 * Copyright (c) 2010-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <System/machine/cpu_capabilities.h>

// bool save_xmm = (*((uint32_t*)_COMM_PAGE_CPU_CAPABILITIES) & kHasAVX1_0) != 0;
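// For reference: the descriptor each variant below indexes is dyld's
// TLVDescriptor (see dyld's threadLocalVariables.c), whose offsets the
// code depends on:
//
//    struct TLVDescriptor {
//        void*          (*thunk)(struct TLVDescriptor*);  // points at _tlv_get_addr
//        unsigned long  key;     // pthread key: +8 on x86_64, +4 on i386/arm
//        unsigned long  offset;  // variable's offset within its thread buffer
//    };
//
// The %gs-relative loads below index the thread's TSD slots directly,
// i.e. an inline pthread_getspecific(key).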

#if __x86_64__
// returns address of TLV in %rax, all other registers preserved
#define VECTOR_SAVE		-704
#define STACK_SIZE		704
	.globl _tlv_get_addr
	.private_extern _tlv_get_addr
_tlv_get_addr:
	movq	8(%rdi),%rax			// get key from descriptor
	movq	%gs:0x0(,%rax,8),%rax	// get thread value
	testq	%rax,%rax				// if NULL, lazily allocate
	je		LlazyAllocate
	addq	16(%rdi),%rax			// add offset from descriptor
	ret

LlazyAllocate:
	pushq	%rbp
	movq	%rsp,%rbp
	subq	$STACK_SIZE,%rsp		// fxsave uses 512 bytes of store, xsave may use more
	movq	%rdi,-8(%rbp)			// save volatile registers the caller may be using
	movq	%rsi,-16(%rbp)
	movq	%rdx,-24(%rbp)
	movq	%rcx,-32(%rbp)
	movq	%r8,-40(%rbp)
	movq	%r9,-48(%rbp)
	movq	%r10,-56(%rbp)
	movq	%r11,-64(%rbp)
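	// Frame layout (inferred from the offsets used below):
	//   -8 .. -64            eight saved GPRs (%rdi ... %r11)
	//   VECTOR_SAVE (-704)   vector save area: 512 bytes for 16 ymm registers
	//                        (16 xmm need only 256)
	// %rbp is 16-byte aligned here and 704 is a multiple of 16, so movdqa's
	// alignment requirement is met; vmovdqu is used on the AVX path because
	// 32-byte alignment is not guaranteed.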
	movq	$(_COMM_PAGE_CPU_CAPABILITIES), %rcx
	movl	(%rcx), %ecx			// load cpu capabilities word
	testl	$kHasAVX1_0, %ecx
	jne		L2						// if AVX, save %ymm registers instead
	movdqa	%xmm0, VECTOR_SAVE+0x00(%rbp)
	movdqa	%xmm1, VECTOR_SAVE+0x10(%rbp)
	movdqa	%xmm2, VECTOR_SAVE+0x20(%rbp)
	movdqa	%xmm3, VECTOR_SAVE+0x30(%rbp)
	movdqa	%xmm4, VECTOR_SAVE+0x40(%rbp)
	movdqa	%xmm5, VECTOR_SAVE+0x50(%rbp)
	movdqa	%xmm6, VECTOR_SAVE+0x60(%rbp)
	movdqa	%xmm7, VECTOR_SAVE+0x70(%rbp)
	movdqa	%xmm8, VECTOR_SAVE+0x80(%rbp)
	movdqa	%xmm9, VECTOR_SAVE+0x90(%rbp)
	movdqa	%xmm10,VECTOR_SAVE+0xA0(%rbp)
	movdqa	%xmm11,VECTOR_SAVE+0xB0(%rbp)
	movdqa	%xmm12,VECTOR_SAVE+0xC0(%rbp)
	movdqa	%xmm13,VECTOR_SAVE+0xD0(%rbp)
	movdqa	%xmm14,VECTOR_SAVE+0xE0(%rbp)
	movdqa	%xmm15,VECTOR_SAVE+0xF0(%rbp)
	jmp		L3
L2:	vmovdqu	%ymm0, VECTOR_SAVE+0x00(%rbp)
	vmovdqu	%ymm1, VECTOR_SAVE+0x20(%rbp)
	vmovdqu	%ymm2, VECTOR_SAVE+0x40(%rbp)
	vmovdqu	%ymm3, VECTOR_SAVE+0x60(%rbp)
	vmovdqu	%ymm4, VECTOR_SAVE+0x80(%rbp)
	vmovdqu	%ymm5, VECTOR_SAVE+0xA0(%rbp)
	vmovdqu	%ymm6, VECTOR_SAVE+0xC0(%rbp)
	vmovdqu	%ymm7, VECTOR_SAVE+0xE0(%rbp)
	vmovdqu	%ymm8, VECTOR_SAVE+0x100(%rbp)
	vmovdqu	%ymm9, VECTOR_SAVE+0x120(%rbp)
	vmovdqu	%ymm10,VECTOR_SAVE+0x140(%rbp)
	vmovdqu	%ymm11,VECTOR_SAVE+0x160(%rbp)
	vmovdqu	%ymm12,VECTOR_SAVE+0x180(%rbp)
	vmovdqu	%ymm13,VECTOR_SAVE+0x1A0(%rbp)
	vmovdqu	%ymm14,VECTOR_SAVE+0x1C0(%rbp)
	vmovdqu	%ymm15,VECTOR_SAVE+0x1E0(%rbp)
L3:	movq	-32(%rbp),%rcx			// restore %rcx, trashed by capabilities check
	movq	8(%rdi),%rdi			// get key from descriptor
	call	_tlv_allocate_and_initialize_for_key
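	// %rax now holds this thread's freshly allocated buffer for the key;
	// the "result = buffer + offset" step below relies on this.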
	movq	$(_COMM_PAGE_CPU_CAPABILITIES), %rcx
	movl	(%rcx), %ecx			// reload cpu capabilities word
	testl	$kHasAVX1_0, %ecx
	jne		L4						// if AVX, restore %ymm registers instead
	movdqa	VECTOR_SAVE+0x00(%rbp), %xmm0
	movdqa	VECTOR_SAVE+0x10(%rbp), %xmm1
	movdqa	VECTOR_SAVE+0x20(%rbp), %xmm2
	movdqa	VECTOR_SAVE+0x30(%rbp), %xmm3
	movdqa	VECTOR_SAVE+0x40(%rbp), %xmm4
	movdqa	VECTOR_SAVE+0x50(%rbp), %xmm5
	movdqa	VECTOR_SAVE+0x60(%rbp), %xmm6
	movdqa	VECTOR_SAVE+0x70(%rbp), %xmm7
	movdqa	VECTOR_SAVE+0x80(%rbp), %xmm8
	movdqa	VECTOR_SAVE+0x90(%rbp), %xmm9
	movdqa	VECTOR_SAVE+0xA0(%rbp), %xmm10
	movdqa	VECTOR_SAVE+0xB0(%rbp), %xmm11
	movdqa	VECTOR_SAVE+0xC0(%rbp), %xmm12
	movdqa	VECTOR_SAVE+0xD0(%rbp), %xmm13
	movdqa	VECTOR_SAVE+0xE0(%rbp), %xmm14
	movdqa	VECTOR_SAVE+0xF0(%rbp), %xmm15
	jmp		L5
L4:	vmovdqu	VECTOR_SAVE+0x00(%rbp), %ymm0
	vmovdqu	VECTOR_SAVE+0x20(%rbp), %ymm1
	vmovdqu	VECTOR_SAVE+0x40(%rbp), %ymm2
	vmovdqu	VECTOR_SAVE+0x60(%rbp), %ymm3
	vmovdqu	VECTOR_SAVE+0x80(%rbp), %ymm4
	vmovdqu	VECTOR_SAVE+0xA0(%rbp), %ymm5
	vmovdqu	VECTOR_SAVE+0xC0(%rbp), %ymm6
	vmovdqu	VECTOR_SAVE+0xE0(%rbp), %ymm7
	vmovdqu	VECTOR_SAVE+0x100(%rbp), %ymm8
	vmovdqu	VECTOR_SAVE+0x120(%rbp), %ymm9
	vmovdqu	VECTOR_SAVE+0x140(%rbp), %ymm10
	vmovdqu	VECTOR_SAVE+0x160(%rbp), %ymm11
	vmovdqu	VECTOR_SAVE+0x180(%rbp), %ymm12
	vmovdqu	VECTOR_SAVE+0x1A0(%rbp), %ymm13
	vmovdqu	VECTOR_SAVE+0x1C0(%rbp), %ymm14
	vmovdqu	VECTOR_SAVE+0x1E0(%rbp), %ymm15
L5:	movq	-64(%rbp),%r11			// restore saved registers
	movq	-56(%rbp),%r10
	movq	-48(%rbp),%r9
	movq	-40(%rbp),%r8
	movq	-32(%rbp),%rcx
	movq	-24(%rbp),%rdx
	movq	-16(%rbp),%rsi
	movq	-8(%rbp),%rdi
	addq	16(%rdi),%rax			// result = buffer + offset
	addq	$STACK_SIZE,%rsp
	popq	%rbp
	ret
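// Illustrative only: compiler-generated code reaches this routine through the
// thunk slot of a variable's TLV descriptor, roughly (for a hypothetical _var):
//
//    movq	_var@TLVP(%rip), %rdi	// %rdi = descriptor address
//    callq	*(%rdi)					// descriptor's thunk -> _tlv_get_addr
//    // %rax = address of _var in this thread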

#elif __i386__
// returns address of TLV in %eax, all other registers (except %ecx) preserved
	.globl _tlv_get_addr
	.private_extern _tlv_get_addr
_tlv_get_addr:
	movl	4(%eax),%ecx			// get key from descriptor
	movl	%gs:0x0(,%ecx,4),%ecx	// get thread value
	testl	%ecx,%ecx				// if NULL, lazily allocate
	je		LlazyAllocate
	movl	8(%eax),%eax			// get offset from descriptor
	addl	%ecx,%eax				// add offset to thread value
	ret

LlazyAllocate:
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%edx					// save edx
	subl	$548,%esp				// room for fxsave area and key argument, keeps calls 16-byte aligned
	movl	%eax,-8(%ebp)			// save descriptor
	lea		-528(%ebp),%ecx			// get 512 byte buffer in frame
	and		$-16, %ecx				// 16-byte align buffer for fxsave
	fxsave	(%ecx)
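	// fxsave stores the full x87/MMX/SSE state (512 bytes) and faults unless
	// its operand is 16-byte aligned, hence the and above; fxrstor below
	// reloads the same image, so no xmm register is disturbed by the
	// allocation call.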
	movl	4(%eax),%ecx			// get key from descriptor
	movl	%ecx,(%esp)				// push key parameter, also leaves stack aligned properly
	call	_tlv_allocate_and_initialize_for_key
	movl	-8(%ebp),%ecx			// get descriptor
	movl	8(%ecx),%ecx			// get offset from descriptor
	addl	%ecx,%eax				// add offset to buffer
	lea		-528(%ebp),%ecx			// get 512 byte buffer in frame
	and		$-16, %ecx				// 16-byte align buffer for fxrstor
	fxrstor	(%ecx)
	addl	$548,%esp
	popl	%edx					// restore edx
	popl	%ebp
	ret

#elif __arm__
// returns address of TLV in r0, all other registers preserved
	.globl _tlv_get_addr
	.private_extern _tlv_get_addr
_tlv_get_addr:
	push	{r1,r2,r3,r7,lr}
	mov		r7,r0					// save descriptor in r7
	ldr		r0, [r7, #4]			// get key from descriptor
	bl		_pthread_getspecific	// get thread value
	cmp		r0, #0
	bne		L2						// if NULL, lazily allocate
	ldr		r0, [r7, #4]			// get key from descriptor
	bl		_tlv_allocate_and_initialize_for_key
L2:	ldr		r1, [r7, #8]			// get offset from descriptor
	add		r0, r1, r0				// add offset into allocation block
	pop		{r1,r2,r3,r7,pc}
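	// Unlike the x86 paths, this variant goes through _pthread_getspecific
	// rather than reading thread-local storage directly, so only the
	// call-clobbered registers it touches (r1-r3, plus r7 and lr) need saving;
	// popping into pc also performs the return.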
#endif

	.subsections_via_symbols