/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*	void _longjmp(jmp_buf env, int val); */

/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 *	File: sys/ppc/_longjmp.s
 *
 *	Implements _longjmp()
 *
 *	History:
 *	8 September 1998	Matt Watson (mwatson@apple.com)
 *		Created. Derived from longjmp.s
 */
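
/*
 * Illustrative usage sketch (not part of the original file): _setjmp()
 * records the register context in "env" and returns 0; a later
 * _longjmp(env, val) resumes execution as if that _setjmp() call had just
 * returned val (or 1 if val is 0). Unlike setjmp()/longjmp(), the
 * underscore variants do not save or restore the signal mask.
 *
 *	#include <setjmp.h>
 *
 *	static jmp_buf env;
 *
 *	static void fail(void) {
 *		_longjmp(env, 42);	// never returns
 *	}
 *
 *	int run(void) {
 *		int val = _setjmp(env);	// 0 on the direct call, 42 after _longjmp
 *		if (val != 0)
 *			return val;	// reached via _longjmp
 *		fail();
 *		return 0;		// not reached
 *	}
 */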

#include <architecture/ppc/asm_help.h>
#include "_setjmp.h"

#define	VRSave	256

/* special flag bit definitions copied from /osfmk/ppc/thread_act.h */

#define	floatUsedbit	1
#define	vectorUsedbit	2
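/*
 * Note: these are big-endian PowerPC bit numbers (bit 0 is the MSB); the
 * rlwinm instructions below use them as single-bit mask bounds to test
 * whether the thread had live FP or vector state when _setjmp ran.
 */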


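/*
 * Under __DYNAMIC__ (PIC) builds, external symbols cannot be referenced by
 * absolute address, so dyld-managed non-lazy pointers to _memmove and
 * __cpu_has_altivec are declared here and loaded indirectly below.
 */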
#if defined(__DYNAMIC__)
	.data
	.non_lazy_symbol_pointer
	.align 2
L_memmove$non_lazy_ptr:
	.indirect_symbol _memmove
	.long 0
	.non_lazy_symbol_pointer
	.align 2
L__cpu_has_altivec$non_lazy_ptr:
	.indirect_symbol __cpu_has_altivec
	.long 0
	.text
#endif

LEAF(__longjmp)

	; need to restore FPRs or VRs?

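	; JMP_flags holds the facility-use bits _setjmp captured, and
	; JMP_addr_at_setjmp holds the buffer's address at that time; comparing
	; it with r3 detects a jmp_buf that has been copied to a new address,
	; which would break the quadword alignment of the saved VRs/FPRs.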
	lwz	r5,JMP_flags(r3)
	lwz	r6,JMP_addr_at_setjmp(r3)
	rlwinm	r7,r5,0,vectorUsedbit,vectorUsedbit
	rlwinm	r8,r5,0,floatUsedbit,floatUsedbit
	cmpw	cr1,r3,r6		; jmp_buf still at same address?
	cmpwi	cr3,r7,0		; set cr3 iff VRs in use (non-volatile CR)
	cmpwi	cr4,r8,0		; set cr4 iff FPRs in use (non-volatile CR)
	beq+	cr1,LRestoreVRs

	; jmp_buf was moved since setjmp (or is uninitialized.)
	; We must move VRs and FPRs to be quadword aligned at present address.
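	; The rlwinm masks below keep bits 0-27, i.e. they clear the low four
	; bits; in C terms (illustrative): qw_aligned = addr & ~(unsigned long)15.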

	stw	r3,JMP_addr_at_setjmp(r3) ; update, in case we longjmp to this again
	mr	r31,r4			; save "val" arg across memmove
	mr	r30,r3			; and jmp_buf ptr
	addi	r3,r3,JMP_vr_base_addr
	addi	r4,r6,JMP_vr_base_addr
	rlwinm	r3,r3,0,0,27		; r3 <- QW aligned addr where they should be
	rlwinm	r4,r4,0,0,27		; r4 <- QW aligned addr where they originally were
	sub	r7,r4,r6		; r7 <- offset of VRs/FPRs within jmp_buf
	add	r4,r30,r7		; r4 <- where they are now
	li	r5,(JMP_buf_end - JMP_vr_base_addr)
#if defined(__DYNAMIC__)
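	; PIC call: bcl 20,31,1f is a branch-always-and-link to the next
	; instruction, used here only to materialize the current address in LR
	; (this form is commonly special-cased by PowerPC implementations so it
	; does not unbalance the link-register prediction stack).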
	bcl	20,31,1f		; Get pic-base
1:	mflr	r12
	addis	r12, r12, ha16(L_memmove$non_lazy_ptr - 1b)
	lwz	r12, lo16(L_memmove$non_lazy_ptr - 1b)(r12)
	mtctr	r12			; Get address left by dyld
	bctrl
#else
	bl	_memmove
#endif
	mr	r3,r30
	mr	r4,r31

	; Restore VRs iff any
	;	cr3 - bne if VRs
	;	cr4 - bne if FPRs

LRestoreVRs:
	beq+	cr3,LZeroVRSave		; no VRs
	lwz	r0,JMP_vrsave(r3)
	addi	r6,r3,JMP_vr_base_addr
	cmpwi	r0,0			; any live VRs?
	mtspr	VRSave,r0
	beq+	LRestoreFPRs
	lvx	v20,0,r6
	li	r7,16*1
	lvx	v21,r7,r6
	li	r7,16*2
	lvx	v22,r7,r6
	li	r7,16*3
	lvx	v23,r7,r6
	li	r7,16*4
	lvx	v24,r7,r6
	li	r7,16*5
	lvx	v25,r7,r6
	li	r7,16*6
	lvx	v26,r7,r6
	li	r7,16*7
	lvx	v27,r7,r6
	li	r7,16*8
	lvx	v28,r7,r6
	li	r7,16*9
	lvx	v29,r7,r6
	li	r7,16*10
	lvx	v30,r7,r6
	li	r7,16*11
	lvx	v31,r7,r6
	b	LRestoreFPRs		; skip zeroing VRSave

	; Zero VRSave iff Altivec is supported, but VRs were not in use
	; at setjmp time. This covers the case where VRs are first used after
	; the setjmp but before the longjmp, and where VRSave is nonzero at
	; the longjmp. We need to zero it now, or it will always remain
	; nonzero since they are sticky bits.
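	; (Each VRSave bit flags one vector register as live, so a stale
	; nonzero VRSave would make every subsequent context switch save and
	; restore vector state needlessly.)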

LZeroVRSave:
#if defined(__DYNAMIC__)
	bcl	20,31,1f
1:	mflr	r9			; get our address
	addis	r6,r9,ha16(L__cpu_has_altivec$non_lazy_ptr - 1b)
	lwz	r7,lo16(L__cpu_has_altivec$non_lazy_ptr - 1b)(r6)
	lwz	r7,0(r7)		; load the flag
#else
	lis	r7, ha16(__cpu_has_altivec)
	lwz	r7, lo16(__cpu_has_altivec)(r7)
#endif
	cmpwi	r7,0
	li	r8,0
	beq	LRestoreFPRs		; no Altivec, so skip
	mtspr	VRSave,r8

	; Restore FPRs if any
	;	cr4 - bne iff FPRs

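	; f14-f31 are the callee-saved FPRs under the 32-bit PowerPC ABI; the
	; volatile FPRs f0-f13 need not be (and are not) restored here.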
LRestoreFPRs:
	beq	cr4,LRestoreGPRs	; FPRs not in use at setjmp
	addi	r6,r3,JMP_fp_base_addr
	rlwinm	r6,r6,0,0,27		; mask off low 4 bits to qw align
	lfd	f14,0*8(r6)
	lfd	f15,1*8(r6)
	lfd	f16,2*8(r6)
	lfd	f17,3*8(r6)
	lfd	f18,4*8(r6)
	lfd	f19,5*8(r6)
	lfd	f20,6*8(r6)
	lfd	f21,7*8(r6)
	lfd	f22,8*8(r6)
	lfd	f23,9*8(r6)
	lfd	f24,10*8(r6)
	lfd	f25,11*8(r6)
	lfd	f26,12*8(r6)
	lfd	f27,13*8(r6)
	lfd	f28,14*8(r6)
	lfd	f29,15*8(r6)
	lfd	f30,16*8(r6)
	lfd	f31,17*8(r6)

	; Restore GPRs

LRestoreGPRs:
	lwz	r31, JMP_r31(r3)
	/* r1, r2, r13-r30 */
	lwz	r1, JMP_r1 (r3)
	lwz	r2, JMP_r2 (r3)
	lwz	r13, JMP_r13(r3)
	lwz	r14, JMP_r14(r3)
	lwz	r15, JMP_r15(r3)
	lwz	r16, JMP_r16(r3)
	lwz	r17, JMP_r17(r3)
	lwz	r18, JMP_r18(r3)
	lwz	r19, JMP_r19(r3)
	lwz	r20, JMP_r20(r3)
	lwz	r21, JMP_r21(r3)
	lwz	r22, JMP_r22(r3)
	lwz	r23, JMP_r23(r3)
	lwz	r24, JMP_r24(r3)
	lwz	r25, JMP_r25(r3)
	lwz	r26, JMP_r26(r3)
	lwz	r27, JMP_r27(r3)
	lwz	r28, JMP_r28(r3)
	lwz	r29, JMP_r29(r3)
	lwz	r30, JMP_r30(r3)
	lwz	r0, JMP_cr(r3)
	mtcrf	0xff,r0
	lwz	r0, JMP_lr(r3)
	mtlr	r0
	lwz	r0, JMP_ctr(r3)		; XXX ctr is volatile
	mtctr	r0
	lwz	r0, JMP_xer(r3)		; XXX xer is volatile
	mtxer	r0
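
	; Return "val" as the result of the original _setjmp. C requires that
	; setjmp never appear to return 0 via longjmp, so a 0 argument is
	; turned into 1 (mr. sets cr0, and bnelr returns iff val != 0).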
	mr.	r3, r4
	bnelr
	li	r3, 1
	blr