/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/* void _longjmp(jmp_buf env, int val); */

/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * File: sys/ppc/_longjmp.s
 *
 * Implements _longjmp()
 *
 * History:
 * 8 September 1998 Matt Watson (mwatson@apple.com)
 *	Created. Derived from longjmp.s
 */
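
/*
 * Illustrative only: a minimal C sketch of the contract this routine
 * implements.  _setjmp()/_longjmp() are the variants that do not save or
 * restore the signal mask; note that a "val" of 0 is delivered as 1,
 * matching the fix-up at the end of this routine.
 *
 *	#include <setjmp.h>
 *	#include <stdio.h>
 *
 *	static jmp_buf env;
 *
 *	int main(void)
 *	{
 *		int val = _setjmp(env);		// 0 on the direct return
 *		if (val == 0)
 *			_longjmp(env, 42);	// control re-enters _setjmp above
 *		printf("got %d\n", val);	// prints "got 42"
 *		return 0;
 *	}
 */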

#include <architecture/ppc/asm_help.h>
#include "_setjmp.h"

#define VRSave 256
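; (VRSave is the AltiVec VRSAVE special-purpose register, SPR 256: a
; per-thread bitmask of the vector registers currently in use.)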

/* special flag bit definitions copied from /osfmk/ppc/thread_act.h */

#define floatUsedbit 1
#define vectorUsedbit 2


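; In dynamic builds, dyld fills these non-lazy symbol pointers with the
; addresses of _memmove and __cpu_has_altivec, so the position-independent
; code below can reach them via the pic-base.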
#if defined(__DYNAMIC__)
	.data
	.non_lazy_symbol_pointer
	.align 2
L_memmove$non_lazy_ptr:
	.indirect_symbol _memmove
	.long 0
	.non_lazy_symbol_pointer
	.align 2
L__cpu_has_altivec$non_lazy_ptr:
	.indirect_symbol __cpu_has_altivec
	.long 0
	.text
#endif

LEAF(__longjmp)
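; On entry (standard PowerPC argument registers):
;	r3 = pointer to the jmp_buf ("env"), r4 = value to return ("val")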

	; need to restore FPRs or VRs?

	lwz	r5,JMP_flags(r3)
	lwz	r6,JMP_addr_at_setjmp(r3)
	rlwinm	r7,r5,0,vectorUsedbit,vectorUsedbit
	rlwinm	r8,r5,0,floatUsedbit,floatUsedbit
	cmpw	cr1,r3,r6		; jmp_buf still at same address?
	cmpwi	cr3,r7,0		; set cr3 iff VRs in use (non-volatile CR)
	cmpwi	cr4,r8,0		; set cr4 iff FPRs in use (non-volatile CR)
	beq+	cr1,LRestoreVRs

	; jmp_buf was moved since setjmp (or is uninitialized.)
	; We must move VRs and FPRs to be quadword aligned at present address.
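	; (lvx/stvx operate on 16-byte-aligned addresses, so the VR/FPR save
	; area sits at a quadword boundary computed from the jmp_buf's address;
	; its offset within the buffer therefore changes if the buffer moves,
	; and the saved data must be shifted before it can be reloaded.)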

	stw	r3,JMP_addr_at_setjmp(r3)	; update, in case we longjmp to this again
	mr	r31,r4			; save "val" arg across memmove
	mr	r30,r3			; and jmp_buf ptr
	addi	r3,r3,JMP_vr_base_addr
	addi	r4,r6,JMP_vr_base_addr
	rlwinm	r3,r3,0,0,27		; r3 <- QW aligned addr where they should be
	rlwinm	r4,r4,0,0,27		; r4 <- QW aligned addr where they originally were
	sub	r7,r4,r6		; r7 <- offset of VRs/FPRs within jmp_buf
	add	r4,r30,r7		; r4 <- where they are now
	li	r5,(JMP_buf_end - JMP_vr_base_addr)
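	; r5 <- byte count of the VR/FPR area; the call below is effectively
	; memmove(dst=r3, src=r4, len=r5)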
#if defined(__DYNAMIC__)
	bcl	20,31,1f		; Get pic-base
1:	mflr	r12
	addis	r12, r12, ha16(L_memmove$non_lazy_ptr - 1b)
	lwz	r12, lo16(L_memmove$non_lazy_ptr - 1b)(r12)
	mtctr	r12			; Get address left by dyld
	bctrl
#else
	bl	_memmove
#endif
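	; r30/r31 are non-volatile, so they survive the memmove call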
	mr	r3,r30
	mr	r4,r31

	; Restore VRs iff any
	;	cr3 - bne if VRs
	;	cr4 - bne if FPRs

LRestoreVRs:
	beq+	cr3,LZeroVRSave		; no VRs
	lwz	r0,JMP_vrsave(r3)
	addi	r6,r3,JMP_vr_base_addr
	cmpwi	r0,0			; any live VRs?
	mtspr	VRSave,r0
	beq+	LRestoreFPRs
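	; reload the non-volatile vector registers v20-v31, 16 bytes apart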
	lvx	v20,0,r6
	li	r7,16*1
	lvx	v21,r7,r6
	li	r7,16*2
	lvx	v22,r7,r6
	li	r7,16*3
	lvx	v23,r7,r6
	li	r7,16*4
	lvx	v24,r7,r6
	li	r7,16*5
	lvx	v25,r7,r6
	li	r7,16*6
	lvx	v26,r7,r6
	li	r7,16*7
	lvx	v27,r7,r6
	li	r7,16*8
	lvx	v28,r7,r6
	li	r7,16*9
	lvx	v29,r7,r6
	li	r7,16*10
	lvx	v30,r7,r6
	li	r7,16*11
	lvx	v31,r7,r6
	b	LRestoreFPRs		; skip zeroing VRSave

	; Zero VRSave iff Altivec is supported, but VRs were not in use
	; at setjmp time.  This covers the case where VRs are first used after
	; the setjmp but before the longjmp, and where VRSave is nonzero at
	; the longjmp.  We need to zero it now, or it will always remain
	; nonzero since they are sticky bits.

LZeroVRSave:
#if defined(__DYNAMIC__)
	bcl	20,31,1f
1:	mflr	r9			; get our address
	addis	r6,r9,ha16(L__cpu_has_altivec$non_lazy_ptr - 1b)
	lwz	r7,lo16(L__cpu_has_altivec$non_lazy_ptr - 1b)(r6)
	lwz	r7,0(r7)		; load the flag
#else
	lis	r7, ha16(__cpu_has_altivec)
	lwz	r7, lo16(__cpu_has_altivec)(r7)
#endif
	cmpwi	r7,0
	li	r8,0
	beq	LRestoreFPRs		; no Altivec, so skip
	mtspr	VRSave,r8

	; Restore FPRs if any
	;	cr4 - bne iff FPRs

LRestoreFPRs:
	beq	cr4,LRestoreGPRs	; FPRs not in use at setjmp
	addi	r6,r3,JMP_fp_base_addr
	rlwinm	r6,r6,0,0,27		; mask off low 4 bits to qw align
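	; reload the non-volatile floating-point registers f14-f31, 8 bytes each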
	lfd	f14,0*8(r6)
	lfd	f15,1*8(r6)
	lfd	f16,2*8(r6)
	lfd	f17,3*8(r6)
	lfd	f18,4*8(r6)
	lfd	f19,5*8(r6)
	lfd	f20,6*8(r6)
	lfd	f21,7*8(r6)
	lfd	f22,8*8(r6)
	lfd	f23,9*8(r6)
	lfd	f24,10*8(r6)
	lfd	f25,11*8(r6)
	lfd	f26,12*8(r6)
	lfd	f27,13*8(r6)
	lfd	f28,14*8(r6)
	lfd	f29,15*8(r6)
	lfd	f30,16*8(r6)
	lfd	f31,17*8(r6)

	; Restore GPRs

LRestoreGPRs:
	lwz	r31, JMP_r31(r3)
	/* r1, r2, r13-r30 */
	lwz	r1, JMP_r1 (r3)
	lwz	r2, JMP_r2 (r3)
	lwz	r13, JMP_r13(r3)
	lwz	r14, JMP_r14(r3)
	lwz	r15, JMP_r15(r3)
	lwz	r16, JMP_r16(r3)
	lwz	r17, JMP_r17(r3)
	lwz	r18, JMP_r18(r3)
	lwz	r19, JMP_r19(r3)
	lwz	r20, JMP_r20(r3)
	lwz	r21, JMP_r21(r3)
	lwz	r22, JMP_r22(r3)
	lwz	r23, JMP_r23(r3)
	lwz	r24, JMP_r24(r3)
	lwz	r25, JMP_r25(r3)
	lwz	r26, JMP_r26(r3)
	lwz	r27, JMP_r27(r3)
	lwz	r28, JMP_r28(r3)
	lwz	r29, JMP_r29(r3)
	lwz	r30, JMP_r30(r3)
	lwz	r0, JMP_cr(r3)
	mtcrf	0xff,r0
	lwz	r0, JMP_lr(r3)
	mtlr	r0
	lwz	r0, JMP_ctr(r3)		; XXX	ctr is volatile
	mtctr	r0
	lwz	r0, JMP_xer(r3)		; XXX	xer is volatile
	mtxer	r0
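	; return "val"; per longjmp semantics a val of 0 is delivered as 1,
	; so the setjmp call site never appears to return 0 twice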
	mr.	r3, r4
	bnelr
	li	r3, 1
	blr