/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* void _longjmp(jmp_buf env, int val); */
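/*
 * For reference, a minimal sketch of the C-level contract this file
 * implements (standard _setjmp/_longjmp semantics; the underscore variants
 * do not save or restore the signal mask; the example itself is not part
 * of the original source):
 *
 *	#include <setjmp.h>
 *
 *	static jmp_buf env;
 *
 *	void example(void) {
 *		if (_setjmp(env) == 0) {	// direct call: returns 0
 *			_longjmp(env, 7);	// unwinds back into _setjmp,
 *		}				// which now appears to return 7
 *		// a val of 0 is forced to 1, so _setjmp never re-returns 0
 *	}
 */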
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 *	File: sys/ppc/_longjmp.s
 *
 *	Implements _longjmp()
 *
 *	8 September 1998	Matt Watson (mwatson@apple.com)
 *		Created. Derived from longjmp.s
 */
#include <architecture/ppc/asm_help.h>
#include "_setjmp.h"		/* jmp_buf layout: the JMP_* offsets used below */

#define	VRSave	256		/* SPR number of the VRSAVE register */
/* special flag bit definitions copied from /osfmk/ppc/thread_act.h */

#define	floatUsedbit	1
#define	vectorUsedbit	2
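/* (The kernel sets these in its per-thread flags the first time a thread
 * touches the FPU or the vector unit; setjmp snapshots that word into
 * JMP_flags, which is what the tests below examine.) */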
#if defined(__DYNAMIC__)
	.non_lazy_symbol_pointer
L_memmove$non_lazy_ptr:
	.indirect_symbol _memmove
	.long	0
	.non_lazy_symbol_pointer
L__cpu_has_altivec$non_lazy_ptr:
	.indirect_symbol __cpu_has_altivec
	.long	0
	.text
#endif
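; Each non-lazy pointer above is a 4-byte slot that dyld fills in at load
; time with the address of the named symbol; the code below fetches the
; slot pic-relative, avoiding absolute relocations in the text section.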
LEAF(__longjmp)

	; need to restore FPRs or VRs?

	lwz	r5,JMP_flags(r3)
	lwz	r6,JMP_addr_at_setjmp(r3)
	rlwinm	r7,r5,0,vectorUsedbit,vectorUsedbit
	rlwinm	r8,r5,0,floatUsedbit,floatUsedbit
	cmpw	cr1,r3,r6		; jmp_buf still at same address?
	cmpwi	cr3,r7,0		; set cr3 iff VRs in use (non-volatile CR)
	cmpwi	cr4,r8,0		; set cr4 iff FPRs in use (non-volatile CR)
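	; rlwinm with a zero rotate and MB=ME isolates a single bit (PowerPC
	; numbers bits from the MSB down, so bits 1 and 2 sit near the top of
	; the word). cr3 and cr4 are chosen because cr2-cr4 survive function
	; calls, so these results outlive the possible memmove below.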
	beq+	cr1,LRestoreVRs		; jmp_buf is where setjmp left it, no copy needed

	; jmp_buf was moved since setjmp (or is uninitialized.)
	; We must move VRs and FPRs to be quadword aligned at present address.

	stw	r3,JMP_addr_at_setjmp(r3) ; update, in case we longjmp to this again
	mr	r31,r4			; save "val" arg across memmove
	mr	r30,r3			; and jmp_buf ptr
	addi	r3,r3,JMP_vr_base_addr
	addi	r4,r6,JMP_vr_base_addr
	rlwinm	r3,r3,0,0,27		; r3 <- QW aligned addr where they should be
	rlwinm	r4,r4,0,0,27		; r4 <- QW aligned addr where they originally were
	sub	r7,r4,r6		; r7 <- offset of VRs/FPRs within jmp_buf
	add	r4,r30,r7		; r4 <- where they are now
	li	r5,(JMP_buf_end - JMP_vr_base_addr)
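	; Worked example of the qw-align trick: rlwinm rD,rS,0,0,27 keeps bits
	; 0-27 (IBM numbering) and clears the low 4 bits, e.g. 0x1003 -> 0x1000.
	; lvx/stvx require (and silently enforce) 16-byte aligned addresses,
	; which is why the VR area floats inside an oversized jmp_buf.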
#if defined(__DYNAMIC__)
	bcl	20,31,1f		; Get pic-base
1:	mflr	r12
	addis	r12,r12,ha16(L_memmove$non_lazy_ptr - 1b)
	lwz	r12,lo16(L_memmove$non_lazy_ptr - 1b)(r12)
	mtctr	r12			; Get address left by dyld
	bctrl
#else
	bl	_memmove
#endif
	mr	r3,r30			; recover jmp_buf ptr...
	mr	r4,r31			; ...and "val" arg after the call
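	; bcl 20,31,1f is the canonical "get the PC" form: a branch-always-
	; and-link to the next instruction that PowerPC implementations
	; special-case so it does not unbalance the hardware link-register
	; stack the way a plain bl would.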
	; Restore VRs iff any

LRestoreVRs:
	beq+	cr3,LZeroVRSave		; no VRs
	lwz	r0,JMP_vrsave(r3)
	addi	r6,r3,JMP_vr_base_addr
	cmpwi	r0,0			; any live VRs?
	rlwinm	r6,r6,0,0,27		; qw align the save area
	mtspr	VRSave,r0		; restore the saved VRSave mask
	beq+	LRestoreFPRs		; mask was 0, so no VRs to reload
	lvx	v20,0,r6		; reload the non-volatile VRs,
	addi	r6,r6,16		; one quadword apiece
	lvx	v21,0,r6		; (v22..v31 follow the same pattern)
	b	LRestoreFPRs		; skip zeroing VRSave
	; Zero VRSave iff Altivec is supported, but VRs were not in use
	; at setjmp time. This covers the case where VRs are first used after
	; the setjmp but before the longjmp, and where VRSave is nonzero at
	; the longjmp. We need to zero it now, or it will always remain
	; nonzero, since the facility-used bits are sticky.

LZeroVRSave:
#if defined(__DYNAMIC__)
	bcl	20,31,1f		; get pic-base
1:	mflr	r9			; get our address
	addis	r6,r9,ha16(L__cpu_has_altivec$non_lazy_ptr - 1b)
	lwz	r7,lo16(L__cpu_has_altivec$non_lazy_ptr - 1b)(r6)
	lwz	r7,0(r7)		; load the flag
#else
	lis	r7,ha16(__cpu_has_altivec)
	lwz	r7,lo16(__cpu_has_altivec)(r7)
#endif
	cmpwi	r7,0			; does this CPU have Altivec?
	beq	LRestoreFPRs		; no Altivec, so skip
	li	r0,0
	mtspr	VRSave,r0		; zero VRSave
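	; __cpu_has_altivec is a Libc-private flag set up during process
	; initialization, so the capability test here costs two loads rather
	; than a fresh query on every longjmp.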
	; Restore FPRs if any

LRestoreFPRs:
	beq	cr4,LRestoreGPRs	; FPRs not in use at setjmp
	addi	r6,r3,JMP_fp_base_addr
	rlwinm	r6,r6,0,0,27		; mask off low 4 bits to qw align
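	; f14..f31 come back 8 bytes apart from the qw-aligned area, after
	; which LRestoreGPRs reloads the non-volatile GPRs, CR, and LR from
	; the jmp_buf.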
	lwz	r0,JMP_ctr(r3)		; XXX ctr is volatile
	mtctr	r0
	lwz	r0,JMP_xer(r3)		; XXX xer is volatile
	mtxer	r0
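	; ctr and xer are volatile (caller-saved) in the PowerPC ABI, so the
	; two restores above are not strictly necessary; the XXX comments
	; flag exactly that.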