/* This is a simple version of setjmp and longjmp for the PowerPC.
   Ian Lance Taylor, Cygnus Support, 9 Feb 1994.
   Modified by Jeff Johnston, Red Hat Inc. 2 Oct 2001.  */

#include "ppc-asm.h"
---|
/* int setjmp (jmp_buf env);

   Save the callee-saved machine state into the buffer addressed by r3
   and return 0.  longjmp restores this state, so the buffer layout and
   the alignment fix-up below must stay in exact agreement with longjmp.

   In:     r3 = pointer to the jmp_buf
   Out:    r3 = 0
   Uses:   r4 as scratch (volatile under the PowerPC ABI, safe to clobber).  */
FUNC_START(setjmp)
#ifdef __ALTIVEC__
	addi	3,3,15		# align Altivec to 16 byte boundary
	rlwinm	3,3,0,0,27	# r3 &= ~15 (clear mask keeps bits 0..27)
#else
	addi	3,3,7		# align to 8 byte boundary
	rlwinm	3,3,0,0,28	# r3 &= ~7 (clear mask keeps bits 0..28)
#endif
#if __SPE__
	/* If we are E500, then save 64-bit registers.  evstdd stores the
	   full 64-bit SPE view of each GPR (upper and lower halves), so
	   each slot is 8 bytes instead of 4.  The set saved is the same:
	   r1 (stack pointer), r2, and the callee-saved r13-r31.  */
	evstdd	1,0(3)		# offset 0
	evstdd	2,8(3)		# offset 8
	evstdd	13,16(3)	# offset 16
	evstdd	14,24(3)	# offset 24
	evstdd	15,32(3)	# offset 32
	evstdd	16,40(3)	# offset 40
	evstdd	17,48(3)	# offset 48
	evstdd	18,56(3)	# offset 56
	evstdd	19,64(3)	# offset 64
	evstdd	20,72(3)	# offset 72
	evstdd	21,80(3)	# offset 80
	evstdd	22,88(3)	# offset 88
	evstdd	23,96(3)	# offset 96
	evstdd	24,104(3)	# offset 104
	evstdd	25,112(3)	# offset 112
	evstdd	26,120(3)	# offset 120
	evstdd	27,128(3)	# offset 128
	evstdd	28,136(3)	# offset 136
	evstdd	29,144(3)	# offset 144
	evstdd	30,152(3)	# offset 152
	evstdd	31,160(3)	# offset 160

	/* Add 164 to r3 to account for the amount of data we just
	   stored.  Note that we are not adding 168 because the next
	   store instruction uses an offset of 4.  */
	addi	3,3,164
#else
	/* 32-bit path: save r1 (stack pointer), r2, and the callee-saved
	   r13-r31, one word each.  stwu bumps r3 before each store, so r3
	   walks through the buffer and ends pointing at the last slot.  */
	stw	1,0(3)		# offset 0
	stwu	2,4(3)		# offset 4
	stwu	13,4(3)		# offset 8
	stwu	14,4(3)		# offset 12
	stwu	15,4(3)		# offset 16
	stwu	16,4(3)		# offset 20
	stwu	17,4(3)		# offset 24
	stwu	18,4(3)		# offset 28
	stwu	19,4(3)		# offset 32
	stwu	20,4(3)		# offset 36
	stwu	21,4(3)		# offset 40
	stwu	22,4(3)		# offset 44
	stwu	23,4(3)		# offset 48
	stwu	24,4(3)		# offset 52
	stwu	25,4(3)		# offset 56
	stwu	26,4(3)		# offset 60
	stwu	27,4(3)		# offset 64
	stwu	28,4(3)		# offset 68
	stwu	29,4(3)		# offset 72
	stwu	30,4(3)		# offset 76
	stwu	31,4(3)		# offset 80
#endif

	/* From this point on until the end of this function, add 84
	   to the offset shown if __SPE__.  This difference comes from
	   the fact that we save 21 64-bit registers instead of 21
	   32-bit registers above.  */
	mflr	4		# save the link register (return address)
	stwu	4,4(3)		# offset 84
	mfcr	4		# save the condition register (CR2-CR4 are callee-saved)
	stwu	4,4(3)		# offset 88
			# one word pad to get floating point aligned on 8 byte boundary

	/* Check whether we need to save FPRs.  Checking __NO_FPRS__
	   on its own would be enough for GCC 4.1 and above, but older
	   compilers only define _SOFT_FLOAT, so check both.  */
#if !defined (__NO_FPRS__) && !defined (_SOFT_FLOAT)
	/* Save the callee-saved floating point registers f14-f31.  */
	stfdu	14,8(3)		# offset 96
	stfdu	15,8(3)		# offset 104
	stfdu	16,8(3)		# offset 112
	stfdu	17,8(3)		# offset 120
	stfdu	18,8(3)		# offset 128
	stfdu	19,8(3)		# offset 136
	stfdu	20,8(3)		# offset 144
	stfdu	21,8(3)		# offset 152
	stfdu	22,8(3)		# offset 160
	stfdu	23,8(3)		# offset 168
	stfdu	24,8(3)		# offset 176
	stfdu	25,8(3)		# offset 184
	stfdu	26,8(3)		# offset 192
	stfdu	27,8(3)		# offset 200
	stfdu	28,8(3)		# offset 208
	stfdu	29,8(3)		# offset 216
	stfdu	30,8(3)		# offset 224
	stfdu	31,8(3)		# offset 232
#endif

	/* This requires a total of 21 * 4 + 18 * 8 + 4 + 4 + 4
	   bytes == 60 * 4 bytes == 240 bytes.  */

#ifdef __ALTIVEC__
	/* save Altivec vrsave and vr20-vr31 registers */
	mfspr	4,256		# vrsave register
	stwu	4,16(3)		# offset 248
	addi	3,3,8		# advance to the next 16-byte-aligned slot
	/* stvx has no update form and ignores the low 4 address bits,
	   so step r3 by hand between 16-byte vector stores.  */
	stvx	20,0,3		# offset 256
	addi	3,3,16
	stvx	21,0,3		# offset 272
	addi	3,3,16
	stvx	22,0,3		# offset 288
	addi	3,3,16
	stvx	23,0,3		# offset 304
	addi	3,3,16
	stvx	24,0,3		# offset 320
	addi	3,3,16
	stvx	25,0,3		# offset 336
	addi	3,3,16
	stvx	26,0,3		# offset 352
	addi	3,3,16
	stvx	27,0,3		# offset 368
	addi	3,3,16
	stvx	28,0,3		# offset 384
	addi	3,3,16
	stvx	29,0,3		# offset 400
	addi	3,3,16
	stvx	30,0,3		# offset 416
	addi	3,3,16
	stvx	31,0,3		# offset 432

	/* This requires a total of 240 + 8 + 8 + 12 * 16 == 448 bytes. */
#endif
	li	3,0		# direct call to setjmp always returns 0
	blr
FUNC_END(setjmp)
---|
138 | |
---|
139 | |
---|
/* void longjmp (jmp_buf env, int val);

   Restore the machine state saved by setjmp from the buffer at r3 and
   resume execution after the matching setjmp call, which then appears
   to return val (or 1 if val is 0).  The alignment fix-up and buffer
   layout below mirror setjmp exactly.

   In:     r3 = pointer to the jmp_buf, r4 = val
   Out:    does not return to the caller; setjmp's caller sees
           r3 = val, or 1 when val == 0.
   Uses:   r5 as scratch.  */
FUNC_START(longjmp)
#ifdef __ALTIVEC__
	addi	3,3,15		# align Altivec to 16 byte boundary
	rlwinm	3,3,0,0,27	# r3 &= ~15, same fix-up as setjmp
#else
	addi	3,3,7		# align to 8 byte boundary
	rlwinm	3,3,0,0,28	# r3 &= ~7, same fix-up as setjmp
#endif
#if __SPE__
	/* If we are E500, then restore 64-bit registers.  evldd reloads
	   the full 64-bit SPE view of each GPR saved by evstdd.  */
	evldd	1,0(3)		# offset 0
	evldd	2,8(3)		# offset 8
	evldd	13,16(3)	# offset 16
	evldd	14,24(3)	# offset 24
	evldd	15,32(3)	# offset 32
	evldd	16,40(3)	# offset 40
	evldd	17,48(3)	# offset 48
	evldd	18,56(3)	# offset 56
	evldd	19,64(3)	# offset 64
	evldd	20,72(3)	# offset 72
	evldd	21,80(3)	# offset 80
	evldd	22,88(3)	# offset 88
	evldd	23,96(3)	# offset 96
	evldd	24,104(3)	# offset 104
	evldd	25,112(3)	# offset 112
	evldd	26,120(3)	# offset 120
	evldd	27,128(3)	# offset 128
	evldd	28,136(3)	# offset 136
	evldd	29,144(3)	# offset 144
	evldd	30,152(3)	# offset 152
	evldd	31,160(3)	# offset 160

	/* Add 164 to r3 to account for the amount of data we just
	   loaded.  Note that we are not adding 168 because the next
	   load instruction uses an offset of 4.  */
	addi	3,3,164
#else
	/* 32-bit path: restore r1 (stack pointer), r2, and the
	   callee-saved r13-r31; lwzu walks r3 through the buffer.  */
	lwz	1,0(3)		# offset 0
	lwzu	2,4(3)		# offset 4
	lwzu	13,4(3)		# offset 8
	lwzu	14,4(3)		# offset 12
	lwzu	15,4(3)		# offset 16
	lwzu	16,4(3)		# offset 20
	lwzu	17,4(3)		# offset 24
	lwzu	18,4(3)		# offset 28
	lwzu	19,4(3)		# offset 32
	lwzu	20,4(3)		# offset 36
	lwzu	21,4(3)		# offset 40
	lwzu	22,4(3)		# offset 44
	lwzu	23,4(3)		# offset 48
	lwzu	24,4(3)		# offset 52
	lwzu	25,4(3)		# offset 56
	lwzu	26,4(3)		# offset 60
	lwzu	27,4(3)		# offset 64
	lwzu	28,4(3)		# offset 68
	lwzu	29,4(3)		# offset 72
	lwzu	30,4(3)		# offset 76
	lwzu	31,4(3)		# offset 80
#endif
	/* From this point on until the end of this function, add 84
	   to the offset shown if __SPE__.  This difference comes from
	   the fact that we restore 21 64-bit registers instead of 21
	   32-bit registers above.  */
	lwzu	5,4(3)		# offset 84
	mtlr	5		# restore the link register; blr below jumps here
	lwzu	5,4(3)		# offset 88
	mtcrf	255,5		# restore all eight condition register fields
			# one word pad to get floating point aligned on 8 byte boundary

	/* Check whether we need to restore FPRs.  Checking
	   __NO_FPRS__ on its own would be enough for GCC 4.1 and
	   above, but older compilers only define _SOFT_FLOAT, so
	   check both.  */
#if !defined (__NO_FPRS__) && !defined (_SOFT_FLOAT)
	/* Restore the callee-saved floating point registers f14-f31.  */
	lfdu	14,8(3)		# offset 96
	lfdu	15,8(3)		# offset 104
	lfdu	16,8(3)		# offset 112
	lfdu	17,8(3)		# offset 120
	lfdu	18,8(3)		# offset 128
	lfdu	19,8(3)		# offset 136
	lfdu	20,8(3)		# offset 144
	lfdu	21,8(3)		# offset 152
	lfdu	22,8(3)		# offset 160
	lfdu	23,8(3)		# offset 168
	lfdu	24,8(3)		# offset 176
	lfdu	25,8(3)		# offset 184
	lfdu	26,8(3)		# offset 192
	lfdu	27,8(3)		# offset 200
	lfdu	28,8(3)		# offset 208
	lfdu	29,8(3)		# offset 216
	lfdu	30,8(3)		# offset 224
	lfdu	31,8(3)		# offset 232
#endif

#ifdef __ALTIVEC__
	/* restore Altivec vrsave and v20-v31 registers */
	lwzu	5,16(3)		# offset 248
	mtspr	256,5		# vrsave
	addi	3,3,8		# advance to the next 16-byte-aligned slot
	/* lvx has no update form and ignores the low 4 address bits,
	   so step r3 by hand between 16-byte vector loads.  */
	lvx	20,0,3		# offset 256
	addi	3,3,16
	lvx	21,0,3		# offset 272
	addi	3,3,16
	lvx	22,0,3		# offset 288
	addi	3,3,16
	lvx	23,0,3		# offset 304
	addi	3,3,16
	lvx	24,0,3		# offset 320
	addi	3,3,16
	lvx	25,0,3		# offset 336
	addi	3,3,16
	lvx	26,0,3		# offset 352
	addi	3,3,16
	lvx	27,0,3		# offset 368
	addi	3,3,16
	lvx	28,0,3		# offset 384
	addi	3,3,16
	lvx	29,0,3		# offset 400
	addi	3,3,16
	lvx	30,0,3		# offset 416
	addi	3,3,16
	lvx	31,0,3		# offset 432
#endif

	/* setjmp must never appear to return 0 from a longjmp, so map
	   val == 0 to 1.  mr. copies val into r3 and sets CR0; bclr+
	   (BO=4 "branch if condition false", BI=2 = CR0[EQ]) returns
	   through the restored LR when val != 0, with the '+' hint
	   marking that branch as likely taken.  */
	mr.	3,4
	bclr+	4,2
	li	3,1		# val was 0: return 1 instead
	blr
FUNC_END(longjmp)
---|