xen/arch/ia64/linux-xen/minstate.h @ 10888:5379548bfc79

#include <linux/config.h>

#include <asm/cache.h>

#include "entry.h"

/*
 * For ivt.s we want to access the stack virtually so we don't have to disable translation
 * on interrupts.
 *
 * On entry:
 *      r1:     pointer to current task (ar.k6)
 */
#define MINSTATE_START_SAVE_MIN_VIRT \
(pUStk) mov ar.rsc=0;           /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
        ;; \
(pUStk) mov.m r24=ar.rnat; \
(pUStk) addl r22=IA64_RBS_OFFSET,r1;            /* compute base of RBS */ \
(pKStk) mov r1=sp;                              /* get sp */ \
        ;; \
(pUStk) lfetch.fault.excl.nt1 [r22]; \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base of memory stack */ \
(pUStk) mov r23=ar.bspstore;                    /* save ar.bspstore */ \
        ;; \
(pUStk) mov ar.bspstore=r22;                    /* switch to kernel RBS */ \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;          /* if in kernel mode, use sp (r12) */ \
        ;; \
(pUStk) mov r18=ar.bsp; \
(pUStk) mov ar.rsc=0x3;         /* set eager mode, pl 0, little-endian, loadrs=0 */

#define MINSTATE_END_SAVE_MIN_VIRT \
        bsw.1;                  /* switch back to bank 1 (must be last in insn group) */ \
        ;;
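
/*
 * A rough C-flavoured sketch of the user-stack path above (illustrative
 * only, not the actual implementation; the ar_* names merely stand in for
 * the application registers):
 *
 *      ar_rsc = 0;                     // enforced lazy mode: freeze the RSE
 *      r24 = ar_rnat;                  // capture NaT bits of the user RBS
 *      r22 = r1 + IA64_RBS_OFFSET;     // kernel RBS lives in the task area
 *      r23 = ar_bspstore;              // remember the user backing store
 *      ar_bspstore = r22;              // point the RSE at the kernel RBS
 *      r18 = ar_bsp;                   // new bsp; DO_SAVE_MIN later derives
 *                                      // the dirty-partition size from it
 *      ar_rsc = 0x3;                   // back to eager mode
 */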

/*
 * For mca_asm.S we want to access the stack physically since the state is saved before we
 * go virtual and we don't want to destroy the iip or ipsr.
 */
#ifdef XEN
# define MINSTATE_START_SAVE_MIN_PHYS \
(pKStk) movl r3=THIS_CPU(ia64_mca_data);; \
(pKStk) tpa r3 = r3;; \
(pKStk) ld8 r3 = [r3];; \
(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \
(pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
(pUStk) mov ar.rsc=0;           /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
(pUStk) addl r22=IA64_RBS_OFFSET,r1;            /* compute base of register backing store */ \
        ;; \
(pUStk) mov r24=ar.rnat; \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base of memory stack */ \
(pUStk) mov r23=ar.bspstore;                    /* save ar.bspstore */ \
(pUStk) dep r22=-1,r22,60,4;                    /* compute Xen virtual addr of RBS */ \
        ;; \
(pUStk) mov ar.bspstore=r22;                    /* switch to Xen RBS */ \
        ;; \
(pUStk) mov r18=ar.bsp; \
(pUStk) mov ar.rsc=0x3;         /* set eager mode, pl 0, little-endian, loadrs=0 */ \

# define MINSTATE_END_SAVE_MIN_PHYS \
        dep r12=-1,r12,60,4;    /* make sp a Xen virtual address */ \
        ;;
#else
# define MINSTATE_START_SAVE_MIN_PHYS \
(pKStk) mov r3=IA64_KR(PER_CPU_DATA);; \
(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \
(pKStk) ld8 r3 = [r3];; \
(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \
(pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
(pUStk) mov ar.rsc=0;           /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
(pUStk) addl r22=IA64_RBS_OFFSET,r1;            /* compute base of register backing store */ \
        ;; \
(pUStk) mov r24=ar.rnat; \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base of memory stack */ \
(pUStk) mov r23=ar.bspstore;                    /* save ar.bspstore */ \
(pUStk) dep r22=-1,r22,61,3;                    /* compute kernel virtual addr of RBS */ \
        ;; \
(pUStk) mov ar.bspstore=r22;                    /* switch to kernel RBS */ \
        ;; \
(pUStk) mov r18=ar.bsp; \
(pUStk) mov ar.rsc=0x3;         /* set eager mode, pl 0, little-endian, loadrs=0 */ \

# define MINSTATE_END_SAVE_MIN_PHYS \
        dep r12=-1,r12,61,3;    /* make sp a kernel virtual address */ \
        ;;
#endif /* XEN */
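
/*
 * The dep (deposit) instructions above turn a physical address into a
 * virtual one by forcing the top address bits on.  A minimal C sketch of
 * the arithmetic (illustrative only; dep_ones() is not a real helper):
 *
 *      #include <stdint.h>
 *
 *      // dep r=-1,r,pos,len replaces bits pos..pos+len-1 of r with ones.
 *      static uint64_t dep_ones(uint64_t r, int pos, int len)
 *      {
 *              uint64_t mask = ((1ULL << len) - 1) << pos;
 *              return r | mask;        // depositing all-ones == OR with mask
 *      }
 *
 *      // Xen:   dep r12=-1,r12,60,4  ->  r12 | 0xf000000000000000
 *      // Linux: dep r12=-1,r12,61,3  ->  r12 | 0xe000000000000000 (region 7)
 */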

#ifdef MINSTATE_VIRT
#ifdef XEN
# define MINSTATE_GET_CURRENT(reg) \
        movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;; \
        ld8 reg=[reg]
# define MINSTATE_GET_CURRENT_VIRT(reg)        MINSTATE_GET_CURRENT(reg)
#else
# define MINSTATE_GET_CURRENT(reg)     mov reg=IA64_KR(CURRENT)
#endif
# define MINSTATE_START_SAVE_MIN       MINSTATE_START_SAVE_MIN_VIRT
# define MINSTATE_END_SAVE_MIN         MINSTATE_END_SAVE_MIN_VIRT
#endif

#ifdef MINSTATE_PHYS
# ifdef XEN
# define MINSTATE_GET_CURRENT(reg) \
        movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;; \
        tpa reg=reg;; \
        ld8 reg=[reg];; \
        tpa reg=reg;;
# define MINSTATE_GET_CURRENT_VIRT(reg) \
        movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;; \
        tpa reg=reg;; \
        ld8 reg=[reg];;
#else
# define MINSTATE_GET_CURRENT(reg)     mov reg=IA64_KR(CURRENT);; tpa reg=reg
#endif /* XEN */
# define MINSTATE_START_SAVE_MIN       MINSTATE_START_SAVE_MIN_PHYS
# define MINSTATE_END_SAVE_MIN         MINSTATE_END_SAVE_MIN_PHYS
#endif
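
/*
 * In the physical-mode variants above, tpa (translate to physical address)
 * is applied twice: once to the address of the per-CPU cpu_kr slot so that
 * it can be dereferenced with translation disabled, and once more to the
 * loaded `current' pointer, which is stored as a virtual address.  The
 * _VIRT flavour stops after the load because its caller wants the virtual
 * pointer.
 */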

/*
 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
 * the minimum state necessary that allows us to turn psr.ic back
 * on.
 *
 * Assumed state upon entry:
 *      psr.ic: off
 *      r31:    contains saved predicates (pr)
 *
 * Upon exit, the state is as follows:
 *      psr.ic: off
 *      r2 = points to &pt_regs.r16
 *      r8 = contents of ar.ccv
 *      r9 = contents of ar.csd
 *      r10 = contents of ar.ssd
 *      r11 = FPSR_DEFAULT
 *      r12 = kernel sp (kernel virtual address)
 *      r13 = points to current task_struct (kernel virtual address)
 *      p15 = TRUE if psr.i is set in cr.ipsr
 *      predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
 *              preserved
 *
 * Note that psr.ic is NOT turned on by this macro.  This is so that
 * we can pass interruption state as arguments to a handler.
 */
#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
        MINSTATE_GET_CURRENT(r16);      /* M (or M;;I) */ \
        mov r27=ar.rsc;                 /* M */ \
        mov r20=r1;                     /* A */ \
        mov r25=ar.unat;                /* M */ \
        mov r29=cr.ipsr;                /* M */ \
        mov r26=ar.pfs;                 /* I */ \
        mov r28=cr.iip;                 /* M */ \
        mov r21=ar.fpsr;                /* M */ \
        COVER;                          /* B;; (or nothing) */ \
        ;; \
        adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
        ;; \
        ld1 r17=[r16];                  /* load current->thread.on_ustack flag */ \
        st1 [r16]=r0;                   /* clear current->thread.on_ustack flag */ \
        adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
        /* switch from user to kernel RBS: */ \
        ;; \
        invala;                         /* M */ \
        SAVE_IFS; \
        cmp.eq pKStk,pUStk=r0,r17;      /* are we in kernel mode already? */ \
        ;; \
        MINSTATE_START_SAVE_MIN \
        adds r17=2*L1_CACHE_BYTES,r1;   /* really: biggest cache-line size */ \
        adds r16=PT(CR_IPSR),r1; \
        ;; \
        lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
        st8 [r16]=r29;                  /* save cr.ipsr */ \
        ;; \
        lfetch.fault.excl.nt1 [r17]; \
        tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
        mov r29=b0 \
        ;; \
        adds r16=PT(R8),r1;             /* initialize first base pointer */ \
        adds r17=PT(R9),r1;             /* initialize second base pointer */ \
(pKStk) mov r18=r0;                     /* make sure r18 isn't NaT */ \
        ;; \
.mem.offset 0,0; st8.spill [r16]=r8,16; \
.mem.offset 8,0; st8.spill [r17]=r9,16; \
        ;; \
.mem.offset 0,0; st8.spill [r16]=r10,24; \
.mem.offset 8,0; st8.spill [r17]=r11,24; \
        ;; \
        st8 [r16]=r28,16;               /* save cr.iip */ \
        st8 [r17]=r30,16;               /* save cr.ifs */ \
(pUStk) sub r18=r18,r22;                /* r18=RSE.ndirty*8 */ \
        mov r8=ar.ccv; \
        mov r9=ar.csd; \
        mov r10=ar.ssd; \
        movl r11=FPSR_DEFAULT;          /* L-unit */ \
        ;; \
        st8 [r16]=r25,16;               /* save ar.unat */ \
        st8 [r17]=r26,16;               /* save ar.pfs */ \
        shl r18=r18,16;                 /* compute ar.rsc to be used for "loadrs" */ \
        ;; \
        st8 [r16]=r27,16;               /* save ar.rsc */ \
(pUStk) st8 [r17]=r24,16;               /* save ar.rnat */ \
(pKStk) adds r17=16,r17;                /* skip over ar_rnat field */ \
        ;;                              /* avoid RAW on r16 & r17 */ \
(pUStk) st8 [r16]=r23,16;               /* save ar.bspstore */ \
        st8 [r17]=r31,16;               /* save predicates */ \
(pKStk) adds r16=16,r16;                /* skip over ar_bspstore field */ \
        ;; \
        st8 [r16]=r29,16;               /* save b0 */ \
        st8 [r17]=r18,16;               /* save ar.rsc value for "loadrs" */ \
        cmp.eq pNonSys,pSys=r0,r0       /* initialize pSys=0, pNonSys=1 */ \
        ;; \
.mem.offset 0,0; st8.spill [r16]=r20,16;        /* save original r1 */ \
.mem.offset 8,0; st8.spill [r17]=r12,16; \
        adds r12=-16,r1;                /* switch to kernel memory stack (with 16 bytes of scratch) */ \
        ;; \
.mem.offset 0,0; st8.spill [r16]=r13,16; \
.mem.offset 8,0; st8.spill [r17]=r21,16;        /* save ar.fpsr */ \
        /* XEN mov r13=IA64_KR(CURRENT);*/      /* establish `current' */ \
        MINSTATE_GET_CURRENT_VIRT(r13);         /* XEN establish `current' */ \
        ;; \
.mem.offset 0,0; st8.spill [r16]=r15,16; \
.mem.offset 8,0; st8.spill [r17]=r14,16; \
        ;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
        adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
        ;; \
        EXTRA; \
        movl r1=__gp;                   /* establish kernel global pointer */ \
        ;; \
        MINSTATE_END_SAVE_MIN
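
/*
 * The "loadrs" value saved by DO_SAVE_MIN is computed roughly as follows
 * (a hedged C sketch of the r18 arithmetic; the variable names are
 * illustrative only):
 *
 *      // After the RBS switch, ar.bsp minus the kernel RBS base (r18 - r22)
 *      // is the number of bytes of the interrupted context that the RSE
 *      // still holds (the "dirty" partition, including NaT collections).
 *      uint64_t ndirty8 = bsp - kernel_rbs_base;       // r18 = RSE.ndirty*8
 *      uint64_t rsc_for_loadrs = ndirty8 << 16;        // ar.rsc.loadrs is
 *                                                      // bits 16..29
 *
 * The exit path (such as ia64_leave_kernel) programs ar.rsc with this value
 * so that the loadrs instruction reloads the user's stacked registers from
 * the kernel RBS before the backing store is switched back.
 */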

/*
 * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
 *
 * Assumed state upon entry:
 *      psr.ic: on
 *      r2:     points to &pt_regs.r16
 *      r3:     points to &pt_regs.r17
 *      r8:     contents of ar.ccv
 *      r9:     contents of ar.csd
 *      r10:    contents of ar.ssd
 *      r11:    FPSR_DEFAULT
 *
 * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
 */
#define SAVE_REST \
.mem.offset 0,0; st8.spill [r2]=r16,16; \
.mem.offset 8,0; st8.spill [r3]=r17,16; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r18,16; \
.mem.offset 8,0; st8.spill [r3]=r19,16; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r20,16; \
.mem.offset 8,0; st8.spill [r3]=r21,16; \
        mov r18=b6; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r22,16; \
.mem.offset 8,0; st8.spill [r3]=r23,16; \
        mov r19=b7; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r24,16; \
.mem.offset 8,0; st8.spill [r3]=r25,16; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r26,16; \
.mem.offset 8,0; st8.spill [r3]=r27,16; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r28,16; \
.mem.offset 8,0; st8.spill [r3]=r29,16; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r30,16; \
.mem.offset 8,0; st8.spill [r3]=r31,32; \
        ;; \
        mov ar.fpsr=r11;        /* M-unit */ \
        st8 [r2]=r8,8;          /* ar.ccv */ \
        adds r24=PT(B6)-PT(F7),r3; \
        ;; \
        stf.spill [r2]=f6,32; \
        stf.spill [r3]=f7,32; \
        ;; \
        stf.spill [r2]=f8,32; \
        stf.spill [r3]=f9,32; \
        ;; \
        stf.spill [r2]=f10,32; \
        stf.spill [r3]=f11,24; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r4,16; \
.mem.offset 8,0; st8.spill [r3]=r5,16; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r6,16; \
.mem.offset 8,0; st8.spill [r3]=r7; \
        adds r25=PT(B7)-PT(R7),r3; \
        ;; \
        st8 [r24]=r18,16;       /* b6 */ \
        st8 [r25]=r19,16;       /* b7 */ \
        ;; \
        st8 [r24]=r9;           /* ar.csd */ \
        mov r26=ar.unat; \
        ;; \
        st8 [r25]=r10;          /* ar.ssd */ \
        st8 [r2]=r26;           /* eml_unat */ \
        ;;
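
/*
 * Note on the final store above: each st8.spill records the source
 * register's NaT bit in ar.unat (at a bit position derived from the store
 * address), so ar.unat is read only after all the spills have completed
 * and is then saved as eml_unat.
 */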

#define SAVE_MIN_WITH_COVER     DO_SAVE_MIN(cover, mov r30=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
#define SAVE_MIN                DO_SAVE_MIN( , mov r30=r0, )
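
/*
 * The three flavours differ only in the COVER and EXTRA arguments:
 * SAVE_MIN_WITH_COVER issues `cover' to allocate a new zero-size frame
 * (and, with psr.ic off, latch the interrupted frame's info into cr.ifs,
 * which is then saved from r30); SAVE_MIN_WITH_COVER_R19 additionally
 * preserves r19 in r15 for the handler; SAVE_MIN omits `cover' and stores
 * 0 in the ifs slot.
 */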