ia64/xen-unstable

view xen/include/asm-ia64/vhpt.h @ 5704:9b73afea874e

Certain types of event channel are now auto-bound to vcpu0 by Xen.
Make sure that xenolinux agrees with this.
author sos22@douglas.cl.cam.ac.uk
date Fri Jul 08 15:35:43 2005 +0000 (2005-07-08)
parents 649cd37aa1ab
children f5c4042212b0 b2f4823b6ff0 b35215021b32 9af349b055e5 3233e7ecfa9f
#ifndef ASM_VHPT_H
#define ASM_VHPT_H

#define VHPT_ENABLED 1
#define VHPT_ENABLED_REGION_0_TO_6 1
#define VHPT_ENABLED_REGION_7 0

#if 0
#define VHPT_CACHE_ENTRY_SIZE 64
#define VHPT_CACHE_MASK 2097151
#define VHPT_CACHE_NUM_ENTRIES 32768
#define VHPT_NUM_ENTRIES 2097152
#define VHPT_CACHE_ENTRY_SIZE_LOG2 6
#define VHPT_SIZE_LOG2 26 //????
#define VHPT_PAGE_SHIFT 26 //????
#else
//#define VHPT_CACHE_NUM_ENTRIES 2048
//#define VHPT_NUM_ENTRIES 131072
//#define VHPT_CACHE_MASK 131071
//#define VHPT_SIZE_LOG2 22 //????
#define VHPT_CACHE_ENTRY_SIZE 64
#define VHPT_CACHE_NUM_ENTRIES 8192
#define VHPT_NUM_ENTRIES 524288
#define VHPT_CACHE_MASK 524287
#define VHPT_SIZE_LOG2 24 //????
#define VHPT_PAGE_SHIFT 24 //????
#endif
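
//
// Illustrative consistency checks -- not part of the original header.  The
// values above are related: the cache mask covers the whole V-Cache
// (VHPT_CACHE_NUM_ENTRIES * VHPT_CACHE_ENTRY_SIZE bytes), and the VHPT holds
// 2^VHPT_SIZE_LOG2 bytes of 32-byte long-format entries (struct vhpt_lf_entry
// below).  Either block of values above satisfies these checks.
//
#if (VHPT_CACHE_NUM_ENTRIES * VHPT_CACHE_ENTRY_SIZE) != (VHPT_CACHE_MASK + 1)
#error "VHPT_CACHE_MASK inconsistent with VHPT_CACHE_NUM_ENTRIES * VHPT_CACHE_ENTRY_SIZE"
#endif
#if ((1 << VHPT_SIZE_LOG2) / 32) != VHPT_NUM_ENTRIES
#error "VHPT_NUM_ENTRIES inconsistent with VHPT_SIZE_LOG2 (assuming 32-byte long-format entries)"
#endif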
// FIXME: These should be automatically generated

#define VLE_PGFLAGS_OFFSET 0
#define VLE_ITIR_OFFSET 8
#define VLE_TITAG_OFFSET 16
#define VLE_CCHAIN_OFFSET 24

#define VCE_TITAG_OFFSET 0
#define VCE_CCNEXT_OFFSET 8
#define VCE_CCPREV_OFFSET 16
#define VCE_PGFLAGS_OFFSET 24
#define VCE_ITIR_OFFSET 32
#define VCE_FNEXT_OFFSET 32
#define VCE_CCHEAD_OFFSET 40
#define VCE_VADDR_OFFSET 48

// FIXME: change and declare elsewhere
#define CAUSE_VHPT_CC_HANDLED 0
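
//
// Added commentary, not in the original header: CAUSE_VHPT_CC_HANDLED is used
// by VHPT_CCHAIN_LOOKUP below as an index into an external int_counts array of
// 8-byte counters (hence the "<< 3"); the macro bumps that slot each time a
// miss is satisfied from the collision chain.
//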
#ifndef __ASSEMBLY__

//
// VHPT collision chain entry (part of the "V-Cache")
// DO NOT CHANGE THE SIZE OF THIS STRUCTURE (see vhpt.S banked regs calculations)
//
typedef struct vcache_entry {
    union {
        struct {
            unsigned long tag : 63;   // 0-62
            unsigned long ti : 1;     // 63
        };
        unsigned long ti_tag;
    };

    struct vcache_entry *CCNext;      // collision chain next
    struct vcache_entry *CCPrev;      // collision chain previous

    union {
        struct {
            unsigned long p : 1;      // 0
            unsigned long : 1;        // 1
            unsigned long ma : 3;     // 2-4
            unsigned long a : 1;      // 5
            unsigned long d : 1;      // 6
            unsigned long pl : 2;     // 7-8
            unsigned long ar : 3;     // 9-11
            unsigned long ppn : 38;   // 12-49
            unsigned long : 2;        // 50-51
            unsigned long ed : 1;     // 52
            unsigned long translation_type : 2; // 53-54 -- hack
            unsigned long Counter : 9;          // 55-63
        };
        unsigned long page_flags;
    };

    union {
        struct {
            unsigned long : 2;        // 0-1
            unsigned long ps : 6;     // 2-7
            unsigned long key : 24;   // 8-31
            unsigned long : 32;       // 32-63
        };
        unsigned long itir;

        //
        // the free list pointer when entry not in use
        //
        struct vcache_entry *FNext;   // free list
    };
    //
    // store the head of the collision chain for removal, since thash will only
    // work if the current RID is the same as when the element was added to the chain.
    //
    struct vhpt_lf_entry *CCHead;

    unsigned long virtual_address;

    unsigned int CChainCnt;
    unsigned int Signature;
};

//
// VHPT Long Format Entry (as recognized by hw)
//
struct vhpt_lf_entry {
    unsigned long page_flags;
    unsigned long itir;
    unsigned long ti_tag;
    struct vcache_entry *CChain;
};
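
//
// Illustrative layout checks -- not part of the original header.  The
// hand-maintained VLE_*/VCE_* offsets near the top (see the FIXME there) can
// be verified against the structures at compile time.  VHPT_OFFSETOF and
// VHPT_LAYOUT_CHECK are names made up for this sketch; they rely on the usual
// offsetof() and negative-array-size tricks.
//
#define VHPT_OFFSETOF(type, field)    ((unsigned long) &((type *) 0)->field)
#define VHPT_LAYOUT_CHECK(name, expr) typedef char name[(expr) ? 1 : -1]

VHPT_LAYOUT_CHECK(vle_pgflags_chk, VHPT_OFFSETOF(struct vhpt_lf_entry, page_flags) == VLE_PGFLAGS_OFFSET);
VHPT_LAYOUT_CHECK(vle_itir_chk, VHPT_OFFSETOF(struct vhpt_lf_entry, itir) == VLE_ITIR_OFFSET);
VHPT_LAYOUT_CHECK(vle_titag_chk, VHPT_OFFSETOF(struct vhpt_lf_entry, ti_tag) == VLE_TITAG_OFFSET);
VHPT_LAYOUT_CHECK(vle_cchain_chk, VHPT_OFFSETOF(struct vhpt_lf_entry, CChain) == VLE_CCHAIN_OFFSET);
VHPT_LAYOUT_CHECK(vle_size_chk, sizeof(struct vhpt_lf_entry) == 32);

VHPT_LAYOUT_CHECK(vce_titag_chk, VHPT_OFFSETOF(struct vcache_entry, ti_tag) == VCE_TITAG_OFFSET);
VHPT_LAYOUT_CHECK(vce_ccnext_chk, VHPT_OFFSETOF(struct vcache_entry, CCNext) == VCE_CCNEXT_OFFSET);
VHPT_LAYOUT_CHECK(vce_pgflags_chk, VHPT_OFFSETOF(struct vcache_entry, page_flags) == VCE_PGFLAGS_OFFSET);
VHPT_LAYOUT_CHECK(vce_fnext_chk, VHPT_OFFSETOF(struct vcache_entry, FNext) == VCE_FNEXT_OFFSET);
VHPT_LAYOUT_CHECK(vce_cchead_chk, VHPT_OFFSETOF(struct vcache_entry, CCHead) == VCE_CCHEAD_OFFSET);
VHPT_LAYOUT_CHECK(vce_vaddr_chk, VHPT_OFFSETOF(struct vcache_entry, virtual_address) == VCE_VADDR_OFFSET);
VHPT_LAYOUT_CHECK(vce_size_chk, sizeof(struct vcache_entry) == VHPT_CACHE_ENTRY_SIZE);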

#define INVALID_TI_TAG 0x8000000000000000L
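
//
// Illustrative helper -- not part of the original header.  The hardware
// walker ignores a long-format entry whose ti bit (bit 63 of ti_tag) is set,
// which is exactly what INVALID_TI_TAG encodes, so an entry can be taken out
// of service by storing that value into its tag word.  Hypothetical name:
//
static inline void vhpt_invalidate_entry(struct vhpt_lf_entry *vlfe)
{
    vlfe->ti_tag = INVALID_TI_TAG;
}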

#endif /* !__ASSEMBLY__ */

#if !VHPT_ENABLED
#define VHPT_CCHAIN_LOOKUP(Name, i_or_d)
#else

#ifdef CONFIG_SMP
#error "VHPT_CCHAIN_LOOKUP needs a semaphore on the VHPT!"
#endif

// VHPT_CCHAIN_LOOKUP is intended to run with psr.i+ic off
#define VHPT_CCHAIN_LOOKUP(Name, i_or_d) \
\
CC_##Name:; \
mov r31 = pr; \
mov r16 = cr.ifa; \
movl r30 = int_counts; \
;; \
extr.u r17=r16,59,5 \
;; \
cmp.eq p6,p0=0x1e,r17; \
(p6) br.cond.spnt .Alt_##Name \
;; \
cmp.eq p6,p0=0x1d,r17; \
(p6) br.cond.spnt .Alt_##Name \
;; \
thash r28 = r16; \
adds r30 = CAUSE_VHPT_CC_HANDLED << 3, r30; \
;; \
ttag r19 = r16; \
ld8 r27 = [r30]; \
adds r17 = VLE_CCHAIN_OFFSET, r28; \
;; \
ld8 r17 = [r17]; \
;; \
cmp.eq p6,p0 = 0, r17; \
mov r21 = r17; \
adds r22 = VCE_CCNEXT_OFFSET, r17; \
adds r28 = VLE_ITIR_OFFSET, r28; \
(p6) br .Out_##Name; \
;; \
\
.loop_##Name:; \
ld8 r20 = [r21]; \
ld8 r18 = [r22]; \
adds r23 = VCE_PGFLAGS_OFFSET, r21; \
adds r24 = VCE_ITIR_OFFSET, r21; \
cmp.eq p6,p0 = r17, r21; \
cmp.eq p7,p0 = r0, r0; \
;; \
lfetch [r18]; \
cmp.eq.andcm p6,p7 = r19, r20; \
mov r21 = r18; \
adds r22 = VCE_CCNEXT_OFFSET, r18; \
(p6) br.spnt .Out_##Name; \
(p7) br.sptk .loop_##Name; \
;; \
\
ld8 r26 = [r23]; \
ld8 r25 = [r24]; \
adds r29 = VLE_TITAG_OFFSET - VLE_ITIR_OFFSET, r28; \
adds r27 = 1, r27; \
;; \
mov cr.itir = r25; \
st8 [r28] = r25, VLE_PGFLAGS_OFFSET - VLE_ITIR_OFFSET; \
or r26 = 1, r26; \
st8 [r30] = r27; \
;; \
itc.i_or_d r26; \
;; \
srlz.i_or_d; \
;; \
st8 [r28] = r26; \
mov pr = r31, 0x1ffff; \
st8 [r29] = r20; \
rfi; \
;; \
\
.Alt_##Name:; \
mov pr = r31, 0x1ffff; \
;; \
br.cond.sptk late_alt_##Name \
;; \
.Out_##Name:; \
mov pr = r31, 0x1ffff; \
;; \
.End_##Name:;

// br.cond.sptk.few dorfi;
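
//
// Illustrative usage note -- not part of the original header.  The macro above
// is meant to be instantiated inside a TLB-miss handler in the IVT assembly,
// with psr.i and psr.ic already cleared.  A hypothetical instantiation, with
// handler names chosen only for this example, would look like:
//
//     VHPT_CCHAIN_LOOKUP(itlb_miss, i)   // instruction-side miss, expands to itc.i/srlz.i
//     VHPT_CCHAIN_LOOKUP(dtlb_miss, d)   // data-side miss, expands to itc.d/srlz.d
//
// Name generates the CC_/.Alt_/.Out_/.End_ label set and a branch to
// late_alt_##Name, which the surrounding handler must define; on a collision
// chain hit the macro installs the translation with itc and returns with rfi.
//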

#define VHPT_INSERT() \
{.mmi;\
thash r17 = r16;\
or r26 = 1, r26;\
nop 0;\
;;\
};\
{.mii;\
ttag r21 = r16;\
adds r18 = VLE_ITIR_OFFSET, r17;\
adds r19 = VLE_PGFLAGS_OFFSET, r17;\
;;\
};\
{.mmi;\
\
st8 [r18] = r27;\
adds r20 = VLE_TITAG_OFFSET, r17;\
nop 0;\
;;\
};\
{.mmb;\
st8 [r19] = r26;\
st8 [r20] = r21;\
nop 0;\
;;\
};
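
//
// Added commentary -- not part of the original header, and inferred from the
// code rather than documented by its authors.  Both insert macros appear to
// take their inputs in fixed registers:
//     r16 = virtual address being mapped
//     r26 = page flags (VHPT_INSERT ORs in the present bit itself)
//     r27 = itir (VHPT_INSERT1 reads the page-size field from bits 2..7)
// VHPT_INSERT above recomputes the hash and tag with thash/ttag and fills a
// single long-format entry.  VHPT_INSERT1 below also maintains the V-Cache:
// it searches the collision chain for a matching tag, otherwise takes an
// entry from the G_VCacheRpl free list (or recycles one round-robin using
// VHPT_CACHE_MASK), relinks the chain, then fills both the long-format entry
// and the V-Cache entry, advancing r16 by PAGE_SIZE_OFFSET per iteration
// until the whole 1 << itir.ps mapping is covered.
//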

#define VHPT_INSERT1() \
VCacheInsert:;\
mov r18 = 1;\
extr.u r17 = r27, 2, 6;\
;;\
\
shl r17 = r18, r17;\
;;\
\
add r30 = r16, r17;\
;;\
\
.MainLoop:;\
thash r18 = r16;\
;;\
\
ttag r24 = r16;\
adds r29 = VLE_CCHAIN_OFFSET, r18;\
;;\
\
ld8 r21 = [r29];\
;;\
\
adds r19 = VCE_CCNEXT_OFFSET, r21;\
adds r20 = VCE_TITAG_OFFSET, r21;\
mov r28 = r21;\
\
cmp.eq p11, p4 = r0, r21;\
(p11) br FindOne;\
;;\
\
.find_loop:;\
\
ld8 r17 = [r19];\
ld8 r18 = [r20];\
;;\
\
adds r19 = VCE_CCNEXT_OFFSET, r17;\
adds r20 = VCE_TITAG_OFFSET, r17;\
cmp.eq.unc p10, p8 = r18, r24;\
\
cmp.eq.unc p1, p2 = r17, r21;\
\
(p10) br .FillVce;\
;;\
\
(p8) mov r28 = r17;\
\
lfetch [r19];\
\
(p2) br .find_loop;\
;;\
\
FindOne:;\
\
movl r22 = G_VCacheRpl;\
;;\
\
ld8 r23 = [r22];\
;;\
\
mov r28 = r23;\
\
adds r17 = VCE_FNEXT_OFFSET, r23;\
\
cmp.eq p14, p3 = r0, r23;\
;;\
\
(p3) ld8 r23 = [r17];\
;;\
\
(p3) st8 [r22] = r23;\
(p3) br .AddChain;\
;;\
\
movl r24 = VHPT_CACHE_MASK;\
\
adds r25 = 8, r22;\
;;\
\
ld8 r23 = [r25];\
;;\
\
adds r23 = VHPT_CACHE_ENTRY_SIZE, r23;\
;;\
\
and r23 = r23, r24;\
\
movl r17 = VHPT_ADDR;\
;;\
\
st8 [r25] = r23;\
\
add r28 = r17, r23;\
;;\
\
adds r22 = VCE_CCHEAD_OFFSET, r28;\
;;\
\
ld8 r17 = [r22], VLE_PGFLAGS_OFFSET - VLE_CCHAIN_OFFSET;\
\
adds r19 = VCE_CCNEXT_OFFSET, r28;\
adds r20 = VCE_CCPREV_OFFSET, r28;\
;;\
\
ld8 r20 = [r20];\
ld8 r19 = [r19];\
\
adds r21 = VLE_CCHAIN_OFFSET, r17;\
;;\
\
ld8 r18 = [r21];\
\
cmp.eq.unc p9, p7 = r19, r28;\
\
adds r23 = VLE_TITAG_OFFSET + 7, r17;\
\
mov r17 = 0x80;\
;;\
\
(p9) st8 [r21] = r0;\
\
(p9) st1 [r23] = r17;\
\
adds r24 = VCE_CCPREV_OFFSET, r19;\
adds r25 = VCE_CCNEXT_OFFSET, r20;\
\
(p7) cmp.eq.unc p13, p6 = r18, r28;\
;;\
\
(p7) st8 [r24] = r20;\
(p7) st8 [r25] = r19;\
\
adds r17 = VCE_PGFLAGS_OFFSET, r28;\
;;\
\
(p13) st8 [r21] = r19;\
(p13) ld8 r18 = [r17], VCE_ITIR_OFFSET - VCE_PGFLAGS_OFFSET;\
;;\
(p13) st8 [r22] = r18, VLE_ITIR_OFFSET - VLE_PGFLAGS_OFFSET;\
\
;;\
(p13) ld8 r18 = [r17], VCE_TITAG_OFFSET - VCE_ITIR_OFFSET;\
;;\
\
(p13) st8 [r22] = r18, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET;\
;;\
\
.AddChain:;\
\
ld8 r24 = [r29];\
;;\
\
st8 [r29] = r28, 0 - VLE_CCHAIN_OFFSET;\
\
adds r25 = VCE_CCNEXT_OFFSET, r28;\
adds r19 = VCE_CCPREV_OFFSET, r28;\
adds r20 = VCE_CCHEAD_OFFSET, r28;\
;;\
\
st8 [r20] = r29;\
\
cmp.eq p12, p5 = r0, r24;\
\
adds r23 = VCE_CCPREV_OFFSET, r24;\
;;\
\
(p12) st8 [r25] = r28;\
(p12) st8 [r19] = r28;\
\
(p5) ld8 r21 = [r23];\
adds r29 = VLE_CCHAIN_OFFSET, r29;\
;;\
\
(p5) st8 [r25] = r24;\
(p5) st8 [r19] = r21;\
\
adds r22 = VCE_CCNEXT_OFFSET, r21;\
;;\
\
(p5) st8 [r22] = r28;\
(p5) st8 [r23] = r28;\
;;\
\
.FillVce:;\
ttag r24 = r16;\
\
adds r29 = 0 - VLE_CCHAIN_OFFSET, r29;\
adds r17 = VCE_PGFLAGS_OFFSET, r28;\
movl r19 = PAGE_SIZE_OFFSET;\
;;\
\
st8 [r29] = r26, VLE_ITIR_OFFSET - VLE_PGFLAGS_OFFSET;\
st8 [r17] = r26, VCE_ITIR_OFFSET - VCE_PGFLAGS_OFFSET;\
add r16 = r16, r19;\
;;\
\
st8 [r29] = r27, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET;\
st8 [r17] = r27, VCE_TITAG_OFFSET - VCE_ITIR_OFFSET;\
;;\
\
st8 [r29] = r24;\
st8 [r17] = r24;\
\
cmp.lt p15, p0 = r16, r30;\
(p15) br .MainLoop;\
;;

#endif /* VHPT_ENABLED */
#endif