ia64/xen-unstable

view xen/include/asm-ia64/vmx_vcpu.h @ 10695:6703fed8870f

[IA64] enable acceleration of external interrupt

This patch enables acceleration of external interrupts, as described in the VTI spec.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Wed Jul 12 13:20:15 2006 -0600 (2006-07-12)
parents ddc25d4ebf60
children 27ccf13dc3b7
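
For orientation, here is a minimal, hypothetical sketch of how the VLSAPIC interrupt path declared in this header might be driven. It is illustrative only and not part of the patch: the function names come from the declarations below, while the wrapper and its calling context are assumed; the real injection logic lives in the VMX interrupt code, not in this header.

    /* hypothetical caller -- not part of this patch */
    void example_assert_guest_irq(VCPU *vcpu, uint8_t vector)
    {
        /* record the vector in the guest's virtual IRR */
        vmx_vcpu_pend_interrupt(vcpu, vector);
        /* re-evaluate deliverability; if unmasked, the guest observes the
         * vector on its next CR.IVR read (vmx_vcpu_get_ivr below, which
         * calls guest_read_vivr) and completes it by writing CR.EOI
         * (guest_write_eoi). */
        vmx_check_pending_irq(vcpu);
    }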
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.h:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */

#ifndef _XEN_IA64_VMX_VCPU_H
#define _XEN_IA64_VMX_VCPU_H

#include <xen/sched.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vpd.h>
#include <asm/ptrace.h>
#include <asm/regs.h>
#include <asm/regionreg.h>
#include <asm/types.h>
#include <asm/vcpu.h>
#define VRN_SHIFT    61
#define VRN0         0x0UL
#define VRN1         0x1UL
#define VRN2         0x2UL
#define VRN3         0x3UL
#define VRN4         0x4UL
#define VRN5         0x5UL
#define VRN6         0x6UL
#define VRN7         0x7UL
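/*
 * Note: VRN_SHIFT is 61 because the top three bits of an IA-64 virtual
 * address select the virtual region (and hence the region register).
 * Illustration only (not part of the original header): the region number
 * of a guest virtual address can be recovered as
 *
 *     u64 vrn = vadr >> VRN_SHIFT;    -- yields one of VRN0 .. VRN7
 */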
// for vlsapic
#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])

#define VMX(x,y)  ((x)->arch.arch_vmx.y)

#define VMM_RR_SHIFT 20
#define VMM_RR_MASK  ((1UL<<VMM_RR_SHIFT)-1)

extern u64 indirect_reg_igfld_MASK ( int type, int index, u64 value);
extern u64 cr_igfld_mask (int index, u64 value);
extern int check_indirect_reg_rsv_fields ( int type, int index, u64 value );
extern u64 set_isr_ei_ni (VCPU *vcpu);
extern u64 set_isr_for_na_inst(VCPU *vcpu, int op);
/* All of the following are VTI domain API definitions */
extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value);
extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, UINT64 value);
extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
extern IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr);
extern IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
extern u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa);
extern IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
extern IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
extern IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
extern IA64FAULT vmx_vcpu_rfi(VCPU *vcpu);
extern UINT64 vmx_vcpu_get_psr(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val);
extern IA64FAULT vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val, int nat);
#if 0
extern IA64FAULT vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val);
extern IA64FAULT vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat);
#endif
extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
extern void vtm_init(VCPU *vcpu);
extern uint64_t vtm_get_itc(VCPU *vcpu);
extern void vtm_set_itc(VCPU *vcpu, uint64_t new_itc);
extern void vtm_set_itv(VCPU *vcpu, uint64_t val);
extern void vtm_set_itm(VCPU *vcpu, uint64_t val);
extern void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm);
//extern void vtm_domain_out(VCPU *vcpu);
//extern void vtm_domain_in(VCPU *vcpu);
extern void vlsapic_reset(VCPU *vcpu);
extern int vmx_check_pending_irq(VCPU *vcpu);
extern void guest_write_eoi(VCPU *vcpu);
extern int is_unmasked_irq(VCPU *vcpu);
extern uint64_t guest_read_vivr(VCPU *vcpu);
extern void vmx_inject_vhpi(VCPU *vcpu, u8 vec);
extern int vmx_vcpu_pend_interrupt(VCPU *vcpu, uint8_t vector);
extern struct virtual_platform_def *vmx_vcpu_get_plat(VCPU *vcpu);
extern void memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
extern void vcpu_load_kernel_regs(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu);
extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);

extern void dtlb_fault (VCPU *vcpu, u64 vadr);
extern void nested_dtlb (VCPU *vcpu);
extern void alt_dtlb (VCPU *vcpu, u64 vadr);
extern void dvhpt_fault (VCPU *vcpu, u64 vadr);
extern void dnat_page_consumption (VCPU *vcpu, uint64_t vadr);
extern void page_not_present(VCPU *vcpu, u64 vadr);
extern void data_access_rights(VCPU *vcpu, u64 vadr);
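/*
 * Note: the helpers above raise the corresponding data-side faults into
 * the guest (data TLB miss, nested TLB, alternate data TLB, VHPT data
 * fault, data NaT page consumption, page not present, data access
 * rights).  They are presumably invoked from the VMX data-access fault
 * paths when a guest reference cannot be satisfied from the virtual
 * TLB/VHPT.
 */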
/**************************************************************************
 VCPU control register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,dcr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,itm);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,iva);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,pta);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,lid);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
{
    *pval = guest_read_vivr(vcpu);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,tpr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
{
    *pval = 0L;  // reads of eoi always return 0
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,irr[0]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,irr[1]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,irr[2]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,irr[3]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,itv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,pmv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,cmcv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,lrr0);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,lrr1);
    return (IA64_NO_FAULT);
}
static inline
IA64FAULT
vmx_vcpu_set_dcr(VCPU *vcpu, u64 val)
{
    u64 mdcr, mask;
    VCPU(vcpu,dcr) = val;
    /* All vDCR bits will go to mDCR, except for be/pp bit */
    mdcr = ia64_get_dcr();
    mask = IA64_DCR_BE | IA64_DCR_PP;
    mdcr = ( mdcr & mask ) | ( val & (~mask) );
    ia64_set_dcr( mdcr);

    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_itm(VCPU *vcpu, u64 val)
{
    vtm_set_itm(vcpu, val);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iva(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,iva) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_pta(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,pta) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,lid) = val;
    return IA64_NO_FAULT;
}

extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);

static inline
IA64FAULT
vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
{
    guest_write_eoi(vcpu);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_itv(VCPU *vcpu, u64 val)
{
    vtm_set_itv(vcpu, val);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_pmv(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,pmv) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_cmcv(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,cmcv) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lrr0(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,lrr0) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,lrr1) = val;
    return IA64_NO_FAULT;
}
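/*
 * Note: unlike the plain VCPU() stores above, writes to cr.itm, cr.itv
 * and cr.eoi are routed through the virtual timer (vtm_set_itm,
 * vtm_set_itv) and VLSAPIC (guest_write_eoi) helpers, so the virtual
 * timer and interrupt state are updated as a side effect of the write.
 */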
/**************************************************************************
 VCPU privileged application register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val)
{
    vtm_set_itc(vcpu, val);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu, UINT64 *val)
{
    *val = vtm_get_itc(vcpu);
    return IA64_NO_FAULT;
}

/*
static inline
IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    *pval = VMX(vcpu,vrr[reg>>61]);
    return (IA64_NO_FAULT);
}
*/

/**************************************************************************
 VCPU debug breakpoint register access routines
**************************************************************************/
static inline
IA64FAULT vmx_vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented CPUID registers return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    if (reg > 4) {
        panic_domain(vcpu_regs(vcpu), "there are only five cpuid registers");
    }
    *pval = VCPU(vcpu,vcpuid[reg]);
    return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented DBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_dbr(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented IBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_ibr(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented DBRs return a reserved register fault
    UINT64 val = ia64_get_dbr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented IBRs return a reserved register fault
    UINT64 val = ia64_get_ibr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU performance monitor register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMC registers are discarded
    ia64_set_pmc(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMD registers are discarded
    ia64_set_pmd(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMC registers return zero
    UINT64 val = (UINT64)ia64_get_pmc(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMD registers return zero
    UINT64 val = (UINT64)ia64_get_pmd(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU banked general register access routines
**************************************************************************/
#if 0
static inline
IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
{
    VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
{
    VCPU(vcpu,vpsr) |= IA64_PSR_BN;
    return (IA64_NO_FAULT);
}
#endif

#if 0
/* Another hash performance algorithm */
#define redistribute_rid(rid) (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
#endif
static inline unsigned long
vrrtomrr(VCPU *v, unsigned long val)
{
    ia64_rr rr;

    rr.rrval = val;
    rr.rid = rr.rid + v->arch.starting_rid;
    rr.ps = PAGE_SHIFT;
    rr.ve = 1;
    return vmMangleRID(rr.rrval);
/* Disable this rid allocation algorithm for now */
#if 0
    rid = (((u64)vcpu->domain->domain_id) << DOMAIN_RID_SHIFT) + rr.rid;
    rr.rid = redistribute_rid(rid);
#endif
}
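/*
 * Note: vrrtomrr() converts a guest (virtual) region register value into
 * the machine value that is actually loaded: the guest RID is offset by
 * the domain's starting_rid, the page size is forced to the host
 * PAGE_SHIFT, the VHPT walker is enabled (ve = 1), and the result is
 * encoded with vmMangleRID().
 */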
static inline thash_cb_t *
vmx_vcpu_get_vtlb(VCPU *vcpu)
{
    return &vcpu->arch.vtlb;
}

static inline thash_cb_t *
vcpu_get_vhpt(VCPU *vcpu)
{
    return &vcpu->arch.vhpt;
}

#endif