ia64/xen-unstable

view xen/include/asm-ia64/vmx_vcpu.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use d->max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus per
guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 6607624285b2
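To illustrate the change described in the commit message (this sketch is not part of the file below, and the helper name is hypothetical), a per-domain vcpu loop that used to be bounded by the fixed MAX_VCPUS is instead bounded by the per-domain d->max_vcpus:

/* Hypothetical sketch, not from the Xen tree: walk a domain's vcpus. */
static void walk_domain_vcpus(struct domain *d)
{
        int i;

        /* previously: for (i = 0; i < MAX_VCPUS; i++) */
        for (i = 0; i < d->max_vcpus; i++) {
                struct vcpu *v = d->vcpu[i];

                if (v == NULL)          /* vcpu slots may be unallocated */
                        continue;
                /* ... operate on v ... */
        }
}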
/* -*- Mode:C; c-basic-offset:8; tab-width:8; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.h:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */

#ifndef _XEN_IA64_VMX_VCPU_H
#define _XEN_IA64_VMX_VCPU_H

#include <xen/sched.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vpd.h>
#include <asm/ptrace.h>
#include <asm/regs.h>
#include <asm/regionreg.h>
#include <asm/types.h>
#include <asm/vcpu.h>
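
/*
 * Virtual region numbers: bits 63:61 of an IA-64 virtual address select
 * one of eight regions, hence VRN_SHIFT of 61.
 */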
#define VRN_SHIFT 61
#define VRN0 0x0UL
#define VRN1 0x1UL
#define VRN2 0x2UL
#define VRN3 0x3UL
#define VRN4 0x4UL
#define VRN5 0x5UL
#define VRN6 0x6UL
#define VRN7 0x7UL

// for vlsapic
#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])

#define VMX(x,y)  ((x)->arch.arch_vmx.y)
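/*
 * VMX(v, y) above abbreviates access to field y of v's arch_vmx state;
 * VLSAPIC_INSVC(v, i) indexes word i of the virtual LSAPIC's in-service
 * interrupt bitmap.
 */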

#define VMM_RR_SHIFT 20
#define VMM_RR_MASK ((1UL<<VMM_RR_SHIFT)-1)

extern u64 indirect_reg_igfld_MASK(int type, int index, u64 value);
extern u64 cr_igfld_mask(int index, u64 value);
extern int check_indirect_reg_rsv_fields(int type, int index, u64 value);
extern u64 set_isr_ei_ni(VCPU * vcpu);
extern u64 set_isr_for_na_inst(VCPU * vcpu, int op);
extern void set_illegal_op_isr (VCPU *vcpu);

/* The following are the VTI domain API definitions. */
extern void vmx_vcpu_set_psr(VCPU * vcpu, unsigned long value);
extern IA64FAULT vmx_vcpu_cover(VCPU * vcpu);
extern IA64FAULT vmx_vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val);
extern u64 vmx_vcpu_get_pkr(VCPU * vcpu, u64 reg);
IA64FAULT vmx_vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
extern IA64FAULT vmx_vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
extern IA64FAULT vmx_vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
extern IA64FAULT vmx_vcpu_itr_i(VCPU * vcpu, u64 slot, u64 pte, u64 itir,
                                u64 ifa);
extern IA64FAULT vmx_vcpu_itr_d(VCPU * vcpu, u64 slot, u64 pte, u64 itir,
                                u64 ifa);
extern IA64FAULT vmx_vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 ps);
extern IA64FAULT vmx_vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 ps);
extern IA64FAULT vmx_vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 ps);
extern IA64FAULT vmx_vcpu_ptc_e(VCPU * vcpu, u64 vadr);
extern IA64FAULT vmx_vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 ps);
extern IA64FAULT vmx_vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 ps);
extern u64 vmx_vcpu_thash(VCPU * vcpu, u64 vadr);
extern u64 vmx_vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa);
extern u64 vmx_vcpu_ttag(VCPU * vcpu, u64 vadr);
extern IA64FAULT vmx_vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr);
extern u64 vmx_vcpu_tak(VCPU * vcpu, u64 vadr);
extern IA64FAULT vmx_vcpu_rfi(VCPU * vcpu);
extern u64 vmx_vcpu_get_psr(VCPU * vcpu);
extern IA64FAULT vmx_vcpu_get_bgr(VCPU * vcpu, unsigned int reg, u64 * val);
extern IA64FAULT vmx_vcpu_set_bgr(VCPU * vcpu, unsigned int reg, u64 val,
                                  int nat);
#if 0
extern IA64FAULT vmx_vcpu_get_gr(VCPU * vcpu, unsigned reg, u64 * val);
extern IA64FAULT vmx_vcpu_set_gr(VCPU * vcpu, unsigned reg, u64 value, int nat);
#endif
extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU * vcpu, u64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_l(VCPU * vcpu, u64 val);
extern void vtm_init(VCPU * vcpu);
extern uint64_t vtm_get_itc(VCPU * vcpu);
extern void vtm_set_itc(VCPU * vcpu, uint64_t new_itc);
extern void vtm_set_itv(VCPU * vcpu, uint64_t val);
extern void vtm_set_itm(VCPU * vcpu, uint64_t val);
extern void vlsapic_reset(VCPU * vcpu);
extern int vmx_check_pending_irq(VCPU * vcpu);
extern void guest_write_eoi(VCPU * vcpu);
extern int is_unmasked_irq(VCPU * vcpu);
extern uint64_t guest_read_vivr(VCPU * vcpu);
extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
extern void vcpu_load_kernel_regs(VCPU * vcpu);
extern void __vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
                             void *shared_arch_info);
extern void __vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid);
extern void vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid);
extern void vmx_ia64_set_dcr(VCPU * v);
extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
extern void vmx_asm_bsw0(void);
extern void vmx_asm_bsw1(void);

/**************************************************************************
 VCPU control register access routines
 **************************************************************************/

static inline u64 vmx_vcpu_get_itm(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, itm));
}

static inline u64 vmx_vcpu_get_iva(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, iva));
}

static inline u64 vmx_vcpu_get_pta(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, pta));
}

static inline u64 vmx_vcpu_get_lid(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, lid));
}

static inline u64 vmx_vcpu_get_ivr(VCPU * vcpu)
{
        return ((u64)guest_read_vivr(vcpu));
}

static inline u64 vmx_vcpu_get_tpr(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, tpr));
}

static inline u64 vmx_vcpu_get_eoi(VCPU * vcpu)
{
        return (0UL);           // reads of eoi always return 0
}

static inline u64 vmx_vcpu_get_irr0(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, irr[0]));
}

static inline u64 vmx_vcpu_get_irr1(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, irr[1]));
}

static inline u64 vmx_vcpu_get_irr2(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, irr[2]));
}

static inline u64 vmx_vcpu_get_irr3(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, irr[3]));
}

static inline u64 vmx_vcpu_get_itv(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, itv));
}

static inline u64 vmx_vcpu_get_pmv(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, pmv));
}

static inline u64 vmx_vcpu_get_cmcv(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, cmcv));
}

static inline u64 vmx_vcpu_get_lrr0(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, lrr0));
}

static inline u64 vmx_vcpu_get_lrr1(VCPU * vcpu)
{
        return ((u64)VCPU(vcpu, lrr1));
}

static inline IA64FAULT vmx_vcpu_set_itm(VCPU * vcpu, u64 val)
{
        vtm_set_itm(vcpu, val);
        return IA64_NO_FAULT;
}

static inline IA64FAULT vmx_vcpu_set_iva(VCPU * vcpu, u64 val)
{
        VCPU(vcpu, iva) = val;
        return IA64_NO_FAULT;
}

static inline IA64FAULT vmx_vcpu_set_pta(VCPU * vcpu, u64 val)
{
        VCPU(vcpu, pta) = val;
        return IA64_NO_FAULT;
}

static inline IA64FAULT vmx_vcpu_set_lid(VCPU * vcpu, u64 val)
{
        VCPU(vcpu, lid) = val;
        return IA64_NO_FAULT;
}

extern IA64FAULT vmx_vcpu_set_tpr(VCPU * vcpu, u64 val);

static inline IA64FAULT vmx_vcpu_set_eoi(VCPU * vcpu, u64 val)
{
        guest_write_eoi(vcpu);
        return IA64_NO_FAULT;
}

static inline IA64FAULT vmx_vcpu_set_itv(VCPU * vcpu, u64 val)
{
        vtm_set_itv(vcpu, val);
        return IA64_NO_FAULT;
}

static inline IA64FAULT vmx_vcpu_set_pmv(VCPU * vcpu, u64 val)
{
        VCPU(vcpu, pmv) = val;
        return IA64_NO_FAULT;
}

static inline IA64FAULT vmx_vcpu_set_cmcv(VCPU * vcpu, u64 val)
{
        VCPU(vcpu, cmcv) = val;
        return IA64_NO_FAULT;
}

static inline IA64FAULT vmx_vcpu_set_lrr0(VCPU * vcpu, u64 val)
{
        VCPU(vcpu, lrr0) = val;
        return IA64_NO_FAULT;
}

static inline IA64FAULT vmx_vcpu_set_lrr1(VCPU * vcpu, u64 val)
{
        VCPU(vcpu, lrr1) = val;
        return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU privileged application register access routines
 **************************************************************************/
static inline IA64FAULT vmx_vcpu_set_itc(VCPU * vcpu, u64 val)
{
        vtm_set_itc(vcpu, val);
        return IA64_NO_FAULT;
}

static inline u64 vmx_vcpu_get_itc(VCPU * vcpu)
{
        return ((u64)vtm_get_itc(vcpu));
}

/*
static inline
IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, u64 reg, u64 *pval)
{
        *pval = VMX(vcpu,vrr[reg>>61]);
        return IA64_NO_FAULT;
}
*/
/**************************************************************************
 VCPU debug breakpoint register access routines
 **************************************************************************/

static inline u64 vmx_vcpu_get_cpuid(VCPU * vcpu, u64 reg)
{
        // TODO: unimplemented DBRs return a reserved register fault
        // TODO: Should set Logical CPU state, not just physical
        if (reg > 4) {
                panic_domain(vcpu_regs(vcpu),
                             "there are only five cpuid registers");
        }
        return ((u64)VCPU(vcpu, vcpuid[reg]));
}

static inline IA64FAULT vmx_vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
{
        return vcpu_set_dbr(vcpu, reg, val);
}

static inline IA64FAULT vmx_vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val)
{
        return vcpu_set_ibr(vcpu, reg, val);
}

static inline IA64FAULT vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 *pval)
{
        return vcpu_get_dbr(vcpu, reg, pval);
}

static inline IA64FAULT vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 *pval)
{
        return vcpu_get_ibr(vcpu, reg, pval);
}

/**************************************************************************
 VCPU performance monitor register access routines
 **************************************************************************/
static inline IA64FAULT vmx_vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val)
{
        // TODO: Should set Logical CPU state, not just physical
        // NOTE: Writes to unimplemented PMC registers are discarded
        ia64_set_pmc(reg, val);
        return IA64_NO_FAULT;
}

static inline IA64FAULT vmx_vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val)
{
        // TODO: Should set Logical CPU state, not just physical
        // NOTE: Writes to unimplemented PMD registers are discarded
        ia64_set_pmd(reg, val);
        return IA64_NO_FAULT;
}

static inline u64 vmx_vcpu_get_pmc(VCPU * vcpu, u64 reg)
{
        // NOTE: Reads from unimplemented PMC registers return zero
        return ((u64)ia64_get_pmc(reg));
}

static inline u64 vmx_vcpu_get_pmd(VCPU * vcpu, u64 reg)
{
        // NOTE: Reads from unimplemented PMD registers return zero
        return ((u64)ia64_get_pmd(reg));
}

/**************************************************************************
 VCPU banked general register access routines
 **************************************************************************/
#if 0
static inline IA64FAULT vmx_vcpu_bsw0(VCPU * vcpu)
{
        VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
        return IA64_NO_FAULT;
}

static inline IA64FAULT vmx_vcpu_bsw1(VCPU * vcpu)
{
        VCPU(vcpu, vpsr) |= IA64_PSR_BN;
        return IA64_NO_FAULT;
}
#endif
#if 0
/* Another hash performance algorithm */
#define redistribute_rid(rid) (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
#endif
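/*
 * vrrtomrr: derive the machine region register value from a guest region
 * register value: offset the guest RID by the vcpu's starting_rid, clamp
 * the preferred page size to PAGE_SHIFT, set the VHPT enable bit, and
 * mangle the resulting RID.
 */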
static inline unsigned long vrrtomrr(VCPU * v, unsigned long val)
{
        ia64_rr rr;

        rr.rrval = val;
        rr.rid = rr.rid + v->arch.starting_rid;
        if (rr.ps > PAGE_SHIFT)
                rr.ps = PAGE_SHIFT;
        rr.ve = 1;
        return vmMangleRID(rr.rrval);
/* Disable this rid allocation algorithm for now */
#if 0
        rid = (((u64) vcpu->domain->domain_id) << DOMAIN_RID_SHIFT) + rr.rid;
        rr.rid = redistribute_rid(rid);
#endif
}

static inline thash_cb_t *vmx_vcpu_get_vtlb(VCPU * vcpu)
{
        return &vcpu->arch.vtlb;
}

static inline thash_cb_t *vcpu_get_vhpt(VCPU * vcpu)
{
        return &vcpu->arch.vhpt;
}

/**************************************************************************
 VCPU fault injection routines
 **************************************************************************/

/*
 * Set vIFA & vITIR & vIHA, when vPSR.ic = 1
 * Parameter:
 *  set_ifa: if true, set vIFA
 *  set_itir: if true, set vITIR
 *  set_iha: if true, set vIHA
 */
static inline void
set_ifa_itir_iha (VCPU *vcpu, u64 vadr,
                  int set_ifa, int set_itir, int set_iha)
{
        IA64_PSR vpsr;
        u64 value;
        vpsr.val = VCPU(vcpu, vpsr);
        /* Vol2, Table 8-1 */
        if (vpsr.ic) {
                if (set_ifa){
                        vcpu_set_ifa(vcpu, vadr);
                }
                if (set_itir) {
                        value = vmx_vcpu_get_itir_on_fault(vcpu, vadr);
                        vcpu_set_itir(vcpu, value);
                }
                if (set_iha) {
                        value = vmx_vcpu_thash(vcpu, vadr);
                        vcpu_set_iha(vcpu, value);
                }
        }
}

/*
 * Data TLB Fault
 *  @ Data TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
dtlb_fault (VCPU *vcpu, u64 vadr)
{
        /* If vPSR.ic, IFA, ITIR, IHA */
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
        inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
}

/*
 * Instruction TLB Fault
 *  @ Instruction TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
itlb_fault (VCPU *vcpu, u64 vadr)
{
        /* If vPSR.ic, IFA, ITIR, IHA */
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
        inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
}

/*
 * Data Nested TLB Fault
 *  @ Data Nested TLB Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
nested_dtlb (VCPU *vcpu)
{
        inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
}

/*
 * Alternate Data TLB Fault
 *  @ Alternate Data TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
alt_dtlb (VCPU *vcpu, u64 vadr)
{
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
        inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
}

/*
 * Alternate Instruction TLB Fault
 *  @ Alternate Instruction TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
alt_itlb (VCPU *vcpu, u64 vadr)
{
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
        inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
}

/*
 * Deal with:
 *  VHPT Translation Vector
 */
static inline void
_vhpt_fault(VCPU *vcpu, u64 vadr)
{
        /* If vPSR.ic, IFA, ITIR, IHA*/
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
        inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
}

/*
 * VHPT Instruction Fault
 *  @ VHPT Translation vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
ivhpt_fault (VCPU *vcpu, u64 vadr)
{
        _vhpt_fault(vcpu, vadr);
}

/*
 * VHPT Data Fault
 *  @ VHPT Translation vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
dvhpt_fault (VCPU *vcpu, u64 vadr)
{
        _vhpt_fault(vcpu, vadr);
}

/*
 * Deal with:
 *  General Exception vector
 */
static inline void
_general_exception (VCPU *vcpu)
{
        inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
}

/*
 * Illegal Operation Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
illegal_op (VCPU *vcpu)
{
        _general_exception(vcpu);
}

/*
 * Illegal Dependency Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
illegal_dep (VCPU *vcpu)
{
        _general_exception(vcpu);
}

/*
 * Reserved Register/Field Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
rsv_reg_field (VCPU *vcpu)
{
        _general_exception(vcpu);
}

/*
 * Privileged Operation Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
privilege_op (VCPU *vcpu)
{
        _general_exception(vcpu);
}

/*
 * Unimplemented Data Address Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
unimpl_daddr (VCPU *vcpu)
{
        ISR isr;

        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_UNIMPL_DADDR_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        _general_exception(vcpu);
}

/*
 * Privileged Register Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
privilege_reg (VCPU *vcpu)
{
        _general_exception(vcpu);
}

/*
 * Deal with
 *  Nat consumption vector
 * Parameter:
 *  vadr: ignored when t == REGISTER
 */
static inline void
_nat_consumption_fault(VCPU *vcpu, u64 vadr, miss_type t)
{
        /* If vPSR.ic && t == DATA/INST, IFA */
        if ( t == DATA || t == INSTRUCTION ) {
                /* IFA */
                set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
        }

        inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
}

/*
 * IR Data Nat Page Consumption Fault
 *  @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
#if 0
static inline void
ir_nat_page_consumption (VCPU *vcpu, u64 vadr)
{
        _nat_consumption_fault(vcpu, vadr, DATA);
}
#endif                          /* compiled out because it is currently unused */

/*
 * Instruction Nat Page Consumption Fault
 *  @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
inat_page_consumption (VCPU *vcpu, u64 vadr)
{
        _nat_consumption_fault(vcpu, vadr, INSTRUCTION);
}

/*
 * Register Nat Consumption Fault
 *  @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
rnat_consumption (VCPU *vcpu)
{
        _nat_consumption_fault(vcpu, 0, REGISTER);
}

/*
 * Data Nat Page Consumption Fault
 *  @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
dnat_page_consumption (VCPU *vcpu, uint64_t vadr)
{
        _nat_consumption_fault(vcpu, vadr, DATA);
}

/*
 * Deal with
 *  Page not present vector
 */
static inline void
__page_not_present(VCPU *vcpu, u64 vadr)
{
        /* If vPSR.ic, IFA, ITIR */
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
        inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
}

static inline void
data_page_not_present(VCPU *vcpu, u64 vadr)
{
        __page_not_present(vcpu, vadr);
}

static inline void
inst_page_not_present(VCPU *vcpu, u64 vadr)
{
        __page_not_present(vcpu, vadr);
}

/*
 * Deal with
 *  Data access rights vector
 */
static inline void
data_access_rights(VCPU *vcpu, u64 vadr)
{
        /* If vPSR.ic, IFA, ITIR */
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
        inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
}

/*
 * Unimplemented Instruction Address Trap
 *  @ Lower-Privilege Transfer Trap Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
unimpl_iaddr_trap (VCPU *vcpu, u64 vadr)
{
        ISR isr;

        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_UNIMPL_IADDR_TRAP;
        vcpu_set_isr(vcpu, isr.val);
        vcpu_set_ifa(vcpu, vadr);
        inject_guest_interruption(vcpu, IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR);
}
#endif /* _XEN_IA64_VMX_VCPU_H */