ia64/xen-unstable
changeset 6801:52d2d5208575
Merge latest xen-unstable into xen-ia64-unstable
line diff
--- a/xen/arch/ia64/asm-offsets.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/asm-offsets.c Wed Sep 14 15:26:35 2005 -0600
@@ -147,13 +147,6 @@ void foo(void)
DEFINE(IA64_PT_REGS_CR_ISR_OFFSET, offsetof (struct pt_regs, cr_isr));
DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct pt_regs, eml_unat));
DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct pt_regs, rfi_pfs));
- DEFINE(RFI_IIP_OFFSET, offsetof(struct vcpu, arch.arch_vmx.rfi_iip));
- DEFINE(RFI_IPSR_OFFSET, offsetof(struct vcpu, arch.arch_vmx.rfi_ipsr));
- DEFINE(RFI_IFS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.rfi_ifs));
- DEFINE(RFI_PFS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.rfi_pfs));
- DEFINE(SWITCH_MRR5_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr5));
- DEFINE(SWITCH_MRR6_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr6));
- DEFINE(SWITCH_MRR7_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr7));
DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
#endif //CONFIG_VTI
DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
@@ -228,8 +221,8 @@ void foo(void)
BLANK();

#ifdef CONFIG_VTI
- DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.vpd));
- DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.in_service[0]));
+ DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.privregs));
+ DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0]));
DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));

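Note on the asm-offsets.c hunks above: these DEFINE() entries generate constants that the assembly entry code uses to address fields of struct vcpu, so the removed RFI_*/SWITCH_MRR* entries track the fields dropped from arch_vmx_struct, and IA64_VPD_BASE_OFFSET/IA64_VLSAPIC_INSVC_BASE_OFFSET now point at arch.privregs and arch.insvc. The following is a minimal sketch of the usual asm-offsets pattern, with an assumed struct and the conventional macro, not necessarily the exact macro used in this tree:

/* asm-offsets sketch: the file is only compiled; a build script scans
 * the generated assembly for the "->SYMBOL value" markers and turns
 * them into #defines usable from .S files. */
#include <stddef.h>

#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct example_vcpu {                    /* stand-in for struct vcpu */
        long pad[4];
        void *privregs;
};

void foo(void)
{
        /* After this changeset the VPD base offset is derived from
         * arch.privregs instead of arch.arch_vmx.vpd. */
        DEFINE(IA64_VPD_BASE_OFFSET, offsetof(struct example_vcpu, privregs));
}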
--- a/xen/arch/ia64/vmx/mmio.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/vmx/mmio.c Wed Sep 14 15:26:35 2005 -0600
@@ -147,7 +147,7 @@ static void low_mmio_access(VCPU *vcpu,
if(dir==IOREQ_WRITE) //write;
p->u.data = *val;
p->pdata_valid = 0;
- p->port_mm = 1;
+ p->type = 1;
p->df = 0;

set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
@@ -180,7 +180,7 @@ static void legacy_io_access(VCPU *vcpu,
if(dir==IOREQ_WRITE) //write;
p->u.data = *val;
p->pdata_valid = 0;
- p->port_mm = 0;
+ p->type = 0;
p->df = 0;

set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
@@ -353,7 +353,7 @@ static inline VCPU *lid_2_vcpu (struct d
vcpu = d->vcpu[i];
if (!vcpu)
continue;
- lid.val = VPD_CR(vcpu, lid);
+ lid.val = VCPU(vcpu, lid);
if ( lid.id == id && lid.eid == eid ) {
return vcpu;
}

--- a/xen/arch/ia64/vmx/vlsapic.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/vmx/vlsapic.c Wed Sep 14 15:26:35 2005 -0600
@@ -89,8 +89,8 @@ static void vtm_reset(VCPU *vcpu)
vtm=&(vcpu->arch.arch_vmx.vtm);
vtm->vtm_offset = 0;
vtm->vtm_local_drift = 0;
- VPD_CR(vcpu, itm) = 0;
- VPD_CR(vcpu, itv) = 0x10000;
+ VCPU(vcpu, itm) = 0;
+ VCPU(vcpu, itv) = 0x10000;
cur_itc = ia64_get_itc();
vtm->last_itc = vtm->vtm_offset + cur_itc;
}
@@ -104,12 +104,12 @@ static void vtm_timer_fn(void *data)

UINT64 vec;

- vec = VPD_CR(vcpu, itv) & 0xff;
+ vec = VCPU(vcpu, itv) & 0xff;
vmx_vcpu_pend_interrupt(vcpu, vec);

vtm=&(vcpu->arch.arch_vmx.vtm);
cur_itc = now_itc(vtm);
- vitm =VPD_CR(vcpu, itm);
+ vitm =VCPU(vcpu, itm);
//fire_itc2 = cur_itc;
//fire_itm2 = vitm;
update_last_itc(vtm,cur_itc); // pseudo read to update vITC
@@ -167,7 +167,7 @@ void vtm_set_itv(VCPU *vcpu)

vtm=&(vcpu->arch.arch_vmx.vtm);
local_irq_save(spsr);
- itv = VPD_CR(vcpu, itv);
+ itv = VCPU(vcpu, itv);
if ( ITV_IRQ_MASK(itv) )
rem_ac_timer(&vtm->vtm_timer);
vtm_interruption_update(vcpu, vtm);
@@ -190,12 +190,12 @@ void vtm_interruption_update(VCPU *vcpu,
long diff_now, diff_last;
uint64_t spsr;

- vitv = VPD_CR(vcpu, itv);
+ vitv = VCPU(vcpu, itv);
if ( ITV_IRQ_MASK(vitv) ) {
return;
}

- vitm =VPD_CR(vcpu, itm);
+ vitm =VCPU(vcpu, itm);
local_irq_save(spsr);
cur_itc =now_itc(vtm);
diff_last = vtm->last_itc - vitm;
@@ -249,7 +249,6 @@ void vtm_domain_in(VCPU *vcpu)
#define NMI_VECTOR 2
#define ExtINT_VECTOR 0
#define NULL_VECTOR -1
-#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.arch_vmx.in_service[i])
static void update_vhpi(VCPU *vcpu, int vec)
{
u64 vhpi;
@@ -265,11 +264,11 @@ static void update_vhpi(VCPU *vcpu, int
vhpi = vec / 16;
}

- VMX_VPD(vcpu,vhpi) = vhpi;
+ VCPU(vcpu,vhpi) = vhpi;
// TODO: Add support for XENO
- if ( VMX_VPD(vcpu,vac).a_int ) {
+ if ( VCPU(vcpu,vac).a_int ) {
ia64_call_vsa ( PAL_VPS_SET_PENDING_INTERRUPT,
- (uint64_t) &(vcpu->arch.arch_vmx.vpd), 0, 0,0,0,0,0);
+ (uint64_t) &(vcpu->arch.privregs), 0, 0,0,0,0,0);
}
}

@@ -284,7 +283,7 @@ void vlapic_update_shared_info(VCPU *vcp
return;

ps = get_psapic(vcpu);
- ps->vl_lapic_id = ((VPD_CR(vcpu, lid) >> 16) & 0xffff) << 16;
+ ps->vl_lapic_id = ((VCPU(vcpu, lid) >> 16) & 0xffff) << 16;
printf("vl_lapic_id = %x\n", ps->vl_lapic_id);
ps->vl_apr = 0;
// skip ps->vl_logical_dest && ps->vl_dest_format
@@ -316,18 +315,18 @@ void vlsapic_reset(VCPU *vcpu)
vl_apic_info *psapic; // shared lapic inf.
#endif

- VPD_CR(vcpu, lid) = ia64_getreg(_IA64_REG_CR_LID);
- VPD_CR(vcpu, ivr) = 0;
- VPD_CR(vcpu,tpr) = 0x10000;
- VPD_CR(vcpu, eoi) = 0;
- VPD_CR(vcpu, irr[0]) = 0;
- VPD_CR(vcpu, irr[1]) = 0;
- VPD_CR(vcpu, irr[2]) = 0;
- VPD_CR(vcpu, irr[3]) = 0;
- VPD_CR(vcpu, pmv) = 0x10000;
- VPD_CR(vcpu, cmcv) = 0x10000;
- VPD_CR(vcpu, lrr0) = 0x10000; // default reset value?
- VPD_CR(vcpu, lrr1) = 0x10000; // default reset value?
+ VCPU(vcpu, lid) = ia64_getreg(_IA64_REG_CR_LID);
+ VCPU(vcpu, ivr) = 0;
+ VCPU(vcpu,tpr) = 0x10000;
+ VCPU(vcpu, eoi) = 0;
+ VCPU(vcpu, irr[0]) = 0;
+ VCPU(vcpu, irr[1]) = 0;
+ VCPU(vcpu, irr[2]) = 0;
+ VCPU(vcpu, irr[3]) = 0;
+ VCPU(vcpu, pmv) = 0x10000;
+ VCPU(vcpu, cmcv) = 0x10000;
+ VCPU(vcpu, lrr0) = 0x10000; // default reset value?
+ VCPU(vcpu, lrr1) = 0x10000; // default reset value?
update_vhpi(vcpu, NULL_VECTOR);
for ( i=0; i<4; i++) {
VLSAPIC_INSVC(vcpu,i) = 0;
@@ -367,9 +366,9 @@ static __inline__ int highest_bits(uint6
*/
static int highest_pending_irq(VCPU *vcpu)
{
- if ( VPD_CR(vcpu, irr[0]) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
- if ( VPD_CR(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
- return highest_bits(&VPD_CR(vcpu, irr[0]));
+ if ( VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
+ if ( VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
+ return highest_bits(&VCPU(vcpu, irr[0]));
}

static int highest_inservice_irq(VCPU *vcpu)
@@ -410,7 +409,7 @@ static int
tpr_t vtpr;
uint64_t mmi;

- vtpr.val = VPD_CR(vcpu, tpr);
+ vtpr.val = VCPU(vcpu, tpr);

if ( h_inservice == NMI_VECTOR ) {
return IRQ_MASKED_BY_INSVC;
@@ -468,7 +467,7 @@ void vmx_vcpu_pend_interrupt(VCPU *vcpu,
return;
}
local_irq_save(spsr);
- VPD_CR(vcpu,irr[vector>>6]) |= 1UL<<(vector&63);
+ VCPU(vcpu,irr[vector>>6]) |= 1UL<<(vector&63);
//vlapic_update_shared_irr(vcpu);
local_irq_restore(spsr);
vcpu->arch.irq_new_pending = 1;
@@ -486,7 +485,7 @@ void vmx_vcpu_pend_batch_interrupt(VCPU

local_irq_save(spsr);
for (i=0 ; i<4; i++ ) {
- VPD_CR(vcpu,irr[i]) |= pend_irr[i];
+ VCPU(vcpu,irr[i]) |= pend_irr[i];
}
//vlapic_update_shared_irr(vcpu);
local_irq_restore(spsr);
@@ -554,7 +553,7 @@ void guest_write_eoi(VCPU *vcpu)
local_irq_save(spsr);
VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63));
local_irq_restore(spsr);
- VPD_CR(vcpu, eoi)=0; // overwrite the data
+ VCPU(vcpu, eoi)=0; // overwrite the data
vmx_check_pending_irq(vcpu);
}

@@ -573,7 +572,7 @@ uint64_t guest_read_vivr(VCPU *vcpu)
}

VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
- VPD_CR(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
+ VCPU(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
update_vhpi(vcpu, NULL_VECTOR); // clear VHPI till EOI or IRR write
//vlapic_update_shared_irr(vcpu);
local_irq_restore(spsr);
@@ -600,10 +599,10 @@ vhpi_detection(VCPU *vcpu)
IA64_PSR vpsr;

vpsr.val = vmx_vcpu_get_psr(vcpu);
- vtpr.val = VPD_CR(vcpu, tpr);
+ vtpr.val = VCPU(vcpu, tpr);

threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
- vhpi = VMX_VPD(vcpu,vhpi);
+ vhpi = VCPU(vcpu,vhpi);
if ( vhpi > threshold ) {
// interrupt actived
generate_exirq (vcpu);

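Note on the vlsapic.c hunks above: the local VLSAPIC_INSVC definition is removed here because the in-service bitmap moves from arch.arch_vmx.in_service[] to arch.insvc[], with the macro redefined in vmx_vcpu.h (see that diff below). The irr/insvc manipulation throughout this file relies on 256 interrupt vectors being kept in four 64-bit words. A minimal, self-contained sketch of that bookkeeping (illustrative names, not functions from this file):

#include <stdint.h>

/* 256 vectors in four 64-bit words: word index is vec >> 6,
 * bit index within the word is vec & 63. */
static inline void pend_vector(uint64_t irr[4], int vec)
{
        irr[vec >> 6] |= 1UL << (vec & 63);        /* mark pending   */
}

static inline void eoi_vector(uint64_t insvc[4], int vec)
{
        insvc[vec >> 6] &= ~(1UL << (vec & 63));   /* clear in-service */
}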
--- a/xen/arch/ia64/vmx/vmmu.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c Wed Sep 14 15:26:35 2005 -0600
@@ -434,7 +434,7 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
ia64_rr vrr;
u64 mfn;

- if ( !(VMX_VPD(vcpu, vpsr) & IA64_PSR_IT) ) { // I-side physical mode
+ if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) { // I-side physical mode
gpip = gip;
}
else {
@@ -726,12 +726,12 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
if(data){
if(data->p==0){
visr.na=1;
- vmx_vcpu_set_isr(vcpu,visr.val);
+ vcpu_set_isr(vcpu,visr.val);
page_not_present(vcpu, vadr);
return IA64_FAULT;
}else if(data->ma == VA_MATTR_NATPAGE){
visr.na = 1;
- vmx_vcpu_set_isr(vcpu, visr.val);
+ vcpu_set_isr(vcpu, visr.val);
dnat_page_consumption(vcpu, vadr);
return IA64_FAULT;
}else{
@@ -741,7 +741,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
}else{
if(!vhpt_enabled(vcpu, vadr, NA_REF)){
if(vpsr.ic){
- vmx_vcpu_set_isr(vcpu, visr.val);
+ vcpu_set_isr(vcpu, visr.val);
alt_dtlb(vcpu, vadr);
return IA64_FAULT;
}
@@ -756,7 +756,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
data = vtlb_lookup_ex(hcb, vrr.rid, vhpt_adr, DSIDE_TLB);
if(data){
if(vpsr.ic){
- vmx_vcpu_set_isr(vcpu, visr.val);
+ vcpu_set_isr(vcpu, visr.val);
dtlb_fault(vcpu, vadr);
return IA64_FAULT;
}
@@ -767,7 +767,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
}
else{
if(vpsr.ic){
- vmx_vcpu_set_isr(vcpu, visr.val);
+ vcpu_set_isr(vcpu, visr.val);
dvhpt_fault(vcpu, vadr);
return IA64_FAULT;
}

--- a/xen/arch/ia64/vmx/vmx_entry.S Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_entry.S Wed Sep 14 15:26:35 2005 -0600
@@ -218,6 +218,7 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
adds out0=16,r12
;;
br.call.sptk.many b0=leave_hypervisor_tail
+ ;;
mov ar.pfs=loc0
adds r8=IA64_VPD_BASE_OFFSET,r13
;;

--- a/xen/arch/ia64/vmx/vmx_hypercall.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Wed Sep 14 15:26:35 2005 -0600
@@ -98,7 +98,8 @@ void hyper_dom_mem_op(void)
vmx_vcpu_get_gr(vcpu,18,&r34);
vmx_vcpu_get_gr(vcpu,19,&r35);
vmx_vcpu_get_gr(vcpu,20,&r36);
- ret=do_dom_mem_op(r32,(u64 *)r33,r34,r35,r36);
+// ret=do_dom_mem_op(r32,(u64 *)r33,r34,r35,r36);
+ ret = 0;
printf("do_dom_mem return value: %lx\n", ret);
vmx_vcpu_set_gr(vcpu, 8, ret, 0);

--- a/xen/arch/ia64/vmx/vmx_init.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_init.c Wed Sep 14 15:26:35 2005 -0600
@@ -217,7 +217,7 @@ static void
vmx_create_vp(struct vcpu *v)
{
u64 ret;
- vpd_t *vpd = v->arch.arch_vmx.vpd;
+ vpd_t *vpd = v->arch.privregs;
u64 ivt_base;
extern char vmx_ia64_ivt;
/* ia64_ivt is function pointer, so need this tranlation */
@@ -255,7 +255,7 @@ vmx_save_state(struct vcpu *v)
u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;

/* FIXME: about setting of pal_proc_vector... time consuming */
- status = ia64_pal_vp_save(v->arch.arch_vmx.vpd, 0);
+ status = ia64_pal_vp_save(v->arch.privregs, 0);
if (status != PAL_STATUS_SUCCESS)
panic("Save vp status failed\n");

@@ -290,7 +290,7 @@ vmx_load_state(struct vcpu *v)
u64 pte_xen, pte_vhpt;
int i;

- status = ia64_pal_vp_restore(v->arch.arch_vmx.vpd, 0);
+ status = ia64_pal_vp_restore(v->arch.privregs, 0);
if (status != PAL_STATUS_SUCCESS)
panic("Restore vp status failed\n");

@@ -351,7 +351,8 @@ vmx_final_setup_domain(struct domain *d)
vpd = alloc_vpd();
ASSERT(vpd);

- v->arch.arch_vmx.vpd = vpd;
+// v->arch.arch_vmx.vpd = vpd;
+ v->arch.privregs = vpd;
vpd->virt_env_vaddr = vm_buffer;

#ifdef CONFIG_VTI

--- a/xen/arch/ia64/vmx/vmx_interrupt.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c Wed Sep 14 15:26:35 2005 -0600
@@ -51,24 +51,24 @@ collect_interruption(VCPU *vcpu)
ipsr = regs->cr_ipsr;
vpsr.val = vpsr.val | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
| IA64_PSR_DD |IA64_PSR_SS |IA64_PSR_ED));
- vmx_vcpu_set_ipsr(vcpu, vpsr.val);
+ vcpu_set_ipsr(vcpu, vpsr.val);

/* Currently, for trap, we do not advance IIP to next
* instruction. That's because we assume caller already
* set up IIP correctly
*/

- vmx_vcpu_set_iip(vcpu , regs->cr_iip);
+ vcpu_set_iip(vcpu , regs->cr_iip);

/* set vifs.v to zero */
- vifs = VPD_CR(vcpu,ifs);
+ vifs = VCPU(vcpu,ifs);
vifs &= ~IA64_IFS_V;
- vmx_vcpu_set_ifs(vcpu, vifs);
+ vcpu_set_ifs(vcpu, vifs);

- vmx_vcpu_set_iipa(vcpu, regs->cr_iipa);
+ vcpu_set_iipa(vcpu, regs->cr_iipa);
}

- vdcr = VPD_CR(vcpu,dcr);
+ vdcr = VCPU(vcpu,dcr);

/* Set guest psr
* up/mfl/mfh/pk/dt/rt/mc/it keeps unchanged
@@ -119,16 +119,16 @@ set_ifa_itir_iha (VCPU *vcpu, u64 vadr,
/* Vol2, Table 8-1 */
if ( vpsr.ic ) {
if ( set_ifa){
- vmx_vcpu_set_ifa(vcpu, vadr);
+ vcpu_set_ifa(vcpu, vadr);
}
if ( set_itir) {
value = vmx_vcpu_get_itir_on_fault(vcpu, vadr);
- vmx_vcpu_set_itir(vcpu, value);
+ vcpu_set_itir(vcpu, value);
}

if ( set_iha) {
vmx_vcpu_thash(vcpu, vadr, &value);
- vmx_vcpu_set_iha(vcpu, value);
+ vcpu_set_iha(vcpu, value);
}
}

--- a/xen/arch/ia64/vmx/vmx_phy_mode.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c Wed Sep 14 15:26:35 2005 -0600
@@ -221,10 +221,11 @@ vmx_init_all_rr(VCPU *vcpu)
VMX(vcpu,vrr[VRN5]) = 0x538;
VMX(vcpu,vrr[VRN6]) = 0x660;
VMX(vcpu,vrr[VRN7]) = 0x760;
-
+#if 0
VMX(vcpu,mrr5) = vmx_vrrtomrr(vcpu, 0x38);
VMX(vcpu,mrr6) = vmx_vrrtomrr(vcpu, 0x60);
VMX(vcpu,mrr7) = vmx_vrrtomrr(vcpu, 0x60);
+#endif
}

void
@@ -275,7 +276,7 @@ vmx_load_all_rr(VCPU *vcpu)
ia64_set_rr((VRN6 << VRN_SHIFT),
vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
vmx_switch_rr7(vmx_vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),(void *)vcpu->domain->shared_info,
- (void *)vcpu->vcpu_info->arch.privregs,
+ (void *)vcpu->arch.privregs,
( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
ia64_set_pta(vcpu->arch.arch_vmx.mpta);
#endif

--- a/xen/arch/ia64/vmx/vmx_process.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c Wed Sep 14 15:26:35 2005 -0600
@@ -82,13 +82,13 @@ vmx_ia64_handle_break (unsigned long ifa
case FW_HYPERCALL_PAL_CALL:
//printf("*** PAL hypercall: index=%d\n",regs->r28);
//FIXME: This should call a C routine
- x = pal_emulator_static(VMX_VPD(v, vgr[12]));
+ x = pal_emulator_static(VCPU(v, vgr[12]));
regs->r8 = x.status; regs->r9 = x.v0;
regs->r10 = x.v1; regs->r11 = x.v2;
#if 0
if (regs->r8)
printk("Failed vpal emulation, with index:0x%lx\n",
- VMX_VPD(v, vgr[12]));
+ VCPU(v, vgr[12]));
#endif
break;
case FW_HYPERCALL_SAL_CALL:
@@ -178,11 +178,11 @@ void vmx_reflect_interruption(UINT64 ifa
if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){
panic("Guest nested fault!");
}
- VPD_CR(vcpu,isr)=isr;
- VPD_CR(vcpu,iipa) = regs->cr_iip;
+ VCPU(vcpu,isr)=isr;
+ VCPU(vcpu,iipa) = regs->cr_iip;
vector=vec2off[vector];
if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
- VPD_CR(vcpu,iim) = iim;
+ VCPU(vcpu,iim) = iim;
else {
set_ifa_itir_iha(vcpu,ifa,1,1,1);
}
@@ -220,8 +220,8 @@ void leave_hypervisor_tail(struct pt_reg
*
* Now hardcode the vector as 0x10 temporarily
*/
- if (event_pending(v)&&(!((v->arch.arch_vmx.in_service[0])&(1UL<<0x10)))) {
- VPD_CR(v, irr[0]) |= 1UL << 0x10;
+ if (event_pending(v)&&(!(VLSAPIC_INSVC(v,0)&(1UL<<0x10)))) {
+ VCPU(v, irr[0]) |= 1UL << 0x10;
v->arch.irq_new_pending = 1;
}

@@ -295,7 +295,7 @@ void vmx_hpw_miss(VCPU *vcpu, u64 vec, u
}else if(type == DSIDE_TLB){
if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){
if(vpsr.ic){
- vmx_vcpu_set_isr(vcpu, misr.val);
+ vcpu_set_isr(vcpu, misr.val);
alt_dtlb(vcpu, vadr);
return IA64_FAULT;
} else{
@@ -313,7 +313,7 @@ void vmx_hpw_miss(VCPU *vcpu, u64 vec, u
data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
if(data){
if(vpsr.ic){
- vmx_vcpu_set_isr(vcpu, misr.val);
+ vcpu_set_isr(vcpu, misr.val);
dtlb_fault(vcpu, vadr);
return IA64_FAULT;
}else{
@@ -327,7 +327,7 @@ void vmx_hpw_miss(VCPU *vcpu, u64 vec, u
}
}else{
if(vpsr.ic){
- vmx_vcpu_set_isr(vcpu, misr.val);
+ vcpu_set_isr(vcpu, misr.val);
dvhpt_fault(vcpu, vadr);
return IA64_FAULT;
}else{
@@ -346,7 +346,7 @@ void vmx_hpw_miss(VCPU *vcpu, u64 vec, u
if(!vpsr.ic){
misr.ni=1;
}
- vmx_vcpu_set_isr(vcpu, misr.val);
+ vcpu_set_isr(vcpu, misr.val);
alt_itlb(vcpu, vadr);
return IA64_FAULT;
} else{
@@ -357,14 +357,14 @@ void vmx_hpw_miss(VCPU *vcpu, u64 vec, u
if(!vpsr.ic){
misr.ni=1;
}
- vmx_vcpu_set_isr(vcpu, misr.val);
+ vcpu_set_isr(vcpu, misr.val);
itlb_fault(vcpu, vadr);
return IA64_FAULT;
}else{
if(!vpsr.ic){
misr.ni=1;
}
- vmx_vcpu_set_isr(vcpu, misr.val);
+ vcpu_set_isr(vcpu, misr.val);
ivhpt_fault(vcpu, vadr);
return IA64_FAULT;
}

--- a/xen/arch/ia64/vmx/vmx_utility.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_utility.c Wed Sep 14 15:26:35 2005 -0600
@@ -455,7 +455,7 @@ set_rnat_consumption_isr (VCPU *vcpu,int
value = set_isr_ei_ni (vcpu);
visr.val = visr.val | value;

- vmx_vcpu_set_isr (vcpu,visr.val);
+ vcpu_set_isr (vcpu,visr.val);
}


@@ -476,7 +476,7 @@ void set_break_isr (VCPU *vcpu)
value = set_isr_ei_ni (vcpu);
visr.val = visr.val | value;

- vmx_vcpu_set_isr(vcpu, visr.val);
+ vcpu_set_isr(vcpu, visr.val);
}


@@ -508,7 +508,7 @@ void set_privileged_operation_isr (VCPU
value = set_isr_ei_ni (vcpu);
visr.val = visr.val | value;

- vmx_vcpu_set_isr (vcpu, visr.val);
+ vcpu_set_isr (vcpu, visr.val);
}


@@ -533,7 +533,7 @@ void set_privileged_reg_isr (VCPU *vcpu,
value = set_isr_ei_ni (vcpu);
visr.val = visr.val | value;

- vmx_vcpu_set_isr (vcpu, visr.val);
+ vcpu_set_isr (vcpu, visr.val);
}


@@ -559,7 +559,7 @@ void set_rsv_reg_field_isr (VCPU *vcpu)
value = set_isr_ei_ni (vcpu);
visr.val = visr.val | value;

- vmx_vcpu_set_isr (vcpu, visr.val);
+ vcpu_set_isr (vcpu, visr.val);
}


@@ -580,7 +580,7 @@ void set_illegal_op_isr (VCPU *vcpu)
value = set_isr_ei_ni (vcpu);
visr.val = visr.val | value;

- vmx_vcpu_set_isr (vcpu, visr.val);
+ vcpu_set_isr (vcpu, visr.val);
}


@@ -594,7 +594,7 @@ void set_isr_reg_nat_consumption(VCPU *v
isr.na = non_access;
isr.r = 1;
isr.w = 0;
- vmx_vcpu_set_isr(vcpu, isr.val);
+ vcpu_set_isr(vcpu, isr.val);
return;
}

@@ -606,7 +606,7 @@ void set_isr_for_priv_fault(VCPU *vcpu,
isr.val = set_isr_ei_ni(vcpu);
isr.code = IA64_PRIV_OP_FAULT;
isr.na = non_access;
- vmx_vcpu_set_isr(vcpu, isr.val);
+ vcpu_set_isr(vcpu, isr.val);

return;
}

--- a/xen/arch/ia64/vmx/vmx_vcpu.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c Wed Sep 14 15:26:35 2005 -0600
@@ -100,7 +100,7 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
* Since these bits will become 0, after success execution of each
* instruction, we will change set them to mIA64_PSR
*/
- VMX_VPD(vcpu,vpsr) = value &
+ VCPU(vcpu,vpsr) = value &
(~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
));
@@ -167,7 +167,7 @@ IA64FAULT vmx_vcpu_increment_iip(VCPU *v
IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
));

- VMX_VPD(vcpu, vpsr) = vpsr.val;
+ VCPU(vcpu, vpsr) = vpsr.val;

ipsr->val &=
(~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
@@ -185,7 +185,7 @@ IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
vpsr.val = vmx_vcpu_get_psr(vcpu);

if(!vpsr.ic)
- VPD_CR(vcpu,ifs) = regs->cr_ifs;
+ VCPU(vcpu,ifs) = regs->cr_ifs;
regs->cr_ifs = IA64_IFS_V;
return (IA64_NO_FAULT);
}
@@ -244,7 +244,7 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
#else
case VRN7:
vmx_switch_rr7(vmx_vrrtomrr(vcpu,val),vcpu->domain->shared_info,
- (void *)vcpu->vcpu_info->arch.privregs,
+ (void *)vcpu->arch.privregs,
( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
break;
#endif
@@ -307,15 +307,15 @@ IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
// TODO: Only allowed for current vcpu
UINT64 ifs, psr;
REGS *regs = vcpu_regs(vcpu);
- psr = VPD_CR(vcpu,ipsr);
+ psr = VCPU(vcpu,ipsr);
vmx_vcpu_set_psr(vcpu,psr);
- ifs=VPD_CR(vcpu,ifs);
+ ifs=VCPU(vcpu,ifs);
if((ifs>>63)&&(ifs<<1)){
ifs=(regs->cr_ifs)&0x7f;
regs->rfi_pfs = (ifs<<7)|ifs;
- regs->cr_ifs = VPD_CR(vcpu,ifs);
+ regs->cr_ifs = VCPU(vcpu,ifs);
}
- regs->cr_iip = VPD_CR(vcpu,iip);
+ regs->cr_iip = VCPU(vcpu,iip);
return (IA64_NO_FAULT);
}

@@ -323,7 +323,7 @@ IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
UINT64
vmx_vcpu_get_psr(VCPU *vcpu)
{
- return VMX_VPD(vcpu,vpsr);
+ return VCPU(vcpu,vpsr);
}


@@ -334,9 +334,9 @@ vmx_vcpu_get_bgr(VCPU *vcpu, unsigned in

vpsr.val = vmx_vcpu_get_psr(vcpu);
if ( vpsr.bn ) {
- *val=VMX_VPD(vcpu,vgr[reg-16]);
+ *val=VCPU(vcpu,vgr[reg-16]);
// Check NAT bit
- if ( VMX_VPD(vcpu,vnat) & (1UL<<(reg-16)) ) {
+ if ( VCPU(vcpu,vnat) & (1UL<<(reg-16)) ) {
// TODO
//panic ("NAT consumption fault\n");
return IA64_FAULT;
@@ -344,8 +344,8 @@ vmx_vcpu_get_bgr(VCPU *vcpu, unsigned in

}
else {
- *val=VMX_VPD(vcpu,vbgr[reg-16]);
- if ( VMX_VPD(vcpu,vbnat) & (1UL<<reg) ) {
+ *val=VCPU(vcpu,vbgr[reg-16]);
+ if ( VCPU(vcpu,vbnat) & (1UL<<reg) ) {
//panic ("NAT consumption fault\n");
return IA64_FAULT;
}
@@ -360,19 +360,19 @@ vmx_vcpu_set_bgr(VCPU *vcpu, unsigned in
IA64_PSR vpsr;
vpsr.val = vmx_vcpu_get_psr(vcpu);
if ( vpsr.bn ) {
- VMX_VPD(vcpu,vgr[reg-16]) = val;
+ VCPU(vcpu,vgr[reg-16]) = val;
if(nat){
- VMX_VPD(vcpu,vnat) |= ( 1UL<<(reg-16) );
+ VCPU(vcpu,vnat) |= ( 1UL<<(reg-16) );
}else{
- VMX_VPD(vcpu,vbnat) &= ~( 1UL<<(reg-16) );
+ VCPU(vcpu,vbnat) &= ~( 1UL<<(reg-16) );
}
}
else {
- VMX_VPD(vcpu,vbgr[reg-16]) = val;
+ VCPU(vcpu,vbgr[reg-16]) = val;
if(nat){
- VMX_VPD(vcpu,vnat) |= ( 1UL<<(reg) );
+ VCPU(vcpu,vnat) |= ( 1UL<<(reg) );
}else{
- VMX_VPD(vcpu,vbnat) &= ~( 1UL<<(reg) );
+ VCPU(vcpu,vbnat) &= ~( 1UL<<(reg) );
}
}
return IA64_NO_FAULT;
@@ -447,7 +447,7 @@ IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu,
IA64FAULT
vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,tpr)=val;
+ VCPU(vcpu,tpr)=val;
vcpu->arch.irq_new_condition = 1;
return IA64_NO_FAULT;
}

--- a/xen/arch/ia64/vmx/vmx_virt.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c Wed Sep 14 15:26:35 2005 -0600
@@ -180,7 +180,7 @@ IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu
if(vmx_vcpu_get_gr(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
panic(" get_psr nat bit fault\n");

- val = (val & MASK(0, 32)) | (VMX_VPD(vcpu, vpsr) & MASK(32, 32));
+ val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
#if 0
if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32))))
while(1);
@@ -546,10 +546,10 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS
}
#endif // VMAL_NO_FAULT_CHECK

- if (vmx_vcpu_get_itir(vcpu,&itir)){
+ if (vcpu_get_itir(vcpu,&itir)){
return(IA64_FAULT);
}
- if (vmx_vcpu_get_ifa(vcpu,&ifa)){
+ if (vcpu_get_ifa(vcpu,&ifa)){
return(IA64_FAULT);
}
#ifdef VMAL_NO_FAULT_CHECK
@@ -603,10 +603,10 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
}
#endif // VMAL_NO_FAULT_CHECK

- if (vmx_vcpu_get_itir(vcpu,&itir)){
+ if (vcpu_get_itir(vcpu,&itir)){
return(IA64_FAULT);
}
- if (vmx_vcpu_get_ifa(vcpu,&ifa)){
+ if (vcpu_get_ifa(vcpu,&ifa)){
return(IA64_FAULT);
}
#ifdef VMAL_NO_FAULT_CHECK
@@ -657,10 +657,10 @@ IA64FAULT itc_fault_check(VCPU *vcpu, IN
}
#endif // VMAL_NO_FAULT_CHECK

- if (vmx_vcpu_get_itir(vcpu,itir)){
+ if (vcpu_get_itir(vcpu,itir)){
return(IA64_FAULT);
}
- if (vmx_vcpu_get_ifa(vcpu,ifa)){
+ if (vcpu_get_ifa(vcpu,ifa)){
return(IA64_FAULT);
}
#ifdef VMAL_NO_FAULT_CHECK
@@ -1178,21 +1178,21 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
#endif //CHECK_FAULT
extern u64 cr_igfld_mask(int index, u64 value);
r2 = cr_igfld_mask(inst.M32.cr3,r2);
- VMX_VPD(vcpu, vcr[inst.M32.cr3]) = r2;
+ VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
switch (inst.M32.cr3) {
case 0: return vmx_vcpu_set_dcr(vcpu,r2);
case 1: return vmx_vcpu_set_itm(vcpu,r2);
case 2: return vmx_vcpu_set_iva(vcpu,r2);
case 8: return vmx_vcpu_set_pta(vcpu,r2);
- case 16:return vmx_vcpu_set_ipsr(vcpu,r2);
- case 17:return vmx_vcpu_set_isr(vcpu,r2);
- case 19:return vmx_vcpu_set_iip(vcpu,r2);
- case 20:return vmx_vcpu_set_ifa(vcpu,r2);
- case 21:return vmx_vcpu_set_itir(vcpu,r2);
- case 22:return vmx_vcpu_set_iipa(vcpu,r2);
- case 23:return vmx_vcpu_set_ifs(vcpu,r2);
- case 24:return vmx_vcpu_set_iim(vcpu,r2);
- case 25:return vmx_vcpu_set_iha(vcpu,r2);
+ case 16:return vcpu_set_ipsr(vcpu,r2);
+ case 17:return vcpu_set_isr(vcpu,r2);
+ case 19:return vcpu_set_iip(vcpu,r2);
+ case 20:return vcpu_set_ifa(vcpu,r2);
+ case 21:return vcpu_set_itir(vcpu,r2);
+ case 22:return vcpu_set_iipa(vcpu,r2);
+ case 23:return vcpu_set_ifs(vcpu,r2);
+ case 24:return vcpu_set_iim(vcpu,r2);
+ case 25:return vcpu_set_iha(vcpu,r2);
case 64:printk("SET LID to 0x%lx\n", r2);
return vmx_vcpu_set_lid(vcpu,r2);
case 65:return IA64_NO_FAULT;
@@ -1213,10 +1213,13 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,


#define cr_get(cr) \
+ ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
+ vmx_vcpu_set_gr(vcpu, tgt, val,0):fault;
+
+#define vmx_cr_get(cr) \
((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
vmx_vcpu_set_gr(vcpu, tgt, val,0):fault;

-
IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
{
UINT64 tgt = inst.M33.r1;
@@ -1241,10 +1244,10 @@ IA64FAULT vmx_emul_mov_from_cr(VCPU *vcp

// from_cr_cnt[inst.M33.cr3]++;
switch (inst.M33.cr3) {
- case 0: return cr_get(dcr);
- case 1: return cr_get(itm);
- case 2: return cr_get(iva);
- case 8: return cr_get(pta);
+ case 0: return vmx_cr_get(dcr);
+ case 1: return vmx_cr_get(itm);
+ case 2: return vmx_cr_get(iva);
+ case 8: return vmx_cr_get(pta);
case 16:return cr_get(ipsr);
case 17:return cr_get(isr);
case 19:return cr_get(iip);
@@ -1254,23 +1257,21 @@ IA64FAULT vmx_emul_mov_from_cr(VCPU *vcp
case 23:return cr_get(ifs);
case 24:return cr_get(iim);
case 25:return cr_get(iha);
-// case 64:val = ia64_getreg(_IA64_REG_CR_LID);
-// return vmx_vcpu_set_gr(vcpu,tgt,val,0);
- case 64:return cr_get(lid);
+ case 64:return vmx_cr_get(lid);
case 65:
- vmx_vcpu_get_ivr(vcpu,&val);
- return vmx_vcpu_set_gr(vcpu,tgt,val,0);
- case 66:return cr_get(tpr);
+ vmx_vcpu_get_ivr(vcpu,&val);
+ return vmx_vcpu_set_gr(vcpu,tgt,val,0);
+ case 66:return vmx_cr_get(tpr);
case 67:return vmx_vcpu_set_gr(vcpu,tgt,0L,0);
- case 68:return cr_get(irr0);
- case 69:return cr_get(irr1);
- case 70:return cr_get(irr2);
- case 71:return cr_get(irr3);
- case 72:return cr_get(itv);
- case 73:return cr_get(pmv);
- case 74:return cr_get(cmcv);
- case 80:return cr_get(lrr0);
- case 81:return cr_get(lrr1);
+ case 68:return vmx_cr_get(irr0);
+ case 69:return vmx_cr_get(irr1);
+ case 70:return vmx_cr_get(irr2);
+ case 71:return vmx_cr_get(irr3);
+ case 72:return vmx_cr_get(itv);
+ case 73:return vmx_cr_get(pmv);
+ case 74:return vmx_cr_get(cmcv);
+ case 80:return vmx_cr_get(lrr0);
+ case 81:return vmx_cr_get(lrr1);
default:
panic("Read reserved cr register");
}
@@ -1355,7 +1356,7 @@ if ( (cause == 0xff && opcode == 0x1e000
#else
inst.inst=opcode;
#endif /* BYPASS_VMAL_OPCODE */
-
+ vcpu_set_regs(vcpu, regs);
/*
* Switch to actual virtual rid in rr0 and rr4,
* which is required by some tlb related instructions.

--- a/xen/arch/ia64/xen/domain.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/xen/domain.c Wed Sep 14 15:26:35 2005 -0600
@@ -194,10 +194,12 @@ void arch_do_createdomain(struct vcpu *v
while (1);
}
memset(d->shared_info, 0, PAGE_SIZE);
- d->shared_info->vcpu_data[0].arch.privregs =
+#if 0
+ d->vcpu[0].arch.privregs =
alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
- printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[0].arch.privregs);
- memset(d->shared_info->vcpu_data[0].arch.privregs, 0, PAGE_SIZE);
+ printf("arch_vcpu_info=%p\n", d->vcpu[0].arch.privregs);
+ memset(d->vcpu.arch.privregs, 0, PAGE_SIZE);
+#endif
v->vcpu_info = &(d->shared_info->vcpu_data[0]);

d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
@@ -216,7 +218,7 @@ void arch_do_createdomain(struct vcpu *v
if (((d->arch.metaphysical_rr0 = allocate_metaphysical_rr()) == -1UL)
|| ((d->arch.metaphysical_rr4 = allocate_metaphysical_rr()) == -1UL))
BUG();
- VCPU(v, metaphysical_mode) = 1;
+// VCPU(v, metaphysical_mode) = 1;
v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
v->arch.metaphysical_rr4 = d->arch.metaphysical_rr4;
v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
@@ -284,12 +286,17 @@ int arch_set_info_guest(struct vcpu *v,

vmx_setup_platform(v, c);
}
-
+ else{
+ v->arch.privregs =
+ alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+ printf("arch_vcpu_info=%p\n", v->arch.privregs);
+ memset(v->arch.privregs, 0, PAGE_SIZE);
+ }
*regs = c->regs;
new_thread(v, regs->cr_iip, 0, 0);

v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
- if ( c->vcpu.privregs && copy_from_user(v->vcpu_info->arch.privregs,
+ if ( c->vcpu.privregs && copy_from_user(v->arch.privregs,
c->vcpu.privregs, sizeof(mapped_regs_t))) {
printk("Bad ctxt address in arch_set_info_guest: 0x%lx\n", c->vcpu.privregs);
return -EFAULT;
@@ -309,10 +316,10 @@ void arch_do_boot_vcpu(struct vcpu *v)
struct domain *d = v->domain;
printf("arch_do_boot_vcpu: not implemented\n");

- d->shared_info->vcpu_data[v->vcpu_id].arch.privregs =
+ d->vcpu[v->vcpu_id]->arch.privregs =
alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
- printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[v->vcpu_id].arch.privregs);
- memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0, PAGE_SIZE);
+ printf("arch_vcpu_info=%p\n", d->vcpu[v->vcpu_id]->arch.privregs);
+ memset(d->vcpu[v->vcpu_id]->arch.privregs, 0, PAGE_SIZE);
return;
}

@@ -357,10 +364,10 @@ void new_thread(struct vcpu *v,
#ifdef CONFIG_VTI
vmx_init_all_rr(v);
if (d == dom0)
- VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
+ VCPU(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
/* Virtual processor context setup */
- VMX_VPD(v, vpsr) = IA64_PSR_BN;
- VPD_CR(v, dcr) = 0;
+ VCPU(v, vpsr) = IA64_PSR_BN;
+ VCPU(v, dcr) = 0;
#endif
} else {
init_all_rr(v);
@@ -995,6 +1002,12 @@ int construct_dom0(struct domain *d,
printk("Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
if (vmx_dom0)
vmx_final_setup_domain(dom0);
+ else{
+ d->vcpu[0]->arch.privregs =
+ alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+ printf("arch_vcpu_info=%p\n", d->vcpu[0]->arch.privregs);
+ memset(d->vcpu[0]->arch.privregs, 0, PAGE_SIZE);
+ }

set_bit(_DOMF_constructed, &d->domain_flags);

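Note on the domain.c hunks above: the privileged-register area is no longer carved out of shared_info->vcpu_data[]; each vcpu now gets its own mapped_regs_t hung off v->arch.privregs, allocated in arch_set_info_guest()/arch_do_boot_vcpu()/construct_dom0() for non-VTI domains and supplied by alloc_vpd() for VTI domains. A condensed sketch of the repeated allocation pattern, using a hypothetical helper name (the real code open-codes it at each call site):

/* Sketch only: mirrors the allocation/printf/memset sequence in the
 * diff; alloc_privregs() is not a function in this changeset. */
static void alloc_privregs(struct vcpu *v)
{
        v->arch.privregs =
                alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
        printf("arch_vcpu_info=%p\n", v->arch.privregs);
        memset(v->arch.privregs, 0, PAGE_SIZE);
}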
--- a/xen/arch/ia64/xen/process.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/xen/process.c Wed Sep 14 15:26:35 2005 -0600
@@ -173,7 +173,7 @@ void reflect_interruption(unsigned long
struct vcpu *v = current;

if (vector == IA64_EXTINT_VECTOR) {
-
+
extern unsigned long vcpu_verbose, privop_trace;
static first_extint = 1;
if (first_extint) {

--- a/xen/arch/ia64/xen/regionreg.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/xen/regionreg.c Wed Sep 14 15:26:35 2005 -0600
@@ -234,7 +234,7 @@ int set_one_rr(unsigned long rr, unsigne
newrrv.ve = VHPT_ENABLED_REGION_7;
newrrv.ps = IA64_GRANULE_SHIFT;
ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
- v->vcpu_info->arch.privregs);
+ v->arch.privregs);
}
else {
newrrv.rid = newrid;
@@ -252,7 +252,7 @@ int set_one_rr(unsigned long rr, unsigne
newrrv.ps = PAGE_SHIFT;
if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
- v->vcpu_info->arch.privregs);
+ v->arch.privregs);
else set_rr(rr,newrrv.rrval);
#endif
return 1;

--- a/xen/arch/ia64/xen/vcpu.c Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/arch/ia64/xen/vcpu.c Wed Sep 14 15:26:35 2005 -0600
@@ -355,7 +355,11 @@ extern unsigned long privop_trace;

IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
- *pval = PSCBX(vcpu,iva) & ~0x7fffL;
+ if(VMX_DOMAIN(vcpu)){
+ *pval = PSCB(vcpu,iva) & ~0x7fffL;
+ }else{
+ *pval = PSCBX(vcpu,iva) & ~0x7fffL;
+ }
return (IA64_NO_FAULT);
}

@@ -435,7 +439,7 @@ IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT
UINT64 val = PSCB(vcpu,iipa);
// SP entry code does not save iipa yet nor does it get
// properly delivered in the pscb
- printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
+// printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
*pval = val;
return (IA64_NO_FAULT);
}
@@ -480,7 +484,11 @@ extern unsigned long privop_trace;

IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
{
- PSCBX(vcpu,iva) = val & ~0x7fffL;
+ if(VMX_DOMAIN(vcpu)){
+ PSCB(vcpu,iva) = val & ~0x7fffL;
+ }else{
+ PSCBX(vcpu,iva) = val & ~0x7fffL;
+ }
return (IA64_NO_FAULT);
}

@@ -539,7 +547,7 @@ IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT
{
// SP entry code does not save iipa yet nor does it get
// properly delivered in the pscb
- printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
+// printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
PSCB(vcpu,iipa) = val;
return IA64_NO_FAULT;
}
@@ -578,11 +586,11 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
printf("vcpu_pend_interrupt: bad vector\n");
return;
}
-#ifdef CONFIG_VTI
+//#ifdef CONFIG_VTI
if ( VMX_DOMAIN(vcpu) ) {
- set_bit(vector,VPD_CR(vcpu,irr));
+ set_bit(vector,VCPU(vcpu,irr));
} else
-#endif // CONFIG_VTI
+//#endif // CONFIG_VTI
{
/* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
if (test_bit(vector,PSCBX(vcpu,irr))) {

--- a/xen/include/asm-ia64/domain.h Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/include/asm-ia64/domain.h Wed Sep 14 15:26:35 2005 -0600
@@ -54,7 +54,7 @@ struct arch_vcpu {
unsigned long dtlb_pte;
unsigned long irr[4];
unsigned long insvc[4];
- unsigned long iva;
+ unsigned long iva;
unsigned long dcr;
unsigned long itc;
unsigned long domain_itm;
@@ -63,6 +63,7 @@ struct arch_vcpu {
unsigned long xen_timer_interval;
#endif
void *regs; /* temporary until find a better way to do privops */
+ mapped_regs_t *privregs; /* save the state of vcpu */
int metaphysical_rr0; // from arch_domain (so is pinned)
int metaphysical_rr4; // from arch_domain (so is pinned)
int metaphysical_saved_rr0; // from arch_domain (so is pinned)

--- a/xen/include/asm-ia64/vcpu.h Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/include/asm-ia64/vcpu.h Wed Sep 14 15:26:35 2005 -0600
@@ -16,7 +16,7 @@ typedef struct vcpu VCPU;

typedef cpu_user_regs_t REGS;

-#define VCPU(_v,_x) _v->vcpu_info->arch.privregs->_x
+#define VCPU(_v,_x) _v->arch.privregs->_x

#define PRIVOP_ADDR_COUNT
#ifdef PRIVOP_ADDR_COUNT

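Note on the vcpu.h hunk above: this is the pivot of the whole changeset. The privileged-register area (mapped_regs_t, added to struct arch_vcpu in the domain.h diff) now hangs directly off the vcpu, and the VCPU() accessor reaches it through v->arch.privregs instead of v->vcpu_info->arch.privregs. The VTI-only VMX_VPD() and VPD_CR() macros are retired (see the vmx_vcpu.h diff below) and their call sites rewritten to the same VCPU() accessor, so VTI and non-VTI code share one path. A small sketch of how an access expands after the change (illustrative, parenthesization added):

/* #define VCPU(_v, _x)  ((_v)->arch.privregs->_x)
 *
 * VCPU(vcpu, irr[0])  ==>  vcpu->arch.privregs->irr[0]
 * VCPU(vcpu, vpsr)    ==>  vcpu->arch.privregs->vpsr
 */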
--- a/xen/include/asm-ia64/vmx_vcpu.h Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/include/asm-ia64/vmx_vcpu.h Wed Sep 14 15:26:35 2005 -0600
@@ -42,14 +42,14 @@
#define VRN5 0x5UL
#define VRN6 0x6UL
#define VRN7 0x7UL
-
+// for vlsapic
+#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
// this def for vcpu_regs won't work if kernel stack is present
#define vcpu_regs(vcpu) (((struct pt_regs *) ((char *) (vcpu) + IA64_STK_OFFSET)) - 1)
-#define VMX_VPD(x,y) ((x)->arch.arch_vmx.vpd->y)
+//#define VMX_VPD(x,y) ((x)->arch.arch_vmx.vpd->y)

#define VMX(x,y) ((x)->arch.arch_vmx.y)

-#define VPD_CR(x,y) (((cr_t*)VMX_VPD(x,vcr))->y)

#define VMM_RR_SHIFT 20
#define VMM_RR_MASK ((1UL<<VMM_RR_SHIFT)-1)
@@ -129,89 +129,34 @@ extern void memwrite_p(VCPU *vcpu, u64 *
static inline
IA64FAULT vmx_vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,dcr);
+ *pval = VCPU(vcpu,dcr);
return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,itm);
+ *pval = VCPU(vcpu,itm);
return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,iva);
+ *pval = VCPU(vcpu,iva);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,pta);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,ipsr);
- return (IA64_NO_FAULT);
-}
-
-static inline
-IA64FAULT vmx_vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,isr);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,iip);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,ifa);
+ *pval = VCPU(vcpu,pta);
return (IA64_NO_FAULT);
}

static inline
-IA64FAULT vmx_vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,itir);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,iipa);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,ifs);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,iim);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,iha);
- return (IA64_NO_FAULT);
-}
-static inline
IA64FAULT vmx_vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,lid);
+ *pval = VCPU(vcpu,lid);
return (IA64_NO_FAULT);
}
static inline
@@ -223,7 +168,7 @@ IA64FAULT vmx_vcpu_get_ivr(VCPU *vcpu, U
static inline
IA64FAULT vmx_vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,tpr);
+ *pval = VCPU(vcpu,tpr);
return (IA64_NO_FAULT);
}
static inline
@@ -235,54 +180,54 @@ IA64FAULT vmx_vcpu_get_eoi(VCPU *vcpu, U
static inline
IA64FAULT vmx_vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,irr[0]);
+ *pval = VCPU(vcpu,irr[0]);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,irr[1]);
+ *pval = VCPU(vcpu,irr[1]);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,irr[2]);
+ *pval = VCPU(vcpu,irr[2]);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,irr[3]);
+ *pval = VCPU(vcpu,irr[3]);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,itv);
+ *pval = VCPU(vcpu,itv);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,pmv);
+ *pval = VCPU(vcpu,pmv);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,cmcv);
+ *pval = VCPU(vcpu,cmcv);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,lrr0);
+ *pval = VCPU(vcpu,lrr0);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
-{ *pval = VPD_CR(vcpu,lrr1);
+{ *pval = VCPU(vcpu,lrr1);
return (IA64_NO_FAULT);
}
static inline
@@ -290,7 +235,7 @@ IA64FAULT
vmx_vcpu_set_dcr(VCPU *vcpu, u64 val)
{
u64 mdcr, mask;
- VPD_CR(vcpu,dcr)=val;
+ VCPU(vcpu,dcr)=val;
/* All vDCR bits will go to mDCR, except for be/pp bit */
mdcr = ia64_get_dcr();
mask = IA64_DCR_BE | IA64_DCR_PP;
@@ -307,7 +252,7 @@ vmx_vcpu_set_itm(VCPU *vcpu, u64 val)
vtime_t *vtm;

vtm=&(vcpu->arch.arch_vmx.vtm);
- VPD_CR(vcpu,itm)=val;
+ VCPU(vcpu,itm)=val;
#ifdef CONFIG_VTI
vtm_interruption_update(vcpu, vtm);
#endif
@@ -317,7 +262,7 @@ static inline
IA64FAULT
vmx_vcpu_set_iva(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,iva)=val;
+ VCPU(vcpu,iva)=val;
return IA64_NO_FAULT;
}

@@ -325,78 +270,7 @@ static inline
IA64FAULT
vmx_vcpu_set_pta(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,pta)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_ipsr(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,ipsr)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_isr(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,isr)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_iip(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,iip)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_ifa(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,ifa)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_itir(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,itir)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_iipa(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,iipa)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_ifs(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,ifs)=val;
- return IA64_NO_FAULT;
-}
-static inline
-IA64FAULT
-vmx_vcpu_set_iim(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,iim)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_iha(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,iha)=val;
+ VCPU(vcpu,pta)=val;
return IA64_NO_FAULT;
}

@@ -404,7 +278,7 @@ static inline
IA64FAULT
vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,lid)=val;
+ VCPU(vcpu,lid)=val;
#ifdef V_IOSAPIC_READY
vlapic_update_shared_info(vcpu);
#endif
@@ -427,7 +301,7 @@ IA64FAULT
vmx_vcpu_set_itv(VCPU *vcpu, u64 val)
{

- VPD_CR(vcpu,itv)=val;
+ VCPU(vcpu,itv)=val;
#ifdef CONFIG_VTI
vtm_set_itv(vcpu);
#endif
@@ -437,28 +311,28 @@ static inline
IA64FAULT
vmx_vcpu_set_pmv(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,pmv)=val;
+ VCPU(vcpu,pmv)=val;
return IA64_NO_FAULT;
}
static inline
IA64FAULT
vmx_vcpu_set_cmcv(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,cmcv)=val;
+ VCPU(vcpu,cmcv)=val;
return IA64_NO_FAULT;
}
static inline
IA64FAULT
vmx_vcpu_set_lrr0(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,lrr0)=val;
+ VCPU(vcpu,lrr0)=val;
return IA64_NO_FAULT;
}
static inline
IA64FAULT
vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,lrr1)=val;
+ VCPU(vcpu,lrr1)=val;
return IA64_NO_FAULT;
}

@@ -502,7 +376,7 @@ IA64FAULT vmx_vcpu_get_cpuid(VCPU *vcpu,
if(reg > 4){
panic("there are only five cpuid registers");
}
- *pval=VMX_VPD(vcpu,vcpuid[reg]);
+ *pval=VCPU(vcpu,vcpuid[reg]);
return (IA64_NO_FAULT);
}

@@ -583,14 +457,14 @@ static inline
IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
{

- VMX_VPD(vcpu,vpsr) &= ~IA64_PSR_BN;
+ VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
{

- VMX_VPD(vcpu,vpsr) |= IA64_PSR_BN;
+ VCPU(vcpu,vpsr) |= IA64_PSR_BN;
return (IA64_NO_FAULT);
}
#if 0

--- a/xen/include/asm-ia64/vmx_vpd.h Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/include/asm-ia64/vmx_vpd.h Wed Sep 14 15:26:35 2005 -0600
@@ -64,19 +64,19 @@ typedef struct {

struct arch_vmx_struct {
// struct virutal_platform_def vmx_platform;
- vpd_t *vpd;
+// vpd_t *vpd;
vtime_t vtm;
unsigned long vrr[8];
unsigned long vkr[8];
- unsigned long mrr5;
- unsigned long mrr6;
- unsigned long mrr7;
+// unsigned long mrr5;
+// unsigned long mrr6;
+// unsigned long mrr7;
unsigned long mpta;
- unsigned long rfi_pfs;
- unsigned long rfi_iip;
- unsigned long rfi_ipsr;
- unsigned long rfi_ifs;
- unsigned long in_service[4]; // vLsapic inservice IRQ bits
+// unsigned long rfi_pfs;
+// unsigned long rfi_iip;
+// unsigned long rfi_ipsr;
+// unsigned long rfi_ifs;
+// unsigned long in_service[4]; // vLsapic inservice IRQ bits
unsigned long flags;
};

--- a/xen/include/public/arch-ia64.h Tue Sep 13 14:20:02 2005 -0600
+++ b/xen/include/public/arch-ia64.h Wed Sep 14 15:26:35 2005 -0600
@@ -95,35 +95,37 @@ typedef struct cpu_user_regs{
unsigned long r2; /* scratch */
unsigned long r3; /* scratch */

-#ifdef CONFIG_VTI
- unsigned long r4; /* preserved */
- unsigned long r5; /* preserved */
- unsigned long r6; /* preserved */
- unsigned long r7; /* preserved */
- unsigned long cr_iipa; /* for emulation */
- unsigned long cr_isr; /* for emulation */
- unsigned long eml_unat; /* used for emulating instruction */
- unsigned long rfi_pfs; /* used for elulating rfi */
-#endif
-
- /* The following registers are saved by SAVE_REST: */
- unsigned long r16; /* scratch */
- unsigned long r17; /* scratch */
- unsigned long r18; /* scratch */
- unsigned long r19; /* scratch */
- unsigned long r20; /* scratch */
- unsigned long r21; /* scratch */
- unsigned long r22; /* scratch */
- unsigned long r23; /* scratch */
- unsigned long r24; /* scratch */
- unsigned long r25; /* scratch */
- unsigned long r26; /* scratch */
- unsigned long r27; /* scratch */
- unsigned long r28; /* scratch */
- unsigned long r29; /* scratch */
- unsigned long r30; /* scratch */
- unsigned long r31; /* scratch */
-
+ union {
+ struct {
+ /* The following registers are saved by SAVE_REST: */
+ unsigned long r16; /* scratch */
+ unsigned long r17; /* scratch */
+ unsigned long r18; /* scratch */
+ unsigned long r19; /* scratch */
+ unsigned long r20; /* scratch */
+ unsigned long r21; /* scratch */
+ unsigned long r22; /* scratch */
+ unsigned long r23; /* scratch */
+ unsigned long r24; /* scratch */
+ unsigned long r25; /* scratch */
+ unsigned long r26; /* scratch */
+ unsigned long r27; /* scratch */
+ unsigned long r28; /* scratch */
+ unsigned long r29; /* scratch */
+ unsigned long r30; /* scratch */
+ unsigned long r31; /* scratch */
+ };
+ struct {
+ unsigned long r4; /* preserved */
+ unsigned long r5; /* preserved */
+ unsigned long r6; /* preserved */
+ unsigned long r7; /* preserved */
+ unsigned long cr_iipa; /* for emulation */
+ unsigned long cr_isr; /* for emulation */
+ unsigned long eml_unat; /* used for emulating instruction */
+ unsigned long rfi_pfs; /* used for elulating rfi */
+ };
+ };
unsigned long ar_ccv; /* compare/exchange value (scratch) */

/*
@@ -238,11 +240,13 @@ typedef struct {
unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
};
};
+#if 0
#ifdef CONFIG_VTI
unsigned long reserved6[3456];
unsigned long vmm_avail[128];
unsigned long reserved7[4096];
#endif
+#endif
} mapped_regs_t;

typedef struct {

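Note on the arch-ia64.h hunks above: instead of growing the public cpu_user_regs layout with VTI-only fields under CONFIG_VTI, the emulation fields are now placed in an anonymous union over the SAVE_REST scratch-register block, so the structure has one layout regardless of CONFIG_VTI. A minimal sketch of the overlay idea, with the field list shortened and an illustrative struct name:

/* The two anonymous structs share storage: sizeof(struct
 * regs_overlay_example) is determined by the larger member, and the
 * emulation fields alias the scratch registers rather than adding to
 * the structure. */
struct regs_overlay_example {
        union {
                struct {                /* saved by SAVE_REST */
                        unsigned long r16, r17, r18, r19;
                        unsigned long r20, r21, r22, r23;
                };
                struct {                /* used only by VTI emulation */
                        unsigned long r4, r5, r6, r7;
                        unsigned long cr_iipa, cr_isr;
                        unsigned long eml_unat, rfi_pfs;
                };
        };
        unsigned long ar_ccv;           /* layout continues unchanged */
};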