ia64/xen-unstable

changeset 7915:6ac2a06e238b

Handle lds (speculative loads) on VTI domains and fix several bugs in VTI domain handling
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author djm@kirby.fc.hp.com
date Tue Nov 22 12:17:18 2005 -0600 (2005-11-22)
parents c35a32f96d20
children 90b9e8569dfb
files xen/arch/ia64/vmx/vlsapic.c xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vmx_virt.c xen/arch/ia64/vmx/vtlb.c
line diff
     1.1 --- a/xen/arch/ia64/vmx/vlsapic.c	Sat Nov 19 15:41:08 2005 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vlsapic.c	Tue Nov 22 12:17:18 2005 -0600
     1.3 @@ -529,7 +529,7 @@ int vmx_check_pending_irq(VCPU *vcpu)
     1.4      int injected=0;
     1.5      uint64_t    isr;
     1.6      IA64_PSR    vpsr;
     1.7 -
     1.8 +    REGS *regs=vcpu_regs(vcpu);
     1.9      local_irq_save(spsr);
    1.10      h_pending = highest_pending_irq(vcpu);
    1.11      if ( h_pending == NULL_VECTOR ) goto chk_irq_exit;
    1.12 @@ -541,7 +541,7 @@ int vmx_check_pending_irq(VCPU *vcpu)
    1.13          isr = vpsr.val & IA64_PSR_RI;
    1.14          if ( !vpsr.ic )
    1.15              panic("Interrupt when IC=0\n");
    1.16 -        vmx_reflect_interruption(0,isr,0, 12 ); // EXT IRQ
    1.17 +        vmx_reflect_interruption(0,isr,0, 12, regs ); // EXT IRQ
    1.18          injected = 1;
    1.19      }
    1.20      else if ( mask == IRQ_MASKED_BY_INSVC ) {
    1.21 @@ -601,13 +601,13 @@ static void generate_exirq(VCPU *vcpu)
    1.22  {
    1.23      IA64_PSR    vpsr;
    1.24      uint64_t    isr;
    1.25 -    
    1.26 +    REGS *regs=vcpu_regs(vcpu);
    1.27      vpsr.val = vmx_vcpu_get_psr(vcpu);
    1.28      update_vhpi(vcpu, NULL_VECTOR);
    1.29      isr = vpsr.val & IA64_PSR_RI;
    1.30      if ( !vpsr.ic )
    1.31          panic("Interrupt when IC=0\n");
    1.32 -    vmx_reflect_interruption(0,isr,0, 12 ); // EXT IRQ
    1.33 +    vmx_reflect_interruption(0,isr,0, 12, regs); // EXT IRQ
    1.34  }
    1.35  
    1.36  vhpi_detection(VCPU *vcpu)
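
Both vlsapic.c hunks make the same change: vmx_reflect_interruption() now takes the interrupted register context as an explicit fifth argument, so each caller fetches it up front with vcpu_regs(). A minimal sketch of the new calling pattern, with stub types standing in for the real Xen headers (reflect_ext_irq is a hypothetical wrapper, not code from the tree):

    typedef struct pt_regs { unsigned long cr_ipsr; /* ... */ } REGS;  /* stub */
    typedef struct vcpu VCPU;                                          /* stub */

    REGS *vcpu_regs(VCPU *vcpu);                /* real Xen helper, assumed */
    void vmx_reflect_interruption(unsigned long ifa, unsigned long isr,
                                  unsigned long iim, unsigned long vec,
                                  REGS *regs);  /* note the new 5th argument */

    /* Calling pattern after this changeset: fetch the context once,
     * then hand it to the reflection path. 12 is the EXT IRQ vector. */
    static void reflect_ext_irq(VCPU *vcpu, unsigned long isr)
    {
        REGS *regs = vcpu_regs(vcpu);
        vmx_reflect_interruption(0, isr, 0, 12, regs);
    }
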
     2.1 --- a/xen/arch/ia64/vmx/vmmu.c	Sat Nov 19 15:41:08 2005 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Tue Nov 22 12:17:18 2005 -0600
     2.3 @@ -438,20 +438,23 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
     2.4      thash_data_t    *tlb;
     2.5      ia64_rr vrr;
     2.6      u64     mfn;
     2.7 -    
     2.8 +
     2.9      if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
    2.10          gpip = gip;
    2.11      }
    2.12      else {
    2.13          vmx_vcpu_get_rr(vcpu, gip, &vrr.rrval);
    2.14 -        tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu), 
    2.15 +        tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu),
    2.16                  vrr.rid, gip, ISIDE_TLB );
    2.17 -        if ( tlb == NULL ) panic("No entry found in ITLB\n");
    2.18 +        if( tlb == NULL )
    2.19 +             tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu),
    2.20 +                vrr.rid, gip, DSIDE_TLB );
    2.21 +        if ( tlb == NULL ) panic("No entry found in ITLB and DTLB\n");
    2.22          gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
    2.23      }
    2.24      mfn = __gpfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
    2.25      if ( mfn == INVALID_MFN ) return 0;
    2.26 -    
    2.27 + 
    2.28      mpa = (gpip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT);
    2.29      *code = *(u64*)__va(mpa);
    2.30      return 1;
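
The fetch_code() hunk fixes a needless panic: a guest can map code through a data translation (for instance after itc.d), so an I-side vTLB miss now falls back to a D-side lookup before giving up, and the panic message covers both. A standalone sketch of the new lookup order (the lookup() helper is an illustrative stand-in for vtlb_lookup_ex(), not its real signature):

    #include <stddef.h>

    typedef struct thash_data thash_data_t;        /* opaque stub */
    enum tlb_side { ISIDE_TLB, DSIDE_TLB };

    thash_data_t *lookup(enum tlb_side side, unsigned long rid,
                         unsigned long gip);       /* assumed helper */

    static thash_data_t *find_code_entry(unsigned long rid, unsigned long gip)
    {
        thash_data_t *tlb = lookup(ISIDE_TLB, rid, gip);
        if (tlb == NULL)                  /* code may be mapped D-side only */
            tlb = lookup(DSIDE_TLB, rid, gip);
        return tlb;                       /* caller panics only if both miss */
    }
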
     3.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Sat Nov 19 15:41:08 2005 -0600
     3.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Tue Nov 22 12:17:18 2005 -0600
     3.3 @@ -47,6 +47,7 @@
     3.4  #include <asm/kregs.h>
     3.5  #include <asm/vmx.h>
     3.6  #include <asm/vmx_mm_def.h>
     3.7 +#include <asm/vmx_phy_mode.h>
     3.8  #include <xen/mm.h>
     3.9  /* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
    3.10  #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
    3.11 @@ -267,6 +268,12 @@ void leave_hypervisor_tail(struct pt_reg
    3.12  
    3.13  extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
    3.14  
    3.15 +static int vmx_handle_lds(REGS* regs)
    3.16 +{
    3.17 +    regs->cr_ipsr |=IA64_PSR_ED;
    3.18 +    return IA64_FAULT;
    3.19 +}
    3.20 +
    3.21  /* We came here because the H/W VHPT walker failed to find an entry */
    3.22  void vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
    3.23  {
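
The new vmx_handle_lds() helper is the "handle lds" part of this changeset. Instead of panicking on a faulting speculative load (ld.s), it sets PSR.ed (exception deferral) in the interrupted context; on return the load retires with its target register's NaT bit set, and the fault is deferred until the guest's chk.s recovery code runs. A self-contained sketch of the same trick (bit 43 is PSR.ed per the Itanium architecture; the IA64_FAULT value here is illustrative):

    #define IA64_PSR_ED  (1UL << 43)   /* PSR.ed: exception deferral */
    #define IA64_FAULT   1             /* illustrative value */

    typedef struct pt_regs { unsigned long cr_ipsr; } REGS;  /* stub */

    static int vmx_handle_lds(REGS *regs)
    {
        /* Defer the fault: the ld.s completes with NaT set instead
         * of re-raising the TLB miss. */
        regs->cr_ipsr |= IA64_PSR_ED;
        return IA64_FAULT;
    }

One wrinkle worth noting: vmx_hpw_miss() is declared void, so the value-returning "return vmx_handle_lds(regs);" statements in the hunks below rely on gcc accepting a value-returning return in a void function with only a warning.
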
    3.24 @@ -294,18 +301,19 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
    3.25          return;
    3.26      }
    3.27  */
    3.28 -
    3.29 -    if((vec==1)&&(!vpsr.it)){
    3.30 -        physical_itlb_miss(v, vadr);
    3.31 -        return;
    3.32 -    }
    3.33 -    if((vec==2)&&(!vpsr.dt)){
    3.34 -        if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
    3.35 -            emulate_io_inst(v,((vadr<<1)>>1),4);   //  UC
    3.36 -        }else{
    3.37 -            physical_dtlb_miss(v, vadr);
    3.38 +    if(is_physical_mode(v)&&(!(vadr<<1>>62))){
    3.39 +        if(vec==1){
    3.40 +            physical_itlb_miss(v, vadr);
    3.41 +            return;
    3.42          }
    3.43 -        return;
    3.44 +        if(vec==2){
    3.45 +            if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
    3.46 +                emulate_io_inst(v,((vadr<<1)>>1),4);   //  UC
    3.47 +            }else{
    3.48 +                physical_dtlb_miss(v, vadr);
    3.49 +            }
    3.50 +            return;
    3.51 +        }
    3.52      }
    3.53      vrr = vmx_vcpu_rr(v, vadr);
    3.54      if(vec == 1) type = ISIDE_TLB;
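
The rewritten block above also tightens when a miss is treated as a physical-mode access: rather than testing vpsr.it or vpsr.dt per vector, it asks is_physical_mode(v) once and requires the address to sit in the low region. The shift pair vadr<<1>>62 discards bit 63 and extracts bits 62:61, so the test passes only when those two bits are zero. As a sketch (stub declarations, hypothetical helper name):

    typedef struct vcpu VCPU;          /* stub */
    int is_physical_mode(VCPU *v);     /* real Xen predicate, assumed */

    /* True when the vCPU runs in physical mode and bits 62:61 of the
     * address are clear (bit 63 is dropped by the left shift). */
    static int phys_mode_low_region(VCPU *v, unsigned long vadr)
    {
        return is_physical_mode(v) && ((vadr << 1 >> 62) == 0);
    }
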
    3.55 @@ -336,7 +344,8 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
    3.56              } else{
    3.57                  if(misr.sp){
    3.58                      //TODO  lds emulation
    3.59 -                    panic("Don't support speculation load");
    3.60 +                    //panic("Don't support speculation load");
    3.61 +                    return vmx_handle_lds(regs);
    3.62                  }else{
    3.63                      nested_dtlb(v);
    3.64                      return IA64_FAULT;
    3.65 @@ -353,8 +362,9 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
    3.66                      return IA64_FAULT;
    3.67                  }else{
    3.68                      if(misr.sp){
    3.69 -                        //TODO  lds emulation
    3.70 -                        panic("Don't support speculation load");
    3.71 +                    //TODO  lds emulation
    3.72 +                    //panic("Don't support speculation load");
    3.73 +                    return vmx_handle_lds(regs);
    3.74                      }else{
    3.75                          nested_dtlb(v);
    3.76                          return IA64_FAULT;
    3.77 @@ -367,8 +377,9 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
    3.78                      return IA64_FAULT;
    3.79                  }else{
    3.80                      if(misr.sp){
    3.81 -                        //TODO  lds emulation
    3.82 -                        panic("Don't support speculation load");
    3.83 +                    //TODO  lds emulation
    3.84 +                    //panic("Don't support speculation load");
    3.85 +                    return vmx_handle_lds(regs);
    3.86                      }else{
    3.87                          nested_dtlb(v);
    3.88                          return IA64_FAULT;
     4.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Sat Nov 19 15:41:08 2005 -0600
     4.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Tue Nov 22 12:17:18 2005 -0600
     4.3 @@ -835,6 +835,7 @@ IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu,
     4.4  
     4.5  IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
     4.6  {
     4.7 +    return IA64_NO_FAULT;
     4.8      u64 r3,r2;
     4.9  #ifdef  CHECK_FAULT
    4.10      IA64_PSR vpsr;
    4.11 @@ -858,6 +859,7 @@ IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu
    4.12  
    4.13  IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
    4.14  {
    4.15 +    return IA64_NO_FAULT;
    4.16      u64 r3,r2;
    4.17  #ifdef  CHECK_FAULT
    4.18      IA64_PSR vpsr;
    4.19 @@ -1272,8 +1274,7 @@ IA64FAULT vmx_emul_mov_from_cr(VCPU *vcp
    4.20          case 74:return vmx_cr_get(cmcv);
    4.21          case 80:return vmx_cr_get(lrr0);
    4.22          case 81:return vmx_cr_get(lrr1);
    4.23 -        default:
    4.24 -            panic("Read reserved cr register");
    4.25 +        default: return IA64_NO_FAULT;
    4.26      }
    4.27  }
    4.28  
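
The vmx_virt.c changes defuse three panic-prone paths: guest writes to the debug breakpoint (dbr) and instruction breakpoint (ibr) registers are short-circuited into accepted no-ops by an early return (which leaves the remainder of each function as dead code), and reads of reserved control registers now return cleanly instead of killing the hypervisor. A hypothetical tidier form of the same stub, with IA64FAULT, VCPU, and INST64 assumed from the Xen headers:

    /* Accept and ignore guest dbr writes until real emulation exists. */
    IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
    {
        (void)vcpu;               /* TODO: emulate mov-to-dbr properly */
        (void)inst;
        return IA64_NO_FAULT;     /* report success to the guest */
    }
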
     5.1 --- a/xen/arch/ia64/vmx/vtlb.c	Sat Nov 19 15:41:08 2005 -0600
     5.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Tue Nov 22 12:17:18 2005 -0600
     5.3 @@ -391,6 +391,7 @@ void vtlb_insert(thash_cb_t *hcb, thash_
     5.4  #if 1
     5.5      vrr=vmx_vcpu_rr(current, va);
     5.6      if (vrr.ps != entry->ps) {
     5.7 +        machine_tlb_insert(hcb->vcpu, entry);
     5.8  	printk("not preferred ps with va: 0x%lx\n", va);
     5.9  	return;
    5.10      }
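
Finally, the vtlb.c hunk fixes a lost-mapping bug: when a translation's page size differs from the region register's preferred size, vtlb_insert() used to log a message and bail out, dropping the entry entirely. It now hands the entry to machine_tlb_insert() first, so the hardware TLB can still use the mapping even though the software hash skips it. The flow, annotated (a sketch of the diff's logic, types elided):

    /* Inside vtlb_insert(), after computing vrr for va: */
    if (vrr.ps != entry->ps) {
        machine_tlb_insert(hcb->vcpu, entry);  /* keep the mapping usable */
        printk("not preferred ps with va: 0x%lx\n", va);
        return;                                /* skip the s/w hash insert */
    }
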