ia64/xen-unstable

changeset 13556:baa9b76ea3e1

[SVM] Remove ASID logic. Errata prevent this feature from being used
reliably in current SVM processor implementations.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Mon Jan 22 14:13:26 2007 +0000 (2007-01-22)
parents 1c0ca58e8c16
children 207523704fb1
files xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/svm/x86_32/exits.S xen/arch/x86/hvm/svm/x86_64/exits.S xen/include/asm-x86/hvm/svm/svm.h xen/include/asm-x86/hvm/svm/vmcb.h
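In effect, this changeset replaces the per-core ASID pool with the simplest possible TLB policy: every guest shares a single ASID and the entire TLB is flushed on each VMRUN. A minimal sketch in C of the resulting VMCB setup, assuming a vmcb_struct reduced to the two fields the patch touches (the helper name vmcb_tlb_setup is invented for illustration and is not part of the patch):

    #include <stdint.h>

    /* Reduced stand-in for Xen's vmcb_struct; only the two fields this
     * changeset touches are shown here. */
    struct vmcb_struct {
        uint32_t guest_asid;   /* ASID tagging the guest's TLB entries */
        uint8_t  tlb_control;  /* 1 => flush the entire TLB on VMRUN */
    };

    /* Hypothetical helper showing the post-changeset policy: ASID 0 is
     * reserved for the host, all guests share ASID 1, and tlb_control
     * forces a full TLB flush on every VMRUN, so no per-core ASID pool,
     * recycling, or cross-core retirement is needed. */
    static void vmcb_tlb_setup(struct vmcb_struct *vmcb)
    {
        vmcb->tlb_control = 1;
        vmcb->guest_asid  = 1;
    }

This trades TLB retention for simplicity: every VMRUN now pays a full flush, but the pool bookkeeping in svm.c, the ARCH_SVM_VMCB_ASSIGN_ASID flag, and the asid_core field all go away, as the diff below shows.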
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/svm.c	Mon Jan 22 13:38:04 2007 +0000
     1.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Mon Jan 22 14:13:26 2007 +0000
     1.3 @@ -74,108 +74,6 @@ static void *root_vmcb[NR_CPUS] __read_m
     1.4  /* physical address of above for host VMSAVE/VMLOAD */
     1.5  u64 root_vmcb_pa[NR_CPUS] __read_mostly;
     1.6  
     1.7 -
     1.8 -/* ASID API */
     1.9 -enum {
    1.10 -    ASID_AVAILABLE = 0,
    1.11 -    ASID_INUSE,
    1.12 -    ASID_RETIRED
    1.13 -};
    1.14 -#define   INITIAL_ASID      0
    1.15 -#define   ASID_MAX          64
    1.16 - 
    1.17 -struct asid_pool {
    1.18 -    spinlock_t asid_lock;
    1.19 -    u32 asid[ASID_MAX];
    1.20 -};
    1.21 -
    1.22 -static DEFINE_PER_CPU(struct asid_pool, asid_pool);
    1.23 -
    1.24 -
    1.25 -/*
    1.26 - * Initializes the POOL of ASID used by the guests per core.
    1.27 - */
    1.28 -void asidpool_init(int core)
    1.29 -{
    1.30 -    int i;
    1.31 -
    1.32 -    spin_lock_init(&per_cpu(asid_pool,core).asid_lock);
    1.33 -
    1.34 -    /* Host ASID is always in use */
    1.35 -    per_cpu(asid_pool,core).asid[INITIAL_ASID] = ASID_INUSE;
    1.36 -    for ( i = 1; i < ASID_MAX; i++ )
    1.37 -        per_cpu(asid_pool,core).asid[i] = ASID_AVAILABLE;
    1.38 -}
    1.39 -
    1.40 -
    1.41 -/* internal function to get the next available ASID */
    1.42 -static int asidpool_fetch_next(struct vmcb_struct *vmcb, int core)
    1.43 -{
    1.44 -    int i;  
    1.45 -    for ( i = 1; i < ASID_MAX; i++ )
    1.46 -    {
    1.47 -        if ( per_cpu(asid_pool,core).asid[i] == ASID_AVAILABLE )
    1.48 -        {
    1.49 -            vmcb->guest_asid = i;
    1.50 -            per_cpu(asid_pool,core).asid[i] = ASID_INUSE;
    1.51 -            return i;
    1.52 -        }
    1.53 -    }
    1.54 -    return -1;
    1.55 -}
    1.56 -
    1.57 -
    1.58 -/*
    1.59 - * This functions assigns on the passed VMCB, the next
    1.60 - * available ASID number. If none are available, the
    1.61 - * TLB flush flag is set, and all retireds ASID
    1.62 - * are made available. 
    1.63 - *
    1.64 - *  Returns: 1 -- sucess;
    1.65 - *           0 -- failure -- no more ASID numbers 
    1.66 - *                           available.
    1.67 - */
    1.68 -int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
    1.69 -                          int oldcore, int newcore )
    1.70 -{
    1.71 -    int i;
    1.72 -    int res = 1;
    1.73 -    static unsigned long cnt=0;
    1.74 -
    1.75 -    spin_lock(&per_cpu(asid_pool,oldcore).asid_lock);
    1.76 -    if( retire_current && vmcb->guest_asid ) {
    1.77 -        per_cpu(asid_pool,oldcore).asid[vmcb->guest_asid & (ASID_MAX-1)] = 
    1.78 -            ASID_RETIRED;
    1.79 -    }
    1.80 -    spin_unlock(&per_cpu(asid_pool,oldcore).asid_lock);
    1.81 -    spin_lock(&per_cpu(asid_pool,newcore).asid_lock);
    1.82 -    if( asidpool_fetch_next( vmcb, newcore ) < 0 ) {
    1.83 -        if (svm_dbg_on)
    1.84 -            printk( "SVM: tlb(%ld)\n", cnt++ );
    1.85 -        /* FLUSH the TLB and all retired slots are made available */ 
    1.86 -        vmcb->tlb_control = 1;
    1.87 -        for( i = 1; i < ASID_MAX; i++ ) {
    1.88 -            if( per_cpu(asid_pool,newcore).asid[i] == ASID_RETIRED ) {
    1.89 -                per_cpu(asid_pool,newcore).asid[i] = ASID_AVAILABLE;
    1.90 -            }
    1.91 -        }
    1.92 -        /* Get the First slot available */ 
    1.93 -        res = asidpool_fetch_next( vmcb, newcore ) > 0;
    1.94 -    }
    1.95 -    spin_unlock(&per_cpu(asid_pool,newcore).asid_lock);
    1.96 -    return res;
    1.97 -}
    1.98 -
    1.99 -void asidpool_retire( struct vmcb_struct *vmcb, int core )
   1.100 -{
   1.101 -    spin_lock(&per_cpu(asid_pool,core).asid_lock);
   1.102 -    if( vmcb->guest_asid ) {
   1.103 -        per_cpu(asid_pool,core).asid[vmcb->guest_asid & (ASID_MAX-1)] = 
   1.104 -            ASID_RETIRED;
   1.105 -    }
   1.106 -    spin_unlock(&per_cpu(asid_pool,core).asid_lock);
   1.107 -}
   1.108 -
   1.109  static inline void svm_inject_exception(struct vcpu *v, int trap, 
   1.110                                          int ev, int error_code)
   1.111  {
   1.112 @@ -851,7 +749,6 @@ int start_svm(void)
   1.113      rdmsr(MSR_EFER, eax, edx);
   1.114      eax |= EFER_SVME;
   1.115      wrmsr(MSR_EFER, eax, edx);
   1.116 -    asidpool_init( cpu );    
   1.117      printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
   1.118  
   1.119      /* Initialize the HSA for this core */
   1.120 @@ -920,28 +817,11 @@ void arch_svm_do_resume(struct vcpu *v)
   1.121  
   1.122  static int svm_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 
   1.123  {
   1.124 -    struct vcpu *v = current;
   1.125 -    unsigned long eip;
   1.126 -    int result;
   1.127 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   1.128 -
   1.129 -    ASSERT(vmcb);
   1.130 -
   1.131 -//#if HVM_DEBUG
   1.132 -    eip = vmcb->rip;
   1.133      HVM_DBG_LOG(DBG_LEVEL_VMMU, 
   1.134                  "svm_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
   1.135 -                va, eip, (unsigned long)regs->error_code);
   1.136 -//#endif
   1.137 -
   1.138 -    result = shadow_fault(va, regs); 
   1.139 -
   1.140 -    if( result ) {
   1.141 -        /* Let's make sure that the Guest TLB is flushed */
   1.142 -        set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
   1.143 -    }
   1.144 -
   1.145 -    return result;
   1.146 +                va, (unsigned long)current->arch.hvm_svm.vmcb->rip,
   1.147 +                (unsigned long)regs->error_code);
   1.148 +    return shadow_fault(va, regs); 
   1.149  }
   1.150  
   1.151  
   1.152 @@ -1578,8 +1458,6 @@ static int svm_set_cr0(unsigned long val
   1.153  
   1.154          HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx", 
   1.155                      (unsigned long) (mfn << PAGE_SHIFT));
   1.156 -
   1.157 -        set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
   1.158      }
   1.159  
   1.160      if ( !((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled )
   1.161 @@ -1600,7 +1478,6 @@ static int svm_set_cr0(unsigned long val
   1.162              return 0;
   1.163          }
   1.164          shadow_update_paging_modes(v);
   1.165 -        set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
   1.166      }
   1.167      else if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PE )
   1.168      {
   1.169 @@ -1611,7 +1488,6 @@ static int svm_set_cr0(unsigned long val
   1.170          }
   1.171          /* we should take care of this kind of situation */
   1.172          shadow_update_paging_modes(v);
   1.173 -        set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
   1.174      }
   1.175  
   1.176      return 1;
   1.177 @@ -1702,7 +1578,6 @@ static int mov_to_cr(int gpreg, int cr, 
   1.178              v->arch.hvm_svm.cpu_cr3 = value;
   1.179              break;
   1.180          }
   1.181 -        set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
   1.182  
   1.183          /* We make a new one if the shadow does not exist. */
   1.184          if (value == v->arch.hvm_svm.cpu_cr3) 
   1.185 @@ -1795,10 +1670,7 @@ static int mov_to_cr(int gpreg, int cr, 
   1.186           * all TLB entries except global entries.
   1.187           */
   1.188          if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
   1.189 -        {
   1.190 -            set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
   1.191              shadow_update_paging_modes(v);
   1.192 -        }
   1.193          break;
   1.194  
   1.195      case 8:
   1.196 @@ -2140,8 +2012,6 @@ void svm_handle_invlpg(const short invlp
   1.197          __update_guest_eip (vmcb, inst_len);
   1.198      }
   1.199  
   1.200 -    /* Overkill, we may not this */
   1.201 -    set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
   1.202      shadow_invlpg(v, g_vaddr);
   1.203  }
   1.204  
   1.205 @@ -2892,31 +2762,6 @@ asmlinkage void svm_load_cr2(void)
   1.206      local_irq_disable();
   1.207      asm volatile("mov %0,%%cr2": :"r" (v->arch.hvm_svm.cpu_cr2));
   1.208  }
   1.209 -
   1.210 -asmlinkage void svm_asid(void)
   1.211 -{
   1.212 -    struct vcpu *v = current;
   1.213 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   1.214 -
   1.215 -    /*
   1.216 -     * if need to assign new asid, or if switching cores,
   1.217 -     * retire asid for the old core, and assign a new asid to the current core.
   1.218 -     */
   1.219 -    if ( test_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags ) ||
   1.220 -         ( v->arch.hvm_svm.asid_core != v->arch.hvm_svm.launch_core )) {
   1.221 -        /* recycle asid */
   1.222 -        if ( !asidpool_assign_next(vmcb, 1,
   1.223 -                                   v->arch.hvm_svm.asid_core,
   1.224 -                                   v->arch.hvm_svm.launch_core) )
   1.225 -        {
   1.226 -            /* If we get here, we have a major problem */
   1.227 -            domain_crash_synchronous();
   1.228 -        }
   1.229 -
   1.230 -        v->arch.hvm_svm.asid_core = v->arch.hvm_svm.launch_core;
   1.231 -        clear_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags );
   1.232 -    }
   1.233 -}
   1.234    
   1.235  /*
   1.236   * Local variables:
     2.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Mon Jan 22 13:38:04 2007 +0000
     2.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Mon Jan 22 14:13:26 2007 +0000
     2.3 @@ -38,8 +38,6 @@
     2.4  #include <xen/keyhandler.h>
     2.5  
     2.6  extern int svm_dbg_on;
     2.7 -extern int asidpool_assign_next(
     2.8 -    struct vmcb_struct *vmcb, int retire_current, int oldcore, int newcore);
     2.9  
    2.10  #define GUEST_SEGMENT_LIMIT 0xffffffff
    2.11  
    2.12 @@ -92,8 +90,9 @@ static int construct_vmcb(struct vcpu *v
    2.13      struct vmcb_struct *vmcb = arch_svm->vmcb;
    2.14      svm_segment_attributes_t attrib;
    2.15  
    2.16 -    /* Always flush the TLB on VMRUN. */
    2.17 +    /* Always flush the TLB on VMRUN. All guests share a single ASID (1). */
    2.18      vmcb->tlb_control = 1;
    2.19 +    vmcb->guest_asid  = 1;
    2.20  
    2.21      /* SVM intercepts. */
    2.22      vmcb->general1_intercepts = 
    2.23 @@ -240,10 +239,7 @@ void svm_destroy_vmcb(struct vcpu *v)
    2.24      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
    2.25  
    2.26      if ( arch_svm->vmcb != NULL )
    2.27 -    {
    2.28 -        asidpool_retire(arch_svm->vmcb, arch_svm->asid_core);
    2.29          free_vmcb(arch_svm->vmcb);
    2.30 -    }
    2.31  
    2.32      if ( arch_svm->iopm != NULL )
    2.33      {
    2.34 @@ -264,16 +260,10 @@ void svm_destroy_vmcb(struct vcpu *v)
    2.35  
    2.36  void svm_do_launch(struct vcpu *v)
    2.37  {
    2.38 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    2.39 -    int core = smp_processor_id();
    2.40 -
    2.41      hvm_stts(v);
    2.42  
    2.43      /* current core is the one we intend to perform the VMRUN on */
    2.44 -    v->arch.hvm_svm.launch_core = v->arch.hvm_svm.asid_core = core;
    2.45 -    clear_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
    2.46 -    if ( !asidpool_assign_next(vmcb, 0, core, core) )
    2.47 -        BUG();
    2.48 +    v->arch.hvm_svm.launch_core = smp_processor_id();
    2.49  
    2.50      v->arch.schedule_tail = arch_svm_do_resume;
    2.51  }
     3.1 --- a/xen/arch/x86/hvm/svm/x86_32/exits.S	Mon Jan 22 13:38:04 2007 +0000
     3.2 +++ b/xen/arch/x86/hvm/svm/x86_32/exits.S	Mon Jan 22 14:13:26 2007 +0000
     3.3 @@ -150,7 +150,6 @@ svm_test_all_events:
     3.4          jnz  svm_process_softirqs
     3.5  svm_restore_all_guest:
     3.6          call svm_intr_assist
     3.7 -        call svm_asid
     3.8          call svm_load_cr2
     3.9          /* 
    3.10           * Check if we are going back to AMD-V based VM
     4.1 --- a/xen/arch/x86/hvm/svm/x86_64/exits.S	Mon Jan 22 13:38:04 2007 +0000
     4.2 +++ b/xen/arch/x86/hvm/svm/x86_64/exits.S	Mon Jan 22 14:13:26 2007 +0000
     4.3 @@ -163,7 +163,6 @@ svm_test_all_events:
     4.4          jnz   svm_process_softirqs
     4.5  svm_restore_all_guest:
     4.6          call svm_intr_assist
     4.7 -        call svm_asid
     4.8          call svm_load_cr2
     4.9          /*
    4.10           * Check if we are going back to AMD-V based VM
     5.1 --- a/xen/include/asm-x86/hvm/svm/svm.h	Mon Jan 22 13:38:04 2007 +0000
     5.2 +++ b/xen/include/asm-x86/hvm/svm/svm.h	Mon Jan 22 14:13:26 2007 +0000
     5.3 @@ -28,7 +28,6 @@
     5.4  #include <asm/hvm/svm/vmcb.h>
     5.5  #include <asm/i387.h>
     5.6  
     5.7 -extern void asidpool_retire(struct vmcb_struct *vmcb, int core);
     5.8  extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
     5.9  extern void svm_do_launch(struct vcpu *v);
    5.10  extern void arch_svm_do_resume(struct vcpu *v);
     6.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Mon Jan 22 13:38:04 2007 +0000
     6.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Mon Jan 22 14:13:26 2007 +0000
     6.3 @@ -457,7 +457,6 @@ struct arch_svm_struct {
     6.4      u64                 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */
     6.5      int                 saved_irq_vector;
     6.6      u32                 launch_core;
     6.7 -    u32                 asid_core;
     6.8      
     6.9      unsigned long       flags;      /* VMCB flags */
    6.10      unsigned long       cpu_shadow_cr0; /* Guest value for CR0 */
    6.11 @@ -477,17 +476,6 @@ void svm_destroy_vmcb(struct vcpu *v);
    6.12  
    6.13  void setup_vmcb_dump(void);
    6.14  
    6.15 -#define VMCB_USE_HOST_ENV       1
    6.16 -#define VMCB_USE_SEPARATE_ENV   0
    6.17 -
    6.18 -enum {
    6.19 -    ARCH_SVM_VMCB_LOADED = 0,
    6.20 -    ARCH_SVM_VMCB_ASSIGN_ASID
    6.21 -};
    6.22 -
    6.23 -#define VMCB_EFLAGS_RESERVED_0          0xffc08028 /* bitmap for 0 */
    6.24 -#define VMCB_EFLAGS_RESERVED_1          0x00000002 /* bitmap for 1 */
    6.25 -
    6.26  /* These bits in the CR4 are owned by the host */
    6.27  #if CONFIG_PAGING_LEVELS >= 3
    6.28  #define SVM_CR4_HOST_MASK (X86_CR4_PAE)