ia64/xen-unstable

changeset 10084:1d2e4a873003

SVM patch to add a per-core host save area for the hypervisor and another
for the microcode. The microcode area is not guaranteed to be
compatible with the VMCB layout and therefore requires its own
"scratch pad". Consolidate the per-core areas into a single structure.

Signed-off-by: Tom Woller <thomas.woller@amd.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu May 18 00:03:13 2006 +0100 (2006-05-18)
parents 0fbec6836905
children 56444cd2805c
files xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/include/asm-x86/hvm/svm/svm.h
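
For readers who want the shape of the change before the full diff: the patch folds the per-core host save area, the new scratch area, and the ASID pool into one svm_globals[] array, and points the guest's host_save_pa at the scratch area rather than at the HSA registered with the microcode. Below is a minimal sketch of that structure and the new set_hsa_to_guest(); it is not standalone-compilable (u64, NR_CPUS, smp_processor_id(), struct asid_pool and struct arch_svm_struct are as defined elsewhere in the Xen tree), just a summary of the diff that follows.

    /* Per-core SVM globals, as introduced in xen/include/asm-x86/hvm/svm/svm.h */
    struct svm_percore_globals {
        void *hsa;              /* HSA handed to the microcode via MSR_K8_VM_HSAVE_PA */
        u64   hsa_pa;           /* its machine (physical) address */
        void *scratch_hsa;      /* separate per-core scratch area */
        u64   scratch_hsa_pa;
        struct asid_pool ASIDpool;  /* per-core ASID bookkeeping, now in the same struct */
    };

    struct svm_percore_globals svm_globals[NR_CPUS];

    /* host_save_pa now points at the per-core scratch area rather than the
     * microcode's HSA, because the microcode area is not guaranteed to match
     * the VMCB layout. */
    void set_hsa_to_guest(struct arch_svm_struct *arch_svm)
    {
        arch_svm->host_save_pa = svm_globals[smp_processor_id()].scratch_hsa_pa;
    }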
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu May 18 00:01:59 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu May 18 00:03:13 2006 +0100
     1.3 @@ -82,9 +82,11 @@ void svm_dump_regs(const char *from, str
     1.4  
     1.5  static void svm_relinquish_guest_resources(struct domain *d);
     1.6  
     1.7 -/* Host save area */
     1.8 -struct host_save_area *host_save_area[ NR_CPUS ] = {0};
     1.9 -static struct asid_pool ASIDpool[NR_CPUS];
    1.10 +
    1.11 +extern void set_hsa_to_guest( struct arch_svm_struct *arch_svm );
    1.12 +
     1.13 +/* Host save area and ASID global data */
    1.14 +struct svm_percore_globals svm_globals[NR_CPUS];
    1.15  
    1.16  /*
    1.17   * Initializes the POOL of ASID used by the guests per core.
    1.18 @@ -92,15 +94,15 @@ static struct asid_pool ASIDpool[NR_CPUS
    1.19  void asidpool_init( int core )
    1.20  {
    1.21      int i;
    1.22 -    ASIDpool[core].asid_lock = SPIN_LOCK_UNLOCKED;
    1.23 -    spin_lock(&ASIDpool[core].asid_lock);
    1.24 +    svm_globals[core].ASIDpool.asid_lock = SPIN_LOCK_UNLOCKED;
    1.25 +    spin_lock(&svm_globals[core].ASIDpool.asid_lock);
    1.26      /* Host ASID is always in use */
    1.27 -    ASIDpool[core].asid[INITIAL_ASID] = ASID_INUSE;
    1.28 +    svm_globals[core].ASIDpool.asid[INITIAL_ASID] = ASID_INUSE;
    1.29      for( i=1; i<ASID_MAX; i++ )
    1.30      {
    1.31 -       ASIDpool[core].asid[i] = ASID_AVAILABLE;
    1.32 +       svm_globals[core].ASIDpool.asid[i] = ASID_AVAILABLE;
    1.33      }
    1.34 -    spin_unlock(&ASIDpool[core].asid_lock);
    1.35 +    spin_unlock(&svm_globals[core].ASIDpool.asid_lock);
    1.36  }
    1.37  
    1.38  
    1.39 @@ -110,10 +112,10 @@ static int asidpool_fetch_next( struct v
    1.40      int i;   
    1.41      for( i = 1; i < ASID_MAX; i++ )
    1.42      {
    1.43 -        if( ASIDpool[core].asid[i] == ASID_AVAILABLE )
    1.44 +        if( svm_globals[core].ASIDpool.asid[i] == ASID_AVAILABLE )
    1.45          {
    1.46              vmcb->guest_asid = i;
    1.47 -            ASIDpool[core].asid[i] = ASID_INUSE;
    1.48 +            svm_globals[core].ASIDpool.asid[i] = ASID_INUSE;
    1.49              return i;
    1.50          }
    1.51      }
    1.52 @@ -138,36 +140,36 @@ int asidpool_assign_next( struct vmcb_st
    1.53      int res = 1;
    1.54      static unsigned long cnt=0;
    1.55  
    1.56 -    spin_lock(&ASIDpool[oldcore].asid_lock);
    1.57 +    spin_lock(&svm_globals[oldcore].ASIDpool.asid_lock);
    1.58      if( retire_current && vmcb->guest_asid ) {
    1.59 -       ASIDpool[oldcore].asid[ vmcb->guest_asid & (ASID_MAX-1) ] = ASID_RETIRED;
    1.60 +       svm_globals[oldcore].ASIDpool.asid[ vmcb->guest_asid & (ASID_MAX-1) ] = ASID_RETIRED;
    1.61      }
    1.62 -    spin_unlock(&ASIDpool[oldcore].asid_lock);
    1.63 -    spin_lock(&ASIDpool[newcore].asid_lock);
    1.64 +    spin_unlock(&svm_globals[oldcore].ASIDpool.asid_lock);
    1.65 +    spin_lock(&svm_globals[newcore].ASIDpool.asid_lock);
    1.66      if( asidpool_fetch_next( vmcb, newcore ) < 0 ) {
    1.67          if (svm_dbg_on)
    1.68              printk( "SVM: tlb(%ld)\n", cnt++ );
    1.69          /* FLUSH the TLB and all retired slots are made available */ 
    1.70          vmcb->tlb_control = 1;
    1.71          for( i = 1; i < ASID_MAX; i++ ) {
    1.72 -            if( ASIDpool[newcore].asid[i] == ASID_RETIRED ) {
    1.73 -                ASIDpool[newcore].asid[i] = ASID_AVAILABLE;
    1.74 +            if( svm_globals[newcore].ASIDpool.asid[i] == ASID_RETIRED ) {
    1.75 +                svm_globals[newcore].ASIDpool.asid[i] = ASID_AVAILABLE;
    1.76              }
    1.77          }
    1.78          /* Get the First slot available */ 
    1.79          res = asidpool_fetch_next( vmcb, newcore ) > 0;
    1.80      }
    1.81 -    spin_unlock(&ASIDpool[newcore].asid_lock);
    1.82 +    spin_unlock(&svm_globals[newcore].ASIDpool.asid_lock);
    1.83      return res;
    1.84  }
    1.85  
    1.86  void asidpool_retire( struct vmcb_struct *vmcb, int core )
    1.87  {
    1.88 -   spin_lock(&ASIDpool[core].asid_lock);
    1.89 +   spin_lock(&svm_globals[core].ASIDpool.asid_lock);
    1.90     if( vmcb->guest_asid ) {
    1.91 -       ASIDpool[core].asid[ vmcb->guest_asid & (ASID_MAX-1) ] = ASID_RETIRED;
    1.92 +       svm_globals[core].ASIDpool.asid[ vmcb->guest_asid & (ASID_MAX-1) ] = ASID_RETIRED;
    1.93     }
    1.94 -   spin_unlock(&ASIDpool[core].asid_lock);
    1.95 +   spin_unlock(&svm_globals[core].ASIDpool.asid_lock);
    1.96  }
    1.97  
    1.98  static inline void svm_inject_exception(struct vcpu *v, int trap, int ev, int error_code)
    1.99 @@ -198,8 +200,13 @@ void stop_svm(void)
   1.100      wrmsr(MSR_EFER, eax, edx);
   1.101   
   1.102      /* release the HSA */
   1.103 -    free_host_save_area( host_save_area[ cpu ] );
   1.104 -    host_save_area[ cpu ] = NULL;
   1.105 +    free_host_save_area( svm_globals[cpu].hsa );
   1.106 +    free_host_save_area( svm_globals[cpu].scratch_hsa );
   1.107 +    svm_globals[cpu].hsa    = NULL;
   1.108 +    svm_globals[cpu].hsa_pa = 0;
   1.109 +    svm_globals[cpu].scratch_hsa    = NULL;
   1.110 +    svm_globals[cpu].scratch_hsa_pa = 0;
   1.111 +    wrmsr(MSR_K8_VM_HSAVE_PA, 0, 0 );
   1.112  
   1.113      printk("AMD SVM Extension is disabled.\n");
   1.114  }
   1.115 @@ -455,16 +462,20 @@ int start_svm(void)
   1.116      rdmsr(MSR_EFER, eax, edx);
   1.117      eax |= EFER_SVME;
   1.118      wrmsr(MSR_EFER, eax, edx);
   1.119 -    asidpool_init(smp_processor_id());    
   1.120 +    asidpool_init( cpu );    
   1.121      printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
   1.122  
   1.123      /* Initialize the HSA for this core */
   1.124 -    host_save_area[ cpu ] = alloc_host_save_area();
   1.125 -    phys_hsa = (u64) virt_to_maddr( host_save_area[ cpu ] ); 
   1.126 +    svm_globals[cpu].hsa = alloc_host_save_area();
   1.127 +    phys_hsa = (u64) virt_to_maddr( svm_globals[cpu].hsa ); 
   1.128      phys_hsa_lo = (u32) phys_hsa;
   1.129      phys_hsa_hi = (u32) (phys_hsa >> 32);    
   1.130      wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
   1.131 -    
   1.132 +    svm_globals[cpu].hsa_pa = phys_hsa;
   1.133 +  
   1.134 +    svm_globals[cpu].scratch_hsa    = alloc_host_save_area();
   1.135 +    svm_globals[cpu].scratch_hsa_pa = (u64)virt_to_maddr( svm_globals[cpu].scratch_hsa );
   1.136 +
   1.137      /* Setup HVM interfaces */
   1.138      hvm_funcs.disable = stop_svm;
   1.139  
   1.140 @@ -888,8 +899,7 @@ static void svm_do_general_protection_fa
   1.141              (unsigned long)regs->eax, (unsigned long)regs->ebx,
   1.142              (unsigned long)regs->ecx, (unsigned long)regs->edx,
   1.143              (unsigned long)regs->esi, (unsigned long)regs->edi);
   1.144 -
   1.145 -    
   1.146 +      
   1.147      /* Reflect it back into the guest */
   1.148      svm_inject_exception(v, TRAP_gp_fault, 1, error_code);
   1.149  }
   1.150 @@ -2903,6 +2913,9 @@ asmlinkage void svm_asid(void)
   1.151          v->arch.hvm_svm.asid_core = v->arch.hvm_svm.launch_core;
   1.152          clear_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags );
   1.153      }
   1.154 +
   1.155 +    /* make sure the HSA is set for the current core */
   1.156 +    set_hsa_to_guest( &v->arch.hvm_svm );
   1.157  }
   1.158  
   1.159  /*
     2.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Thu May 18 00:01:59 2006 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Thu May 18 00:03:13 2006 +0100
     2.3 @@ -36,7 +36,7 @@
     2.4  #include <xen/kernel.h>
     2.5  #include <xen/domain_page.h>
     2.6  
     2.7 -extern struct host_save_area *host_save_area[];
     2.8 +extern struct svm_percore_globals svm_globals[];
     2.9  extern int svm_dbg_on;
    2.10  extern int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
    2.11                                    int oldcore, int newcore);
    2.12 @@ -430,8 +430,7 @@ void svm_do_launch(struct vcpu *v)
    2.13  
    2.14  void set_hsa_to_guest( struct arch_svm_struct *arch_svm ) 
    2.15  {
    2.16 -    arch_svm->host_save_area = host_save_area[ smp_processor_id() ];
    2.17 -    arch_svm->host_save_pa   = (u64)virt_to_maddr( arch_svm->host_save_area );
    2.18 +  arch_svm->host_save_pa = svm_globals[ smp_processor_id() ].scratch_hsa_pa;
    2.19  }
    2.20  
    2.21  /* 
    2.22 @@ -445,9 +444,6 @@ void svm_do_resume(struct vcpu *v)
    2.23  
    2.24      svm_stts(v);
    2.25  
    2.26 -    /* make sure the HSA is set for the current core */
    2.27 -    set_hsa_to_guest( &v->arch.hvm_svm );
    2.28 -    
    2.29      /* pick up the elapsed PIT ticks and re-enable pit_timer */
    2.30      if ( time_info->first_injected ) {
    2.31          if ( v->domain->arch.hvm_domain.guest_time ) {
     3.1 --- a/xen/include/asm-x86/hvm/svm/svm.h	Thu May 18 00:01:59 2006 +0100
     3.2 +++ b/xen/include/asm-x86/hvm/svm/svm.h	Thu May 18 00:03:13 2006 +0100
     3.3 @@ -70,6 +70,14 @@ struct asid_pool {
     3.4      u32 asid[ASID_MAX];
     3.5  };
     3.6  
     3.7 +struct svm_percore_globals {
     3.8 +  void *hsa;
     3.9 +  u64  hsa_pa;
    3.10 +  void *scratch_hsa;
    3.11 +  u64  scratch_hsa_pa;
    3.12 +  struct asid_pool ASIDpool;
    3.13 +};
    3.14 +
    3.15  #define SVM_REG_EAX (0) 
    3.16  #define SVM_REG_ECX (1) 
    3.17  #define SVM_REG_EDX (2)