ia64/xen-unstable

changeset 9929:e1a47a269600

SVM patch to clean up the host save area allocation and deallocation,
including removing memory leaks concerning these areas. Also fixes a
problem where the HSA MSR was not initialized properly for cores > 0.

Signed-off-by: Tom Woller <thomas.woller@amd.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu May 04 11:14:45 2006 +0100 (2006-05-04)
parents bbce4d115189
children 62c8e97d56cf
files xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu May 04 10:25:27 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu May 04 11:14:45 2006 +0100
     1.3 @@ -79,6 +79,8 @@ void svm_dump_regs(const char *from, str
     1.4  
     1.5  static void svm_relinquish_guest_resources(struct domain *d);
     1.6  
     1.7 +/* Host save area */
     1.8 +struct host_save_area *host_save_area[ NR_CPUS ] = {0};
     1.9  static struct asid_pool ASIDpool[NR_CPUS];
    1.10  
    1.11  /*
    1.12 @@ -185,11 +187,16 @@ static inline void svm_inject_exception(
    1.13  void stop_svm(void)
    1.14  {
    1.15      u32 eax, edx;    
    1.16 +    int cpu = smp_processor_id();
    1.17  
    1.18      /* We turn off the EFER_SVME bit. */
    1.19      rdmsr(MSR_EFER, eax, edx);
    1.20      eax &= ~EFER_SVME;
    1.21      wrmsr(MSR_EFER, eax, edx);
    1.22 + 
    1.23 +    /* release the HSA */
    1.24 +    free_host_save_area( host_save_area[ cpu ] );
    1.25 +    host_save_area[ cpu ] = NULL;
    1.26  
    1.27      printk("AMD SVM Extension is disabled.\n");
    1.28  }
    1.29 @@ -431,8 +438,11 @@ unsigned long svm_get_ctrl_reg(struct vc
    1.30  int start_svm(void)
    1.31  {
    1.32      u32 eax, ecx, edx;
    1.33 -    
    1.34 -    /* Xen does not fill x86_capability words except 0. */
    1.35 +    u32 phys_hsa_lo, phys_hsa_hi;   
    1.36 +    u64 phys_hsa;
    1.37 +    int cpu = smp_processor_id();
    1.38 + 
    1.39 +   /* Xen does not fill x86_capability words except 0. */
    1.40      ecx = cpuid_ecx(0x80000001);
    1.41      boot_cpu_data.x86_capability[5] = ecx;
    1.42      
    1.43 @@ -443,7 +453,14 @@ int start_svm(void)
    1.44      eax |= EFER_SVME;
    1.45      wrmsr(MSR_EFER, eax, edx);
    1.46      asidpool_init(smp_processor_id());    
    1.47 -    printk("AMD SVM Extension is enabled for cpu %d.\n", smp_processor_id());
    1.48 +    printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
    1.49 +
    1.50 +    /* Initialize the HSA for this core */
    1.51 +    host_save_area[ cpu ] = alloc_host_save_area();
    1.52 +    phys_hsa = (u64) virt_to_maddr( host_save_area[ cpu ] ); 
    1.53 +    phys_hsa_lo = (u32) phys_hsa;
    1.54 +    phys_hsa_hi = (u32) (phys_hsa >> 32);    
    1.55 +    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
    1.56      
    1.57      /* Setup HVM interfaces */
    1.58      hvm_funcs.disable = stop_svm;
    1.59 @@ -546,20 +563,6 @@ void save_svm_cpu_user_regs(struct vcpu 
    1.60      ctxt->ds = vmcb->ds.sel;
    1.61  }
    1.62  
    1.63 -#if defined (__x86_64__)
    1.64 -void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v )
    1.65 -{
    1.66 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    1.67 -
    1.68 -    regs->rip    = vmcb->rip;
    1.69 -    regs->rsp    = vmcb->rsp;
    1.70 -    regs->rflags = vmcb->rflags;
    1.71 -    regs->cs     = vmcb->cs.sel;
    1.72 -    regs->ds     = vmcb->ds.sel;
    1.73 -    regs->es     = vmcb->es.sel;
    1.74 -    regs->ss     = vmcb->ss.sel;
    1.75 -}
    1.76 -#elif defined (__i386__)
    1.77  void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v)
    1.78  {
    1.79      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    1.80 @@ -571,11 +574,11 @@ void svm_store_cpu_user_regs(struct cpu_
    1.81      regs->ds     = vmcb->ds.sel;
    1.82      regs->es     = vmcb->es.sel;
    1.83      regs->ss     = vmcb->ss.sel;
    1.84 +    regs->fs     = vmcb->fs.sel;
    1.85 +    regs->gs     = vmcb->gs.sel;
    1.86  }
    1.87 -#endif
    1.88  
    1.89  /* XXX Use svm_load_cpu_guest_regs instead */
    1.90 -#if defined (__i386__)
    1.91  void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
    1.92  { 
    1.93      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    1.94 @@ -588,30 +591,17 @@ void svm_load_cpu_user_regs(struct vcpu 
    1.95      vmcb->rflags   = regs->eflags;
    1.96      vmcb->cs.sel   = regs->cs;
    1.97      vmcb->rip      = regs->eip;
    1.98 +
    1.99 +    vmcb->ds.sel   = regs->ds;
   1.100 +    vmcb->es.sel   = regs->es;
   1.101 +    vmcb->fs.sel   = regs->fs;
   1.102 +    vmcb->gs.sel   = regs->gs;
   1.103 +
   1.104      if (regs->eflags & EF_TF)
   1.105          *intercepts |= EXCEPTION_BITMAP_DB;
   1.106      else
   1.107          *intercepts &= ~EXCEPTION_BITMAP_DB;
   1.108  }
   1.109 -#else /* (__i386__) */
   1.110 -void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
   1.111 -{
   1.112 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   1.113 -    u32 *intercepts = &v->arch.hvm_svm.vmcb->exception_intercepts;
   1.114 -    
   1.115 -    /* Write the guest register value into VMCB */
   1.116 -    vmcb->rax      = regs->rax;
   1.117 -    vmcb->ss.sel   = regs->ss;
   1.118 -    vmcb->rsp      = regs->rsp;   
   1.119 -    vmcb->rflags   = regs->rflags;
   1.120 -    vmcb->cs.sel   = regs->cs;
   1.121 -    vmcb->rip      = regs->rip;
   1.122 -    if (regs->rflags & EF_TF)
   1.123 -        *intercepts |= EXCEPTION_BITMAP_DB;
   1.124 -    else
   1.125 -        *intercepts &= ~EXCEPTION_BITMAP_DB;
   1.126 -}
   1.127 -#endif /* !(__i386__) */
   1.128  
   1.129  int svm_paging_enabled(struct vcpu *v)
   1.130  {
   1.131 @@ -735,10 +725,6 @@ static void svm_relinquish_guest_resourc
   1.132      {
   1.133          if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
   1.134              continue;
   1.135 -#if 0
   1.136 -        /* Memory leak by not freeing this. XXXKAF: *Why* is not per core?? */
   1.137 -        free_host_save_area(v->arch.hvm_svm.host_save_area);
   1.138 -#endif
   1.139  
   1.140          destroy_vmcb(&v->arch.hvm_svm);
   1.141          free_monitor_pagetable(v);
     2.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Thu May 04 10:25:27 2006 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Thu May 04 11:14:45 2006 +0100
     2.3 @@ -36,9 +36,11 @@
     2.4  #include <xen/kernel.h>
     2.5  #include <xen/domain_page.h>
     2.6  
     2.7 +extern struct host_save_area *host_save_area[];
     2.8  extern int svm_dbg_on;
     2.9  extern int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
    2.10                                    int oldcore, int newcore);
    2.11 +extern void set_hsa_to_guest( struct arch_svm_struct *arch_svm );
    2.12  
    2.13  #define round_pgdown(_p) ((_p)&PAGE_MASK) /* coped from domain.c */
    2.14  
    2.15 @@ -309,8 +311,6 @@ int construct_vmcb(struct arch_svm_struc
    2.16  {
    2.17      int error;
    2.18      long rc=0;
    2.19 -    struct host_save_area *hsa = NULL;
    2.20 -    u64 phys_hsa;
    2.21  
    2.22      memset(arch_svm, 0, sizeof(struct arch_svm_struct));
    2.23  
    2.24 @@ -320,37 +320,10 @@ int construct_vmcb(struct arch_svm_struc
    2.25          goto err_out;
    2.26      }
    2.27  
    2.28 -    /* 
    2.29 -     * The following code is for allocating host_save_area.
    2.30 -     * Note: We either allocate a Host Save Area per core or per VCPU. 
    2.31 -     * However, we do not want a global data structure 
    2.32 -     * for HSA per core, we decided to implement a HSA for each VCPU. 
    2.33 -     * It will waste space since VCPU number is larger than core number. 
    2.34 -     * But before we find a better place for HSA for each core, we will 
    2.35 -     * stay will this solution.
    2.36 -     */
    2.37 -
    2.38 -    if (!(hsa = alloc_host_save_area())) 
    2.39 -    {
    2.40 -        printk("Failed to allocate Host Save Area\n");
    2.41 -        rc = -ENOMEM;
    2.42 -        goto err_out;
    2.43 -    }
    2.44 -
    2.45 -    phys_hsa = (u64) virt_to_maddr(hsa);
    2.46 -    arch_svm->host_save_area = hsa;
    2.47 -    arch_svm->host_save_pa   = phys_hsa;
    2.48 -
    2.49 +    /* update the HSA for the current Core */
    2.50 +    set_hsa_to_guest( arch_svm );
    2.51      arch_svm->vmcb_pa  = (u64) virt_to_maddr(arch_svm->vmcb);
    2.52  
    2.53 -    if ((error = load_vmcb(arch_svm, arch_svm->host_save_pa))) 
    2.54 -    {
    2.55 -        printk("construct_vmcb: load_vmcb failed: VMCB = %lx\n",
    2.56 -               (unsigned long) arch_svm->host_save_pa);
    2.57 -        rc = -EINVAL;         
    2.58 -        goto err_out;
    2.59 -    }
    2.60 -
    2.61      if ((error = construct_vmcb_controls(arch_svm))) 
    2.62      {
    2.63          printk("construct_vmcb: construct_vmcb_controls failed\n");
    2.64 @@ -458,19 +431,12 @@ void svm_do_launch(struct vcpu *v)
    2.65  }
    2.66  
    2.67  
    2.68 -int load_vmcb(struct arch_svm_struct *arch_svm, u64 phys_hsa) 
    2.69 +void set_hsa_to_guest( struct arch_svm_struct *arch_svm ) 
    2.70  {
    2.71 -    u32 phys_hsa_lo, phys_hsa_hi;
    2.72 -    
    2.73 -    phys_hsa_lo = (u32) phys_hsa;
    2.74 -    phys_hsa_hi = (u32) (phys_hsa >> 32);
    2.75 -    
    2.76 -    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
    2.77 -    set_bit(ARCH_SVM_VMCB_LOADED, &arch_svm->flags); 
    2.78 -    return 0;
    2.79 +    arch_svm->host_save_area = host_save_area[ smp_processor_id() ];
    2.80 +    arch_svm->host_save_pa   = (u64)virt_to_maddr( arch_svm->host_save_area );
    2.81  }
    2.82  
    2.83 -
    2.84  /* 
    2.85   * Resume the guest.
    2.86   */
    2.87 @@ -481,6 +447,9 @@ void svm_do_resume(struct vcpu *v)
    2.88      struct hvm_time_info *time_info = &vpit->time_info;
    2.89  
    2.90      svm_stts(v);
    2.91 +
    2.92 +    /* make sure the HSA is set for the current core */
    2.93 +    set_hsa_to_guest( &v->arch.hvm_svm );
    2.94      
    2.95      /* pick up the elapsed PIT ticks and re-enable pit_timer */
    2.96      if ( time_info->first_injected ) {