direct-io.hg

changeset 15186:96a59a5ae656
author kfraser@localhost.localdomain
date Wed May 30 17:01:26 2007 +0100 (2007-05-30)
parents 1f8fb764f843
children a1626e972148
description
Simplify APIC_ACCESS VMX support.
Signed-off-by: Keir Fraser <keir@xensource.com>
files xen/arch/x86/domain.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/intr.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/domain.h xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/vlapic.h xen/include/asm-x86/hvm/vmx/vmcs.h xen/include/asm-x86/hvm/vmx/vmx.h xen/include/asm-x86/msr.h
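
In outline: this changeset replaces the dynamic vTPR physmap machinery (change_guest_physmap_for_vtpr(), the mmap_vtpr_enabled flag, vapic_access_lock) with a static per-domain APIC-access page. New domain_initialise/domain_destroy hooks are added to hvm_function_table; the VMX backend uses them to allocate and free a single xenheap page mapped at APIC_DEFAULT_PHYS_BASE, and each VCPU's VMCS is pointed at that page at initialisation time. The SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES control is thereafter toggled only from the MSR_IA32_APICBASE write path. Along the way, the PIT/RTC/PM-timer/HPET init/deinit calls move under explicit vcpu_id checks pending a proper home in the domain hooks, the VMX control MSR names lose their redundant _MSR suffix, and arch_domain_create() gains a paging teardown on its failure path.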
line diff
     1.1 --- a/xen/arch/x86/domain.c	Wed May 30 16:48:28 2007 +0100
     1.2 +++ b/xen/arch/x86/domain.c	Wed May 30 17:01:26 2007 +0100
     1.3 @@ -393,7 +393,7 @@ int arch_domain_create(struct domain *d)
     1.4      int i;
     1.5  #endif
     1.6      l1_pgentry_t gdt_l1e;
     1.7 -    int vcpuid, pdpt_order;
     1.8 +    int vcpuid, pdpt_order, paging_initialised = 0;
     1.9      int rc = -ENOMEM;
    1.10  
    1.11      pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
    1.12 @@ -442,6 +442,7 @@ int arch_domain_create(struct domain *d)
    1.13  #endif
    1.14  
    1.15      paging_domain_init(d);
    1.16 +    paging_initialised = 1;
    1.17  
    1.18      if ( !is_idle_domain(d) )
    1.19      {
    1.20 @@ -469,12 +470,13 @@ int arch_domain_create(struct domain *d)
    1.21          d->arch.is_32bit_pv = d->arch.has_32bit_shinfo =
    1.22              (CONFIG_PAGING_LEVELS != 4);
    1.23      }
    1.24 -        
    1.25  
    1.26      return 0;
    1.27  
    1.28   fail:
    1.29      free_xenheap_page(d->shared_info);
    1.30 +    if ( paging_initialised )
    1.31 +        paging_final_teardown(d);
    1.32  #ifdef __x86_64__
    1.33      if ( d->arch.mm_perdomain_l2 )
    1.34          free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
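
The arch_domain_create() hunk is an instance of the usual init-rollback idiom: record how far initialisation progressed so the common fail path tears down exactly what came up. A minimal, self-contained sketch of the pattern (all names illustrative, not Xen's real helpers):

    /* Sketch of the rollback idiom above; every name is illustrative. */
    static int  alloc_early(void)     { return 0; }
    static void free_early(void)      { }
    static void paging_init(void)     { }
    static void paging_teardown(void) { }
    static int  alloc_late(void)      { return -1; /* force failure */ }

    int subsystem_create(void)
    {
        int paging_initialised = 0;  /* tracks progress for unwind */
        int rc = -12;                /* -ENOMEM */

        if ( alloc_early() != 0 )
            goto fail;

        paging_init();
        paging_initialised = 1;      /* from here, fail must undo it */

        if ( alloc_late() != 0 )
            goto fail;

        return 0;

     fail:
        if ( paging_initialised )    /* undo only what was done */
            paging_teardown();
        free_early();
        return rc;
    }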
     2.1 --- a/xen/arch/x86/hvm/hvm.c	Wed May 30 16:48:28 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/hvm.c	Wed May 30 17:01:26 2007 +0100
     2.3 @@ -226,7 +226,6 @@ int hvm_domain_initialise(struct domain 
     2.4  
     2.5      spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
     2.6      spin_lock_init(&d->arch.hvm_domain.irq_lock);
     2.7 -    spin_lock_init(&d->arch.hvm_domain.vapic_access_lock);
     2.8  
     2.9      rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
    2.10      if ( rc != 0 )
    2.11 @@ -238,7 +237,7 @@ int hvm_domain_initialise(struct domain 
    2.12      hvm_init_ioreq_page(d, &d->arch.hvm_domain.ioreq);
    2.13      hvm_init_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
    2.14  
    2.15 -    return 0;
    2.16 +    return hvm_funcs.domain_initialise(d);
    2.17  }
    2.18  
    2.19  void hvm_domain_relinquish_resources(struct domain *d)
    2.20 @@ -249,10 +248,7 @@ void hvm_domain_relinquish_resources(str
    2.21  
    2.22  void hvm_domain_destroy(struct domain *d)
    2.23  {
    2.24 -    pit_deinit(d);
    2.25 -    rtc_deinit(d);
    2.26 -    pmtimer_deinit(d);
    2.27 -    hpet_deinit(d);
    2.28 +    hvm_funcs.domain_destroy(d);
    2.29  }
    2.30  
    2.31  static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
    2.32 @@ -409,27 +405,39 @@ int hvm_vcpu_initialise(struct vcpu *v)
    2.33  
    2.34      INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
    2.35  
    2.36 -    if ( v->vcpu_id != 0 )
    2.37 -        return 0;
    2.38 -
    2.39 -    pit_init(v, cpu_khz);
    2.40 -    rtc_init(v, RTC_PORT(0));
    2.41 -    pmtimer_init(v);
    2.42 -    hpet_init(v);
    2.43 +    if ( v->vcpu_id == 0 )
    2.44 +    {
    2.45 +        /* NB. All these really belong in hvm_domain_initialise(). */
    2.46 +        pit_init(v, cpu_khz);
    2.47 +        rtc_init(v, RTC_PORT(0));
    2.48 +        pmtimer_init(v);
    2.49 +        hpet_init(v);
    2.50   
    2.51 -    /* Init guest TSC to start from zero. */
    2.52 -    hvm_set_guest_time(v, 0);
    2.53 +        /* Init guest TSC to start from zero. */
    2.54 +        hvm_set_guest_time(v, 0);
    2.55 +    }
    2.56  
    2.57      return 0;
    2.58  }
    2.59  
    2.60  void hvm_vcpu_destroy(struct vcpu *v)
    2.61  {
    2.62 +    struct domain *d = v->domain;
    2.63 +
    2.64      vlapic_destroy(v);
    2.65      hvm_funcs.vcpu_destroy(v);
    2.66  
    2.67      /* Event channel is already freed by evtchn_destroy(). */
    2.68      /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
    2.69 +
    2.70 +    if ( v->vcpu_id == 0 )
    2.71 +    {
    2.72 +        /* NB. All these really belong in hvm_domain_destroy(). */
    2.73 +        pit_deinit(d);
    2.74 +        rtc_deinit(d);
    2.75 +        pmtimer_deinit(d);
    2.76 +        hpet_deinit(d);
    2.77 +    }
    2.78  }
    2.79  
    2.80  
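hvm_domain_initialise() and hvm_domain_destroy() now defer to the vendor layer through hvm_funcs, the same function-table dispatch the file already uses for the vcpu hooks. Reduced to its essentials, the indirection looks like this (a sketch carrying only a subset of the real table's fields):

    struct domain;

    /* Subset of hvm_function_table relevant to this changeset. */
    struct hvm_function_table {
        const char *name;
        int  (*domain_initialise)(struct domain *d);
        void (*domain_destroy)(struct domain *d);
    };

    /* Each vendor backend (VMX, SVM) supplies its own hooks... */
    static int  vendor_domain_initialise(struct domain *d) { return 0; }
    static void vendor_domain_destroy(struct domain *d)    { }

    static struct hvm_function_table hvm_funcs = {
        .name              = "VENDOR",
        .domain_initialise = vendor_domain_initialise,
        .domain_destroy    = vendor_domain_destroy,
    };

    /* ...and the generic HVM layer calls through the table, so it
     * never needs to know which vendor is active. */
    int generic_domain_initialise(struct domain *d)
    {
        return hvm_funcs.domain_initialise(d);
    }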
     3.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed May 30 16:48:28 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed May 30 17:01:26 2007 +0100
     3.3 @@ -876,6 +876,15 @@ static void svm_do_resume(struct vcpu *v
     3.4      reset_stack_and_jump(svm_asm_do_resume);
     3.5  }
     3.6  
     3.7 +static int svm_domain_initialise(struct domain *d)
     3.8 +{
     3.9 +    return 0;
    3.10 +}
    3.11 +
    3.12 +static void svm_domain_destroy(struct domain *d)
    3.13 +{
    3.14 +}
    3.15 +
    3.16  static int svm_vcpu_initialise(struct vcpu *v)
    3.17  {
    3.18      int rc;
    3.19 @@ -920,6 +929,8 @@ static int svm_event_injection_faulted(s
    3.20  static struct hvm_function_table svm_function_table = {
    3.21      .name                 = "SVM",
    3.22      .disable              = stop_svm,
    3.23 +    .domain_initialise    = svm_domain_initialise,
    3.24 +    .domain_destroy       = svm_domain_destroy,
    3.25      .vcpu_initialise      = svm_vcpu_initialise,
    3.26      .vcpu_destroy         = svm_vcpu_destroy,
    3.27      .store_cpu_guest_regs = svm_store_cpu_guest_regs,
     4.1 --- a/xen/arch/x86/hvm/vmx/intr.c	Wed May 30 16:48:28 2007 +0100
     4.2 +++ b/xen/arch/x86/hvm/vmx/intr.c	Wed May 30 17:01:26 2007 +0100
     4.3 @@ -74,11 +74,6 @@ static void update_tpr_threshold(struct 
     4.4      if ( !cpu_has_vmx_tpr_shadow )
     4.5          return;
     4.6  
     4.7 -#ifdef __i386__
     4.8 -    if ( !vlapic->mmap_vtpr_enabled )
     4.9 -        return;
    4.10 -#endif
    4.11 -
    4.12      if ( !vlapic_enabled(vlapic) || 
    4.13           ((max_irr = vlapic_find_highest_irr(vlapic)) == -1) )
    4.14      {
     5.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed May 30 16:48:28 2007 +0100
     5.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed May 30 17:01:26 2007 +0100
     5.3 @@ -61,9 +61,6 @@ static u32 adjust_vmx_controls(u32 ctl_m
     5.4      return ctl;
     5.5  }
     5.6  
     5.7 -#define vmx_has_secondary_exec_ctls \
     5.8 -    (_vmx_cpu_based_exec_control & ACTIVATE_SECONDARY_CONTROLS)
     5.9 -
    5.10  void vmx_init_vmcs_config(void)
    5.11  {
    5.12      u32 vmx_msr_low, vmx_msr_high, min, opt;
    5.13 @@ -76,7 +73,7 @@ void vmx_init_vmcs_config(void)
    5.14      min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
    5.15      opt = 0;
    5.16      _vmx_pin_based_exec_control = adjust_vmx_controls(
    5.17 -        min, opt, MSR_IA32_VMX_PINBASED_CTLS_MSR);
    5.18 +        min, opt, MSR_IA32_VMX_PINBASED_CTLS);
    5.19  
    5.20      min = (CPU_BASED_HLT_EXITING |
    5.21             CPU_BASED_INVDPG_EXITING |
    5.22 @@ -84,24 +81,21 @@ void vmx_init_vmcs_config(void)
    5.23             CPU_BASED_MOV_DR_EXITING |
    5.24             CPU_BASED_ACTIVATE_IO_BITMAP |
    5.25             CPU_BASED_USE_TSC_OFFSETING);
    5.26 -    opt = CPU_BASED_ACTIVATE_MSR_BITMAP;
    5.27 +    opt  = CPU_BASED_ACTIVATE_MSR_BITMAP;
    5.28      opt |= CPU_BASED_TPR_SHADOW;
    5.29 -    opt |= ACTIVATE_SECONDARY_CONTROLS;
    5.30 +    opt |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
    5.31      _vmx_cpu_based_exec_control = adjust_vmx_controls(
    5.32 -        min, opt, MSR_IA32_VMX_PROCBASED_CTLS_MSR);
    5.33 +        min, opt, MSR_IA32_VMX_PROCBASED_CTLS);
    5.34  #ifdef __x86_64__
    5.35      if ( !(_vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW) )
    5.36      {
    5.37          min |= CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING;
    5.38          _vmx_cpu_based_exec_control = adjust_vmx_controls(
    5.39 -            min, opt, MSR_IA32_VMX_PROCBASED_CTLS_MSR);
    5.40 +            min, opt, MSR_IA32_VMX_PROCBASED_CTLS);
    5.41      }
    5.42 -#elif defined(__i386__)
    5.43 -    if ( !vmx_has_secondary_exec_ctls )
    5.44 -        _vmx_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
    5.45  #endif
    5.46  
    5.47 -    if ( vmx_has_secondary_exec_ctls )
    5.48 +    if ( _vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
    5.49      {
    5.50          min = 0;
    5.51          opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
    5.52 @@ -109,27 +103,33 @@ void vmx_init_vmcs_config(void)
    5.53              min, opt, MSR_IA32_VMX_PROCBASED_CTLS2);
    5.54      }
    5.55  
    5.56 +#if defined(__i386__)
    5.57 +    /* If we can't virtualise APIC accesses, the TPR shadow is pointless. */
    5.58 +    if ( !(_vmx_secondary_exec_control &
    5.59 +           SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) )
    5.60 +        _vmx_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
    5.61 +#endif
    5.62 +
    5.63      min = VM_EXIT_ACK_INTR_ON_EXIT;
    5.64      opt = 0;
    5.65  #ifdef __x86_64__
    5.66      min |= VM_EXIT_IA32E_MODE;
    5.67  #endif
    5.68      _vmx_vmexit_control = adjust_vmx_controls(
    5.69 -        min, opt, MSR_IA32_VMX_EXIT_CTLS_MSR);
    5.70 +        min, opt, MSR_IA32_VMX_EXIT_CTLS);
    5.71  
    5.72      min = opt = 0;
    5.73      _vmx_vmentry_control = adjust_vmx_controls(
    5.74 -        min, opt, MSR_IA32_VMX_ENTRY_CTLS_MSR);
    5.75 +        min, opt, MSR_IA32_VMX_ENTRY_CTLS);
    5.76  
    5.77 -    rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
    5.78 +    rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
    5.79  
    5.80      if ( smp_processor_id() == 0 )
    5.81      {
    5.82          vmcs_revision_id = vmx_msr_low;
    5.83          vmx_pin_based_exec_control = _vmx_pin_based_exec_control;
    5.84          vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
    5.85 -        if ( vmx_has_secondary_exec_ctls )
    5.86 -            vmx_secondary_exec_control = _vmx_secondary_exec_control;
    5.87 +        vmx_secondary_exec_control = _vmx_secondary_exec_control;
    5.88          vmx_vmexit_control         = _vmx_vmexit_control;
    5.89          vmx_vmentry_control        = _vmx_vmentry_control;
    5.90      }
    5.91 @@ -138,8 +138,7 @@ void vmx_init_vmcs_config(void)
    5.92          BUG_ON(vmcs_revision_id != vmx_msr_low);
    5.93          BUG_ON(vmx_pin_based_exec_control != _vmx_pin_based_exec_control);
    5.94          BUG_ON(vmx_cpu_based_exec_control != _vmx_cpu_based_exec_control);
    5.95 -        if ( vmx_has_secondary_exec_ctls )
    5.96 -            BUG_ON(vmx_secondary_exec_control != _vmx_secondary_exec_control);
    5.97 +        BUG_ON(vmx_secondary_exec_control != _vmx_secondary_exec_control);
    5.98          BUG_ON(vmx_vmexit_control != _vmx_vmexit_control);
    5.99          BUG_ON(vmx_vmentry_control != _vmx_vmentry_control);
   5.100      }
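
The hunks above repeatedly call adjust_vmx_controls(), whose body lies just above the first vmcs.c hunk's context. It implements the standard VMX capability handshake: each control MSR reports, in its low 32 bits, control bits that must be 1 and, in its high 32 bits, bits that are allowed to be 1. A sketch of that logic, consistent with the Xen code of this era (rdmsr() and BUG_ON() are Xen's own macros, as used elsewhere in this diff):

    static u32 adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr)
    {
        u32 vmx_msr_low, vmx_msr_high, ctl = ctl_min | ctl_opt;

        rdmsr(msr, vmx_msr_low, vmx_msr_high);

        ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
        ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

        /* The minimum (required) controls must all be supported. */
        BUG_ON(ctl_min & ~ctl);

        return ctl;
    }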
   5.101 @@ -310,7 +309,7 @@ static void construct_vmcs(struct vcpu *
   5.102      __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
   5.103      __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
   5.104      v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
   5.105 -    if ( vmx_cpu_based_exec_control & ACTIVATE_SECONDARY_CONTROLS )
   5.106 +    if ( vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
   5.107          __vmwrite(SECONDARY_VM_EXEC_CONTROL, vmx_secondary_exec_control);
   5.108  
   5.109      if ( cpu_has_vmx_msr_bitmap )
   5.110 @@ -437,24 +436,14 @@ static void construct_vmcs(struct vcpu *
   5.111          cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
   5.112      __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
   5.113  
   5.114 -#ifdef __x86_64__ 
   5.115 -    /* CR8 based VLAPIC TPR optimization. */
   5.116      if ( cpu_has_vmx_tpr_shadow )
   5.117      {
   5.118 -        __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
   5.119 -                  page_to_maddr(vcpu_vlapic(v)->regs_page));
   5.120 -        __vmwrite(TPR_THRESHOLD, 0);
   5.121 -    }
   5.122 +        uint64_t virt_page_ma = page_to_maddr(vcpu_vlapic(v)->regs_page);
   5.123 +        __vmwrite(VIRTUAL_APIC_PAGE_ADDR, virt_page_ma);
   5.124 +#if defined (__i386__)
   5.125 +        __vmwrite(VIRTUAL_APIC_PAGE_ADDR_HIGH, virt_page_ma >> 32);
   5.126  #endif
   5.127 -
   5.128 -    /* Memory-mapped based VLAPIC TPR optimization. */
   5.129 -    if ( cpu_has_vmx_mmap_vtpr_optimization )
   5.130 -    {
   5.131 -        __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
   5.132 -                    page_to_maddr(vcpu_vlapic(v)->regs_page));
   5.133          __vmwrite(TPR_THRESHOLD, 0);
   5.134 -
   5.135 -        vcpu_vlapic(v)->mmap_vtpr_enabled = 1;
   5.136      }
   5.137  
   5.138      __vmwrite(GUEST_LDTR_SELECTOR, 0);
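
The #if defined(__i386__) arm above exists because VIRTUAL_APIC_PAGE_ADDR is a 64-bit VMCS field while __vmwrite() takes an unsigned long: on a 32-bit hypervisor the upper half must be written separately through the companion _HIGH encoding (full-width VMCS fields have even encodings; the _HIGH field is encoding + 1). As an isolated sketch, under those assumptions:

    /* Sketch: writing a 64-bit VMCS field from 32-bit code.  Assumes
     * __vmwrite(field, unsigned long) and the _HIGH encoding used in
     * the hunk above. */
    static void write_vapic_page_addr(uint64_t ma)
    {
        __vmwrite(VIRTUAL_APIC_PAGE_ADDR, (unsigned long)ma);
    #if defined(__i386__)
        /* unsigned long is 32 bits here: bits 63:32 go separately. */
        __vmwrite(VIRTUAL_APIC_PAGE_ADDR_HIGH, (unsigned long)(ma >> 32));
    #endif
    }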
   5.139 @@ -527,18 +516,6 @@ void vmx_do_resume(struct vcpu *v)
   5.140          vmx_set_host_env(v);
   5.141      }
   5.142  
   5.143 -    if ( !v->arch.hvm_vmx.launched && vcpu_vlapic(v)->mmap_vtpr_enabled )
   5.144 -    {
   5.145 -        struct page_info *pg = change_guest_physmap_for_vtpr(v->domain, 1);
   5.146 -
   5.147 -        if ( pg == NULL )
   5.148 -        {
   5.149 -            gdprintk(XENLOG_ERR, "change_guest_physmap_for_vtpr failed!\n");
   5.150 -            domain_crash_synchronous();
   5.151 -        }
   5.152 -        __vmwrite(APIC_ACCESS_ADDR, page_to_maddr(pg));
   5.153 -    }
   5.154 -
   5.155      debug_state = v->domain->debugger_attached;
   5.156      if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
   5.157      {
     6.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed May 30 16:48:28 2007 +0100
     6.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed May 30 17:01:26 2007 +0100
     6.3 @@ -56,6 +56,20 @@ char *vmx_msr_bitmap;
     6.4  static void vmx_ctxt_switch_from(struct vcpu *v);
     6.5  static void vmx_ctxt_switch_to(struct vcpu *v);
     6.6  
     6.7 +static int  vmx_alloc_vlapic_mapping(struct domain *d);
     6.8 +static void vmx_free_vlapic_mapping(struct domain *d);
     6.9 +static void vmx_install_vlapic_mapping(struct vcpu *v);
    6.10 +
    6.11 +static int vmx_domain_initialise(struct domain *d)
    6.12 +{
    6.13 +    return vmx_alloc_vlapic_mapping(d);
    6.14 +}
    6.15 +
    6.16 +static void vmx_domain_destroy(struct domain *d)
    6.17 +{
    6.18 +    vmx_free_vlapic_mapping(d);
    6.19 +}
    6.20 +
    6.21  static int vmx_vcpu_initialise(struct vcpu *v)
    6.22  {
    6.23      int rc;
    6.24 @@ -74,6 +88,8 @@ static int vmx_vcpu_initialise(struct vc
    6.25          return rc;
    6.26      }
    6.27  
    6.28 +    vmx_install_vlapic_mapping(v);
    6.29 +
    6.30      return 0;
    6.31  }
    6.32  
    6.33 @@ -1168,6 +1184,8 @@ static void disable_intercept_for_msr(u3
    6.34  static struct hvm_function_table vmx_function_table = {
    6.35      .name                 = "VMX",
    6.36      .disable              = stop_vmx,
    6.37 +    .domain_initialise    = vmx_domain_initialise,
    6.38 +    .domain_destroy       = vmx_domain_destroy,
    6.39      .vcpu_initialise      = vmx_vcpu_initialise,
    6.40      .vcpu_destroy         = vmx_vcpu_destroy,
    6.41      .store_cpu_guest_regs = vmx_store_cpu_guest_regs,
    6.42 @@ -2483,112 +2501,66 @@ done:
    6.43      return 1;
    6.44  }
    6.45  
    6.46 -struct page_info * change_guest_physmap_for_vtpr(struct domain *d,
    6.47 -                                                 int enable_vtpr)
    6.48 +static int vmx_alloc_vlapic_mapping(struct domain *d)
    6.49 +{
    6.50 +    void *apic_va;
    6.51 +
    6.52 +    if ( !cpu_has_vmx_virtualize_apic_accesses )
    6.53 +        return 0;
    6.54 +
    6.55 +    apic_va = alloc_xenheap_page();
    6.56 +    if ( apic_va == NULL )
    6.57 +        return -ENOMEM;
    6.58 +    share_xen_page_with_guest(virt_to_page(apic_va), d, XENSHARE_writable);
    6.59 +    guest_physmap_add_page(
    6.60 +        d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), virt_to_mfn(apic_va));
    6.61 +    d->arch.hvm_domain.vmx_apic_access_mfn = virt_to_mfn(apic_va);
    6.62 +
    6.63 +    return 0;
    6.64 +}
    6.65 +
    6.66 +static void vmx_free_vlapic_mapping(struct domain *d)
    6.67  {
    6.68 -    struct page_info *pg;
    6.69 -    unsigned long pfn, mfn;
    6.70 -
    6.71 -    spin_lock(&d->arch.hvm_domain.vapic_access_lock);
    6.72 -
    6.73 -    pg = d->arch.hvm_domain.apic_access_page;
    6.74 -    pfn = paddr_to_pfn(APIC_DEFAULT_PHYS_BASE);
    6.75 -
    6.76 -    if ( enable_vtpr )
    6.77 -    {
    6.78 -        if ( d->arch.hvm_domain.physmap_changed_for_vlapic_access )
    6.79 -            goto out;
    6.80 -
    6.81 -        if ( pg == NULL )
    6.82 -            pg = alloc_domheap_page(d);
    6.83 -        if ( pg == NULL )
    6.84 -        {
    6.85 -            gdprintk(XENLOG_ERR, "alloc_domheap_pages() failed!\n");
    6.86 -            goto out;
    6.87 -        }
    6.88 -
    6.89 -        mfn = page_to_mfn(pg);
    6.90 -        d->arch.hvm_domain.apic_access_page = pg;
    6.91 -
    6.92 -        guest_physmap_add_page(d, pfn, mfn);
    6.93 -
    6.94 -        d->arch.hvm_domain.physmap_changed_for_vlapic_access = 1;
    6.95 -
    6.96 -        goto out;
    6.97 -    }
    6.98 -    else
    6.99 -    {
   6.100 -        if ( d->arch.hvm_domain.physmap_changed_for_vlapic_access )
   6.101 -        {
   6.102 -            mfn = page_to_mfn(pg);
   6.103 -            guest_physmap_remove_page(d, pfn, mfn);
   6.104 -            flush_tlb_mask(d->domain_dirty_cpumask);
   6.105 -
   6.106 -            d->arch.hvm_domain.physmap_changed_for_vlapic_access = 0;
   6.107 -        }
   6.108 -        pg = NULL;
   6.109 -        goto out;
   6.110 -    }
   6.111 -
   6.112 -out:
   6.113 -    spin_unlock(&d->arch.hvm_domain.vapic_access_lock);
   6.114 -    return pg;
   6.115 +    unsigned long mfn = d->arch.hvm_domain.vmx_apic_access_mfn;
   6.116 +    if ( mfn != 0 )
   6.117 +        free_xenheap_page(mfn_to_virt(mfn));
   6.118  }
   6.119  
   6.120 -static void check_vlapic_msr_for_vtpr(struct vcpu *v)
   6.121 +static void vmx_install_vlapic_mapping(struct vcpu *v)
   6.122 +{
   6.123 +    paddr_t virt_page_ma, apic_page_ma;
   6.124 +
   6.125 +    if ( !cpu_has_vmx_virtualize_apic_accesses )
   6.126 +        return;
   6.127 +
   6.128 +    virt_page_ma = page_to_maddr(vcpu_vlapic(v)->regs_page);
   6.129 +    apic_page_ma = v->domain->arch.hvm_domain.vmx_apic_access_mfn;
   6.130 +    apic_page_ma <<= PAGE_SHIFT;
   6.131 +
   6.132 +    vmx_vmcs_enter(v);
   6.133 +    __vmwrite(VIRTUAL_APIC_PAGE_ADDR, virt_page_ma);
   6.134 +    __vmwrite(APIC_ACCESS_ADDR, apic_page_ma);
   6.135 +#if defined (__i386__)
   6.136 +    __vmwrite(VIRTUAL_APIC_PAGE_ADDR_HIGH, virt_page_ma >> 32);
   6.137 +    __vmwrite(APIC_ACCESS_ADDR_HIGH, apic_page_ma >> 32);
   6.138 +#endif
   6.139 +    vmx_vmcs_exit(v);
   6.140 +}
   6.141 +
   6.142 +static void vmx_check_vlapic_msr(struct vcpu *v)
   6.143  {
   6.144      struct vlapic *vlapic = vcpu_vlapic(v);
   6.145 -    int    mmap_vtpr_enabled = vcpu_vlapic(v)->mmap_vtpr_enabled;
   6.146 -    uint32_t tmp;
   6.147 -
   6.148 -
   6.149 -    if ( vlapic_hw_disabled(vlapic) && mmap_vtpr_enabled )
   6.150 -    {
   6.151 -        vcpu_vlapic(v)->mmap_vtpr_enabled = 0;    
   6.152 -
   6.153 -#ifdef __i386__
   6.154 -        v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_TPR_SHADOW;
   6.155 -        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
   6.156 -                  v->arch.hvm_vcpu.u.vmx.exec_control);
   6.157 -#elif defined(__x86_64__)
   6.158 -        if ( !cpu_has_vmx_tpr_shadow )
   6.159 -        {
   6.160 -            v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_TPR_SHADOW;
   6.161 -            __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
   6.162 -                v->arch.hvm_vcpu.u.vmx.exec_control);
   6.163 -        }
   6.164 -#endif
   6.165 -        tmp  = __vmread(SECONDARY_VM_EXEC_CONTROL);
   6.166 -        tmp &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
   6.167 -        __vmwrite(SECONDARY_VM_EXEC_CONTROL, tmp);
   6.168 -
   6.169 -        change_guest_physmap_for_vtpr(v->domain, 0);
   6.170 -    }
   6.171 -    else if ( !vlapic_hw_disabled(vlapic) && !mmap_vtpr_enabled &&
   6.172 -              cpu_has_vmx_mmap_vtpr_optimization )
   6.173 -    {
   6.174 -        vcpu_vlapic(v)->mmap_vtpr_enabled = 1;
   6.175 -
   6.176 -        v->arch.hvm_vcpu.u.vmx.exec_control |=
   6.177 -            ( ACTIVATE_SECONDARY_CONTROLS | CPU_BASED_TPR_SHADOW );
   6.178 -        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
   6.179 -                  v->arch.hvm_vcpu.u.vmx.exec_control);
   6.180 -        tmp  = __vmread(SECONDARY_VM_EXEC_CONTROL);
   6.181 -        tmp |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
   6.182 -        __vmwrite(SECONDARY_VM_EXEC_CONTROL, tmp);
   6.183 -
   6.184 -        change_guest_physmap_for_vtpr(v->domain, 1);
   6.185 -    }
   6.186 -
   6.187 -    if ( vcpu_vlapic(v)->mmap_vtpr_enabled &&
   6.188 -        !vlapic_hw_disabled(vlapic) &&
   6.189 -        (vlapic_base_address(vlapic) != APIC_DEFAULT_PHYS_BASE) )
   6.190 -    {
   6.191 -        gdprintk(XENLOG_ERR,
   6.192 -                 "Local APIC base address is set to 0x%016"PRIx64"!\n",
   6.193 -                  vlapic_base_address(vlapic));
   6.194 -        domain_crash_synchronous();
   6.195 -    }
   6.196 +    uint32_t ctl;
   6.197 +
   6.198 +    if ( !cpu_has_vmx_virtualize_apic_accesses )
   6.199 +        return;
   6.200 +
   6.201 +    ctl  = __vmread(SECONDARY_VM_EXEC_CONTROL);
   6.202 +    ctl &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
   6.203 +    if ( !vlapic_hw_disabled(vlapic) &&
   6.204 +         (vlapic_base_address(vlapic) == APIC_DEFAULT_PHYS_BASE) )
   6.205 +        ctl |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
   6.206 +    __vmwrite(SECONDARY_VM_EXEC_CONTROL, ctl);
   6.207  }
   6.208  
   6.209  static inline int vmx_do_msr_write(struct cpu_user_regs *regs)
   6.210 @@ -2619,7 +2591,7 @@ static inline int vmx_do_msr_write(struc
   6.211          break;
   6.212      case MSR_IA32_APICBASE:
   6.213          vlapic_msr_set(vcpu_vlapic(v), msr_content);
   6.214 -        check_vlapic_msr_for_vtpr(v);
   6.215 +        vmx_check_vlapic_msr(v);
   6.216          break;
   6.217      default:
   6.218          if ( !long_mode_do_msr_write(regs) )
   6.219 @@ -2932,12 +2904,12 @@ asmlinkage void vmx_vmexit_handler(struc
   6.220  
   6.221      case EXIT_REASON_TPR_BELOW_THRESHOLD:
   6.222          break;
   6.223 +
   6.224      case EXIT_REASON_APIC_ACCESS:
   6.225      {
   6.226          unsigned long offset;
   6.227 -
   6.228          exit_qualification = __vmread(EXIT_QUALIFICATION);
   6.229 -        offset = exit_qualification & 0x0fffUL;        
   6.230 +        offset = exit_qualification & 0x0fffUL;
   6.231          handle_mmio(APIC_DEFAULT_PHYS_BASE | offset);
   6.232          break;
   6.233      }
     7.1 --- a/xen/include/asm-x86/hvm/domain.h	Wed May 30 16:48:28 2007 +0100
     7.2 +++ b/xen/include/asm-x86/hvm/domain.h	Wed May 30 17:01:26 2007 +0100
     7.3 @@ -41,11 +41,6 @@ struct hvm_domain {
     7.4      s64                    tsc_frequency;
     7.5      struct pl_time         pl_time;
     7.6  
     7.7 -    /* For memory-mapped vLAPIC/vTPR access optimization */
     7.8 -    spinlock_t             vapic_access_lock;
     7.9 -    int                    physmap_changed_for_vlapic_access : 1;
    7.10 -    struct page_info       *apic_access_page;
    7.11 -
    7.12      struct hvm_io_handler  io_handler;
    7.13  
    7.14      /* Lock protects access to irq, vpic and vioapic. */
    7.15 @@ -60,6 +55,8 @@ struct hvm_domain {
    7.16      spinlock_t             pbuf_lock;
    7.17  
    7.18      uint64_t               params[HVM_NR_PARAMS];
    7.19 +
    7.20 +    unsigned long          vmx_apic_access_mfn;
    7.21  };
    7.22  
    7.23  #endif /* __ASM_X86_HVM_DOMAIN_H__ */
     8.1 --- a/xen/include/asm-x86/hvm/hvm.h	Wed May 30 16:48:28 2007 +0100
     8.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Wed May 30 17:01:26 2007 +0100
     8.3 @@ -69,8 +69,10 @@ struct hvm_function_table {
     8.4      void (*disable)(void);
     8.5  
     8.6      /*
     8.7 -     * Initialise/destroy HVM VCPU resources
     8.8 +     * Initialise/destroy HVM domain/vcpu resources
     8.9       */
    8.10 +    int  (*domain_initialise)(struct domain *d);
    8.11 +    void (*domain_destroy)(struct domain *d);
    8.12      int  (*vcpu_initialise)(struct vcpu *v);
    8.13      void (*vcpu_destroy)(struct vcpu *v);
    8.14  
     9.1 --- a/xen/include/asm-x86/hvm/vlapic.h	Wed May 30 16:48:28 2007 +0100
     9.2 +++ b/xen/include/asm-x86/hvm/vlapic.h	Wed May 30 17:01:26 2007 +0100
     9.3 @@ -33,7 +33,7 @@
     9.4  #define vlapic_domain(vpic) (vlapic_vcpu(vlapic)->domain)
     9.5  
     9.6  #define VLAPIC_ID(vlapic)   \
     9.7 -    (GET_APIC_ID(vlapic_get_reg(vlapic, APIC_ID)))
     9.8 +    (GET_APIC_ID(vlapic_get_reg((vlapic), APIC_ID)))
     9.9  
    9.10  /*
    9.11   * APIC can be disabled in two ways:
    9.12 @@ -50,7 +50,7 @@
    9.13  #define vlapic_enabled(vlapic)     (!vlapic_disabled(vlapic))
    9.14  
    9.15  #define vlapic_base_address(vlapic)                             \
    9.16 -    (vlapic->hw.apic_base_msr & MSR_IA32_APICBASE_BASE)
    9.17 +    ((vlapic)->hw.apic_base_msr & MSR_IA32_APICBASE_BASE)
    9.18  
    9.19  struct vlapic {
    9.20      struct hvm_hw_lapic      hw;
    9.21 @@ -58,8 +58,6 @@ struct vlapic {
    9.22      struct periodic_time     pt;
    9.23      s_time_t                 timer_last_update;
    9.24      struct page_info         *regs_page;
    9.25 -
    9.26 -    int                      mmap_vtpr_enabled : 1;
    9.27  };
    9.28  
    9.29  static inline uint32_t vlapic_get_reg(struct vlapic *vlapic, uint32_t reg)
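
The vlapic.h edits to VLAPIC_ID and vlapic_base_address are pure macro hygiene: parenthesising the parameter so that expression arguments bind as intended. A small, self-contained illustration of the failure mode (hypothetical struct, not Xen code):

    #include <stdio.h>

    #define BASE_BAD(p)  (p->base & ~0xfffUL)    /* unparenthesised */
    #define BASE_GOOD(p) ((p)->base & ~0xfffUL)  /* parenthesised   */

    struct lapic { unsigned long base; };

    int main(void)
    {
        struct lapic l = { 0xfee00900UL };
        void *v = &l;

        /* BASE_BAD((struct lapic *)v) expands to
         *   ((struct lapic *)v->base & ~0xfffUL)
         * i.e. the cast applies after a dereference of a void pointer,
         * and the build fails.  The parenthesised form expands to
         *   (((struct lapic *)v)->base & ~0xfffUL)
         * which is what the caller meant. */
        printf("%#lx\n", BASE_GOOD((struct lapic *)v));
        return 0;
    }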
    10.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed May 30 16:48:28 2007 +0100
    10.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed May 30 17:01:26 2007 +0100
    10.3 @@ -90,23 +90,23 @@ void vmx_destroy_vmcs(struct vcpu *v);
    10.4  void vmx_vmcs_enter(struct vcpu *v);
    10.5  void vmx_vmcs_exit(struct vcpu *v);
    10.6  
    10.7 -#define CPU_BASED_VIRTUAL_INTR_PENDING  0x00000004
    10.8 -#define CPU_BASED_USE_TSC_OFFSETING     0x00000008
    10.9 -#define CPU_BASED_HLT_EXITING           0x00000080
   10.10 -#define CPU_BASED_INVDPG_EXITING        0x00000200
   10.11 -#define CPU_BASED_MWAIT_EXITING         0x00000400
   10.12 -#define CPU_BASED_RDPMC_EXITING         0x00000800
   10.13 -#define CPU_BASED_RDTSC_EXITING         0x00001000
   10.14 -#define CPU_BASED_CR8_LOAD_EXITING      0x00080000
   10.15 -#define CPU_BASED_CR8_STORE_EXITING     0x00100000
   10.16 -#define CPU_BASED_TPR_SHADOW            0x00200000
   10.17 -#define CPU_BASED_MOV_DR_EXITING        0x00800000
   10.18 -#define CPU_BASED_UNCOND_IO_EXITING     0x01000000
   10.19 -#define CPU_BASED_ACTIVATE_IO_BITMAP    0x02000000
   10.20 -#define CPU_BASED_ACTIVATE_MSR_BITMAP   0x10000000
   10.21 -#define CPU_BASED_MONITOR_EXITING       0x20000000
   10.22 -#define CPU_BASED_PAUSE_EXITING         0x40000000
   10.23 -#define ACTIVATE_SECONDARY_CONTROLS     0x80000000
   10.24 +#define CPU_BASED_VIRTUAL_INTR_PENDING        0x00000004
   10.25 +#define CPU_BASED_USE_TSC_OFFSETING           0x00000008
   10.26 +#define CPU_BASED_HLT_EXITING                 0x00000080
   10.27 +#define CPU_BASED_INVDPG_EXITING              0x00000200
   10.28 +#define CPU_BASED_MWAIT_EXITING               0x00000400
   10.29 +#define CPU_BASED_RDPMC_EXITING               0x00000800
   10.30 +#define CPU_BASED_RDTSC_EXITING               0x00001000
   10.31 +#define CPU_BASED_CR8_LOAD_EXITING            0x00080000
   10.32 +#define CPU_BASED_CR8_STORE_EXITING           0x00100000
   10.33 +#define CPU_BASED_TPR_SHADOW                  0x00200000
   10.34 +#define CPU_BASED_MOV_DR_EXITING              0x00800000
   10.35 +#define CPU_BASED_UNCOND_IO_EXITING           0x01000000
   10.36 +#define CPU_BASED_ACTIVATE_IO_BITMAP          0x02000000
   10.37 +#define CPU_BASED_ACTIVATE_MSR_BITMAP         0x10000000
   10.38 +#define CPU_BASED_MONITOR_EXITING             0x20000000
   10.39 +#define CPU_BASED_PAUSE_EXITING               0x40000000
   10.40 +#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
   10.41  extern u32 vmx_cpu_based_exec_control;
   10.42  
   10.43  #define PIN_BASED_EXT_INTR_MASK         0x00000001
   10.44 @@ -129,9 +129,6 @@ extern u32 vmx_secondary_exec_control;
   10.45      (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
   10.46  #define cpu_has_vmx_tpr_shadow \
   10.47      (vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
   10.48 -#define cpu_has_vmx_mmap_vtpr_optimization \
   10.49 -    (cpu_has_vmx_virtualize_apic_accesses && cpu_has_vmx_tpr_shadow)
   10.50 -
   10.51  #define cpu_has_vmx_msr_bitmap \
   10.52      (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP)
   10.53  extern char *vmx_msr_bitmap;
    11.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Wed May 30 16:48:28 2007 +0100
    11.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Wed May 30 17:01:26 2007 +0100
    11.3 @@ -33,9 +33,6 @@ void vmx_intr_assist(void);
    11.4  void vmx_do_resume(struct vcpu *);
    11.5  void set_guest_time(struct vcpu *v, u64 gtime);
    11.6  
    11.7 -extern struct page_info *change_guest_physmap_for_vtpr(struct domain *d,
    11.8 -                                                       int enable_vtpr);
    11.9 -
   11.10  /*
   11.11   * Exit Reasons
   11.12   */
    12.1 --- a/xen/include/asm-x86/msr.h	Wed May 30 16:48:28 2007 +0100
    12.2 +++ b/xen/include/asm-x86/msr.h	Wed May 30 17:01:26 2007 +0100
    12.3 @@ -109,12 +109,12 @@ static inline void wrmsrl(unsigned int m
    12.4  #define MSR_P6_PERFCTR1      0xc2
    12.5  
    12.6  /* MSRs & bits used for VMX enabling */
    12.7 -#define MSR_IA32_VMX_BASIC_MSR                  0x480
    12.8 -#define MSR_IA32_VMX_PINBASED_CTLS_MSR          0x481
    12.9 -#define MSR_IA32_VMX_PROCBASED_CTLS_MSR         0x482
   12.10 -#define MSR_IA32_VMX_EXIT_CTLS_MSR              0x483
   12.11 -#define MSR_IA32_VMX_ENTRY_CTLS_MSR             0x484
   12.12 -#define MSR_IA32_VMX_MISC_MSR                   0x485
   12.13 +#define MSR_IA32_VMX_BASIC                      0x480
   12.14 +#define MSR_IA32_VMX_PINBASED_CTLS              0x481
   12.15 +#define MSR_IA32_VMX_PROCBASED_CTLS             0x482
   12.16 +#define MSR_IA32_VMX_EXIT_CTLS                  0x483
   12.17 +#define MSR_IA32_VMX_ENTRY_CTLS                 0x484
   12.18 +#define MSR_IA32_VMX_MISC                       0x485
   12.19  #define MSR_IA32_VMX_CR0_FIXED0                 0x486
   12.20  #define MSR_IA32_VMX_CR0_FIXED1                 0x487
   12.21  #define MSR_IA32_VMX_CR4_FIXED0                 0x488