VMX: delay p2m insertion of APIC access page
author     Jan Beulich <jbeulich@suse.com>
           Fri, 26 Feb 2021 09:18:59 +0000 (10:18 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Fri, 26 Feb 2021 09:18:59 +0000 (10:18 +0100)
Inserting the mapping at domain creation time leads to a memory leak
when the creation fails later on and the domain uses separate CPU and
IOMMU page tables - the latter requires intermediate page tables to be
allocated, but there's no freeing of them at present in this case. Since
we don't need the p2m insertion to happen this early, avoid the problem
altogether by deferring it until the last possible point. This comes at
the price of not being able to handle an error other than by crashing
the domain.

Reported-by: Julien Grall <julien@xen.org>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Release-Acked-by: Ian Jackson <iwj@xenproject.org>
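
The shape of the fix is a split-phase initialisation: vmx_alloc_vlapic_mapping() now only allocates and records the page, while the new domain_creation_finished hook installs the p2m entry once creation can no longer fail. A minimal, self-contained sketch of that pattern follows; the names and types are hypothetical simplifications, not the actual Xen code.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t mfn_t;          /* toy stand-in for Xen's mfn_t */

    struct domain {
        mfn_t apic_access_mfn;       /* 0 == "no page allocated" */
    };

    /* Toy stand-in for set_mmio_p2m_entry(); in the real hypervisor
     * this may allocate intermediate (e.g. IOMMU) page tables, which
     * is exactly the leak the patch avoids on failed creation. */
    static int map_apic_access_page(struct domain *d)
    {
        printf("mapping mfn %" PRIu64 " at APIC_DEFAULT_PHYS_BASE\n",
               d->apic_access_mfn);
        return 0;
    }

    static void domain_crash(struct domain *d)
    {
        fprintf(stderr, "domain crashed\n");
    }

    /* Phase 1: allocate and record only - nothing to unwind on failure. */
    static int alloc_vlapic_mapping(struct domain *d)
    {
        d->apic_access_mfn = 42;     /* pretend a page was allocated */
        return 0;
    }

    /* Phase 2: runs once creation cannot fail any more; the only error
     * handling left is crashing the domain, as the message above says. */
    static void domain_creation_finished(struct domain *d)
    {
        if ( d->apic_access_mfn != 0 && map_apic_access_page(d) )
            domain_crash(d);
    }

    int main(void)
    {
        struct domain d = { 0 };

        if ( alloc_vlapic_mapping(&d) )
            return 1;                /* failure here leaks nothing */
        domain_creation_finished(&d);
        return 0;
    }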
xen/arch/x86/domain.c
xen/arch/x86/hvm/vmx/vmx.c
xen/include/asm-x86/hvm/hvm.h

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 97fd9456c26831da7ae6e456404f00b609fdecfd..5e3c94d3fa17265846abfb16c4b83ac855624dcb 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1007,6 +1007,8 @@ int arch_domain_soft_reset(struct domain *d)
 
 void arch_domain_creation_finished(struct domain *d)
 {
+    if ( is_hvm_domain(d) )
+        hvm_domain_creation_finished(d);
 }
 
 #define xen_vcpu_guest_context vcpu_guest_context
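
For context on when this arch hook actually runs: in the Xen tree of this era it is invoked from domain_unpause_by_systemcontroller() in xen/common/domain.c, the first time the toolstack unpauses the new domain, guarded by a one-shot creation_finished flag. That is why the commit message calls this "the last possible point", and why crashing the domain is the only error handling left. Below is a compilable toy of that run-once-on-first-unpause logic, simplified and hedged; the real function also deals with reference counting and scheduling.

    #include <stdbool.h>
    #include <stdio.h>

    struct domain {
        int pause_count;
        bool creation_finished;
    };

    static void arch_domain_creation_finished(struct domain *d)
    {
        puts("install deferred mappings here");
    }

    /* Toy version of the first-unpause path. */
    static void domain_unpause_by_systemcontroller(struct domain *d)
    {
        if ( --d->pause_count == 0 && !d->creation_finished )
        {
            d->creation_finished = true;        /* one-shot */
            arch_domain_creation_finished(d);
        }
    }

    int main(void)
    {
        struct domain d = { .pause_count = 1, .creation_finished = false };

        domain_unpause_by_systemcontroller(&d); /* fires exactly once */
        return 0;
    }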
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index faba95d057d4a672a6f6537c0c3bdea4391e45d9..bfea1b0f8a95cc6889e5ad4f0c701c19e6d3f76a 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -428,6 +428,14 @@ static void vmx_domain_relinquish_resources(struct domain *d)
     vmx_free_vlapic_mapping(d);
 }
 
+static void domain_creation_finished(struct domain *d)
+{
+    if ( has_vlapic(d) && !mfn_eq(d->arch.hvm.vmx.apic_access_mfn, _mfn(0)) &&
+         set_mmio_p2m_entry(d, gaddr_to_gfn(APIC_DEFAULT_PHYS_BASE),
+                            d->arch.hvm.vmx.apic_access_mfn, PAGE_ORDER_4K) )
+        domain_crash(d);
+}
+
 static void vmx_init_ipt(struct vcpu *v)
 {
     unsigned int size = v->domain->vmtrace_size;
@@ -2408,6 +2416,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .cpu_dead             = vmx_cpu_dead,
     .domain_initialise    = vmx_domain_initialise,
     .domain_relinquish_resources = vmx_domain_relinquish_resources,
+    .domain_creation_finished = domain_creation_finished,
     .vcpu_initialise      = vmx_vcpu_initialise,
     .vcpu_destroy         = vmx_vcpu_destroy,
     .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
@@ -3234,8 +3243,7 @@ static int vmx_alloc_vlapic_mapping(struct domain *d)
     clear_domain_page(mfn);
     d->arch.hvm.vmx.apic_access_mfn = mfn;
 
-    return set_mmio_p2m_entry(d, gaddr_to_gfn(APIC_DEFAULT_PHYS_BASE), mfn,
-                              PAGE_ORDER_4K);
+    return 0;
 }
 
 static void vmx_free_vlapic_mapping(struct domain *d)
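
The guard in the new hook, !mfn_eq(d->arch.hvm.vmx.apic_access_mfn, _mfn(0)), leans on Xen's typesafe frame-number idiom: MFNs are wrapped in a one-member struct so they cannot be mixed up with GFNs or plain integers, and 0 doubles here as the "never allocated" sentinel that vmx_alloc_vlapic_mapping() would have overwritten. A hedged sketch of that idiom, much simplified from the real TYPE_SAFE machinery:

    #include <stdbool.h>
    #include <stdio.h>

    /* One-member struct: mixing an mfn_t with a gfn_t or a bare
     * unsigned long becomes a compile-time error. */
    typedef struct { unsigned long mfn; } mfn_t;

    static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
    static inline bool mfn_eq(mfn_t a, mfn_t b) { return a.mfn == b.mfn; }

    int main(void)
    {
        mfn_t apic_access_mfn = _mfn(0);   /* "never allocated" sentinel */

        /* Mirrors the guard in the new hook. */
        printf("allocated: %d\n", !mfn_eq(apic_access_mfn, _mfn(0)));
        return 0;
    }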
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 150746de669d15ff95d061b2b46f5ddb48fd39a1..4a8fb571de4285cb88423d300d246df11a649e43 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -106,6 +106,7 @@ struct hvm_function_table {
      * Initialise/destroy HVM domain/vcpu resources
      */
     int  (*domain_initialise)(struct domain *d);
+    void (*domain_creation_finished)(struct domain *d);
     void (*domain_relinquish_resources)(struct domain *d);
     void (*domain_destroy)(struct domain *d);
     int  (*vcpu_initialise)(struct vcpu *v);
@@ -390,6 +391,12 @@ static inline bool hvm_has_set_descriptor_access_exiting(void)
     return hvm_funcs.set_descriptor_access_exiting;
 }
 
+static inline void hvm_domain_creation_finished(struct domain *d)
+{
+    if ( hvm_funcs.domain_creation_finished )
+        alternative_vcall(hvm_funcs.domain_creation_finished, d);
+}
+
 static inline int
 hvm_guest_x86_mode(struct vcpu *v)
 {
@@ -766,6 +773,11 @@ static inline void hvm_invlpg(const struct vcpu *v, unsigned long linear)
     ASSERT_UNREACHABLE();
 }
 
+static inline void hvm_domain_creation_finished(struct domain *d)
+{
+    ASSERT_UNREACHABLE();
+}
+
 /*
  * Shadow code needs further cleanup to eliminate some HVM-only paths. For
  * now provide the stubs here but assert they will never be reached.
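
Finally, the header changes follow the usual hvm_funcs pattern: the hook is optional (a NULL pointer means the vendor has nothing to do, hence the check before alternative_vcall(), Xen's patched indirect call), and the second copy of the wrapper is the !CONFIG_HVM stub that must never be reached. A toy model of the pattern with a plain indirect call; the names mirror the header, the bodies are illustrative only.

    #include <stddef.h>
    #include <stdio.h>

    struct domain;

    struct hvm_function_table {
        void (*domain_creation_finished)(struct domain *d);
        /* ... many more hooks in the real table ... */
    };

    /* Filled in at boot by vendor code (VMX sets this hook; at the time
     * of this patch SVM leaves it NULL). */
    static struct hvm_function_table hvm_funcs;

    static void vmx_creation_finished(struct domain *d)
    {
        puts("VMX: map the APIC access page");
    }

    /* Wrapper: the hook is optional, so guard before calling.  The real
     * header uses alternative_vcall() to patch away the indirect branch. */
    static inline void hvm_domain_creation_finished(struct domain *d)
    {
        if ( hvm_funcs.domain_creation_finished )
            hvm_funcs.domain_creation_finished(d);
    }

    int main(void)
    {
        hvm_domain_creation_finished(NULL);          /* hook unset: no-op */

        hvm_funcs.domain_creation_finished = vmx_creation_finished;
        hvm_domain_creation_finished(NULL);          /* dispatches to VMX */
        return 0;
    }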