x86/mm: introduce per-vCPU L3 page-table
author     Roger Pau Monne <roger.pau@citrix.com>
           Wed, 27 Nov 2024 17:02:01 +0000 (18:02 +0100)
committer  Roger Pau Monne <roger.pau@citrix.com>
           Tue, 10 Dec 2024 17:42:33 +0000 (18:42 +0100)
Such a table is to be used in the per-domain slot when running with Address
Space Isolation (ASI) enabled for the domain.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
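
For illustration, a minimal sketch (not part of the patch) of the L3
selection logic that the diff below repeats at each per-domain mapping
site; the helper name perdomain_l3() is hypothetical:

    /* Hypothetical helper: pick the L3 that backs the per-domain slot. */
    static struct page_info *perdomain_l3(const struct vcpu *v)
    {
        const struct domain *d = v->domain;

        /* With ASI each vCPU has a private L3; otherwise it's domain-wide. */
        return d->arch.asi ? v->arch.pervcpu_l3_pg
                           : d->arch.perdomain_l3_pg;
    }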
xen/arch/x86/include/asm/domain.h
xen/arch/x86/include/asm/mm.h
xen/arch/x86/mm.c
xen/arch/x86/mm/hap/hap.c
xen/arch/x86/mm/shadow/hvm.c
xen/arch/x86/mm/shadow/multi.c
xen/arch/x86/pv/dom0_build.c
xen/arch/x86/pv/domain.c

diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index e35b11ef260261ca8b986ee25303b9e9aa283af6..c0d4e54fe6524d1312b0ca464d3a806fbb86dbdf 100644
@@ -669,6 +669,9 @@ struct arch_vcpu
 
     struct vcpu_msrs *msrs;
 
+    /* ASI: per-vCPU L3 table to use in the L4 per-domain slot. */
+    struct page_info *pervcpu_l3_pg;
+
     struct {
         bool next_interrupt_enabled;
     } monitor;
diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
index f501e5e115ffe6908fe1be821eda17f5bcb9f939..f79d1594fde48c8476c8982c7d16c0d4e70ac4b3 100644
@@ -375,7 +375,7 @@ int devalidate_page(struct page_info *page, unsigned long type,
 
 void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d);
 void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
-                       const struct domain *d, mfn_t sl4mfn, bool ro_mpt);
+                       const struct vcpu *v, mfn_t sl4mfn, bool ro_mpt);
 bool fill_ro_mpt(mfn_t mfn);
 void zap_ro_mpt(mfn_t mfn);
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 52ab46e9d3ceceff679cac14338d37c01ca6ed3b..e209ba3d276bcc8c93f9fc021a4ca27b19b139b7 100644
@@ -1658,8 +1658,9 @@ static int promote_l3_table(struct page_info *page)
  * extended directmap.
  */
 void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
-                       const struct domain *d, mfn_t sl4mfn, bool ro_mpt)
+                       const struct vcpu *v, mfn_t sl4mfn, bool ro_mpt)
 {
+    const struct domain *d = v->domain;
     /*
      * PV vcpus need a shortened directmap.  HVM and Idle vcpus get the full
      * directmap.
@@ -1687,7 +1688,9 @@ void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
 
     /* Slot 260: Per-domain mappings. */
     l4t[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
+        l4e_from_page(d->arch.asi ? v->arch.pervcpu_l3_pg
+                                  : d->arch.perdomain_l3_pg,
+                      __PAGE_HYPERVISOR_RW);
 
     /* Slot 4: Per-domain mappings mirror. */
     BUILD_BUG_ON(IS_ENABLED(CONFIG_PV32) &&
@@ -1842,8 +1845,15 @@ static int promote_l4_table(struct page_info *page)
 
     if ( !rc )
     {
+        /*
+         * Use vCPU#0 unconditionally.  When not running with ASI enabled the
+         * per-domain table is shared between all vCPUs, so it doesn't matter
+         * which vCPU gets passed to init_xen_l4_slots().  When running with
+         * ASI enabled this L4 will not be used, as a shadow per-vCPU L4 is
+         * used instead.
+         */
         init_xen_l4_slots(pl4e, l4mfn,
-                          d, INVALID_MFN, VM_ASSIST(d, m2p_strict));
+                          d->vcpu[0], INVALID_MFN, VM_ASSIST(d, m2p_strict));
         atomic_inc(&d->arch.pv.nr_l4_pages);
     }
     unmap_domain_page(pl4e);
@@ -6313,14 +6323,17 @@ int create_perdomain_mapping(struct vcpu *v, unsigned long va,
     ASSERT(va >= PERDOMAIN_VIRT_START &&
            va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
 
-    if ( !d->arch.perdomain_l3_pg )
+    if ( !v->arch.pervcpu_l3_pg && !d->arch.perdomain_l3_pg )
     {
         pg = alloc_domheap_page(d, MEMF_no_owner);
         if ( !pg )
             return -ENOMEM;
         l3tab = __map_domain_page(pg);
         clear_page(l3tab);
-        d->arch.perdomain_l3_pg = pg;
+        if ( d->arch.asi )
+            v->arch.pervcpu_l3_pg = pg;
+        else
+            d->arch.perdomain_l3_pg = pg;
         if ( !nr )
         {
             unmap_domain_page(l3tab);
@@ -6330,7 +6343,8 @@ int create_perdomain_mapping(struct vcpu *v, unsigned long va,
     else if ( !nr )
         return 0;
     else
-        l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
+        l3tab = __map_domain_page(d->arch.asi ? v->arch.pervcpu_l3_pg
+                                              : d->arch.perdomain_l3_pg);
 
     ASSERT(!l3_table_offset(va ^ (va + nr * PAGE_SIZE - 1)));
 
@@ -6436,8 +6450,9 @@ void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
         return;
     }
 
-    ASSERT(d->arch.perdomain_l3_pg);
-    l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
+    ASSERT(d->arch.perdomain_l3_pg || v->arch.pervcpu_l3_pg);
+    l3tab = __map_domain_page(d->arch.asi ? v->arch.pervcpu_l3_pg
+                                          : d->arch.perdomain_l3_pg);
 
     if ( unlikely(!(l3e_get_flags(l3tab[l3_table_offset(va)]) &
                     _PAGE_PRESENT)) )
@@ -6498,7 +6513,7 @@ void destroy_perdomain_mapping(const struct vcpu *v, unsigned long va,
            va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
     ASSERT(!nr || !l3_table_offset(va ^ (va + nr * PAGE_SIZE - 1)));
 
-    if ( !d->arch.perdomain_l3_pg )
+    if ( !d->arch.perdomain_l3_pg && !v->arch.pervcpu_l3_pg )
         return;
 
     /* Ensure loaded page-tables are from current (if current != curr_vcpu). */
@@ -6520,7 +6535,8 @@ void destroy_perdomain_mapping(const struct vcpu *v, unsigned long va,
         return;
     }
 
-    l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
+    l3tab = __map_domain_page(d->arch.asi ? v->arch.pervcpu_l3_pg
+                                          : d->arch.perdomain_l3_pg);
     pl3e = l3tab + l3_table_offset(va);
 
     if ( l3e_get_flags(*pl3e) & _PAGE_PRESENT )
@@ -6565,10 +6581,11 @@ void free_perdomain_mappings(struct vcpu *v)
     l3_pgentry_t *l3tab;
     unsigned int i;
 
-    if ( !d->arch.perdomain_l3_pg )
+    if ( !v->arch.pervcpu_l3_pg && !d->arch.perdomain_l3_pg )
         return;
 
-    l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
+    l3tab = __map_domain_page(d->arch.asi ? v->arch.pervcpu_l3_pg
+                                          : d->arch.perdomain_l3_pg);
 
     for ( i = 0; i < PERDOMAIN_SLOTS; ++i)
         if ( l3e_get_flags(l3tab[i]) & _PAGE_PRESENT )
@@ -6602,8 +6619,10 @@ void free_perdomain_mappings(struct vcpu *v)
         }
 
     unmap_domain_page(l3tab);
-    free_domheap_page(d->arch.perdomain_l3_pg);
+    free_domheap_page(d->arch.asi ? v->arch.pervcpu_l3_pg
+                                  : d->arch.perdomain_l3_pg);
     d->arch.perdomain_l3_pg = NULL;
+    v->arch.pervcpu_l3_pg = NULL;
 }
 
 static void write_sss_token(unsigned long *ptr)
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index ec5043a8aa9ea740af46b1d67c8c09eb35d25486..c7d9bf7c71bf0285db8427d7f50c520ca141807a 100644
@@ -402,7 +402,7 @@ static mfn_t hap_make_monitor_table(struct vcpu *v)
     m4mfn = page_to_mfn(pg);
     l4e = map_domain_page(m4mfn);
 
-    init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);
+    init_xen_l4_slots(l4e, m4mfn, v, INVALID_MFN, false);
     unmap_domain_page(l4e);
 
     return m4mfn;
diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c
index c16f3b3adf32a2a0cf5076d2726feb707e08f8e9..68d80210596d968971f1b1df33d7bf0c56881bad 100644
@@ -758,7 +758,7 @@ mfn_t sh_make_monitor_table(const struct vcpu *v, unsigned int shadow_levels)
      * shadow-linear mapping will either be inserted below when creating
      * lower level monitor tables, or later in sh_update_cr3().
      */
-    init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);
+    init_xen_l4_slots(l4e, m4mfn, v, INVALID_MFN, false);
 
     if ( shadow_levels < 4 )
     {
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 10ddc408ff73a316477e9be325ffc9b020358bc7..a1f8147e197a819331c59f9c28628433067faff2 100644
@@ -973,7 +973,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
 
             BUILD_BUG_ON(sizeof(l4_pgentry_t) != sizeof(shadow_l4e_t));
 
-            init_xen_l4_slots(l4t, gmfn, d, smfn, (!is_pv_32bit_domain(d) &&
+            init_xen_l4_slots(l4t, gmfn, v, smfn, (!is_pv_32bit_domain(d) &&
                                                    VM_ASSIST(d, m2p_strict)));
             unmap_domain_page(l4t);
         }
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index f54d1da5c6f4e482c0bbd7e8f46c352b68ef028b..5081c19b9a9a7e74bbb20a86c9f92abd2e5afb4f 100644
@@ -737,7 +737,7 @@ static int __init dom0_construct(struct boot_info *bi, struct domain *d)
         l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
         clear_page(l4tab);
         init_xen_l4_slots(l4tab, _mfn(virt_to_mfn(l4start)),
-                          d, INVALID_MFN, true);
+                          d->vcpu[0], INVALID_MFN, true);
         v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
     }
     else
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 5bda168eadff9be56eb5f00a6275639ce942df80..8d242805160759f4ccc702bcfe3a919c51059ec5 100644
@@ -125,7 +125,7 @@ static int setup_compat_l4(struct vcpu *v)
     mfn = page_to_mfn(pg);
     l4tab = map_domain_page(mfn);
     clear_page(l4tab);
-    init_xen_l4_slots(l4tab, mfn, v->domain, INVALID_MFN, false);
+    init_xen_l4_slots(l4tab, mfn, v, INVALID_MFN, false);
     unmap_domain_page(l4tab);
 
     /* This page needs to look like a pagetable so that it can be shadowed */