x86/pv: introduce function to populate perdomain area and use it to map Xen GDT
author Roger Pau Monne <roger.pau@citrix.com>
Mon, 21 Oct 2024 09:44:04 +0000 (11:44 +0200)
committer Roger Pau Monne <roger.pau@citrix.com>
Tue, 10 Dec 2024 17:41:54 +0000 (18:41 +0100)
The current code to update the Xen part of the GDT when running a PV guest
relies on caching the direct map address of all the L1 tables used to map the
GDT and LDT, so that entries can be modified.

Introduce a new function that populates the per-domain region, either by using
the recursive linear mappings when the target vCPU is the current one, or by
directly modifying the L1 table of the per-domain region.

Using such a function to populate per-domain addresses removes the need to keep
a reference to the per-domain L1 tables previously used to change the
per-domain mappings.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
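
For illustration only (not part of the commit): a minimal sketch of how a
caller might map a single frame into the per-domain area with the new helper,
modelled on the domain.c hunk below. The wrapper name example_map_gdt_slot is
hypothetical; the helper's signature and the GDT_VIRT_START/gdt_mfn usage come
from the diff itself.

/* Illustrative only: map the Xen GDT frame of 'cpu' into v's per-domain area. */
static void example_map_gdt_slot(const struct vcpu *v, unsigned int cpu)
{
    mfn_t mfn = per_cpu(gdt_mfn, cpu);
    unsigned long va = GDT_VIRT_START(v) +
                       (FIRST_RESERVED_GDT_PAGE << PAGE_SHIFT);

    /* nr == 1: install a single L1 entry mapping mfn at va. */
    populate_perdomain_mapping(v, va, &mfn, 1);
}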
xen/arch/x86/domain.c
xen/arch/x86/include/asm/desc.h
xen/arch/x86/include/asm/mm.h
xen/arch/x86/include/asm/processor.h
xen/arch/x86/mm.c
xen/arch/x86/smpboot.c
xen/arch/x86/traps.c

index 1f680bf176ee5dfddc266118612367c40f62e4b8..0bd0ef7e40f424eef60c30f0ac8c3512372d0d42 100644 (file)
@@ -1953,9 +1953,14 @@ static always_inline bool need_full_gdt(const struct domain *d)
 
 static void update_xen_slot_in_full_gdt(const struct vcpu *v, unsigned int cpu)
 {
-    l1e_write(pv_gdt_ptes(v) + FIRST_RESERVED_GDT_PAGE,
-              !is_pv_32bit_vcpu(v) ? per_cpu(gdt_l1e, cpu)
-                                   : per_cpu(compat_gdt_l1e, cpu));
+    ASSERT(v != current);
+
+    populate_perdomain_mapping(v,
+                               GDT_VIRT_START(v) +
+                               (FIRST_RESERVED_GDT_PAGE << PAGE_SHIFT),
+                               !is_pv_32bit_vcpu(v) ? &per_cpu(gdt_mfn, cpu)
+                                                    : &per_cpu(compat_gdt_mfn,
+                                                               cpu), 1);
 }
 
 static void load_full_gdt(const struct vcpu *v, unsigned int cpu)
index a1e0807d97edd02ab4ac2b0cb59bc0dfa4272da6..33981bfca588143f269888b5d1ea513443560fd3 100644 (file)
@@ -44,6 +44,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <xen/mm-frame.h>
+
 #define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)
 
 /* Fix up the RPL of a guest segment selector. */
@@ -212,10 +214,10 @@ struct __packed desc_ptr {
 
 extern seg_desc_t boot_gdt[];
 DECLARE_PER_CPU(seg_desc_t *, gdt);
-DECLARE_PER_CPU(l1_pgentry_t, gdt_l1e);
+DECLARE_PER_CPU(mfn_t, gdt_mfn);
 extern seg_desc_t boot_compat_gdt[];
 DECLARE_PER_CPU(seg_desc_t *, compat_gdt);
-DECLARE_PER_CPU(l1_pgentry_t, compat_gdt_l1e);
+DECLARE_PER_CPU(mfn_t, compat_gdt_mfn);
 DECLARE_PER_CPU(bool, full_gdt_loaded);
 
 static inline void lgdt(const struct desc_ptr *gdtr)
index 6c7e66ee21ab407082a705fff7dde3a624367a0d..b50a51327b2b6a595362bdac9a7e67926e986def 100644 (file)
@@ -603,6 +603,8 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
 int create_perdomain_mapping(struct domain *d, unsigned long va,
                              unsigned int nr, l1_pgentry_t **pl1tab,
                              struct page_info **ppg);
+void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
+                                mfn_t *mfn, unsigned long nr);
 void destroy_perdomain_mapping(struct domain *d, unsigned long va,
                                unsigned int nr);
 void free_perdomain_mappings(struct domain *d);
index 877651212273b2898c6e79025159276c9bcfe619..aa7768ef9b29eff840c424e506617f6220cab8cd 100644 (file)
@@ -248,6 +248,11 @@ static inline unsigned long cr3_pa(unsigned long cr3)
     return cr3 & X86_CR3_ADDR_MASK;
 }
 
+static inline mfn_t cr3_mfn(unsigned long cr3)
+{
+    return maddr_to_mfn(cr3_pa(cr3));
+}
+
 static inline unsigned int cr3_pcid(unsigned long cr3)
 {
     return IS_ENABLED(CONFIG_PV) ? cr3 & X86_CR3_PCID_MASK : 0;
index 3d5dd22b6c36f209961ca25ab65146cc1397893c..b99bd72e3828402ec1eb89e35f7076d8c54deaa3 100644 (file)
@@ -6423,6 +6423,94 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
     return rc;
 }
 
+void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
+                                mfn_t *mfn, unsigned long nr)
+{
+    l1_pgentry_t *l1tab = NULL, *pl1e;
+    const l3_pgentry_t *l3tab;
+    const l2_pgentry_t *l2tab;
+    struct domain *d = v->domain;
+
+    ASSERT(va >= PERDOMAIN_VIRT_START &&
+           va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
+    ASSERT(!nr || !l3_table_offset(va ^ (va + nr * PAGE_SIZE - 1)));
+
+    /* Ensure loaded page-tables are from current (if current != curr_vcpu). */
+    sync_local_execstate();
+
+    /* Use likely to force the optimization for the fast path. */
+    if ( likely(v == current) )
+    {
+        unsigned int i;
+
+        /* Fast path: get L1 entries using the recursive linear mappings. */
+        pl1e = &__linear_l1_table[l1_linear_offset(va)];
+
+        for ( i = 0; i < nr; i++, pl1e++ )
+        {
+            if ( unlikely(perdomain_l1e_needs_freeing(*pl1e)) )
+            {
+                ASSERT_UNREACHABLE();
+                free_domheap_page(l1e_get_page(*pl1e));
+            }
+            l1e_write(pl1e, l1e_from_mfn(mfn[i], __PAGE_HYPERVISOR_RW));
+        }
+
+        return;
+    }
+
+    ASSERT(d->arch.perdomain_l3_pg);
+    l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
+
+    if ( unlikely(!(l3e_get_flags(l3tab[l3_table_offset(va)]) &
+                    _PAGE_PRESENT)) )
+    {
+        unmap_domain_page(l3tab);
+        gprintk(XENLOG_ERR, "unable to map at VA %lx: L3e not present\n", va);
+        ASSERT_UNREACHABLE();
+        domain_crash(d);
+
+        return;
+    }
+
+    l2tab = map_l2t_from_l3e(l3tab[l3_table_offset(va)]);
+
+    for ( ; nr--; va += PAGE_SIZE, mfn++ )
+    {
+        if ( !l1tab || !l1_table_offset(va) )
+        {
+            const l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
+
+            if ( unlikely(!(l2e_get_flags(*pl2e) & _PAGE_PRESENT)) )
+            {
+                gprintk(XENLOG_ERR, "unable to map at VA %lx: L2e not present\n",
+                        va);
+                ASSERT_UNREACHABLE();
+                domain_crash(d);
+
+                break;
+            }
+
+            unmap_domain_page(l1tab);
+            l1tab = map_l1t_from_l2e(*pl2e);
+        }
+
+        pl1e = &l1tab[l1_table_offset(va)];
+
+        if ( unlikely(perdomain_l1e_needs_freeing(*pl1e)) )
+        {
+            ASSERT_UNREACHABLE();
+            free_domheap_page(l1e_get_page(*pl1e));
+        }
+
+        l1e_write(pl1e, l1e_from_mfn(*mfn, __PAGE_HYPERVISOR_RW));
+    }
+
+    unmap_domain_page(l1tab);
+    unmap_domain_page(l2tab);
+    unmap_domain_page(l3tab);
+}
+
 void destroy_perdomain_mapping(struct domain *d, unsigned long va,
                                unsigned int nr)
 {
index 79a79c54c30403940b211c39f1aa5b4d29511080..a740a6402272134a02b552dcd72e013dc6178b96 100644 (file)
@@ -1059,8 +1059,7 @@ static int cpu_smpboot_alloc(unsigned int cpu)
     if ( gdt == NULL )
         goto out;
     per_cpu(gdt, cpu) = gdt;
-    per_cpu(gdt_l1e, cpu) =
-        l1e_from_pfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
+    per_cpu(gdt_mfn, cpu) = _mfn(virt_to_mfn(gdt));
     memcpy(gdt, boot_gdt, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
     BUILD_BUG_ON(NR_CPUS > 0x10000);
     gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
@@ -1069,8 +1068,7 @@ static int cpu_smpboot_alloc(unsigned int cpu)
     per_cpu(compat_gdt, cpu) = gdt = alloc_xenheap_pages(0, memflags);
     if ( gdt == NULL )
         goto out;
-    per_cpu(compat_gdt_l1e, cpu) =
-        l1e_from_pfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
+    per_cpu(compat_gdt_mfn, cpu) = _mfn(virt_to_mfn(gdt));
     memcpy(gdt, boot_compat_gdt, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
     gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
 #endif
index dd1dc33f9f7ca3191592e10ef224bf44f708f9aa..6421abc3caa57120067ef257d01151ca97e92ee6 100644 (file)
@@ -92,10 +92,10 @@ DEFINE_PER_CPU(uint64_t, efer);
 static DEFINE_PER_CPU(unsigned long, last_extable_addr);
 
 DEFINE_PER_CPU_READ_MOSTLY(seg_desc_t *, gdt);
-DEFINE_PER_CPU_READ_MOSTLY(l1_pgentry_t, gdt_l1e);
+DEFINE_PER_CPU_READ_MOSTLY(mfn_t, gdt_mfn);
 #ifdef CONFIG_PV32
 DEFINE_PER_CPU_READ_MOSTLY(seg_desc_t *, compat_gdt);
-DEFINE_PER_CPU_READ_MOSTLY(l1_pgentry_t, compat_gdt_l1e);
+DEFINE_PER_CPU_READ_MOSTLY(mfn_t, compat_gdt_mfn);
 #endif
 
 /* Master table, used by CPU0. */
@@ -2227,11 +2227,9 @@ void __init trap_init(void)
     }
 
     /* Cache {,compat_}gdt_l1e now that physically relocation is done. */
-    this_cpu(gdt_l1e) =
-        l1e_from_pfn(virt_to_mfn(boot_gdt), __PAGE_HYPERVISOR_RW);
+    this_cpu(gdt_mfn) = _mfn(virt_to_mfn(boot_gdt));
     if ( IS_ENABLED(CONFIG_PV32) )
-        this_cpu(compat_gdt_l1e) =
-            l1e_from_pfn(virt_to_mfn(boot_compat_gdt), __PAGE_HYPERVISOR_RW);
+        this_cpu(compat_gdt_mfn) = _mfn(virt_to_mfn(boot_compat_gdt));
 
     percpu_traps_init();