]> xenbits.xensource.com Git - people/hx242/xen.git/commitdiff
x86: add Persistent Map (PMAP) infrastructure
authorWei Liu <wei.liu2@citrix.com>
Fri, 11 Jan 2019 17:20:21 +0000 (17:20 +0000)
committerHongyan Xia <hongyax@amazon.com>
Wed, 2 Oct 2019 16:16:31 +0000 (17:16 +0100)
The basic idea is like the Persistent Kernel Map (PKMAP) in Linux. We
pre-populate all the relevant page tables before the system is fully
set up.

It is needed to bootstrap map domain page infrastructure -- we need
some way to map pages to set up the mapcache without a direct map.

In order to keep the number of entries minimal, this infrastructure
can only be used by one CPU at a time.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: Hongyan Xia <hongyax@amazon.com>
xen/arch/x86/Makefile
xen/arch/x86/pmap.c [new file with mode: 0644]
xen/include/asm-x86/fixmap.h
xen/include/asm-x86/pmap.h [new file with mode: 0644]

index 2443fd2cc5bd227be0634c3138832d5cff5df3b3..75082196861e4bc0755de310686e1b6471b2bdb9 100644 (file)
@@ -55,6 +55,7 @@ obj-y += pci.o
 obj-y += percpu.o
 obj-y += physdev.o x86_64/physdev.o
 obj-y += platform_hypercall.o x86_64/platform_hypercall.o
+obj-y += pmap.o
 obj-y += psr.o
 obj-y += setup.o
 obj-y += shutdown.o
diff --git a/xen/arch/x86/pmap.c b/xen/arch/x86/pmap.c
new file mode 100644 (file)
index 0000000..0759f00
--- /dev/null
@@ -0,0 +1,122 @@
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <xen/spinlock.h>
+
+#include <asm/bitops.h>
+#include <asm/fixmap.h>
+#include <asm/flushtlb.h>
+#include <asm/processor.h>
+
+/*
+ * Simple mapping infrastructure to map / unmap pages in fixed map.
+ * This is used to set up the page table for mapcache, which is used
+ * by map domain page infrastructure.
+ *
+ * There is a restriction that only one CPU can use this
+ * infrastructure at a time. So this infrastructure _should not_ be
+ * used anywhere else other than the stated purpose above.
+ */
+
+/* Serialises updates to lock_cpu_id and lock_count below. */
+static DEFINE_SPINLOCK(lock_cpu);
+/* Bitmap to track which slot is used */
+static unsigned long inuse;
+
+/* which pCPU is using PMAP at the moment; ~0 means no owner yet */
+static unsigned int lock_cpu_id = ~0;
+/* how many entries has the current pCPU mapped */
+static unsigned int lock_count;
+
+/*
+ * Acquire (recursively) exclusive PMAP ownership for the current pCPU.
+ * Spins until no other pCPU holds any PMAP mapping, then records this
+ * pCPU as owner and bumps the reference count.
+ */
+static void pmap_cpu_up(void)
+{
+    int success = 0;
+    unsigned int cpu_id = smp_processor_id();
+
+    do
+    {
+        /*
+         * Busy-wait outside the lock until the infrastructure looks free
+         * (or already owned by us).  cpu_relax() is a compiler barrier,
+         * forcing lock_cpu_id / lock_count to be re-read on every
+         * iteration; without it the compiler may hoist the loads out of
+         * the loop and spin forever (and the PAUSE it emits is kinder to
+         * SMT siblings).
+         */
+        while ( cpu_id != lock_cpu_id && lock_count != 0 )
+            cpu_relax();
+        spin_lock(&lock_cpu);
+        /* Re-check under the lock before claiming ownership. */
+        if ( cpu_id == lock_cpu_id || lock_count == 0 )
+        {
+            lock_cpu_id = cpu_id;
+            lock_count++;
+            success = 1;
+        }
+        spin_unlock(&lock_cpu);
+    } while ( !success );
+}
+
+/*
+ * Drop one reference on the current pCPU's PMAP ownership.  Must only
+ * be called by the pCPU that currently owns the infrastructure.
+ */
+static void pmap_cpu_down(void)
+{
+    spin_lock(&lock_cpu);
+
+    /* There must be an outstanding mapping, and it must be ours. */
+    ASSERT(lock_count);
+    ASSERT(lock_cpu_id == smp_processor_id());
+
+    --lock_count;
+
+    spin_unlock(&lock_cpu);
+}
+
+/*
+ * Map @mfn into a free PMAP fixmap slot and return the linear address
+ * of the mapping.  Panics if all NUM_FIX_PMAP slots are in use.
+ *
+ * PMAP ownership (taken via pmap_cpu_up()) is held until the matching
+ * pmap_unmap(), so only one pCPU can have live PMAP mappings at a time.
+ * Interrupts are disabled around the slot-bitmap / PTE manipulation so
+ * an interrupt handler cannot re-enter on this pCPU.
+ */
+void *pmap_map(mfn_t mfn)
+{
+    unsigned long flags;
+    unsigned int idx;
+    void *linear = NULL;
+    enum fixed_addresses slot;
+    l1_pgentry_t *pl1e;
+
+    ASSERT(!in_irq());
+
+    local_irq_save(flags);
+    pmap_cpu_up();
+
+    /* Find a free slot; all-used means a caller leaked a mapping. */
+    idx = find_first_zero_bit(&inuse, NUM_FIX_PMAP);
+    if ( idx == NUM_FIX_PMAP )
+        panic("Out of PMAP slots\n");
+
+    __set_bit(idx, &inuse);
+
+    slot = idx + FIX_PMAP_BEGIN;
+    ASSERT(slot >= FIX_PMAP_BEGIN && slot <= FIX_PMAP_END);
+
+    /* Fixmap entries are allocated downwards from the top of l1_fixmap. */
+    pl1e = &l1_fixmap[L1_PAGETABLE_ENTRIES - 1 - slot];
+    l1e_write_atomic(pl1e, l1e_from_mfn(mfn, PAGE_HYPERVISOR));
+    linear = (void *)__fix_to_virt(slot);
+    /* Flush any stale TLB entry for this slot before handing it out. */
+    flush_tlb_one_local(linear);
+
+    local_irq_restore(flags);
+    return linear;
+}
+
+/*
+ * Tear down a mapping previously returned by pmap_map(), releasing the
+ * fixmap slot and dropping one PMAP ownership reference.
+ */
+void pmap_unmap(void *p)
+{
+    unsigned long flags;
+    unsigned int idx;
+    l1_pgentry_t *pl1e;
+    enum fixed_addresses slot = __virt_to_fix((unsigned long)p);
+
+    ASSERT(!in_irq());
+    /* @p must lie within the PMAP portion of the fixmap. */
+    ASSERT(slot >= FIX_PMAP_BEGIN && slot <= FIX_PMAP_END);
+
+    idx = slot - FIX_PMAP_BEGIN;
+    local_irq_save(flags);
+
+    __clear_bit(idx, &inuse);
+    /* Same top-down fixmap indexing as in pmap_map(). */
+    pl1e = &l1_fixmap[L1_PAGETABLE_ENTRIES - 1 - slot];
+    l1e_write_atomic(pl1e, l1e_from_mfn(_mfn(0), 0));
+    flush_tlb_one_local(p);
+
+    pmap_cpu_down();
+    local_irq_restore(flags);
+}
+
+static void __maybe_unused build_assertions(void)
+{
+    /*
+     * All PMAP slots must fit in the single-word 'inuse' bitmap.
+     * sizeof() yields bytes, so the multiplier must be bits-per-byte;
+     * the previous BITS_PER_LONG multiplier overstated the bitmap's
+     * capacity by a factor of sizeof(long).
+     */
+    BUILD_BUG_ON(sizeof(inuse) * BITS_PER_BYTE < NUM_FIX_PMAP);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
index 9fb2f479468474714aa27f5f41a8366a83910e41..85885f895060ebeb507977f8117bc24e07f679cb 100644 (file)
@@ -23,6 +23,7 @@
 #include <xen/kexec.h>
 #include <asm/apicdef.h>
 #include <asm/msi.h>
+#include <asm/pmap.h>
 #include <acpi/apei.h>
 
 /*
@@ -48,6 +49,8 @@ enum fixed_addresses {
     FIX_XEN_SHARED_INFO,
 #endif /* CONFIG_XEN_GUEST */
     /* Everything else should go further down. */
+    FIX_PMAP_BEGIN,
+    FIX_PMAP_END = FIX_PMAP_BEGIN + NUM_FIX_PMAP - 1,
     FIX_APIC_BASE,
     FIX_IO_APIC_BASE_0,
     FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
diff --git a/xen/include/asm-x86/pmap.h b/xen/include/asm-x86/pmap.h
new file mode 100644 (file)
index 0000000..790cd71
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef __X86_PMAP_H__
+#define __X86_PMAP_H__
+
+/* Large enough for mapping 5 levels of page tables with some headroom */
+#define NUM_FIX_PMAP 8
+
+/*
+ * Map @mfn into a free PMAP fixmap slot, returning the linear address.
+ * Only one pCPU may hold PMAP mappings at a time; must not be called
+ * from interrupt context.
+ */
+void *pmap_map(mfn_t mfn);
+/* Unmap an address returned by pmap_map() and release its slot. */
+void pmap_unmap(void *p);
+
+#endif /* __X86_PMAP_H__ */