#include <xen/bug.h>
#include <xen/cpumask.h>
+/*
+ * Flush the TLB of the local processor for the single address va.
+ *
+ * SFENCE.VMA with a register rs1 operand orders and invalidates address
+ * translations for that virtual address on the executing hart only.  The
+ * "memory" clobber stops the compiler from sinking prior page-table
+ * writes past the fence.
+ */
+static inline void flush_tlb_one_local(vaddr_t va)
+{
+ asm volatile ( "sfence.vma %0" :: "r" (va) : "memory" );
+}
+
/*
* Filter the given set of CPUs, removing those that definitely flushed their
* TLB since @page_timestamp.
return read_atomic(p);
}
+/* Compose a PTE from a machine frame number and permission/attribute flags. */
+static inline pte_t pte_from_mfn(mfn_t mfn, unsigned int flags)
+{
+    /* The MFN occupies the PPN field; the low bits carry the flags. */
+    return (pte_t){ .pte = (mfn_x(mfn) << PTE_PPN_SHIFT) | flags };
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PAGE_H */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_PMAP_H
+#define ASM_PMAP_H
+
+#include <xen/bug.h>
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <xen/page-size.h>
+
+#include <asm/fixmap.h>
+#include <asm/flushtlb.h>
+#include <asm/system.h>
+
+/*
+ * Map mfn read/write into the fixmap slot used by the pmap machinery.
+ *
+ * The slot must currently be unused; only the local hart's TLB is
+ * flushed, so the mapping must not be relied upon by other CPUs.
+ */
+static inline void __init arch_pmap_map(unsigned int slot, mfn_t mfn)
+{
+    pte_t *entry = &xen_fixmap[slot];
+
+    /* A live mapping here would indicate a pmap bookkeeping bug. */
+    ASSERT(!pte_is_valid(*entry));
+
+    write_pte(entry, pte_from_mfn(mfn, PAGE_HYPERVISOR_RW));
+
+    flush_tlb_one_local(FIXMAP_ADDR(slot));
+}
+
+/*
+ * Tear down the fixmap mapping established by arch_pmap_map() and flush
+ * the (local-only) stale translation.
+ */
+static inline void __init arch_pmap_unmap(unsigned int slot)
+{
+    pte_t pte = {};
+
+    /*
+     * Catch double-unmaps and never-mapped slots in debug builds,
+     * mirroring the ASSERT(!pte_is_valid(...)) in arch_pmap_map().
+     */
+    ASSERT(pte_is_valid(xen_fixmap[slot]));
+
+    write_pte(&xen_fixmap[slot], pte);
+
+    flush_tlb_one_local(FIXMAP_ADDR(slot));
+}
+
+#endif /* ASM_PMAP_H */