pte.pt.table = 1; /* 4k mappings always have this bit set */
pte.pt.xn = 1;
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_range_va_local(FIXMAP_ADDR(map), PAGE_SIZE);
+ flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}
/* Remove a mapping from a fixmap entry */
void clear_fixmap(unsigned map)
{
lpae_t pte = {0};
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_range_va_local(FIXMAP_ADDR(map), PAGE_SIZE);
+ flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}
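For context, a minimal caller sketch. With the inner-shareable flushes
above, recycling a fixmap slot on one CPU can no longer leave a stale
translation in another CPU's TLB. FIXMAP_MISC and DEV_SHARED are
illustrative slot/attribute names, not taken from this patch:

    /* Sketch: temporarily map one machine frame and read a word from it.
     * FIXMAP_MISC and DEV_SHARED are illustrative names. */
    static uint32_t peek_word(unsigned long mfn, unsigned int offset)
    {
        uint32_t *p = (uint32_t *)(FIXMAP_ADDR(FIXMAP_MISC) + offset);
        uint32_t val;

        set_fixmap(FIXMAP_MISC, mfn, DEV_SHARED);
        val = *p;
        clear_fixmap(FIXMAP_MISC);

        return val;
    }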
void __init remove_early_mappings(void)
{
lpae_t pte = {0};
write_pte(xen_second + second_table_offset(BOOT_FDT_VIRT_START), pte);
- flush_xen_data_tlb_range_va_local(BOOT_FDT_VIRT_START, SECOND_SIZE);
+ flush_xen_data_tlb_range_va(BOOT_FDT_VIRT_START, SECOND_SIZE);
}
extern void relocate_xen(uint64_t ttbr, void *src, void *dst, size_t len);
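--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h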
asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
}
+/* Flush TLB of all processors in the inner-shareable domain for
+ * address va. */
+static inline void __flush_xen_data_tlb_one(vaddr_t va)
+{
+    asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory");
+}
+
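As a reading aid, a sketch (not part of the patch) of the barrier
discipline a caller of __flush_xen_data_tlb_one must supply, since the
helper issues only the TLBI; the generic range helper added to the
common page.h further below does exactly this:

    /* Sketch only: flush one VA on all CPUs with the required barriers. */
    static inline void flush_one_va_example(vaddr_t va)
    {
        dsb(sy);                      /* make prior PTE writes visible */
        __flush_xen_data_tlb_one(va); /* broadcast the invalidate */
        dsb(sy);                      /* wait for completion on all CPUs */
        isb();                        /* resynchronise this processor */
    }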
/* Ask the MMU to translate a VA for us */
static inline uint64_t __va_to_par(vaddr_t va)
{
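--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h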
asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
}
+/* Flush TLB of all processors in the inner-shareable domain for
+ * address va. */
+static inline void __flush_xen_data_tlb_one(vaddr_t va)
+{
+    asm volatile("tlbi vae2is, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
+}
+
/* Ask the MMU to translate a VA for us */
static inline uint64_t __va_to_par(vaddr_t va)
{
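--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h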
isb();
}
+/*
+ * Flush the hypervisor mappings for a range of virtual addresses from
+ * the data TLBs of all processors in the inner-shareable domain. This
+ * is not sufficient when changing code mappings or for self-modifying
+ * code.
+ */
+static inline void flush_xen_data_tlb_range_va(unsigned long va,
+                                               unsigned long size)
+{
+    unsigned long end = va + size;
+    dsb(sy); /* Ensure preceding writes are visible */
+    while ( va < end )
+    {
+        __flush_xen_data_tlb_one(va);
+        va += PAGE_SIZE;
+    }
+    dsb(sy); /* Ensure completion of the TLB flush */
+    isb();
+}
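A hypothetical caller sketch (unmap_hyp_pages_example and its
parameters are illustrative; write_pte, lpae_t and PAGE_SIZE are
Xen's): clear a run of hypervisor PTEs, then invalidate the whole
range on every CPU with a single call:

    /* Sketch only: tear down nr contiguous 4K mappings starting at va. */
    static void unmap_hyp_pages_example(lpae_t *pte, unsigned long va,
                                        unsigned int nr)
    {
        unsigned int i;
        lpae_t empty = {0};

        for ( i = 0; i < nr; i++ )
            write_pte(pte + i, empty);

        flush_xen_data_tlb_range_va(va, nr * PAGE_SIZE);
    }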
+
/* Flush the dcache for an entire page. */
void flush_page_to_ram(unsigned long mfn);