pte.pt.table = 1; /* 4k mappings always have this bit set */
pte.pt.xn = 1;
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+ flush_xen_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}
/* Remove a mapping from a fixmap entry */
{
lpae_t pte = {0};
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+ flush_xen_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}
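For context, the two fixmap hunks above pair up at call sites: set_fixmap() installs a 4k entry and clear_fixmap() zeroes it, each ending with the same ranged, inner-shareable flush. A minimal usage sketch, assuming Xen's set_fixmap(map, mfn, flags) signature and a hypothetical FIXMAP_MISC slot (buf and size are placeholders too):

    /* Map a frame at a fixmap slot, copy from it, tear it down. */
    set_fixmap(FIXMAP_MISC, mfn, PAGE_HYPERVISOR_NOCACHE);
    memcpy(buf, (void *)FIXMAP_ADDR(FIXMAP_MISC), size);
    clear_fixmap(FIXMAP_MISC);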
/* Create Xen's mappings of memory.
write_pte(p + i, pte);
pte.pt.base += 1 << LPAE_SHIFT;
}
- flush_xen_data_tlb_local();
+ flush_xen_tlb_local();
}
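The stride in the loop above is worth spelling out: pt.base holds a 4k frame number, so bumping it by 1 << LPAE_SHIFT advances the mapping by one second-level superpage per iteration. A sanity-check sketch, assuming LPAE_SHIFT == 9 as in Xen's LPAE headers:

    /* 1 << LPAE_SHIFT frames of 4k each is exactly one 2MB
     * second-level mapping. */
    BUILD_BUG_ON((PAGE_SIZE << LPAE_SHIFT) != SZ_2M);

The trailing flush can be the local variant, presumably because this path runs before secondary processors are brought online.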
#ifdef CONFIG_DOMAIN_PAGE
* We may not have flushed this specific subpage at map time,
* since we only flush the 4k page, not the superpage
*/
- flush_xen_data_tlb_range_va_local(va, PAGE_SIZE);
+ flush_xen_tlb_range_va_local(va, PAGE_SIZE);
return (void *)va;
}
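A local flush is enough in this path because DOMAIN_PAGE mappings are per-pCPU: no other processor can hold a stale translation for this slot. The usual pairing at call sites, sketched with the common map_domain_page() API (buffer is a placeholder):

    /* Short-lived, per-pCPU mapping of an arbitrary domain frame. */
    void *p = map_domain_page(mfn);
    memcpy(buffer, p, PAGE_SIZE); /* only dereferenced on this pCPU */
    unmap_domain_page(p);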
write_pte(xen_second + second_table_offset(BOOT_FDT_VIRT_START), pte);
write_pte(xen_second + second_table_offset(BOOT_FDT_VIRT_START + SZ_2M),
pte);
- flush_xen_data_tlb_range_va(BOOT_FDT_VIRT_START, BOOT_FDT_SLOT_SIZE);
+ flush_xen_tlb_range_va(BOOT_FDT_VIRT_START, BOOT_FDT_SLOT_SIZE);
}
/*
* before flushing the TLBs.
*/
isb();
- flush_xen_data_tlb_local();
+ flush_xen_tlb_local();
}
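The isb()/flush pairing follows the standard Arm sequence for a translation-regime change: complete the system-register write, invalidate stale TLB entries, then wait for the invalidation to finish before relying on the new translation. A generic arm64 sketch of the whole sequence (ttbr is a hypothetical uint64_t; the hunk's exact context is elided above):

    /* Install a new root table, then drop stale EL2 TLB entries. */
    asm volatile(
        "msr  ttbr0_el2, %0 \n" /* new translation table base */
        "isb                \n" /* complete the sysreg write first */
        "tlbi alle2         \n" /* invalidate all EL2 TLB entries */
        "dsb  sy            \n" /* wait for the invalidation */
        "isb                \n" /* resynchronise instruction fetch */
        : : "r" (ttbr) : "memory");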
extern void switch_ttbr(uint64_t ttbr);
vaddr += FIRST_SIZE;
}
- flush_xen_data_tlb_local();
+ flush_xen_tlb_local();
}
#endif
BUG();
}
}
- flush_xen_data_tlb_range_va(virt, PAGE_SIZE * nr_mfns);
+ flush_xen_tlb_range_va(virt, PAGE_SIZE * nr_mfns);
rc = 0;
}
write_pte(xen_xenmap + i, pte);
}
- flush_xen_data_tlb_local();
+ flush_xen_tlb_local();
}
/* Release all __init and __initdata ranges to be reused */
isb(); /* Synchronize fetched instruction stream. */
}
-/*
- * Flush all hypervisor mappings from the data TLB of the local
- * processor. This is not sufficient when changing code mappings or
- * for self modifying code.
- */
-static inline void flush_xen_data_tlb_local(void)
+/* Flush all hypervisor mappings from the TLB of the local processor. */
+static inline void flush_xen_tlb_local(void)
{
asm volatile("dsb;" /* Ensure preceding are visible */
CMD_CP32(TLBIALLH)
}
/* Flush TLB of local processor for address va. */
-static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
+static inline void __flush_xen_tlb_one_local(vaddr_t va)
{
asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
}
-/* Flush TLB of all processors in the inner-shareable domain for
- * address va. */
-static inline void __flush_xen_data_tlb_one(vaddr_t va)
+/* Flush TLB of all processors in the inner-shareable domain for address va. */
+static inline void __flush_xen_tlb_one(vaddr_t va)
{
asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory");
}
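Note the split: __flush_xen_tlb_one_local() (TLBIMVAH) only affects the executing processor, while __flush_xen_tlb_one() (TLBIMVAHIS) broadcasts to every processor in the inner-shareable domain. Both are bare operations; the barriers are the caller's job, as the range helpers further down show. A sketch of a correct standalone single-page local flush on arm32:

    /* Flush one hypervisor page on this processor only (arm32). */
    static inline void flush_one_page_local(vaddr_t va)
    {
        dsb(sy);                       /* publish the PTE update */
        __flush_xen_tlb_one_local(va); /* TLBIMVAH: local, by VA */
        dsb(sy);                       /* wait for the flush */
        isb();
    }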
isb();
}
-/*
- * Flush all hypervisor mappings from the data TLB of the local
- * processor. This is not sufficient when changing code mappings or
- * for self modifying code.
- */
-static inline void flush_xen_data_tlb_local(void)
+/* Flush all hypervisor mappings from the TLB of the local processor. */
+static inline void flush_xen_tlb_local(void)
{
asm volatile (
"dsb sy;" /* Ensure visibility of PTE writes */
}
/* Flush TLB of local processor for address va. */
-static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
+static inline void __flush_xen_tlb_one_local(vaddr_t va)
{
asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
}
-/* Flush TLB of all processors in the inner-shareable domain for
- * address va. */
-static inline void __flush_xen_data_tlb_one(vaddr_t va)
+/* Flush TLB of all processors in the inner-shareable domain for address va. */
+static inline void __flush_xen_tlb_one(vaddr_t va)
{
asm volatile("tlbi vae2is, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
}
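The arm64 variants differ from arm32 in the operand format: TLBI VAE2/VAE2IS take a page number (VA[55:12] in the low bits of the source register), hence the va>>PAGE_SHIFT above, whereas TLBIMVAH takes the virtual address directly. The broadcast counterpart of the arm32 sketch, again with caller-supplied barriers:

    /* Flush one hypervisor page on all inner-shareable CPUs (arm64). */
    static inline void flush_one_page(vaddr_t va)
    {
        dsb(sy);                 /* publish the PTE update */
        __flush_xen_tlb_one(va); /* tlbi vae2is, page-number operand */
        dsb(sy);                 /* wait for completion on all CPUs */
        isb();
    }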
} while (0)
/*
- * Flush a range of VA's hypervisor mappings from the data TLB of the
- * local processor. This is not sufficient when changing code mappings
- * or for self modifying code.
+ * Flush a range of VA's hypervisor mappings from the TLB of the local
+ * processor.
*/
-static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
- unsigned long size)
+static inline void flush_xen_tlb_range_va_local(vaddr_t va,
+ unsigned long size)
{
- unsigned long end = va + size;
+ vaddr_t end = va + size;
+
dsb(sy); /* Ensure preceding are visible */
while ( va < end )
{
- __flush_xen_data_tlb_one_local(va);
+ __flush_xen_tlb_one_local(va);
va += PAGE_SIZE;
}
dsb(sy); /* Ensure completion of the TLB flush */
}
/*
- * Flush a range of VA's hypervisor mappings from the data TLB of all
- * processors in the inner-shareable domain. This is not sufficient
- * when changing code mappings or for self modifying code.
+ * Flush a range of VA's hypervisor mappings from the TLB of all
+ * processors in the inner-shareable domain.
*/
-static inline void flush_xen_data_tlb_range_va(unsigned long va,
- unsigned long size)
+static inline void flush_xen_tlb_range_va(vaddr_t va,
+ unsigned long size)
{
- unsigned long end = va + size;
+ vaddr_t end = va + size;
+
dsb(sy); /* Ensure preceding are visible */
while ( va < end )
{
- __flush_xen_data_tlb_one(va);
+ __flush_xen_tlb_one(va);
va += PAGE_SIZE;
}
dsb(sy); /* Ensure completion of the TLB flush */
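Both range helpers cost one TLBI per 4k page under a single leading and trailing dsb(sy), so callers batch their PTE writes and flush once. A sketch of that pattern (table, pte, virt and nr are placeholders); for scale, assuming BOOT_FDT_SLOT_SIZE is 2 * SZ_2M as the two FDT write_pte() calls earlier suggest, the FDT remap issues 4MB / 4k = 1024 broadcast TLBIs between its two barriers:

    /* Publish a batch of PTE updates, then issue one ranged flush
     * rather than a full TLB flush or one flush per write_pte(). */
    for ( i = 0; i < nr; i++ )
        write_pte(table + i, pte);
    flush_xen_tlb_range_va(virt, nr * PAGE_SIZE);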