pte.pt.table = 1; /* 4k mappings always have this bit set */
pte.pt.xn = 1;
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+ flush_xen_data_tlb_range_va_local(FIXMAP_ADDR(map), PAGE_SIZE);
}
/* Remove a mapping from a fixmap entry */
{
lpae_t pte = {0};
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+ flush_xen_data_tlb_range_va_local(FIXMAP_ADDR(map), PAGE_SIZE);
}
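For illustration, a caller-side sketch of how this pair of fixmap helpers is typically used. The set_fixmap()/clear_fixmap() signatures and the FIXMAP_MISC slot are assumptions made for the example, not part of this patch; WRITEALLOC and FIXMAP_ADDR() are the names already used above.

    /* Hypothetical caller: peek at one word of a RAM frame through a
     * spare fixmap slot.  Both helpers finish with the local-only
     * range flush shown above. */
    static uint32_t peek_frame_word(unsigned long mfn, unsigned int offset)
    {
        uint32_t val;

        set_fixmap(FIXMAP_MISC, mfn, WRITEALLOC);
        val = *(volatile uint32_t *)(FIXMAP_ADDR(FIXMAP_MISC) + offset);
        clear_fixmap(FIXMAP_MISC);

        return val;
    }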
#ifdef CONFIG_DOMAIN_PAGE
* We may not have flushed this specific subpage at map time,

* since we only flush the 4k page, not the superpage
*/
- flush_xen_data_tlb_range_va(va, PAGE_SIZE);
+ flush_xen_data_tlb_range_va_local(va, PAGE_SIZE);
return (void *)va;
}
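A minimal use of the domheap mapping whose flush is shown above, assuming the usual map_domain_page()/unmap_domain_page() pair; the surrounding function is illustrative only.

    /* Illustrative: scrub one domain frame through a temporary
     * per-CPU domheap mapping.  The VA handed back is the one the
     * local range flush above has just covered. */
    static void zero_domain_frame(unsigned long mfn)
    {
        void *p = map_domain_page(mfn);

        memset(p, 0, PAGE_SIZE);
        unmap_domain_page(p);
    }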
{
lpae_t pte = {0};
write_pte(xen_second + second_table_offset(BOOT_FDT_VIRT_START), pte);
- flush_xen_data_tlb_range_va(BOOT_FDT_VIRT_START, SECOND_SIZE);
+ flush_xen_data_tlb_range_va_local(BOOT_FDT_VIRT_START, SECOND_SIZE);
}
extern void relocate_xen(uint64_t ttbr, void *src, void *dst, size_t len);
dest_va = BOOT_RELOC_VIRT_START;
pte = mfn_to_xen_entry(xen_paddr >> PAGE_SHIFT, WRITEALLOC);
write_pte(xen_second + second_table_offset(dest_va), pte);
- flush_xen_data_tlb_range_va(dest_va, SECOND_SIZE);
+ flush_xen_data_tlb_range_va_local(dest_va, SECOND_SIZE);
/* Calculate virt-to-phys offset for the new location */
phys_offset = xen_paddr - (unsigned long) _start;
dest_va = BOOT_RELOC_VIRT_START;
pte = mfn_to_xen_entry(xen_paddr >> PAGE_SHIFT, WRITEALLOC);
write_pte(boot_second + second_table_offset(dest_va), pte);
- flush_xen_data_tlb_range_va(dest_va, SECOND_SIZE);
+ flush_xen_data_tlb_range_va_local(dest_va, SECOND_SIZE);
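With both the running and the boot second-level tables now pointing BOOT_RELOC_VIRT_START at the new physical location, the copy itself is one call to relocate_xen() as declared above; roughly as follows (the exact argument expressions are an assumption, not quoted from the patch).

    /* Sketch: copy the live image to its final home, using the new
     * top-level table at 'ttbr' (computed just below). */
    relocate_xen(ttbr, _start, (void *)dest_va, _end - _start);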
#ifdef CONFIG_ARM_64
ttbr = (uintptr_t) xen_pgtable + phys_offset;
#else
/* From now on, no mapping may be both writable and executable. */
WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
/* Flush everything after setting WXN bit. */
- flush_xen_text_tlb();
+ flush_xen_text_tlb_local();
#ifdef CONFIG_ARM_32
per_cpu(xen_pgtable, 0) = cpu0_pgtable;
{
/* From now on, no mapping may be both writable and executable. */
WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
- flush_xen_text_tlb();
+ flush_xen_text_tlb_local();
}
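The WXN sequence is byte-for-byte the same on the boot CPU and on secondary CPUs, so it could live in one small helper; a sketch, with a hypothetical name this patch does not introduce:

    /* Hypothetical helper: forbid writable+executable mappings on the
     * calling CPU, then drop any stale translations it may hold. */
    static void enforce_wxn_local(void)
    {
        /* From now on, no mapping may be both writable and executable. */
        WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
        /* Flush everything after setting WXN bit. */
        flush_xen_text_tlb_local();
    }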
/* Create Xen's mappings of memory.
write_pte(p + i, pte);
pte.pt.base += 1 << LPAE_SHIFT;
}
- flush_xen_data_tlb();
+ flush_xen_data_tlb_local();
}
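The loop above stamps out consecutive entries from a single template PTE, advancing the base field (a frame number) by 1 << LPAE_SHIFT, i.e. 512 4k frames or one 2MB block, per entry. A self-contained sketch of the same pattern follows; the function name is illustrative, every other identifier appears in the patch.

    /* Map 'count' contiguous 2MB blocks starting at base_mfn with
     * consecutive second-level block entries, then flush the local
     * data TLB so the new mappings are picked up. */
    static void fill_contig_blocks(lpae_t *second, unsigned long base_mfn,
                                   unsigned int count)
    {
        lpae_t pte = mfn_to_xen_entry(base_mfn, WRITEALLOC);
        unsigned int i;

        for ( i = 0; i < count; i++ )
        {
            write_pte(second + i, pte);
            pte.pt.base += 1 << LPAE_SHIFT;   /* next 2MB: +512 frames */
        }

        flush_xen_data_tlb_local();
    }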
#ifdef CONFIG_ARM_32
vaddr += FIRST_SIZE;
}
- flush_xen_data_tlb();
+ flush_xen_data_tlb_local();
}
#endif
BUG();
}
}
- flush_xen_data_tlb_range_va(virt, PAGE_SIZE * nr_mfns);
+ flush_xen_data_tlb_range_va_local(virt, PAGE_SIZE * nr_mfns);
rc = 0;
}
write_pte(xen_xenmap + i, pte);
}
- flush_xen_text_tlb();
+ flush_xen_text_tlb_local();
}
/* Release all __init and __initdata ranges to be reused */
#define __clean_and_invalidate_xen_dcache_one(R) STORE_CP32(R, DCCIMVAC)
/*
- * Flush all hypervisor mappings from the TLB and branch predictor.
+ * Flush all hypervisor mappings from the TLB and branch predictor of
+ * the local processor.
+ *
* This is needed after changing Xen code mappings.
*
* The caller needs to issue the necessary DSB and D-cache flushes
- * before calling flush_xen_text_tlb.
+ * before calling flush_xen_text_tlb_local.
*/
-static inline void flush_xen_text_tlb(void)
+static inline void flush_xen_text_tlb_local(void)
{
register unsigned long r0 asm ("r0");
asm volatile (
}
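Put differently, a caller changing one of Xen's own code mappings is responsible for the ordering before the flush; a fragment-level sketch using only names from this patch:

    /* Required ordering when an executable Xen mapping changes: the
     * PTE update (and, if the instructions themselves were rewritten,
     * a D-cache clean of that range) must complete before the flush. */
    write_pte(xen_xenmap + i, pte);   /* as in the xen_xenmap loop above */
    dsb(sy);                          /* make the update visible first */
    flush_xen_text_tlb_local();       /* local TLB, I-cache, branch predictor */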
/*
- * Flush all hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
+ * Flush all hypervisor mappings from the data TLB of the local
+ * processor. This is not sufficient when changing code mappings or
+ * for self modifying code.
*/
-static inline void flush_xen_data_tlb(void)
+static inline void flush_xen_data_tlb_local(void)
{
register unsigned long r0 asm ("r0");
asm volatile("dsb;" /* Ensure preceding are visible */
}
/*
- * Flush a range of VA's hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
+ * Flush a range of VA's hypervisor mappings from the data TLB of the
+ * local processor. This is not sufficient when changing code mappings
+ * or for self modifying code.
*/
-static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
+static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
+ unsigned long size)
{
unsigned long end = va + size;
dsb(sy); /* Ensure preceding are visible */
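The elided body walks the range page by page. On arm32 that presumably comes down to one TLBIMVAH (invalidate hypervisor TLB entry by VA) per page between barriers; the sketch below illustrates that shape and is not a quotation of the upstream body.

    /* Illustrative local flush of the hypervisor TLB for [va, va+size). */
    static inline void flush_range_sketch(unsigned long va, unsigned long size)
    {
        unsigned long end = va + size;

        dsb(sy);            /* Ensure preceding PTE writes are visible */
        while ( va < end )
        {
            asm volatile(STORE_CP32(0, TLBIMVAH)   /* this VA only */
                         : : "r" (va) : "memory");
            va += PAGE_SIZE;
        }
        dsb(sy);            /* Ensure completion of the TLB flush */
        isb();
    }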
#define __clean_and_invalidate_xen_dcache_one(R) "dc civac, %" #R ";"
/*
- * Flush all hypervisor mappings from the TLB
+ * Flush all hypervisor mappings from the TLB of the local processor.
+ *
* This is needed after changing Xen code mappings.
*
* The caller needs to issue the necessary DSB and D-cache flushes
- * before calling flush_xen_text_tlb.
+ * before calling flush_xen_text_tlb_local.
*/
-static inline void flush_xen_text_tlb(void)
+static inline void flush_xen_text_tlb_local(void)
{
asm volatile (
"isb;" /* Ensure synchronization with previous changes to text */
}
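For comparison, on arm64 a local text flush at EL2 is built from a TLB invalidate plus an I-cache invalidate between barriers; again a hedged sketch of the shape, not a claim about the exact body being renamed here.

    /* Illustrative local EL2 text flush on arm64. */
    static inline void text_flush_sketch(void)
    {
        asm volatile (
            "isb;"          /* Synchronize with previous changes to text */
            "tlbi alle2;"   /* Invalidate all EL2 TLB entries (this CPU) */
            "ic   iallu;"   /* Invalidate the instruction cache */
            "dsb  sy;"      /* Wait for the invalidations to complete */
            "isb;"
            : : : "memory");
    }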
/*
- * Flush all hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
+ * Flush all hypervisor mappings from the data TLB of the local
+ * processor. This is not sufficient when changing code mappings or
+ * for self modifying code.
*/
-static inline void flush_xen_data_tlb(void)
+static inline void flush_xen_data_tlb_local(void)
{
asm volatile (
"dsb sy;" /* Ensure visibility of PTE writes */
}
/*
- * Flush a range of VA's hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
+ * Flush a range of VA's hypervisor mappings from the data TLB of the
+ * local processor. This is not sufficient when changing code mappings
+ * or for self modifying code.
*/
-static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
+static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
+ unsigned long size)
{
unsigned long end = va + size;
dsb(sy); /* Ensure preceding are visible */
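Freeing the unsuffixed names is presumably what makes room for cross-CPU variants later on. As an illustration only (an assumed follow-up, not part of this patch), an inner-shareable flavour of the range flush on arm64 would broadcast the invalidation to every CPU in the inner-shareable domain:

    /* Hypothetical all-CPU counterpart: inner-shareable invalidate by VA. */
    static inline void flush_xen_data_tlb_range_va(unsigned long va,
                                                   unsigned long size)
    {
        unsigned long end = va + size;

        dsb(sy);                    /* Ensure preceding are visible */
        while ( va < end )
        {
            asm volatile("tlbi vae2is, %0;"      /* broadcast, this VA */
                         : : "r" (va >> PAGE_SHIFT) : "memory");
            va += PAGE_SIZE;
        }
        dsb(sy);                    /* Ensure completion of the TLB flush */
        isb();
    }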