On ARM, flush_tlb_mask is used in the common code:
- alloc_heap_pages: the flush is only called if the newly allocated
page was used by a domain before. So we only need to flush non-secure
non-hyp TLB entries, inner-shareable.
- common/grant-table.c: every call to flush_tlb_mask is made with
the current domain. A TLB flush by current VMID, inner-shareable, is enough.
The current code only flushes the hypervisor TLB on the current PCPU. For now,
flush non-secure non-hyp TLBs on every PCPU.
Signed-off-by: Julien Grall <julien.grall@linaro.org>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
#include <asm/cpregs.h>
#include <asm/page.h>
#include <asm/gic.h>
+#include <asm/flushtlb.h>
void flush_tlb_mask(const cpumask_t *mask)
{
/* No need to IPI other processors on ARM, the processor takes care of it. */
- flush_xen_data_tlb();
+ flush_tlb_all();
}
void smp_send_event_check_mask(const cpumask_t *mask)
isb();
}
+/* Flush inner-shareable TLBs, all VMIDs, non-hypervisor mode */
+static inline void flush_tlb_all(void)
+{
+ dsb();
+
+ WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS);
+
+ dsb();
+ isb();
+}
+
#endif /* __ASM_ARM_ARM32_FLUSHTLB_H__ */
/*
* Local variables:
: : : "memory");
}
+/* Flush inner-shareable TLBs, all VMIDs, non-hypervisor mode */
+static inline void flush_tlb_all(void)
+{
+ asm volatile(
+ "dsb sy;"
+ "tlbi alle1is;"
+ "dsb sy;"
+ "isb;"
+ : : : "memory");
+}
+
#endif /* __ASM_ARM_ARM64_FLUSHTLB_H__ */
/*
* Local variables: