Status: Supported
Status, Cortex A57 r0p0-r1p1: Supported, not security supported
+ Status, Cortex A77 r0p0-r1p0: Supported, not security supported
For the Cortex A57 r0p0 - r1p1, see Erratum 832075.
+For the Cortex A77 r0p0 - r1p0, see Erratum 1508412.
## Host hardware support
| ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
| ARM | Neoverse-N1 | #1165522 | N/A |
| ARM | Neoverse-N1 | #1286807 | ARM64_ERRATUM_1286807 |
+| ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 |
| ARM | MMU-500 | #842869 | N/A |
If unsure, say Y.
+config ARM64_ERRATUM_1508412
+ bool "Cortex-A77: 1508412: possible deadlock on sequence of NC/Device load and store exclusive or PAR read"
+ default y
+ depends on ARM_64
+ help
+ This option adds a workaround for Arm Cortex-A77 erratum 1508412.
+
+ Affected Cortex-A77 cores (r0p0, r1p0) could deadlock on a sequence
+ of a store-exclusive or read of PAR_EL1 and a load with device or
+ non-cacheable memory attributes. The workaround depends on a firmware
+ counterpart.
+
+ Xen guests must also have the workaround implemented or they can
+ deadlock the system.
+
+ Work around the issue by inserting DMB SY barriers around PAR_EL1
+ register reads and warning Xen users. The DMB barrier is sufficient
+ to prevent a speculative PAR_EL1 read.
+
+ If unsure, say Y.
+
endmenu
config ARM64_HARDEN_BRANCH_PREDICTOR
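The help text above boils down to one rule: never let a Device/NC load and a PAR_EL1 read (or store-exclusive) reach the core back to back without an intervening barrier. Below is a minimal standalone sketch of that barrier placement; `mmio_reg`, `read_par_el1_with_workaround()`, and `example()` are made-up names for illustration, while the runtime-patched version Xen actually uses is the `read_sysreg_par()` helper added later in this patch.

```c
#include <stdint.h>

/* Hypothetical Device/NC mapping, standing in for any MMIO register. */
extern volatile uint32_t *mmio_reg;

static inline uint64_t read_par_el1_with_workaround(void)
{
    uint64_t par;

    /*
     * Erratum 1508412: on Cortex-A77 r0p0/r1p0, a Device/NC load followed
     * by a PAR_EL1 read (or a store-exclusive) can deadlock. A DMB SY on
     * each side of the PAR_EL1 access breaks the problematic sequence.
     */
    asm volatile("dmb sy" ::: "memory");
    asm volatile("mrs %0, par_el1" : "=r" (par));
    asm volatile("dmb sy" ::: "memory");

    return par;
}

void example(void)
{
    uint32_t v = *mmio_reg;                        /* Device/NC load ...  */
    uint64_t par = read_par_el1_with_workaround(); /* ... safely followed */

    (void)v;
    (void)par;
}
```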
* position on the stack before.
*/
.macro entry, hyp, compat, save_x0_x1=1
+
+ /*
+ * Ensure any PAR_EL1 reads complete, in case we were interrupted
+ * between the PAR_EL1 read and the memory barrier for the erratum
+ * 1508412 workaround.
+ */
+ alternative_if ARM64_WORKAROUND_1508412
+ dmb sy
+ alternative_else_nop_endif
+
sub sp, sp, #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */
.if \hyp == 0 /* Guest mode */
ldr lr, [sp], #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */
+ /*
+ * Ensure any device/NC reads complete, in case we were interrupted
+ * between the memory barrier for the erratum 1508412 workaround and
+ * any PAR_EL1 read.
+ */
+ alternative_if ARM64_WORKAROUND_1508412
+ dmb sy
+ alternative_else_nop_endif
+
eret
sb
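Why a barrier on both the entry and the exit path? Each one closes a different half of the race window described in the comments above. The timelines below are illustrative only, restating those comments under the assumption that the interrupted guest also brackets its PAR_EL1 reads with DMB SY:

```c
/*
 * Entry barrier: guest interrupted after the PAR_EL1 read, before its
 * trailing barrier ran:
 *
 *   guest: mrs x0, par_el1      PAR_EL1 read
 *   ------ trap to Xen ------
 *   Xen:   dmb sy               completes the read before Xen touches
 *                               any Device/NC memory
 *
 * Exit barrier: guest interrupted after its leading barrier, before the
 * PAR_EL1 read:
 *
 *   Xen:   ... Device/NC loads while handling the trap ...
 *   Xen:   dmb sy               completes them before the guest resumes
 *   ------ eret ------
 *   guest: mrs x0, par_el1      the PAR_EL1 read would otherwise directly
 *                               follow Xen's Device/NC loads
 */
```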
.capability = ARM64_WORKAROUND_AT_SPECULATE,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
},
+#ifdef CONFIG_ARM64_ERRATUM_1508412
+ {
+ /* Cortex-A77 r0p0 - r1p0 */
+ .desc = "ARM erratum 1508412 (hypervisor portion)",
+ .capability = ARM64_WORKAROUND_1508412,
+ MIDR_RANGE(MIDR_CORTEX_A77, 0, 1 << MIDR_VARIANT_SHIFT),
+ },
+#endif
{
/* Cortex-A55 (All versions as erratum is open in SDEN v14) */
.desc = "ARM erratum 1530923",
{
enable_cpu_capabilities(arm_errata);
-#ifdef CONFIG_ARM64_ERRATUM_832075
- if ( cpus_have_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) )
+#if defined(CONFIG_ARM64_ERRATUM_832075) || defined(CONFIG_ARM64_ERRATUM_1508412)
+ if ( cpus_have_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
+ cpus_have_cap(ARM64_WORKAROUND_1508412) )
{
- printk_once("**** This CPU is affected by the errata 832075. ****\n"
- "**** Guests without CPU erratum workarounds can deadlock the system! ****\n"
+ printk_once("**** Guests without CPU erratum workarounds can deadlock the system! ****\n"
"**** Only trusted guests should be used. ****\n");
/* Taint the machine as being insecure */
p->arch.ttbr1 = READ_SYSREG64(TTBR1_EL1);
if ( is_32bit_domain(p->domain) )
p->arch.dacr = READ_SYSREG(DACR32_EL2);
- p->arch.par = READ_SYSREG64(PAR_EL1);
+ p->arch.par = read_sysreg_par();
#if defined(CONFIG_ARM_32)
p->arch.mair0 = READ_CP32(MAIR0);
p->arch.mair1 = READ_CP32(MAIR1);
/* Ask the MMU to translate a VA for us */
static inline uint64_t __va_to_par(vaddr_t va)
{
- uint64_t par, tmp = READ_SYSREG64(PAR_EL1);
+ uint64_t par, tmp = read_sysreg_par();
asm volatile ("at s1e2r, %0;" : : "r" (va));
isb();
- par = READ_SYSREG64(PAR_EL1);
+ par = read_sysreg_par();
WRITE_SYSREG64(tmp, PAR_EL1);
return par;
}
/* Ask the MMU to translate a Guest VA for us */
static inline uint64_t gva_to_ma_par(vaddr_t va, unsigned int flags)
{
- uint64_t par, tmp = READ_SYSREG64(PAR_EL1);
+ uint64_t par, tmp = read_sysreg_par();
if ( (flags & GV2M_WRITE) == GV2M_WRITE )
asm volatile ("at s12e1w, %0;" : : "r" (va));
else
asm volatile ("at s12e1r, %0;" : : "r" (va));
isb();
- par = READ_SYSREG64(PAR_EL1);
+ par = read_sysreg_par();
WRITE_SYSREG64(tmp, PAR_EL1);
return par;
}
static inline uint64_t gva_to_ipa_par(vaddr_t va, unsigned int flags)
{
- uint64_t par, tmp = READ_SYSREG64(PAR_EL1);
+ uint64_t par, tmp = read_sysreg_par();
if ( (flags & GV2M_WRITE) == GV2M_WRITE )
asm volatile ("at s1e1w, %0;" : : "r" (va));
else
asm volatile ("at s1e1r, %0;" : : "r" (va));
isb();
- par = READ_SYSREG64(PAR_EL1);
+ par = read_sysreg_par();
WRITE_SYSREG64(tmp, PAR_EL1);
return par;
}
#define ARM_WORKAROUND_BHB_LOOP_24 13
#define ARM_WORKAROUND_BHB_LOOP_32 14
#define ARM_WORKAROUND_BHB_SMCC_3 15
+#define ARM64_WORKAROUND_1508412 16
-#define ARM_NCAPS 16
+#define ARM_NCAPS 17
#ifndef __ASSEMBLY__
# error "unknown ARM variant"
#endif
+#ifndef __ASSEMBLY__
+
+#include <asm/alternative.h>
+
+static inline register_t read_sysreg_par(void)
+{
+ register_t par_el1;
+
+ /*
+ * On Cortex-A77 r0p0 and r1p0, a read of PAR_EL1 must be preceded and
+ * followed by a DMB SY, as part of the workaround for erratum 1508412.
+ */
+ asm volatile(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412,
+ CONFIG_ARM64_ERRATUM_1508412));
+ par_el1 = READ_SYSREG64(PAR_EL1);
+ asm volatile(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412,
+ CONFIG_ARM64_ERRATUM_1508412));
+
+ return par_el1;
+}
+
+#endif /* !__ASSEMBLY__ */
+
#endif /* __ASM_ARM_SYSREGS_H */
/*
* Local variables: