cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;
}
+
+/*
+ * Get bits that must be set or cleared in the ACTLR (Auxiliary Control)
+ * register.
+ * Note: Bits in the ACTLR register are IMPLEMENTATION DEFINED.
+ * It's expected that the SCU is in operational state before this
+ * function is called.
+ * Both outputs are left at zero for implementers/part numbers not
+ * listed below, i.e. no ACTLR modification is required there.
+ */
+void
+cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
+{
+ *actlr_mask = 0;
+ *actlr_set = 0;
+
+ if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
+ switch (cpuinfo.part_number) {
+
+ case CPU_ARCH_CORTEX_A17:
+ case CPU_ARCH_CORTEX_A12: /* A12 is merged to A17 */
+ /*
+ * Enable SMP mode
+ */
+ *actlr_mask = (1 << 6);
+ *actlr_set = (1 << 6);
+ break;
+ case CPU_ARCH_CORTEX_A15:
+ /*
+ * Enable snoop-delayed exclusive handling
+ * Enable SMP mode
+ */
+ *actlr_mask = (1U << 31) |(1 << 6);
+ *actlr_set = (1U << 31) |(1 << 6);
+ break;
+ case CPU_ARCH_CORTEX_A9:
+ /*
+ * Disable exclusive L1/L2 cache control
+ * Enable SMP mode
+ * Enable Cache and TLB maintenance broadcast
+ */
+ *actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
+ *actlr_set = (1 << 6) | (1 << 0);
+ break;
+ case CPU_ARCH_CORTEX_A8:
+ /*
+ * Enable L2 cache
+ * Enable L1 data cache hardware alias checks
+ */
+ *actlr_mask = (1 << 1) | (1 << 0);
+ *actlr_set = (1 << 1);
+ break;
+ case CPU_ARCH_CORTEX_A7:
+ /*
+ * Enable SMP mode
+ */
+ *actlr_mask = (1 << 6);
+ *actlr_set = (1 << 6);
+ break;
+ case CPU_ARCH_CORTEX_A5:
+ /*
+ * Disable exclusive L1/L2 cache control
+ * Enable SMP mode
+ * Enable Cache and TLB maintenance broadcast
+ */
+ *actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
+ *actlr_set = (1 << 6) | (1 << 0);
+ break;
+ case CPU_ARCH_ARM1176:
+ /*
+ * Restrict cache size to 16KB
+ * Enable the return stack
+ * Enable dynamic branch prediction
+ * Enable static branch prediction
+ */
+ *actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
+ *actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
+ break;
+ }
+ return;
+ }
+}
#ifndef ARM_INTRNG
int start = 0, end = 0;
#endif
-
#ifdef ARM_NEW_PMAP
+ uint32_t actlr_mask, actlr_set;
+
pmap_set_tex();
- reinit_mmu(pmap_kern_ttb, (1<<6) | (1<< 0), (1<<6) | (1<< 0));
+ cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
+ reinit_mmu(pmap_kern_ttb, actlr_mask, actlr_set);
cpu_setup();
/* Provide stack pointers for other processor modes. */
pt1_entry_t *pte1p;
pt2_entry_t *pte2p;
u_int i;
+ uint32_t actlr_mask, actlr_set;
/*
* Now, we are going to make real kernel mapping. Note that we are
/* Finally, switch from 'boot_pt1' to 'kern_pt1'. */
pmap_kern_ttb = base_pt1 | ttb_flags;
- reinit_mmu(pmap_kern_ttb, (1 << 6) | (1 << 0), (1 << 6) | (1 << 0));
-
+ cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
+ reinit_mmu(pmap_kern_ttb, actlr_mask, actlr_set);
/*
* Initialize the first available KVA. As kernel image is mapped by
* sections, we are leaving some gap behind.
#include <sys/types.h>
+/* MIDR implementer codes (bits [31:24]). */
+#define CPU_IMPLEMENTER_ARM 0x41
+#define CPU_IMPLEMENTER_QCOM 0x51
+#define CPU_IMPLEMENTER_MRVL 0x56
+
+/* ARM MIDR primary part numbers (bits [15:4]). */
+#define CPU_ARCH_ARM1176 0xB76
+#define CPU_ARCH_CORTEX_A5 0xC05
+#define CPU_ARCH_CORTEX_A7 0xC07
+#define CPU_ARCH_CORTEX_A8 0xC08
+#define CPU_ARCH_CORTEX_A9 0xC09
+#define CPU_ARCH_CORTEX_A12 0xC0D
+#define CPU_ARCH_CORTEX_A15 0xC0F
+#define CPU_ARCH_CORTEX_A17 0xC0E /* was 0xC11; A17 MIDR is 0x410FC0Ex per ARM TRM */
+
+/* QCOM */
+#define CPU_ARCH_KRAIT_300 0x06F
+
struct cpuinfo {
/* raw id registers */
uint32_t midr;
extern struct cpuinfo cpuinfo;
void cpuinfo_init(void);
-
+void cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set);
#endif /* _MACHINE_CPUINFO_H_ */