--- /dev/null
+/*-
+ * Copyright (c) 2014 Robin Randhawa
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+#include <machine/param.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * FIXME:
+ * Need big.LITTLE awareness at some point.
+ * Using arm64_p[id]cache_line_size may not be the best option.
+ * Need better SMP awareness.
+ */
+	.text
+	.align	2
+
+/* PAGE_MASK literal pool entry, loaded PC-relative by arm64_tlb_flushID_SE. */
+.Lpage_mask:
+	.word	PAGE_MASK
+
+/*
+ * Macro to handle the cache. This takes the start address in x0, length
+ * in x1. It will corrupt x0, x1, x2, x3, and x4 -- x4 holds the line
+ * mask, so it is clobbered as well (the earlier comment omitted it).
+ *
+ * The start is aligned down to a line boundary and the length widened
+ * to compensate, so the loop covers every line the range touches.
+ *
+ * NOTE(review): the loop is do-while shaped, so a length of zero still
+ * operates on one cache line -- confirm callers never pass a zero size.
+ */
+.macro cache_handle_range dcop = 0, ic = 0, icop = 0
+.if \ic == 0
+	ldr	x3, =dcache_line_size	/* Load the D cache line size */
+.else
+	ldr	x3, =idcache_line_size	/* Load the I & D cache line size */
+.endif
+	ldr	x3, [x3]
+	sub	x4, x3, #1		/* Get the address mask */
+	and	x2, x0, x4		/* Get the low bits of the address */
+	add	x1, x1, x2		/* Add these to the size */
+	bic	x0, x0, x4		/* Clear the low bit of the address */
+1:
+.if \ic != 0
+	ic	\icop, x0
+.endif
+	dc	\dcop, x0
+	add	x0, x0, x3		/* Move to the next line */
+	subs	x1, x1, x3		/* Reduce the size */
+	b.hi	1b			/* Check if we are done */
+/*
+ * NOTE(review): the ARM ARM orders completion barriers before context
+ * synchronization -- a dsb before the isb would guarantee the IC
+ * maintenance has completed before the pipeline flush; verify this
+ * ordering against the architecture manual.
+ */
+.if \ic != 0
+	isb
+.endif
+	dsb	ish
+	ret
+.endm
+
+/* void arm64_nullop(void) -- deliberate no-op entry point. */
+ENTRY(arm64_nullop)
+	ret
+END(arm64_nullop)
+
+/*
+ * Generic functions to read/modify/write the internal coprocessor registers
+ */
+
+/*
+ * void arm64_setttb(vm_offset_t)
+ *
+ * Install a new translation table base in ttbr0_el1 (new base in x0).
+ * The barriers drain outstanding accesses before the switch and make
+ * the new base visible before any subsequent instruction executes.
+ */
+ENTRY(arm64_setttb)
+	dsb	ish		/* Complete prior accesses before switching */
+	msr	ttbr0_el1, x0	/* Install the new table base */
+	dsb	ish		/* Ensure the register write has completed */
+	isb			/* Resynchronize the pipeline */
+	ret
+END(arm64_setttb)
+
+/*
+ * void arm64_tlb_flushID(void)
+ *
+ * Invalidate all EL1 TLB entries. On SMP kernels the Inner Shareable
+ * form is used so the invalidate is broadcast to the other CPUs.
+ */
+ENTRY(arm64_tlb_flushID)
+#ifdef SMP
+	tlbi	vmalle1is	/* All CPUs in the Inner Shareable domain */
+#else
+	tlbi	vmalle1		/* This CPU only */
+#endif
+	dsb	ish		/* Wait for the invalidate to complete */
+	isb			/* Resynchronize the pipeline */
+	ret
+END(arm64_tlb_flushID)
+
+/*
+ * void arm64_tlb_flushID_SE(vm_offset_t)
+ *
+ * Invalidate the TLB entry for a single virtual address (in x0).
+ *
+ * NOTE(review): TLBI VAE1 expects VA[55:12] in the low bits of the
+ * register operand, i.e. the address shifted right by the page shift;
+ * masking with PAGE_MASK leaves the VA unshifted -- verify the operand
+ * format against the ARM ARM.
+ */
+ENTRY(arm64_tlb_flushID_SE)
+	ldr	x1, .Lpage_mask
+	bic	x0, x0, x1	/* Strip the page offset */
+#ifdef SMP
+	tlbi	vae1is, x0	/* Broadcast to the Inner Shareable domain */
+#else
+	tlbi	vae1, x0	/* This CPU only */
+#endif
+	dsb	ish		/* Wait for the invalidate to complete */
+	isb			/* Resynchronize the pipeline */
+	ret
+END(arm64_tlb_flushID_SE)
+
+/*
+ * void arm64_dcache_wb_range(vm_offset_t, vm_size_t)
+ *
+ * Write back (clean, dc cvac) the D cache lines covering the range,
+ * without invalidating them. Clobbers x0-x4 (see cache_handle_range).
+ */
+ENTRY(arm64_dcache_wb_range)
+	cache_handle_range	dcop = cvac
+END(arm64_dcache_wb_range)
+
+/*
+ * void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t)
+ *
+ * Write back and invalidate (dc civac) the D cache lines covering the
+ * range. Clobbers x0-x4 (see cache_handle_range).
+ */
+ENTRY(arm64_dcache_wbinv_range)
+	cache_handle_range	dcop = civac
+END(arm64_dcache_wbinv_range)
+
+/*
+ * void arm64_dcache_inv_range(vm_offset_t, vm_size_t)
+ *
+ * Note, we must not invalidate everything. If the range is too big we
+ * must use wb-inv of the entire cache.
+ *
+ * NOTE(review): the size check described above is not implemented here;
+ * the macro always invalidates (dc ivac) line by line. Also, because
+ * the macro aligns the start down to a line boundary, partial lines at
+ * the range edges are invalidated whole, discarding any dirty data they
+ * hold outside the requested range -- confirm callers align buffers.
+ */
+ENTRY(arm64_dcache_inv_range)
+	cache_handle_range	dcop = ivac
+END(arm64_dcache_inv_range)
+
+/*
+ * void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t)
+ *
+ * Write back and invalidate the D cache (dc civac) and invalidate the
+ * I cache (ic ivau) over the range, ending with an isb via the macro.
+ * Clobbers x0-x4 (see cache_handle_range).
+ */
+ENTRY(arm64_idcache_wbinv_range)
+	cache_handle_range	dcop = civac, ic = 1, icop = ivau
+END(arm64_idcache_wbinv_range)
+
+/*
+ * void arm64_icache_sync_range(vm_offset_t, vm_size_t)
+ *
+ * Make newly written instructions visible: clean the D cache (dc cvac)
+ * over the range and invalidate the corresponding I cache lines
+ * (ic ivau). Clobbers x0-x4 (see cache_handle_range).
+ */
+ENTRY(arm64_icache_sync_range)
+	cache_handle_range	dcop = cvac, ic = 1, icop = ivau
+END(arm64_icache_sync_range)
struct kva_md_info kmi;
+/*
+ * Minimum cache line sizes, discovered from ctr_el0 in cache_setup()
+ * and read by the cache maintenance code (ldr =dcache_line_size etc.).
+ */
+int64_t dcache_line_size;	/* The minimum D cache line size */
+int64_t icache_line_size;	/* The minimum I cache line size */
+int64_t idcache_line_size;	/* The minimum cache line size */
+
static void
cpu_startup(void *dummy)
{
}
#endif
+/*
+ * Read ctr_el0 and record the minimum D and I cache line sizes in the
+ * globals consumed by the cache maintenance routines.
+ */
+static void
+cache_setup(void)
+{
+	int dcache_line_shift, icache_line_shift;
+	uint32_t ctr_el0;
+
+	ctr_el0 = READ_SPECIALREG(ctr_el0);
+
+	/* Read the log2 words in each D cache line */
+	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
+	/*
+	 * Get the D cache line size. The CTR field counts 4-byte words;
+	 * sizeof(int) stands in for the word size here -- assumes
+	 * sizeof(int) == 4 on this platform (TODO confirm).
+	 */
+	dcache_line_size = sizeof(int) << dcache_line_shift;
+
+	/* And the same for the I cache */
+	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
+	icache_line_size = sizeof(int) << icache_line_shift;
+
+	/* The smaller of the two is safe for combined I/D maintenance. */
+	idcache_line_size = MIN(dcache_line_size, icache_line_size);
+}
+
void
initarm(struct arm64_bootparams *abp)
{
/* Do basic tuning, hz etc */
init_param1();
+ cache_setup();
+
/* Bootstrap enough of pmap to enter the kernel proper */
pmap_bootstrap(abp->kern_l1pt, KERNBASE - abp->kern_delta,
lastaddr - KERNBASE);
return (mpidr);
}
+/*
+ * Map the machine-independent cpu_*() cache/TLB operations onto the
+ * arm64 implementations declared below.
+ */
+#define	cpu_nullop()			arm64_nullop()
+#define	cpu_setttb(a)			arm64_setttb(a)
+
+#define	cpu_tlb_flushID()		arm64_tlb_flushID()
+#define	cpu_tlb_flushID_SE(e)		arm64_tlb_flushID_SE(e)
+
+#define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
+#define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
+#define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))
+
+#define	cpu_idcache_wbinv_range(a, s)	arm64_idcache_wbinv_range((a), (s))
+#define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))
+
+/* Implemented in assembly (cpufunc_asm.S). */
+void arm64_nullop(void);
+void arm64_setttb(vm_offset_t);
+void arm64_tlb_flushID(void);
+void arm64_tlb_flushID_SE(vm_offset_t);
+void arm64_icache_sync_range(vm_offset_t, vm_size_t);
+void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t);
+void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
+void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
+void arm64_dcache_wb_range(vm_offset_t, vm_size_t);
+
+
#endif /* _KERNEL */
#endif /* _MACHINE_CPUFUNC_H_ */