Initial cache handling functions
author    Andrew Turner <andrew@fubar.geek.nz>
          Thu, 5 Mar 2015 11:17:09 +0000 (11:17 +0000)
committer Andrew Turner <andrew@fubar.geek.nz>
          Wed, 18 Mar 2015 15:07:23 +0000 (15:07 +0000)
This adds the functions to write back and invalidate the data,
instruction, and unified caches. Further work is needed to call these
functions in pmap to flush the cache when mappings change.

These were written by Robin Randhawa. I have merged common code and
tested them on the Fixed Virtual Platform modeling the cache.
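As a rough sketch of the intended follow-up usage (the helper name below is
hypothetical, not something this change adds), a pmap-level caller could
invoke the new wrappers from <machine/cpufunc.h> after rewriting the
contents of a mapping:

    static void
    sync_new_exec_page(vm_offset_t va, vm_size_t size)
    {
            /*
             * Clean the D cache to the point of unification and invalidate
             * the I cache over the range so the core fetches the newly
             * written instructions.  A data-only mapping would need just
             * cpu_dcache_wb_range(va, size).
             */
            cpu_icache_sync_range(va, size);
    }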

sys/arm64/arm64/cpufunc_asm.S [new file with mode: 0644]
sys/arm64/arm64/machdep.c
sys/arm64/include/armreg.h
sys/arm64/include/cpufunc.h
sys/conf/files.arm64

diff --git a/sys/arm64/arm64/cpufunc_asm.S b/sys/arm64/arm64/cpufunc_asm.S
new file mode 100644
index 0000000..9b17c21
--- /dev/null
+++ b/sys/arm64/arm64/cpufunc_asm.S
@@ -0,0 +1,154 @@
+/*-
+ * Copyright (c) 2014 Robin Randhawa
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+#include <machine/param.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * FIXME:
+ * Need big.LITTLE awareness at some point.
+ * Using the global [id]cache_line_size variables may not be the best option.
+ * Need better SMP awareness.
+ */
+       .text
+       .align  2
+
+.Lpage_mask:
+       .word   PAGE_MASK
+
+/*
+ * Macro to perform a cache maintenance operation over an address range.
+ * The start address is in x0, the length in x1; x0-x4 are clobbered.
+ */
+.macro cache_handle_range dcop = 0, ic = 0, icop = 0
+.if \ic == 0
+       ldr     x3, =dcache_line_size   /* Load the D cache line size */
+.else
+       ldr     x3, =idcache_line_size  /* Load the I & D cache line size */
+.endif
+       ldr     x3, [x3]
+       sub     x4, x3, #1              /* Get the address mask */
+       and     x2, x0, x4              /* Get the low bits of the address */
+       add     x1, x1, x2              /* Add these to the size */
+       bic     x0, x0, x4              /* Clear the low bits of the address */
+1:
+.if \ic != 0
+       ic      \icop, x0
+.endif
+       dc      \dcop, x0
+       add     x0, x0, x3              /* Move to the next line */
+       subs    x1, x1, x3              /* Reduce the size */
+       b.hi    1b                      /* Check if we are done */
+.if \ic != 0
+       isb
+.endif
+       dsb     ish
+       ret
+.endm
+
+ENTRY(arm64_nullop)
+       ret
+END(arm64_nullop)
+
+/*
+ * Generic functions to read/modify/write the system registers.
+ */
+
+ENTRY(arm64_setttb)
+       dsb     ish
+       msr     ttbr0_el1, x0
+       dsb     ish
+       isb
+       ret
+END(arm64_setttb)
+
+ENTRY(arm64_tlb_flushID)
+#ifdef SMP
+       tlbi    vmalle1is
+#else
+       tlbi    vmalle1
+#endif
+       dsb     ish
+       isb
+       ret
+END(arm64_tlb_flushID)
+
+ENTRY(arm64_tlb_flushID_SE)
+       ldr     x1, .Lpage_mask
+       bic     x0, x0, x1
+#ifdef SMP
+       tlbi    vae1is, x0
+#else
+       tlbi    vae1, x0
+#endif
+       dsb     ish
+       isb
+       ret
+END(arm64_tlb_flushID_SE)
+
+/*
+ * void arm64_dcache_wb_range(vm_offset_t, vm_size_t)
+ */
+ENTRY(arm64_dcache_wb_range)
+       cache_handle_range      dcop = cvac
+END(arm64_dcache_wb_range)
+
+/*
+ * void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t)
+ */
+ENTRY(arm64_dcache_wbinv_range)
+       cache_handle_range      dcop = civac
+END(arm64_dcache_wbinv_range)
+
+/*
+ * void arm64_dcache_inv_range(vm_offset_t, vm_size_t)
+ *
+ * Note: we must not invalidate more than the requested range, as that would
+ * discard dirty data; if the range is too big, wb-inv the entire cache instead.
+ */
+ENTRY(arm64_dcache_inv_range)
+       cache_handle_range      dcop = ivac
+END(arm64_dcache_inv_range)
+
+/*
+ * void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t)
+ */
+ENTRY(arm64_idcache_wbinv_range)
+       cache_handle_range      dcop = civac, ic = 1, icop = ivau
+END(arm64_idcache_wbinv_range)
+
+/*
+ * void arm64_icache_sync_range(vm_offset_t, vm_size_t)
+ */
+ENTRY(arm64_icache_sync_range)
+       cache_handle_range      dcop = cvac, ic = 1, icop = ivau
+END(arm64_icache_sync_range)
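As a plain C illustration (a standalone sketch, not code from this commit) of
the address and size adjustment that cache_handle_range performs before its
loop:

    #include <stdint.h>

    static void
    cache_op_range(uint64_t va, uint64_t len, uint64_t line_size)
    {
            uint64_t mask = line_size - 1;  /* line sizes are powers of two */

            len += va & mask;       /* grow the length by the bits aligned away */
            va &= ~mask;            /* round the start down to a line boundary */
            do {
                    /* the asm issues "dc <op>, x0" (and "ic <op>, x0") here */
                    va += line_size;
                    len = (len > line_size) ? len - line_size : 0;
            } while (len > 0);
            /* the asm then issues dsb ish (plus isb after I cache ops) */
    }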
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
index 1f556afdc7c73e1563b86526ed7bb62949f827d7..d613b6a8925cc009a07f92305c96af772859814c 100644
--- a/sys/arm64/arm64/machdep.c
+++ b/sys/arm64/arm64/machdep.c
@@ -102,6 +102,10 @@ u_int physmap_idx;
 
 struct kva_md_info kmi;
 
+int64_t dcache_line_size;      /* The minimum D cache line size */
+int64_t icache_line_size;      /* The minimum I cache line size */
+int64_t idcache_line_size;     /* The minimum I & D cache line size */
+
 static void
 cpu_startup(void *dummy)
 {
@@ -772,6 +776,26 @@ try_load_dtb(caddr_t kmdp)
 }
 #endif
 
+static void
+cache_setup(void)
+{
+       int dcache_line_shift, icache_line_shift;
+       uint32_t ctr_el0;
+
+       ctr_el0 = READ_SPECIALREG(ctr_el0);
+
+       /* Read the log2 of the number of words in each D cache line */
+       dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
+       /* Get the D cache line size */
+       dcache_line_size = sizeof(int) << dcache_line_shift;
+
+       /* And the same for the I cache */
+       icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
+       icache_line_size = sizeof(int) << icache_line_shift;
+
+       idcache_line_size = MIN(dcache_line_size, icache_line_size);
+}
+
 void
 initarm(struct arm64_bootparams *abp)
 {
@@ -833,6 +857,8 @@ initarm(struct arm64_bootparams *abp)
        /* Do basic tuning, hz etc */
        init_param1();
 
+       cache_setup();
+
        /* Bootstrap enough of pmap  to enter the kernel proper */
        pmap_bootstrap(abp->kern_l1pt, KERNBASE - abp->kern_delta,
            lastaddr - KERNBASE);
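The shift by sizeof(int) works because CTR_EL0 encodes DminLine and IminLine
as the log2 of the line length in 4-byte words.  A worked example with
illustrative field values (not taken from any particular CPU):

    DminLine = 4  ->  dcache_line_size  = sizeof(int) << 4 = 4 * 16 = 64 bytes
    IminLine = 4  ->  icache_line_size  = sizeof(int) << 4 = 64 bytes
                      idcache_line_size = MIN(64, 64)      = 64 bytes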
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
index e55e6c50c63bcf649287804462ade8af866adb0e..8f847c7094d095036991e629ef658a798547ec2b 100644
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -53,6 +53,9 @@
 #define        CTR_DLINE_SHIFT         16
 #define        CTR_DLINE_MASK          (0xf << CTR_DLINE_SHIFT)
 #define        CTR_DLINE_SIZE(reg)     (((reg) & CTR_DLINE_MASK) >> CTR_DLINE_SHIFT)
+#define        CTR_ILINE_SHIFT         0
+#define        CTR_ILINE_MASK          (0xf << CTR_ILINE_SHIFT)
+#define        CTR_ILINE_SIZE(reg)     (((reg) & CTR_ILINE_MASK) >> CTR_ILINE_SHIFT)
 
 /* ESR_ELx */
 #define        ESR_ELx_ISS_MASK        0x00ffffff
diff --git a/sys/arm64/include/cpufunc.h b/sys/arm64/include/cpufunc.h
index 11c8234b19aae9aa9349c234cc376c629e628fbf..08d53553cbd3a637361074e00b795f2325bd649a 100644
--- a/sys/arm64/include/cpufunc.h
+++ b/sys/arm64/include/cpufunc.h
@@ -108,5 +108,28 @@ get_mpidr(void)
        return (mpidr);
 }
 
+#define        cpu_nullop()                    arm64_nullop()
+#define        cpu_setttb(a)                   arm64_setttb(a)
+
+#define        cpu_tlb_flushID()               arm64_tlb_flushID()
+#define        cpu_tlb_flushID_SE(e)           arm64_tlb_flushID_SE(e)
+
+#define        cpu_dcache_wbinv_range(a, s)    arm64_dcache_wbinv_range((a), (s))
+#define        cpu_dcache_inv_range(a, s)      arm64_dcache_inv_range((a), (s))
+#define        cpu_dcache_wb_range(a, s)       arm64_dcache_wb_range((a), (s))
+
+#define        cpu_idcache_wbinv_range(a, s)   arm64_idcache_wbinv_range((a), (s))
+#define        cpu_icache_sync_range(a, s)     arm64_icache_sync_range((a), (s))
+
+void arm64_nullop(void);
+void arm64_setttb(vm_offset_t);
+void arm64_tlb_flushID(void);
+void arm64_tlb_flushID_SE(vm_offset_t);
+void arm64_icache_sync_range(vm_offset_t, vm_size_t);
+void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t);
+void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
+void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
+void arm64_dcache_wb_range(vm_offset_t, vm_size_t);
+
 #endif /* _KERNEL */
 #endif /* _MACHINE_CPUFUNC_H_ */
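The cpu_* defines give callers a machine-independent spelling for the MD
routines above.  As a small sketch (the helper name is invented for
illustration), single-entry TLB maintenance after a page table update would
look like:

    static void
    invalidate_single_mapping(vm_offset_t va)
    {
            /*
             * arm64_tlb_flushID_SE() masks off the page offset, issues
             * "tlbi vae1" (or "vae1is" on SMP) for the page containing va,
             * then orders it with dsb/isb.
             */
            cpu_tlb_flushID_SE(va);
    }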
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index 2b2df49ff4bde790f7add56ec00195589a500481..1a0bec0c7512a56dd7c9dca732fd7b46a7700411 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -11,6 +11,7 @@ arm64/arm64/busdma_machdep.c  standard
 arm64/arm64/clock.c            standard
 arm64/arm64/copyinout.S                standard
 arm64/arm64/copystr.c          standard
+arm64/arm64/cpufunc_asm.S      standard
 arm64/arm64/db_disasm.c                optional        ddb
 arm64/arm64/db_interface.c     optional        ddb
 arm64/arm64/db_trace.c         optional        ddb