Move all assembly sources into arch/aarch64/ and update the Makefile to build them from their new location.
Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
DEFINES += -DUART_BASE=$(UART_BASE)
OFILES =
+ARCH_SRC := arch/aarch64/
if PSCI
BOOTMETHOD := psci.o
CPPFLAGS += $(INITRD_FLAGS)
-OFILES += boot.o cache.o $(GIC) mmu.o ns.o $(BOOTMETHOD)
+OFILES += $(addprefix $(ARCH_SRC),boot.o cache.o $(GIC) mmu.o ns.o $(BOOTMETHOD))
all: $(IMAGE)
--- /dev/null
+/*
+ * arch/aarch64/boot.S - simple register setup code for stand-alone Linux booting
+ *
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+
+#include "common.S"
+
+ .section .init
+
+ .globl _start
+_start:
+ /*
+ * EL3 initialisation
+ */
+ mrs x0, CurrentEL
+ cmp x0, #CURRENTEL_EL3
+ b.ne start_no_el3 // skip EL3 initialisation
+
+ mov x0, #0x30 // RES1
+ orr x0, x0, #(1 << 0) // Non-secure EL1
+ orr x0, x0, #(1 << 8) // HVC enable
+ orr x0, x0, #(1 << 10) // 64-bit EL2
+ msr scr_el3, x0
+
+ msr cptr_el3, xzr // Disable copro. traps to EL3
+
+ ldr x0, =CNTFRQ
+ msr cntfrq_el0, x0
+
+ bl gic_secure_init
+
+ b start_el3
+
+ .ltorg
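For reference, the SCR_EL3 value assembled above can be checked with a minimal C sketch (not part of the patch); it only makes the bit arithmetic explicit:

#include <stdio.h>

int main(void)
{
	unsigned long scr = 0x30;	/* RES1 bits [5:4] */
	scr |= 1UL << 0;		/* NS: EL0/EL1 are Non-secure */
	scr |= 1UL << 8;		/* HCE: enable the HVC instruction */
	scr |= 1UL << 10;		/* RW: EL2 executes in AArch64 */
	printf("SCR_EL3 = %#lx\n", scr);	/* prints 0x531 */
	return 0;
}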
--- /dev/null
+/*
+ * arch/aarch64/cache.S - simple cache clean+invalidate code for stand-alone Linux booting
+ *
+ * Copyright (C) 2013 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+ .text
+
+ .globl flush_caches
+
+flush_caches:
+ mrs x0, clidr_el1
+
+	/* find out the max cache level to flush */
+ lsr x1, x0, #24
+ and x1, x1, #(0x7)
+ cbz x1, dcaches_done
+
+ mov x2, #0 /* level 1 (represented 1-off) */
+
+1: cmp x2, x1 /* gone over all levels */
+ b.eq dcaches_done
+
+ /* find out if we have a cache at this level */
+	add	x3, x2, x2, lsl #1	/* amount to shift for CtypeN */
+ lsr x4, x0, x3
+ and x4, x4, #0x7
+
+ cmp x4, #1
+ b.eq 5f /* no dcache at this level */
+
+ lsl x3, x2, #1
+ msr csselr_el1, x3
+ isb
+ mrs x3, ccsidr_el1
+ and x4, x3, #0x7
+ add x4, x4, #4 /* log2 line size, corrected for offset */
+ ubfx x6, x3, #3, #10 /* max way index */
+ clz w5, w6 /* 32 - log2 ways */
+ ubfx x7, x3, #13, #15 /* sets */
+
+ /* loop over ways */
+2: mov x8, x7 /* temporary (sets) */
+
+ /* loop over sets */
+ /* build the set/way command */
+3: lsl x9, x2, #1 /* cache level (-1) */
+ lsl x10, x6, x5 /* way << shift */
+ orr x9, x9, x10
+ lsl x10, x8, x4 /* set << line size */
+ orr x9, x9, x10
+
+ dc cisw, x9
+ dsb sy
+
+ cbz x8, 4f
+ sub x8, x8, #1
+ b 3b
+
+4: /* completed all sets for this way */
+ cbz x6, 5f
+ sub x6, x6, #1
+ b 2b
+
+5: /* finished this level, try the next */
+ dsb sy
+ add x2, x2, #1
+ b 1b
+
+dcaches_done:
+
+ dsb sy
+ ic iallu
+ dsb sy
+ isb
+ ret
+
+ .ltorg
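The loop above is the usual CLIDR/CCSIDR set/way walk. As a rough illustration of how level, way and set are packed into the DC CISW operand, here is an equivalent walk in C; the field positions follow the code above, but the cache geometry is an invented example rather than a value read from the system registers:

#include <inttypes.h>
#include <stdio.h>

/* Pack level (0-based), way and set into a DC CISW operand. */
static uint64_t set_way(unsigned level, unsigned way, unsigned way_shift,
			unsigned set, unsigned log2_linesize)
{
	return ((uint64_t)level << 1) | ((uint64_t)way << way_shift) |
	       ((uint64_t)set << log2_linesize);
}

int main(void)
{
	/* Example geometry: 64-byte lines, 4 ways, 256 sets at level 1. */
	unsigned log2_linesize = 6;	/* CCSIDR.LineSize + 4 */
	unsigned max_way = 3;
	unsigned max_set = 255;
	unsigned way_shift = 30;	/* clz(max_way) on a 32-bit value */
	uint64_t last = 0;

	for (unsigned way = 0; way <= max_way; way++)
		for (unsigned set = 0; set <= max_set; set++)
			last = set_way(0, way, way_shift, set, log2_linesize);

	printf("last DC CISW operand: %#" PRIx64 "\n", last);	/* 0xc0003fc0 */
	return 0;
}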
--- /dev/null
+/*
+ * arch/aarch64/common.S - common definitions useful for boot code
+ *
+ * Copyright (C) 2013 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+
+#define MPIDR_ID_BITS (0xff00ffffff)
+
+#define CURRENTEL_EL3 (3 << 2)
+
+/*
+ * RES1 bits, little-endian, caches and MMU off, no alignment checking,
+ * no WXN.
+ */
+#define SCTLR_EL2_RESET (3 << 28 | 3 << 22 | 1 << 18 | 1 << 16 | 1 << 11 | 3 << 4)
+
+#define SPSR_A (1 << 8) /* System Error masked */
+#define SPSR_D (1 << 9) /* Debug masked */
+#define SPSR_I (1 << 7) /* IRQ masked */
+#define SPSR_F (1 << 6) /* FIQ masked */
+#define SPSR_EL2H (9 << 0) /* EL2 Handler mode */
+
+#define SPSR_KERNEL (SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL2H)
+
+ /*
+	 * Drop to the exception level given by the SPSR value in register
+	 * mode, resuming execution at the address in register addr.
+ */
+ .macro drop_el mode addr
+ msr elr_el3, \addr
+ msr spsr_el3, \mode
+ eret
+ .endm
--- /dev/null
+/*
+ * arch/aarch64/gic-v3.S - Secure gicv3 initialisation for stand-alone Linux booting
+ *
+ * Copyright (C) 2013 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+
+#include "common.S"
+
+ .text
+
+ .global gic_secure_init
+
+gic_secure_init:
+ /*
+ * If GICv3 is not available, skip initialisation. The OS will probably
+ * fail with a warning, but this should be easier to debug than a
+ * failure within the boot wrapper.
+ */
+ mrs x0, id_aa64pfr0_el1
+ ubfx x0, x0, #24, #4
+ cmp x0, #1
+ b.ne skip_gicv3
+
+ /*
+	 * Only the primary CPU sets up the (re)distributors.
+ */
+ mrs x0, mpidr_el1
+ ldr x1, =MPIDR_ID_BITS
+ tst x0, x1
+ b.ne setup_cpu_if // secondary CPU
+
+ ldr x1, =GIC_DIST_BASE // GICD_CTLR
+ mov w0, #7 // EnableGrp0 | EnableGrp1ns | EnableGrp1s
+ orr w0, w0, #(3 << 4) // ARE_S | ARE_NS
+ str w0, [x1]
+
+ ldr x2, =GIC_RDIST_BASE
+
+ mvn w5, wzr
+
+next_rdist:
+ movn w6, #(1 << 1) // ProcessorSleep
+ ldr w4, [x2, #0x014] // GICR_WAKER
+ and w4, w4, w6 // Clear ProcessorSleep
+ str w4, [x2, #0x014] // GICR_WAKER
+ dsb st
+ isb
+
+1: ldr w4, [x2, #0x014] // GICR_WAKER
+ ands wzr, w4, #(1 << 2) // Test ChildrenAsleep
+ b.ne 1b
+
+ add x3, x2, #(1 << 16) // SGI_base
+
+	str	w5, [x3, #0x80]			// GICR_IGROUPR0
+	str	wzr, [x3, #0xD00]		// GICR_IGRPMODR0
+
+ ldr w4, [x2, #8] // GICR_TYPER
+ add x3, x3, #(1 << 16) // Next redist
+ tbz w4, #1, 2f // if VLPIS is set,
+	add	x3, x3, #(2 << 16)	// it is two pages further away
+2: mov x2, x3
+ tbz w4, #4, next_rdist
+
+ ldr w2, [x1, #4] // GICD_TYPER
+ and w2, w2, #0x1f // ITLinesNumber
+ cbz w2, setup_cpu_if
+
+	add	x3, x1, #0x84			// GICD_IGROUPR1
+	add	x4, x1, #0xD04			// GICD_IGRPMODR1
+
+1: str w5, [x3], #4
+ str wzr, [x4], #4
+ sub w2, w2, #1
+ cbnz w2, 1b
+
+setup_cpu_if:
+
+#define ICC_SRE_EL2 S3_4_C12_C9_5
+#define ICC_SRE_EL3 S3_6_C12_C12_5
+#define ICC_CTLR_EL1 S3_0_C12_C12_4
+#define ICC_CTLR_EL3 S3_6_C12_C12_4
+#define ICC_PMR_EL1 S3_0_C4_C6_0
+
+ // Enable SRE at EL3 and ICC_SRE_EL2 access
+ mov x0, #((1 << 3) | (1 << 0)) // Enable | SRE
+ mrs x1, ICC_SRE_EL3
+ orr x1, x1, x0
+ msr ICC_SRE_EL3, x1
+ isb
+
+ // Configure CPU interface
+ msr ICC_CTLR_EL3, xzr
+ isb
+
+skip_gicv3:
+ ret
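The redistributor loop above steps from one GICR region to the next until GICR_TYPER.Last is set, skipping two extra 64KB frames whenever VLPIS is implemented. A hedged C sketch of the same stride logic, with the MMIO accesses replaced by a made-up array of GICR_TYPER values and an example base address:

#include <stdint.h>
#include <stdio.h>

#define GICR_TYPER_VLPIS	(1u << 1)
#define GICR_TYPER_LAST		(1u << 4)
#define GICR_FRAME		0x10000u	/* one 64KB register frame */

int main(void)
{
	/* Made-up GICR_TYPER values for three redistributors. */
	uint32_t typer[] = { 0, GICR_TYPER_VLPIS, GICR_TYPER_LAST };
	uint64_t base = 0x2f100000;	/* example redistributor base */

	for (unsigned i = 0; ; i++) {
		uint64_t next = base + 2 * GICR_FRAME;	/* RD_base + SGI_base */

		if (typer[i] & GICR_TYPER_VLPIS)
			next += 2 * GICR_FRAME;		/* VLPI frames follow */

		printf("redistributor %u at %#llx\n", i,
		       (unsigned long long)base);
		if (typer[i] & GICR_TYPER_LAST)
			break;
		base = next;
	}
	return 0;
}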
--- /dev/null
+/*
+ * arch/aarch64/gic.S - Secure gic initialisation for stand-alone Linux booting
+ *
+ * Copyright (C) 2013 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+
+#include "common.S"
+
+ .text
+
+ .global gic_secure_init
+
+gic_secure_init:
+ /*
+ * Check for the primary CPU to avoid a race on the distributor
+ * registers.
+ */
+ mrs x0, mpidr_el1
+ ldr x1, =MPIDR_ID_BITS
+ tst x0, x1
+ b.ne 1f // secondary CPU
+
+ ldr x1, =GIC_DIST_BASE // GICD_CTLR
+ mov w0, #3 // EnableGrp0 | EnableGrp1
+ str w0, [x1]
+
+1: ldr x1, =GIC_DIST_BASE + 0x80 // GICD_IGROUPR
+ mov w0, #~0 // Grp1 interrupts
+ str w0, [x1]
+ b.ne 2f // Only local interrupts for secondary CPUs
+ ldr x2, =GIC_DIST_BASE + 0x04 // GICD_TYPER
+ ldr w3, [x2]
+ ands w3, w3, #0x1f // ITLinesNumber
+ b.eq 2f
+1: str w0, [x1, #4]!
+ subs w3, w3, #1
+ b.ne 1b
+
+2: ldr x1, =GIC_CPU_BASE // GICC_CTLR
+ mov w0, #3 // EnableGrp0 | EnableGrp1
+ str w0, [x1]
+
+ mov w0, #1 << 7 // allow NS access to GICC_PMR
+ str w0, [x1, #4] // GICC_PMR
+
+ ret
--- /dev/null
+/*
+ * arch/aarch64/mmu.S - EL3 MMU identity map code to enable the use of exclusives.
+ *
+ * Copyright (C) 2013 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+
+#define ATTR_UPPER_XN		(1 << 54)	/* eXecute Never */
+#define ATTR_UPPER_PXN		(1 << 53)	/* Privileged eXecute Never */
+
+#define MEM_ATTR_UPPER 0
+#define DEV_ATTR_UPPER (ATTR_UPPER_XN | ATTR_UPPER_PXN)
+
+#define ATTR_LOWER_AF (1 << 10) /* Don't trap accesses */
+#define ATTR_LOWER_SH_IS (3 << 8) /* Inner shareable */
+#define ATTR_LOWER_SH_NS	(0 << 8)	/* Non-shareable */
+
+#define ATTR_LOWER_AP_RW_ANY (1 << 6) /* Writeable at any privilege level */
+
+#define ATTR_LOWER_NS (1 << 5) /* non-secure PA */
+#define ATTR_LOWER_ATTRINDEX(n) ((n) << 2) /* MAIR_EL3 attrN */
+
+#define MEM_ATTR_LOWER (ATTR_LOWER_AF | ATTR_LOWER_SH_IS | \
+ ATTR_LOWER_NS | ATTR_LOWER_ATTRINDEX(0))
+
+#define DEV_ATTR_LOWER (ATTR_LOWER_AF | ATTR_LOWER_SH_NS | \
+ ATTR_LOWER_NS | ATTR_LOWER_ATTRINDEX(1))
+
+#define BLOCK_VALID (1 << 0) /* Valid block entry */
+
+/*
+ * the top 10 bits of PA [39:30]
+ */
+#define BLOCK_1GB_PA(_pa) ((_pa) & (0x3ff << 30))
+
+#define BLOCK_MEM_1GB(_pa) (MEM_ATTR_UPPER | BLOCK_1GB_PA(_pa) | \
+ MEM_ATTR_LOWER | BLOCK_VALID)
+
+#define BLOCK_DEV_1GB(_pa) (DEV_ATTR_UPPER | BLOCK_1GB_PA(_pa) | \
+ DEV_ATTR_LOWER | BLOCK_VALID)
+
+ .section .pgtables, "w"
+
+#define BLOCK_INVALID (0 << 0)
+
+ /*
+ * 1st level page table.
+ * 512 entries, each covering 1GB.
+ */
+ .align 12
+pgtable_l1:
+ .quad BLOCK_DEV_1GB(0x00000000)
+ .quad BLOCK_INVALID
+ .quad BLOCK_MEM_1GB(0x80000000)
+ .quad BLOCK_MEM_1GB(0xC0000000)
+ .rept 30
+ .quad BLOCK_INVALID
+ .endr
+ .quad BLOCK_MEM_1GB(0x880000000)
+ .quad BLOCK_MEM_1GB(0x8C0000000)
+ .rept (512-36)
+ .quad BLOCK_INVALID
+ .endr
+
+/*
+ * attr0: Normal memory, outer non-cacheable, inner write-through non-transient
+ * attrN: device-nGnRnE
+ */
+#define MAIR_ATTR 0x48
+
+#define TCR_RES1 ((1 << 31) | (1 << 23))
+#define TCR_PS (2 << 16) /* 40 bits */
+#define TCR_TG0 (0 << 14) /* 4KB */
+#define TCR_SH0 (3 << 12) /* inner shareable */
+#define TCR_ORGN0 (0 << 10) /* normal outer non-cacheable */
+#define TCR_IRGN0 (2 << 8) /* normal inner write-through */
+#define TCR_T0SZ		(25 << 0)	/* 39-bit region (2^(64-25) bytes) */
+
+#define TCR_VAL (TCR_RES1 | TCR_PS | TCR_TG0 | TCR_SH0 | TCR_ORGN0 | TCR_IRGN0 | TCR_T0SZ)
+
+#define SCTLR_RES1 ((3 << 28) | (3 << 22) | (1 << 18) | (1 << 16) | (1 << 11) | (3 << 4))
+#define SCTLR_EE (0 << 25) /* little endian */
+#define SCTLR_WXN (0 << 19) /* regions with write permission not forced to XN */
+#define SCTLR_I (0 << 12) /* Disable I cache */
+#define SCTLR_SA (0 << 3) /* No stack alignment checking */
+#define SCTLR_C (0 << 2) /* Disable caches */
+#define SCTLR_A (0 << 1) /* No alignment checking */
+#define SCTLR_M (1 << 0) /* enable MMU */
+
+#define SCTLR_VAL (SCTLR_RES1 | SCTLR_EE | SCTLR_WXN | SCTLR_I | \
+ SCTLR_SA | SCTLR_C | SCTLR_A | SCTLR_M)
+
+ .text
+
+ .globl switch_to_idmap
+ .globl switch_to_physmap
+
+switch_to_idmap:
+
+ mov x28, x30
+
+ /*
+ * We assume that the d-caches are invalid at power-on, and hence do
+ * not need to be invalidated. However the icache(s) and TLBs may still
+ * be filled with garbage.
+ */
+ ic iallu
+ tlbi alle3
+ dsb sy
+ isb
+
+ ldr x0, =pgtable_l1
+ msr ttbr0_el3, x0
+
+ ldr x0, =MAIR_ATTR
+ msr mair_el3, x0
+
+ ldr x0, =TCR_VAL
+ msr tcr_el3, x0
+
+ isb
+
+ ldr x0, =SCTLR_VAL
+ msr sctlr_el3, x0
+
+ isb
+
+ /* Identity map now active, branch back to phys/virt address */
+ ret x28
+
+switch_to_physmap:
+ mov x28, x30
+
+ mrs x0, sctlr_el3
+ mov x1, #(SCTLR_M | SCTLR_C)
+ bic x0, x0, x1
+ msr sctlr_el3, x0
+
+ isb
+
+ bl flush_caches
+
+ ret x28
+
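The level-1 table above uses 1GB block descriptors, so the entry index for this single-level identity map is simply PA[38:30]. A small C sketch that rebuilds the normal-memory descriptor for 0x80000000, with the attribute values copied from the macros above (this is only a cross-check, not generated code):

#include <stdint.h>
#include <stdio.h>

#define ATTR_LOWER_AF		(1ull << 10)
#define ATTR_LOWER_SH_IS	(3ull << 8)
#define ATTR_LOWER_NS		(1ull << 5)
#define ATTR_LOWER_ATTRINDEX(n)	((uint64_t)(n) << 2)
#define BLOCK_VALID		(1ull << 0)

#define MEM_ATTR_LOWER		(ATTR_LOWER_AF | ATTR_LOWER_SH_IS | \
				 ATTR_LOWER_NS | ATTR_LOWER_ATTRINDEX(0))
#define BLOCK_1GB_PA(pa)	((pa) & (0x3ffull << 30))

int main(void)
{
	uint64_t pa = 0x80000000ull;
	uint64_t desc = BLOCK_1GB_PA(pa) | MEM_ATTR_LOWER | BLOCK_VALID;

	printf("l1 index %llu, descriptor %#llx\n",
	       (unsigned long long)(pa >> 30),
	       (unsigned long long)desc);	/* index 2, 0x80000721 */
	return 0;
}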
--- /dev/null
+/*
+ * arch/aarch64/ns.S - code to initialise everything required when first booting non-secure.
+ *
+ * Copyright (C) 2013 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+
+#define PL011_UARTIBRD 0x24
+#define PL011_UARTFBRD 0x28
+#define PL011_UART_LCR_H 0x2c
+#define PL011_UARTCR 0x30
+
+#define V2M_SYS_CFGDATA 0xa0
+#define V2M_SYS_CFGCTRL 0xa4
+
+ .text
+ .globl ns_init_system
+
+ns_init_system:
+ /*
+ * UART initialisation (38400 8N1)
+ */
+ ldr x4, =UART_BASE
+ mov w5, #0x10
+ str w5, [x4, #PL011_UARTIBRD]
+ str wzr, [x4, #PL011_UARTFBRD]
+ /* set parameters to 8N1 and enable the FIFOs */
+ mov w5, #0x70
+ str w5, [x4, #PL011_UART_LCR_H]
+ /* enable the UART, TXen and RXen */
+ mov w5, #0x301
+ str w5, [x4, #PL011_UARTCR]
+
+ /*
+ * CLCD output site MB
+ */
+ ldr x4, =SYSREGS_BASE
+ ldr w5, =(1 << 31) | (1 << 30) | (7 << 20) | (0 << 16) // START|WRITE|MUXFPGA|SITE_MB
+ str wzr, [x4, #V2M_SYS_CFGDATA]
+ str w5, [x4, #V2M_SYS_CFGCTRL]
+
+ ret
+
+ .ltorg
--- /dev/null
+/*
+ * arch/aarch64/psci.S - basic PSCI implementation
+ *
+ * Copyright (C) 2013 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+#include "common.S"
+
+#define PSCI_CPU_OFF 0x84000002
+#define PSCI_CPU_ON 0xc4000003
+
+#define PSCI_RET_SUCCESS 0
+#define PSCI_RET_NOT_IMPL (-1)
+#define PSCI_RET_INVALID (-2)
+#define PSCI_RET_DENIED (-3)
+
+#ifndef CPU_IDS
+#error No CPU MPIDRs provided.
+#endif
+
+#define MPIDR_INVALID (-1)
+#define ADDR_INVALID (-1)
+
+ .macro ventry label
+ .align 7
+ b \label
+ .endm
+
+ .section .vectors, "w"
+
+ .align 11
+vector:
+ // current EL, SP_EL0
+ ventry err_exception // synchronous
+ ventry err_exception // IRQ
+ ventry err_exception // FIQ
+ ventry err_exception // SError
+
+ // current EL, SP_ELx
+ ventry err_exception
+ ventry err_exception
+ ventry err_exception
+ ventry err_exception
+
+ // lower EL, AArch64
+ ventry psci_call64
+ ventry err_exception
+ ventry err_exception
+ ventry err_exception
+
+ // lower EL, AArch32
+ ventry psci_call32
+ ventry err_exception
+ ventry err_exception
+ ventry err_exception
+
+ .data
+ /*
+ * Array of the CPU ID (MPIDR & MPIDR_ID_BITS) of each CPU in the system.
+ * The index into the array is used as a logical id, and an index into
+ * the branch table. The branch table is automatically padded to the
+ * same size as the id table.
+ *
+ * The first CPU in the table is considered to be the primary CPU, and
+ * is the only CPU to immediately branch off to the kernel.
+ */
+ .align 3
+id_table:
+ .quad CPU_IDS
+__id_end:
+ .quad MPIDR_INVALID
+
+.equ nr_cpus, ((__id_end - id_table) / 8)
+
+branch_table:
+ .rept (nr_cpus)
+ .quad ADDR_INVALID
+ .endr
+
+ .text
+
+ .globl start_no_el3
+ .globl start_el3
+
+err_exception:
+ b err_exception
+
+psci_call32:
+ mov w0, PSCI_RET_NOT_IMPL
+ eret
+
+psci_call64:
+ ldr x7, =PSCI_CPU_OFF
+ cmp x0, x7
+ b.eq psci_cpu_off
+
+ ldr x7, =PSCI_CPU_ON
+ cmp x0, x7
+ b.eq psci_cpu_on
+
+ mov x0, PSCI_RET_NOT_IMPL
+ eret
+
+/*
+ * x1 - optional power state parameter, ignored here
+ */
+psci_cpu_off:
+ mrs x0, mpidr_el1
+ ldr x1, =MPIDR_ID_BITS
+ and x0, x0, x1
+ bl find_logical_id
+ adr x1, branch_table
+ mov x2, #ADDR_INVALID
+ str x2, [x1, x0, lsl #3]
+
+ b spin
+
+/*
+ * x1 - target cpu
+ * x2 - address
+ */
+psci_cpu_on:
+ mov x15, x30
+ mov x14, x2
+ mov x0, x1
+
+ bl find_logical_id
+ cmp x0, #-1
+ b.eq 1f
+
+ adr x3, branch_table
+ add x3, x3, x0, lsl #3
+
+ ldr x4, =ADDR_INVALID
+
+ ldxr x5, [x3]
+ cmp x4, x5
+ b.ne 1f
+
+ stxr w4, x14, [x3]
+ cbnz w4, 1f
+
+ dsb ishst
+ sev
+
+ mov x0, #PSCI_RET_SUCCESS
+ mov x30, x15
+ eret
+
+1: mov x0, #PSCI_RET_DENIED
+ mov x30, x15
+ eret
+
+
+/*
+ * Takes masked MPIDR in x0, returns logical id in x0
+ * Returns -1 for unknown MPIDRs
+ * Clobbers x1, x2, x3
+ */
+find_logical_id:
+__find_logical_index:
+ adr x2, id_table
+ mov x1, xzr
+1: mov x3, #nr_cpus // check we haven't walked off the end of the array
+ cmp x1, x3
+ b.gt 3f
+ ldr x3, [x2, x1, lsl #3]
+ cmp x3, x0
+ b.eq 2f
+ add x1, x1, #1
+ b 1b
+2: mov x0, x1
+ ret
+3: mov x0, #-1
+ ret
+
+setup_vector:
+ ldr x0, =vector
+ msr VBAR_EL3, x0
+ isb
+ ret
+
+start_el3:
+ bl setup_vector
+ bl switch_to_idmap
+
+ /* only boot the primary cpu (entry 0 in the table) */
+ mrs x0, mpidr_el1
+ ldr x1, =MPIDR_ID_BITS
+ and x0, x0, x1
+ bl find_logical_id
+ cbnz x0, spin
+
+ adr x2, branch_table
+ adr x1, start_cpu0
+ str x1, [x2]
+ sevl
+ b spin
+
+/*
+ * Poll the release table, waiting for a valid address to appear.
+ * When a valid address appears, branch to it.
+ */
+spin:
+ mrs x0, mpidr_el1
+ ldr x1, =MPIDR_ID_BITS
+ and x0, x0, x1
+ bl find_logical_id
+ cmp x0, #-1
+ b.eq spin_dead
+
+ adr x1, branch_table
+ mov x3, #ADDR_INVALID
+
+ add x1, x1, x0, lsl #3
+
+1: wfe
+ ldr x2, [x1]
+ cmp x2, x3
+ b.eq 1b
+
+ ldr x0, =SCTLR_EL2_RESET
+ msr sctlr_el2, x0
+
+ mov x3, #SPSR_KERNEL
+ adr x4, el2_trampoline
+ mov x0, x2
+ drop_el x3, x4
+
+/*
+ * This PSCI implementation requires EL3. Without EL3 we'll only boot the
+ * primary CPU; all others will be trapped in an infinite loop.
+ */
+start_no_el3:
+ mrs x0, mpidr_el1
+ ldr x1, =MPIDR_ID_BITS
+ and x0, x0, x1
+ bl find_logical_id
+ cbz x0, start_cpu0
+spin_dead:
+ wfe
+ b spin_dead
+
+
+/*
+ * Clean and invalidate the caches at EL2 to simplify EL3's cache usage.
+ */
+el2_trampoline:
+ mov x15, x0
+ bl flush_caches
+ br x15
+
+start_cpu0:
+ /*
+ * Kernel parameters
+ */
+ mov x0, xzr
+ mov x1, xzr
+ mov x2, xzr
+ mov x3, xzr
+
+ bl ns_init_system
+ ldr x0, =dtb
+ b kernel
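find_logical_id maps a masked MPIDR to an index into id_table, and the same index selects the CPU's slot in branch_table. The lookup is easier to see in C; this is only an illustrative sketch, with an invented pair of CPU IDs standing in for the CPU_IDS list normally passed in at build time:

#include <stdint.h>
#include <stdio.h>

#define MPIDR_ID_BITS	0xff00ffffffull
#define MPIDR_INVALID	(~0ull)

/* Example id_table; the real one comes from the CPU_IDS make variable. */
static const uint64_t id_table[] = { 0x0, 0x1, MPIDR_INVALID };

/* Return the logical id for a masked MPIDR, or -1 if it is unknown. */
static int find_logical_id(uint64_t mpidr)
{
	for (unsigned i = 0; id_table[i] != MPIDR_INVALID; i++)
		if (id_table[i] == (mpidr & MPIDR_ID_BITS))
			return i;
	return -1;
}

int main(void)
{
	printf("cpu 0x1   -> %d\n", find_logical_id(0x1));	/* 1 */
	printf("cpu 0x100 -> %d\n", find_logical_id(0x100));	/* -1 */
	return 0;
}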
--- /dev/null
+/*
+ * arch/aarch64/spin.S - spin-table boot protocol implementation
+ *
+ * Copyright (C) 2013 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+
+#include "common.S"
+
+ .text
+
+ .globl start_no_el3
+ .globl start_el3
+
+start_el3:
+ /*
+	 * Prepare the switch from EL3 to EL2h (EL2 with SPSel set)
+ */
+ ldr x0, =SCTLR_EL2_RESET
+ msr sctlr_el2, x0
+ ldr x0, =start_no_el3 // Return after mode switch
+ mov x1, #SPSR_KERNEL
+ drop_el x1, x0
+
+start_no_el3:
+ /*
+ * Kernel parameters
+ */
+ mov x0, xzr
+ mov x1, xzr
+ mov x2, xzr
+ mov x3, xzr
+
+ mrs x4, mpidr_el1
+ ldr x5, =MPIDR_ID_BITS
+ tst x4, x5
+ b.eq 2f
+
+ /*
+ * Secondary CPUs
+ */
+1: wfe
+ ldr x4, mbox
+ cbz x4, 1b
+ br x4 // branch to the given address
+
+2:
+ /*
+ * Primary CPU
+ */
+ bl ns_init_system
+ ldr x0, =dtb // device tree blob
+ b kernel
+
+ .ltorg
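From the kernel's point of view this is the standard spin-table protocol: each secondary spins on the mbox word until the OS writes an entry address into it, then branches there. A hedged C sketch of the secondary's side of the handshake, with wfe modelled as a no-op and a made-up entry address:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the 64-bit mailbox word polled by the secondaries. */
static volatile uint64_t mbox;

/* Secondary side of the spin-table handshake. */
static uint64_t secondary_wait(void)
{
	uint64_t entry;

	while ((entry = mbox) == 0)
		/* wfe */;
	return entry;	/* the real code branches to this address */
}

int main(void)
{
	/* The OS releases the secondary by writing its entry point. */
	mbox = 0x80080000;	/* made-up kernel entry address */
	printf("secondary released to %#llx\n",
	       (unsigned long long)secondary_wait());
	return 0;
}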
+++ /dev/null
-/*
- * boot.S - simple register setup code for stand-alone Linux booting
- *
- * Copyright (C) 2012 ARM Limited. All rights reserved.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE.txt file.
- */
-
-#include "common.S"
-
- .section .init
-
- .globl _start
-_start:
- /*
- * EL3 initialisation
- */
- mrs x0, CurrentEL
- cmp x0, #CURRENTEL_EL3
- b.ne start_no_el3 // skip EL3 initialisation
-
- mov x0, #0x30 // RES1
- orr x0, x0, #(1 << 0) // Non-secure EL1
- orr x0, x0, #(1 << 8) // HVC enable
- orr x0, x0, #(1 << 10) // 64-bit EL2
- msr scr_el3, x0
-
- msr cptr_el3, xzr // Disable copro. traps to EL3
-
- ldr x0, =CNTFRQ
- msr cntfrq_el0, x0
-
- bl gic_secure_init
-
- b start_el3
-
- .ltorg
+++ /dev/null
-/*
- * cache.S - simple cache clean+invalidate code for stand-alone Linux booting
- *
- * Copyright (C) 2013 ARM Limited. All rights reserved.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE.txt file.
- */
- .text
-
- .globl flush_caches
-
-flush_caches:
- mrs x0, clidr_el1
-
- /* find what out the max cache level to flush */
- lsr x1, x0, #24
- and x1, x1, #(0x7)
- cbz x1, dcaches_done
-
- mov x2, #0 /* level 1 (represented 1-off) */
-
-1: cmp x2, x1 /* gone over all levels */
- b.eq dcaches_done
-
- /* find out if we have a cache at this level */
- add x3, x2, x2, lsl 1 /* amount to shift for CtypeN */
- lsr x4, x0, x3
- and x4, x4, #0x7
-
- cmp x4, #1
- b.eq 5f /* no dcache at this level */
-
- lsl x3, x2, #1
- msr csselr_el1, x3
- isb
- mrs x3, ccsidr_el1
- and x4, x3, #0x7
- add x4, x4, #4 /* log2 line size, corrected for offset */
- ubfx x6, x3, #3, #10 /* max way index */
- clz w5, w6 /* 32 - log2 ways */
- ubfx x7, x3, #13, #15 /* sets */
-
- /* loop over ways */
-2: mov x8, x7 /* temporary (sets) */
-
- /* loop over sets */
- /* build the set/way command */
-3: lsl x9, x2, #1 /* cache level (-1) */
- lsl x10, x6, x5 /* way << shift */
- orr x9, x9, x10
- lsl x10, x8, x4 /* set << line size */
- orr x9, x9, x10
-
- dc cisw, x9
- dsb sy
-
- cbz x8, 4f
- sub x8, x8, #1
- b 3b
-
-4: /* completed all sets for this way */
- cbz x6, 5f
- sub x6, x6, #1
- b 2b
-
-5: /* finished this level, try the next */
- dsb sy
- add x2, x2, #1
- b 1b
-
-dcaches_done:
-
- dsb sy
- ic iallu
- dsb sy
- isb
- ret
-
- .ltorg
+++ /dev/null
-/*
- * common.S - common definitions useful for boot code
- *
- * Copyright (C) 2013 ARM Limited. All rights reserved.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE.txt file.
- */
-
-#define MPIDR_ID_BITS (0xff00ffffff)
-
-#define CURRENTEL_EL3 (3 << 2)
-
-/*
- * RES1 bits, little-endian, caches and MMU off, no alignment checking,
- * no WXN.
- */
-#define SCTLR_EL2_RESET (3 << 28 | 3 << 22 | 1 << 18 | 1 << 16 | 1 << 11 | 3 << 4)
-
-#define SPSR_A (1 << 8) /* System Error masked */
-#define SPSR_D (1 << 9) /* Debug masked */
-#define SPSR_I (1 << 7) /* IRQ masked */
-#define SPSR_F (1 << 6) /* FIQ masked */
-#define SPSR_EL2H (9 << 0) /* EL2 Handler mode */
-
-#define SPSR_KERNEL (SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL2H)
-
- /*
- * Drop EL to that specified by the spsr value in register mode, at
- * the address specified in register addr.
- */
- .macro drop_el mode addr
- msr elr_el3, \addr
- msr spsr_el3, \mode
- eret
- .endm
+++ /dev/null
-/*
- * gic.S - Secure gic initialisation for stand-alone Linux booting
- *
- * Copyright (C) 2013 ARM Limited. All rights reserved.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE.txt file.
- */
-
-#include "common.S"
-
- .text
-
- .global gic_secure_init
-
-gic_secure_init:
- /*
- * If GICv3 is not available, skip initialisation. The OS will probably
- * fail with a warning, but this should be easier to debug than a
- * failure within the boot wrapper.
- */
- mrs x0, id_aa64pfr0_el1
- ubfx x0, x0, #24, #4
- cmp x0, #1
- b.ne skip_gicv3
-
- /*
- * Only the primary CPU setups the (re)distributors.
- */
- mrs x0, mpidr_el1
- ldr x1, =MPIDR_ID_BITS
- tst x0, x1
- b.ne setup_cpu_if // secondary CPU
-
- ldr x1, =GIC_DIST_BASE // GICD_CTLR
- mov w0, #7 // EnableGrp0 | EnableGrp1ns | EnableGrp1s
- orr w0, w0, #(3 << 4) // ARE_S | ARE_NS
- str w0, [x1]
-
- ldr x2, =GIC_RDIST_BASE
-
- mvn w5, wzr
-
-next_rdist:
- movn w6, #(1 << 1) // ProcessorSleep
- ldr w4, [x2, #0x014] // GICR_WAKER
- and w4, w4, w6 // Clear ProcessorSleep
- str w4, [x2, #0x014] // GICR_WAKER
- dsb st
- isb
-
-1: ldr w4, [x2, #0x014] // GICR_WAKER
- ands wzr, w4, #(1 << 2) // Test ChildrenAsleep
- b.ne 1b
-
- add x3, x2, #(1 << 16) // SGI_base
-
- str w5, [x3, #0x80] // GICR_IGROUP0
- str wzr, [x3, #0xD00] // GICR_IGRPMOD0
-
- ldr w4, [x2, #8] // GICR_TYPER
- add x3, x3, #(1 << 16) // Next redist
- tbz w4, #1, 2f // if VLPIS is set,
- add x3, x3, #(2 << 16) // it is two page further away
-2: mov x2, x3
- tbz w4, #4, next_rdist
-
- ldr w2, [x1, #4] // GICD_TYPER
- and w2, w2, #0x1f // ITLinesNumber
- cbz w2, setup_cpu_if
-
- add x3, x1, #0x84 // GICD_IGROUP1
- add x4, x1, #0xD04 // GICD_IGRPMOD1
-
-1: str w5, [x3], #4
- str wzr, [x4], #4
- sub w2, w2, #1
- cbnz w2, 1b
-
-setup_cpu_if:
-
-#define ICC_SRE_EL2 S3_4_C12_C9_5
-#define ICC_SRE_EL3 S3_6_C12_C12_5
-#define ICC_CTLR_EL1 S3_0_C12_C12_4
-#define ICC_CTLR_EL3 S3_6_C12_C12_4
-#define ICC_PMR_EL1 S3_0_C4_C6_0
-
- // Enable SRE at EL3 and ICC_SRE_EL2 access
- mov x0, #((1 << 3) | (1 << 0)) // Enable | SRE
- mrs x1, ICC_SRE_EL3
- orr x1, x1, x0
- msr ICC_SRE_EL3, x1
- isb
-
- // Configure CPU interface
- msr ICC_CTLR_EL3, xzr
- isb
-
-skip_gicv3:
- ret
+++ /dev/null
-/*
- * gic.S - Secure gic initialisation for stand-alone Linux booting
- *
- * Copyright (C) 2013 ARM Limited. All rights reserved.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE.txt file.
- */
-
-#include "common.S"
-
- .text
-
- .global gic_secure_init
-
-gic_secure_init:
- /*
- * Check for the primary CPU to avoid a race on the distributor
- * registers.
- */
- mrs x0, mpidr_el1
- ldr x1, =MPIDR_ID_BITS
- tst x0, x1
- b.ne 1f // secondary CPU
-
- ldr x1, =GIC_DIST_BASE // GICD_CTLR
- mov w0, #3 // EnableGrp0 | EnableGrp1
- str w0, [x1]
-
-1: ldr x1, =GIC_DIST_BASE + 0x80 // GICD_IGROUPR
- mov w0, #~0 // Grp1 interrupts
- str w0, [x1]
- b.ne 2f // Only local interrupts for secondary CPUs
- ldr x2, =GIC_DIST_BASE + 0x04 // GICD_TYPER
- ldr w3, [x2]
- ands w3, w3, #0x1f // ITLinesNumber
- b.eq 2f
-1: str w0, [x1, #4]!
- subs w3, w3, #1
- b.ne 1b
-
-2: ldr x1, =GIC_CPU_BASE // GICC_CTLR
- mov w0, #3 // EnableGrp0 | EnableGrp1
- str w0, [x1]
-
- mov w0, #1 << 7 // allow NS access to GICC_PMR
- str w0, [x1, #4] // GICC_PMR
-
- ret
+++ /dev/null
-/*
- * mmu.S - EL3 MMU identity map code to enable the use of exclusives.
- *
- * Copyright (C) 2013 ARM Limited. All rights reserved.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE.txt file.
- */
-
-#define ATTR_UPPER_XN (1 << 54) /* Non-Executable */
-#define ATTR_UPPER_PXN (1 << 53) /* Non-Executable */
-
-#define MEM_ATTR_UPPER 0
-#define DEV_ATTR_UPPER (ATTR_UPPER_XN | ATTR_UPPER_PXN)
-
-#define ATTR_LOWER_AF (1 << 10) /* Don't trap accesses */
-#define ATTR_LOWER_SH_IS (3 << 8) /* Inner shareable */
-#define ATTR_LOWER_SH_NS (0 << 8) /* Inner shareable */
-
-#define ATTR_LOWER_AP_RW_ANY (1 << 6) /* Writeable at any privilege level */
-
-#define ATTR_LOWER_NS (1 << 5) /* non-secure PA */
-#define ATTR_LOWER_ATTRINDEX(n) ((n) << 2) /* MAIR_EL3 attrN */
-
-#define MEM_ATTR_LOWER (ATTR_LOWER_AF | ATTR_LOWER_SH_IS | \
- ATTR_LOWER_NS | ATTR_LOWER_ATTRINDEX(0))
-
-#define DEV_ATTR_LOWER (ATTR_LOWER_AF | ATTR_LOWER_SH_NS | \
- ATTR_LOWER_NS | ATTR_LOWER_ATTRINDEX(1))
-
-#define BLOCK_VALID (1 << 0) /* Valid block entry */
-
-/*
- * the top 10 bits of PA [39:30]
- */
-#define BLOCK_1GB_PA(_pa) ((_pa) & (0x3ff << 30))
-
-#define BLOCK_MEM_1GB(_pa) (MEM_ATTR_UPPER | BLOCK_1GB_PA(_pa) | \
- MEM_ATTR_LOWER | BLOCK_VALID)
-
-#define BLOCK_DEV_1GB(_pa) (DEV_ATTR_UPPER | BLOCK_1GB_PA(_pa) | \
- DEV_ATTR_LOWER | BLOCK_VALID)
-
- .section .pgtables, "w"
-
-#define BLOCK_INVALID (0 << 0)
-
- /*
- * 1st level page table.
- * 512 entries, each covering 1GB.
- */
- .align 12
-pgtable_l1:
- .quad BLOCK_DEV_1GB(0x00000000)
- .quad BLOCK_INVALID
- .quad BLOCK_MEM_1GB(0x80000000)
- .quad BLOCK_MEM_1GB(0xC0000000)
- .rept 30
- .quad BLOCK_INVALID
- .endr
- .quad BLOCK_MEM_1GB(0x880000000)
- .quad BLOCK_MEM_1GB(0x8C0000000)
- .rept (512-36)
- .quad BLOCK_INVALID
- .endr
-
-/*
- * attr0: Normal memory, outer non-cacheable, inner write-through non-transient
- * attrN: device-nGnRnE
- */
-#define MAIR_ATTR 0x48
-
-#define TCR_RES1 ((1 << 31) | (1 << 23))
-#define TCR_PS (2 << 16) /* 40 bits */
-#define TCR_TG0 (0 << 14) /* 4KB */
-#define TCR_SH0 (3 << 12) /* inner shareable */
-#define TCR_ORGN0 (0 << 10) /* normal outer non-cacheable */
-#define TCR_IRGN0 (2 << 8) /* normal inner write-through */
-#define TCR_T0SZ (25 << 0) /* 2^39 bits (2^(64-25)) */
-
-#define TCR_VAL (TCR_RES1 | TCR_PS | TCR_TG0 | TCR_SH0 | TCR_ORGN0 | TCR_IRGN0 | TCR_T0SZ)
-
-#define SCTLR_RES1 ((3 << 28) | (3 << 22) | (1 << 18) | (1 << 16) | (1 << 11) | (3 << 4))
-#define SCTLR_EE (0 << 25) /* little endian */
-#define SCTLR_WXN (0 << 19) /* regions with write permission not forced to XN */
-#define SCTLR_I (0 << 12) /* Disable I cache */
-#define SCTLR_SA (0 << 3) /* No stack alignment checking */
-#define SCTLR_C (0 << 2) /* Disable caches */
-#define SCTLR_A (0 << 1) /* No alignment checking */
-#define SCTLR_M (1 << 0) /* enable MMU */
-
-#define SCTLR_VAL (SCTLR_RES1 | SCTLR_EE | SCTLR_WXN | SCTLR_I | \
- SCTLR_SA | SCTLR_C | SCTLR_A | SCTLR_M)
-
- .text
-
- .globl switch_to_idmap
- .globl switch_to_physmap
-
-switch_to_idmap:
-
- mov x28, x30
-
- /*
- * We assume that the d-caches are invalid at power-on, and hence do
- * not need to be invalidated. However the icache(s) and TLBs may still
- * be filled with garbage.
- */
- ic iallu
- tlbi alle3
- dsb sy
- isb
-
- ldr x0, =pgtable_l1
- msr ttbr0_el3, x0
-
- ldr x0, =MAIR_ATTR
- msr mair_el3, x0
-
- ldr x0, =TCR_VAL
- msr tcr_el3, x0
-
- isb
-
- ldr x0, =SCTLR_VAL
- msr sctlr_el3, x0
-
- isb
-
- /* Identity map now active, branch back to phys/virt address */
- ret x28
-
-switch_to_physmap:
- mov x28, x30
-
- mrs x0, sctlr_el3
- mov x1, #(SCTLR_M | SCTLR_C)
- bic x0, x0, x1
- msr sctlr_el3, x0
-
- isb
-
- bl flush_caches
-
- ret x28
-
+++ /dev/null
-/*
- * ns.S - code to initialise everything required when first booting non-secure.
- *
- * Copyright (C) 2013 ARM Limited. All rights reserved.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE.txt file.
- */
-
-#define PL011_UARTIBRD 0x24
-#define PL011_UARTFBRD 0x28
-#define PL011_UART_LCR_H 0x2c
-#define PL011_UARTCR 0x30
-
-#define V2M_SYS_CFGDATA 0xa0
-#define V2M_SYS_CFGCTRL 0xa4
-
- .text
- .globl ns_init_system
-
-ns_init_system:
- /*
- * UART initialisation (38400 8N1)
- */
- ldr x4, =UART_BASE
- mov w5, #0x10
- str w5, [x4, #PL011_UARTIBRD]
- str wzr, [x4, #PL011_UARTFBRD]
- /* set parameters to 8N1 and enable the FIFOs */
- mov w5, #0x70
- str w5, [x4, #PL011_UART_LCR_H]
- /* enable the UART, TXen and RXen */
- mov w5, #0x301
- str w5, [x4, #PL011_UARTCR]
-
- /*
- * CLCD output site MB
- */
- ldr x4, =SYSREGS_BASE
- ldr w5, =(1 << 31) | (1 << 30) | (7 << 20) | (0 << 16) // START|WRITE|MUXFPGA|SITE_MB
- str wzr, [x4, #V2M_SYS_CFGDATA]
- str w5, [x4, #V2M_SYS_CFGCTRL]
-
- ret
-
- .ltorg
+++ /dev/null
-/*
- * psci.S - basic PSCI implementation
- *
- * Copyright (C) 2013 ARM Limited. All rights reserved.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE.txt file.
- */
-#include "common.S"
-
-#define PSCI_CPU_OFF 0x84000002
-#define PSCI_CPU_ON 0xc4000003
-
-#define PSCI_RET_SUCCESS 0
-#define PSCI_RET_NOT_IMPL (-1)
-#define PSCI_RET_INVALID (-2)
-#define PSCI_RET_DENIED (-3)
-
-#ifndef CPU_IDS
-#error No CPU MPIDRs provided.
-#endif
-
-#define MPIDR_INVALID (-1)
-#define ADDR_INVALID (-1)
-
- .macro ventry label
- .align 7
- b \label
- .endm
-
- .section .vectors, "w"
-
- .align 11
-vector:
- // current EL, SP_EL0
- ventry err_exception // synchronous
- ventry err_exception // IRQ
- ventry err_exception // FIQ
- ventry err_exception // SError
-
- // current EL, SP_ELx
- ventry err_exception
- ventry err_exception
- ventry err_exception
- ventry err_exception
-
- // lower EL, AArch64
- ventry psci_call64
- ventry err_exception
- ventry err_exception
- ventry err_exception
-
- // lower EL, AArch32
- ventry psci_call32
- ventry err_exception
- ventry err_exception
- ventry err_exception
-
- .data
- /*
- * Array of the CPU ID (MPIDR & MPIDR_ID_BITS) of each CPU in the system.
- * The index into the array is used as a logical id, and an index into
- * the branch table. The branch table is automatically padded to the
- * same size as the id table.
- *
- * The first CPU in the table is considered to be the primary CPU, and
- * is the only CPU to immediately branch off to the kernel.
- */
- .align 3
-id_table:
- .quad CPU_IDS
-__id_end:
- .quad MPIDR_INVALID
-
-.equ nr_cpus, ((__id_end - id_table) / 8)
-
-branch_table:
- .rept (nr_cpus)
- .quad ADDR_INVALID
- .endr
-
- .text
-
- .globl start_no_el3
- .globl start_el3
-
-err_exception:
- b err_exception
-
-psci_call32:
- mov w0, PSCI_RET_NOT_IMPL
- eret
-
-psci_call64:
- ldr x7, =PSCI_CPU_OFF
- cmp x0, x7
- b.eq psci_cpu_off
-
- ldr x7, =PSCI_CPU_ON
- cmp x0, x7
- b.eq psci_cpu_on
-
- mov x0, PSCI_RET_NOT_IMPL
- eret
-
-/*
- * x1 - optional power state parameter, ignored here
- */
-psci_cpu_off:
- mrs x0, mpidr_el1
- ldr x1, =MPIDR_ID_BITS
- and x0, x0, x1
- bl find_logical_id
- adr x1, branch_table
- mov x2, #ADDR_INVALID
- str x2, [x1, x0, lsl #3]
-
- b spin
-
-/*
- * x1 - target cpu
- * x2 - address
- */
-psci_cpu_on:
- mov x15, x30
- mov x14, x2
- mov x0, x1
-
- bl find_logical_id
- cmp x0, #-1
- b.eq 1f
-
- adr x3, branch_table
- add x3, x3, x0, lsl #3
-
- ldr x4, =ADDR_INVALID
-
- ldxr x5, [x3]
- cmp x4, x5
- b.ne 1f
-
- stxr w4, x14, [x3]
- cbnz w4, 1f
-
- dsb ishst
- sev
-
- mov x0, #PSCI_RET_SUCCESS
- mov x30, x15
- eret
-
-1: mov x0, #PSCI_RET_DENIED
- mov x30, x15
- eret
-
-
-/*
- * Takes masked MPIDR in x0, returns logical id in x0
- * Returns -1 for unknown MPIDRs
- * Clobbers x1, x2, x3
- */
-find_logical_id:
-__find_logical_index:
- adr x2, id_table
- mov x1, xzr
-1: mov x3, #nr_cpus // check we haven't walked off the end of the array
- cmp x1, x3
- b.gt 3f
- ldr x3, [x2, x1, lsl #3]
- cmp x3, x0
- b.eq 2f
- add x1, x1, #1
- b 1b
-2: mov x0, x1
- ret
-3: mov x0, #-1
- ret
-
-setup_vector:
- ldr x0, =vector
- msr VBAR_EL3, x0
- isb
- ret
-
-start_el3:
- bl setup_vector
- bl switch_to_idmap
-
- /* only boot the primary cpu (entry 0 in the table) */
- mrs x0, mpidr_el1
- ldr x1, =MPIDR_ID_BITS
- and x0, x0, x1
- bl find_logical_id
- cbnz x0, spin
-
- adr x2, branch_table
- adr x1, start_cpu0
- str x1, [x2]
- sevl
- b spin
-
-/*
- * Poll the release table, waiting for a valid address to appear.
- * When a valid address appears, branch to it.
- */
-spin:
- mrs x0, mpidr_el1
- ldr x1, =MPIDR_ID_BITS
- and x0, x0, x1
- bl find_logical_id
- cmp x0, #-1
- b.eq spin_dead
-
- adr x1, branch_table
- mov x3, #ADDR_INVALID
-
- add x1, x1, x0, lsl #3
-
-1: wfe
- ldr x2, [x1]
- cmp x2, x3
- b.eq 1b
-
- ldr x0, =SCTLR_EL2_RESET
- msr sctlr_el2, x0
-
- mov x3, #SPSR_KERNEL
- adr x4, el2_trampoline
- mov x0, x2
- drop_el x3, x4
-
-/*
- * This PSCI implementation requires EL3. Without EL3 we'll only boot the
- * primary cpu, all others will be trapped in an infinite loop.
- */
-start_no_el3:
- mrs x0, mpidr_el1
- ldr x1, =MPIDR_ID_BITS
- and x0, x0, x1
- bl find_logical_id
- cbz x0, start_cpu0
-spin_dead:
- wfe
- b spin_dead
-
-
-/*
- * Clean and invalidate the caches at EL2 to simplify EL3's cache usage.
- */
-el2_trampoline:
- mov x15, x0
- bl flush_caches
- br x15
-
-start_cpu0:
- /*
- * Kernel parameters
- */
- mov x0, xzr
- mov x1, xzr
- mov x2, xzr
- mov x3, xzr
-
- bl ns_init_system
- ldr x0, =dtb
- b kernel
+++ /dev/null
-/*
- * spin.S - spin-table boot protocol implementation
- *
- * Copyright (C) 2013 ARM Limited. All rights reserved.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE.txt file.
- */
-
-#include "common.S"
-
- .text
-
- .globl start_no_el3
- .globl start_el3
-
-start_el3:
- /*
- * Prepare the switch to the EL2_SP1 mode from EL3
- */
- ldr x0, =SCTLR_EL2_RESET
- msr sctlr_el2, x0
- ldr x0, =start_no_el3 // Return after mode switch
- mov x1, #SPSR_KERNEL
- drop_el x1, x0
-
-start_no_el3:
- /*
- * Kernel parameters
- */
- mov x0, xzr
- mov x1, xzr
- mov x2, xzr
- mov x3, xzr
-
- mrs x4, mpidr_el1
- ldr x5, =MPIDR_ID_BITS
- tst x4, x5
- b.eq 2f
-
- /*
- * Secondary CPUs
- */
-1: wfe
- ldr x4, mbox
- cbz x4, 1b
- br x4 // branch to the given address
-
-2:
- /*
- * Primary CPU
- */
- bl ns_init_system
- ldr x0, =dtb // device tree blob
- b kernel
-
- .ltorg