DEFINES += -DSTACK_SIZE=256
OFILES =
+
+CFLAGS += -mgeneral-regs-only -mstrict-align
ARCH_SRC := arch/aarch64/
if PSCI
BOOTMETHOD := psci.o
+OFILES += psci.o
PSCI_NODE := psci { \
compatible = \"arm,psci\"; \
method = \"smc\"; \
CPPFLAGS += $(INITRD_FLAGS)
CFLAGS += -Iinclude/ -I$(ARCH_SRC)/include/
+CFLAGS += -Wall -fomit-frame-pointer
+OFILES += boot_common.o
OFILES += $(addprefix $(ARCH_SRC),boot.o stack.o cache.o $(GIC) mmu.o ns.o $(BOOTMETHOD) utils.o)
all: $(IMAGE)
%.o: %.S Makefile
$(CC) $(CPPFLAGS) -D__ASSEMBLY__ $(CFLAGS) $(DEFINES) -c -o $@ $<
+%.o: %.c Makefile
+ $(CC) $(CPPFLAGS) $(CFLAGS) $(DEFINES) -c -o $@ $<
+
model.lds: $(LD_SCRIPT) Makefile
$(CPP) $(CPPFLAGS) -ansi -DPHYS_OFFSET=$(PHYS_OFFSET) -DMBOX_OFFSET=$(MBOX_OFFSET) -DKERNEL_OFFSET=$(KERNEL_OFFSET) -DFDT_OFFSET=$(FDT_OFFSET) -DFS_OFFSET=$(FS_OFFSET) -DKERNEL=$(KERNEL_IMAGE) -DFILESYSTEM=$(FILESYSTEM) -P -C -o $@ $<
.section .init
.globl _start
+ .globl jump_kernel
+
_start:
cpuid x0, x1
bl find_logical_id
*/
mrs x0, CurrentEL
cmp x0, #CURRENTEL_EL3
- b.ne start_no_el3 // skip EL3 initialisation
+ b.eq 1f
+
+ mov w0, #1
+ ldr x1, =flag_no_el3
+ str w0, [x1]
+
+ bl setup_stack
+ b start_no_el3
- mov x0, #0x30 // RES1
+1: mov x0, #0x30 // RES1
orr x0, x0, #(1 << 0) // Non-secure EL1
orr x0, x0, #(1 << 8) // HVC enable
orr x0, x0, #(1 << 10) // 64-bit EL2
err_invalid_id:
b .
+
+ /*
+ * Drop to the kernel
+ * x0: entry address
+ * x1-x4: arguments
+ */
+jump_kernel:
+ mov x19, x0
+ mov x20, x1
+ mov x21, x2
+ mov x22, x3
+ mov x23, x4
+
+ ldr x0, =SCTLR_EL2_RESET
+ msr sctlr_el2, x0
+
+ cpuid x0, x1
+ bl find_logical_id
+ bl setup_stack // Reset stack pointer
+
+ ldr w0, flag_no_el3
+ cmp w0, #0 // Prepare Z flag
+
+ b.ne el2_trampoline // No EL3
+
+ mov x4, #SPSR_KERNEL
+ adr x5, el2_trampoline
+ drop_el x4, x5
+
+el2_trampoline:
+ bl flush_caches
+
+	cpuid	x0, x1
+	bl	find_logical_id
+	cbnz	x0, 1f
+	bl	ns_init_system
+
+ /* Load kernel parameters */
+1: mov x0, x20
+ mov x1, x21
+ mov x2, x22
+ mov x3, x23
+
+ br x19
+
.ltorg
+
+ .data
+ .align 3
+flag_no_el3:
+ .long 0
#ifndef __ASSEMBLY__
+#define sevl() asm volatile ("sevl\n" : : : "memory")
+
static inline unsigned long read_mpidr(void)
{
unsigned long mpidr;
--- /dev/null
+/*
+ * arch/aarch64/include/asm/psci.h
+ *
+ * Copyright (C) 2015 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+#ifndef __ASM_AARCH64_PSCI_H
+#define __ASM_AARCH64_PSCI_H
+
+#ifndef __ASSEMBLY__
+
+static inline int psci_store_address(unsigned long address,
+ unsigned long *branch_entry)
+{
+ unsigned long ret;
+
+ asm volatile (
+ "1:\n"
+ "ldxr %0, [%2]\n"
+ "subs %0, %0, %3\n"
+ "b.ne 2f\n"
+ "stxr %w0, %1, [%2]\n"
+ "cbnz %w0, 1b\n"
+ "2:\n"
+ : "=&r" (ret)
+ : "r" (address), "r" (branch_entry), "J" (PSCI_ADDR_INVALID)
+ : "cc");
+
+ if (ret != 0)
+ /* ret value comes from subs */
+ return PSCI_RET_ALREADY_ON;
+
+ return PSCI_RET_SUCCESS;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE.txt file.
*/
-#include "common.S"
-
-#define PSCI_CPU_OFF 0x84000002
-#define PSCI_CPU_ON 0xc4000003
-
-#define PSCI_RET_SUCCESS 0
-#define PSCI_RET_NOT_IMPL (-1)
-#define PSCI_RET_INVALID (-2)
-#define PSCI_RET_DENIED (-3)
+#include <psci.h>
-#ifndef CPU_IDS
-#error No CPU MPIDRs provided.
-#endif
-
-#define ADDR_INVALID (-1)
+#include "common.S"
.macro ventry label
.align 7
ventry err_exception
// lower EL, AArch64
- ventry psci_call64
+ ventry smc_entry64
ventry err_exception
ventry err_exception
ventry err_exception
// lower EL, AArch32
- ventry psci_call32
+ ventry smc_entry32
ventry err_exception
ventry err_exception
ventry err_exception
- .data
-branch_table:
- .rept (NR_CPUS)
- .quad ADDR_INVALID
- .endr
-
.text
.globl start_no_el3
err_exception:
b err_exception
-psci_call32:
- mov w0, PSCI_RET_NOT_IMPL
- eret
-
-psci_call64:
- ldr x7, =PSCI_CPU_OFF
+ .macro branch_if val, addr
+ ldr x7, =\val
cmp x0, x7
- b.eq psci_cpu_off
-
- ldr x7, =PSCI_CPU_ON
- cmp x0, x7
- b.eq psci_cpu_on
+ adr x7, \addr
+ b.eq do_call
+ .endm
- mov x0, PSCI_RET_NOT_IMPL
+smc_entry32:
+ mov w0, PSCI_RET_NOT_SUPPORTED
eret
-/*
- * x1 - optional power state parameter, ignored here
- */
-psci_cpu_off:
- cpuid x0, x1
- bl find_logical_id
- adr x1, branch_table
- mov x2, #ADDR_INVALID
- str x2, [x1, x0, lsl #3]
-
- b spin
-
-/*
- * x1 - target cpu
- * x2 - address
- */
-psci_cpu_on:
- mov x15, x30
- mov x14, x2
+smc_entry64:
+ /* SMC entry uses 112 bytes of stack */
+ stp x18, x19, [sp, #-16]!
+ stp x20, x21, [sp, #-16]!
+ stp x22, x23, [sp, #-16]!
+ stp x24, x25, [sp, #-16]!
+ stp x26, x27, [sp, #-16]!
+ stp x28, x29, [sp, #-16]!
+ // Keep sp aligned to 16 bytes
+ stp x30, xzr, [sp, #-16]!
+
+ /* If function ID matches, do_call with procedure address in x7 */
+ branch_if PSCI_CPU_ON_64, psci_cpu_on
+ branch_if PSCI_CPU_OFF, psci_cpu_off
+
+ /* Otherwise, return error in x0/w0 */
+ mov x0, PSCI_RET_NOT_SUPPORTED
+ b smc_exit
+
+do_call:
mov x0, x1
-
- bl find_logical_id
- cmp x0, #-1
- b.eq 1f
-
- adr x3, branch_table
- add x3, x3, x0, lsl #3
-
- ldr x4, =ADDR_INVALID
-
- ldxr x5, [x3]
- cmp x4, x5
- b.ne 1f
-
- stxr w4, x14, [x3]
- cbnz w4, 1f
-
- dsb ishst
- sev
-
- mov x0, #PSCI_RET_SUCCESS
- mov x30, x15
- eret
-
-1: mov x0, #PSCI_RET_DENIED
- mov x30, x15
+ mov x1, x2
+ mov x2, x3
+
+ blr x7
+
+smc_exit:
+ ldp x30, xzr, [sp], #16
+ ldp x28, x29, [sp], #16
+ ldp x26, x27, [sp], #16
+ ldp x24, x25, [sp], #16
+ ldp x22, x23, [sp], #16
+ ldp x20, x21, [sp], #16
+ ldp x18, x19, [sp], #16
eret
/* only boot the primary cpu (entry 0 in the table) */
cpuid x0, x1
bl find_logical_id
- cbnz x0, spin
-
- adr x2, branch_table
- adr x1, start_cpu0
- str x1, [x2]
- sevl
- b spin
-
-/*
- * Poll the release table, waiting for a valid address to appear.
- * When a valid address appears, branch to it.
- */
-spin:
- cpuid x0, x1
- bl find_logical_id
- cmp x0, #-1
- b.eq spin_dead
-
- adr x1, branch_table
- mov x3, #ADDR_INVALID
-
- add x1, x1, x0, lsl #3
-
-1: wfe
- ldr x2, [x1]
- cmp x2, x3
- b.eq 1b
-
- ldr x0, =SCTLR_EL2_RESET
- msr sctlr_el2, x0
-
- mov x3, #SPSR_KERNEL
- adr x4, el2_trampoline
- mov x0, x2
- drop_el x3, x4
+ b psci_first_spin
/*
* This PSCI implementation requires EL3. Without EL3 we'll only boot the
start_no_el3:
cpuid x0, x1
bl find_logical_id
- cbz x0, start_cpu0
+ cbz x0, psci_first_spin
spin_dead:
wfe
b spin_dead
-
-
-/*
- * Clean and invalidate the caches at EL2 to simplify EL3's cache usage.
- */
-el2_trampoline:
- mov x15, x0
- bl flush_caches
- br x15
-
-start_cpu0:
- /*
- * Kernel parameters
- */
- mov x0, xzr
- mov x1, xzr
- mov x2, xzr
- mov x3, xzr
-
- bl ns_init_system
- ldr x0, =dtb
- b kernel
.globl start_el3
start_el3:
- /*
- * Prepare the switch to the EL2_SP1 mode from EL3
- */
- ldr x0, =SCTLR_EL2_RESET
- msr sctlr_el2, x0
- ldr x0, =start_no_el3 // Return after mode switch
- mov x1, #SPSR_KERNEL
- drop_el x1, x0
-
start_no_el3:
- /*
- * Kernel parameters
- */
- mov x0, xzr
- mov x1, xzr
- mov x2, xzr
- mov x3, xzr
-
- cpuid x4, x5
- b.eq 2f
-
- /*
- * Secondary CPUs
- */
-1: wfe
- ldr x4, mbox
- cbz x4, 1b
- br x4 // branch to the given address
+ cpuid x0, x1
+ bl find_logical_id
-2:
/*
- * Primary CPU
+ * Primary CPU (x0 = 0) jumps to kernel, the other ones wait for an
+ * address to appear in mbox
*/
- bl ns_init_system
- ldr x0, =dtb // device tree blob
- b kernel
+ adr x3, mbox
+ adr x4, kernel_address
+ cmp x0, #0
+ csel x1, x3, x4, ne
+ mov x2, #0
+ bl first_spin
+
+	.align 3
+kernel_address:
+	.quad 0
.ltorg
* found in the LICENSE.txt file.
*/
+#include <cpu.h>
+
.globl find_logical_id
.globl setup_vector
-#include "common.S"
-
- .data
-
- /*
- * Array of the CPU ID (MPIDR & MPIDR_ID_BITS) of each CPU in the system.
- * The index into the array is used as a logical id, and an index into
- * the branch table. The branch table is automatically padded to the
- * same size as the id table.
- *
- * The first CPU in the table is considered to be the primary CPU, and
- * is the only CPU to immediately branch off to the kernel.
- */
- .align 3
-id_table:
- .quad CPU_IDS
-__id_end:
- .quad MPIDR_INVALID
-
.text
/*
--- /dev/null
+/*
+ * boot_common.c - common spin function for all boot methods
+ *
+ * Copyright (C) 2015 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+#include <cpu.h>
+#include <spin.h>
+
+extern unsigned long kernel;
+extern unsigned long dtb;
+
+void __noreturn jump_kernel(unsigned long address,
+ unsigned long a0,
+ unsigned long a1,
+ unsigned long a2,
+ unsigned long a3);
+
+const unsigned long id_table[] = { CPU_IDS };
+
+/**
+ * Wait for an address to appear in mbox, and jump to it.
+ *
+ * @mbox: location to watch
+ * @invalid: value of an invalid address, 0 or -1 depending on the boot method
+ * @is_entry: when true, pass boot parameters to the kernel, instead of 0
+ */
+void __noreturn spin(unsigned long *mbox, unsigned long invalid, int is_entry)
+{
+ unsigned long addr = invalid;
+
+ while (addr == invalid) {
+ wfe();
+ addr = *mbox;
+ }
+
+ if (is_entry)
+ jump_kernel(addr, (unsigned long)&dtb, 0, 0, 0);
+
+ jump_kernel(addr, 0, 0, 0, 0);
+
+ unreachable();
+}
+
+/**
+ * Primary CPU finishes platform initialisation and jumps to the kernel.
+ * Secondaries are parked, waiting for their mbox to contain a valid address.
+ *
+ * @cpu: logical CPU number
+ * @mbox: location to watch
+ * @invalid: value of an invalid address, 0 or -1 depending on the boot method
+ */
+void __noreturn first_spin(unsigned int cpu, unsigned long *mbox,
+ unsigned long invalid)
+{
+ if (cpu == 0) {
+ *mbox = (unsigned long)&kernel;
+ sevl();
+ spin(mbox, invalid, 1);
+ } else {
+ *mbox = invalid;
+ spin(mbox, invalid, 0);
+ }
+
+ unreachable();
+}
--- /dev/null
+/*
+ * include/compiler.h - common compiler defines
+ *
+ * Copyright (C) 2015 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ *
+ * Note: we only support GCC.
+ */
+#ifndef __COMPILER_H
+#define __COMPILER_H
+
+#define unreachable() __builtin_unreachable()
+
+#define __noreturn __attribute__((noreturn))
+
+#endif
#define MPIDR_INVALID (-1)
+#ifndef __ASSEMBLY__
+
+#define dsb(arg) asm volatile ("dsb " #arg "\n" : : : "memory")
+#define sev() asm volatile ("sev\n" : : : "memory")
+#define wfe() asm volatile ("wfe\n" : : : "memory")
+
+unsigned int find_logical_id(unsigned long mpidr);
+
+#endif /* !__ASSEMBLY__ */
#endif
--- /dev/null
+/*
+ * include/psci.h
+ *
+ * Copyright (C) 2015 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+#ifndef __PSCI_H
+#define __PSCI_H
+
+#define PSCI_CPU_OFF 0x84000002
+#define PSCI_CPU_ON_64 0xc4000003
+
+#define PSCI_RET_SUCCESS 0
+#define PSCI_RET_NOT_SUPPORTED (-1)
+#define PSCI_RET_INVALID_PARAMETERS (-2)
+#define PSCI_RET_DENIED (-3)
+#define PSCI_RET_ALREADY_ON (-4)
+#define PSCI_RET_ON_PENDING (-5)
+#define PSCI_RET_INTERNAL_FAILURE (-6)
+#define PSCI_RET_NOT_PRESENT (-7)
+#define PSCI_RET_DISABLED (-8)
+
+#define PSCI_ADDR_INVALID (-1)
+
+#include <asm/psci.h>
+
+#endif
--- /dev/null
+/*
+ * include/spin.h
+ *
+ * Copyright (C) 2015 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+#ifndef __SPIN_H
+#define __SPIN_H
+
+#include <compiler.h>
+
+void __noreturn spin(unsigned long *mbox, unsigned long invalid, int is_entry);
+
+void __noreturn first_spin(unsigned int cpu, unsigned long *mbox,
+ unsigned long invalid_addr);
+
+#endif
--- /dev/null
+/*
+ * psci.c - basic PSCI implementation
+ *
+ * Copyright (C) 2015 ARM Limited. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE.txt file.
+ */
+
+#include <stdint.h>
+
+#include <cpu.h>
+#include <psci.h>
+#include <spin.h>
+
+#ifndef CPU_IDS
+#error "No MPIDRs provided"
+#endif
+
+static unsigned long branch_table[NR_CPUS];
+
+int psci_cpu_on(unsigned long target_mpidr, unsigned long address)
+{
+ int ret;
+ unsigned int cpu = find_logical_id(target_mpidr);
+
+ if (cpu == MPIDR_INVALID)
+ return PSCI_RET_INVALID_PARAMETERS;
+
+ ret = psci_store_address(address, branch_table + cpu);
+
+ dsb(ishst);
+ sev();
+
+ return ret;
+}
+
+int psci_cpu_off(void)
+{
+ unsigned long mpidr = read_mpidr();
+ unsigned int cpu = find_logical_id(mpidr);
+
+ if (cpu == MPIDR_INVALID)
+ return PSCI_RET_DENIED;
+
+ branch_table[cpu] = PSCI_ADDR_INVALID;
+
+ spin(branch_table + cpu, PSCI_ADDR_INVALID, 0);
+
+ unreachable();
+}
+
+void __noreturn psci_first_spin(unsigned int cpu)
+{
+ if (cpu == MPIDR_INVALID)
+ while (1);
+
+ first_spin(cpu, branch_table + cpu, PSCI_ADDR_INVALID);
+
+ unreachable();
+}