subdir-y += lib
+obj-y += entry.o
obj-y += mode_switch.o
+obj-y += traps.o
obj-y += domain.o
--- /dev/null
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+#define COMPILE_OFFSETS
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <xen/bitops.h>
+#include <public/xen.h>
+#include <asm/current.h>
+
+#define DEFINE(_sym, _val) \
+ __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
+#define BLANK() \
+ __asm__ __volatile__ ( "\n->" : : )
+#define OFFSET(_sym, _str, _mem) \
+ DEFINE(_sym, offsetof(_str, _mem));
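+
+/*
+ * For example, OFFSET(UREGS_X0, struct cpu_user_regs, x0) makes the
+ * compiler emit a marker line roughly of the form
+ *     ->UREGS_X0 0 offsetof(struct cpu_user_regs, x0)
+ * in its assembly output (the exact spelling of the immediate is
+ * compiler dependent); the build's post-processing picks up every
+ * "->" line and turns it into a #define in the generated
+ * asm-offsets.h consumed by entry.S.
+ */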
+
+void __dummy__(void)
+{
+ OFFSET(UREGS_X0, struct cpu_user_regs, x0);
+ OFFSET(UREGS_LR, struct cpu_user_regs, lr);
+
+ OFFSET(UREGS_SP, struct cpu_user_regs, sp);
+ OFFSET(UREGS_PC, struct cpu_user_regs, pc);
+ OFFSET(UREGS_CPSR, struct cpu_user_regs, cpsr);
+
+ OFFSET(UREGS_SPSR_el1, struct cpu_user_regs, spsr_el1);
+
+ OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq);
+ OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq);
+ OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und);
+ OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt);
+
+ OFFSET(UREGS_SP_el0, struct cpu_user_regs, sp_el0);
+ OFFSET(UREGS_SP_el1, struct cpu_user_regs, sp_el1);
+ OFFSET(UREGS_ELR_el1, struct cpu_user_regs, elr_el1);
+
+ OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, cpsr);
+ DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
+ BLANK();
+
+ DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
+
+ OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#include <xen/config.h>
+#include <asm/asm_defns.h>
+#include <public/xen.h>
+
+/*
+ * Register aliases.
+ */
+lr .req x30 // link register
+
+/*
+ * Stack pushing/popping (register pairs only). Equivalent to store decrement
+ * before, load increment after.
+ */
+ .macro push, xreg1, xreg2
+ stp \xreg1, \xreg2, [sp, #-16]!
+ .endm
+
+ .macro pop, xreg1, xreg2
+ ldp \xreg1, \xreg2, [sp], #16
+ .endm
+
+/*
+ * Save/restore guest mode specific state, outer stack frame
+ */
+ .macro entry_guest, compat
+
+ add x21, sp, #UREGS_SPSR_el1
+ mrs x23, SPSR_EL1
+ str x23, [x21]
+
+ .if \compat == 0 /* AArch64 mode */
+
+ add x21, sp, #UREGS_SP_el0
+ mrs x22, SP_el0
+ str x22, [x21]
+
+ add x21, sp, #UREGS_ELR_el1
+ mrs x22, SP_el1
+ mrs x23, ELR_el1
+ stp x22, x23, [x21]
+
+ .else /* AArch32 mode */
+
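+ /*
+  * The AArch32 banked SPSR_{fiq,irq,und,abt} registers hold state
+  * which is only meaningful while the guest EL1 is AArch32; an
+  * AArch64 guest has no equivalent, hence the split between the two
+  * halves of this macro.
+  */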
+ add x21, sp, #UREGS_SPSR_fiq
+ mrs x22, spsr_fiq
+ mrs x23, spsr_irq
+ stp w22, w23, [x21]
+
+ add x21, sp, #UREGS_SPSR_und
+ mrs x22, spsr_und
+ mrs x23, spsr_abt
+ stp w22, w23, [x21]
+
+ .endif
+
+ .endm
+
+/*
+ * Save state on entry to hypervisor
+ */
+ .macro entry, hyp, compat
+ sub sp, sp, #(UREGS_SPSR_el1 - UREGS_SP)
+ push x28, x29
+ push x26, x27
+ push x24, x25
+ push x22, x23
+ push x20, x21
+ push x18, x19
+ push x16, x17
+ push x14, x15
+ push x12, x13
+ push x10, x11
+ push x8, x9
+ push x6, x7
+ push x4, x5
+ push x2, x3
+ push x0, x1
+
+ .if \hyp == 1 /* Hypervisor mode */
+
+ add x21, sp, #(UREGS_X0 - UREGS_SP)
+
+ .else /* Guest mode */
+
+ entry_guest \compat
+ mov x21, ~0 /* sp only valid for hyp frame XXX */
+
+ .endif
+
+ stp lr, x21, [sp, #UREGS_LR]
+
+ mrs x22, elr_el2
+ mrs x23, spsr_el2
+ stp x22, x23, [sp, #UREGS_PC]
+
+ .endm
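+
+/*
+ * On exit from the entry macro sp points at the saved register frame;
+ * the C trap handlers below receive it as a struct cpu_user_regs *
+ * via "mov x0, sp".
+ */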
+
+/*
+ * Bad Abort numbers
+ *-----------------
+ */
+#define BAD_SYNC 0
+#define BAD_IRQ 1
+#define BAD_FIQ 2
+#define BAD_ERROR 3
+
+ .macro invalid, reason
+ mov x0, sp
+ mov x1, #\reason
+ b do_bad_mode
+ .endm
+
+hyp_sync_invalid:
+ entry hyp=1
+ invalid BAD_SYNC
+
+hyp_irq_invalid:
+ entry hyp=1
+ invalid BAD_IRQ
+
+hyp_fiq_invalid:
+ entry hyp=1
+ invalid BAD_FIQ
+
+hyp_error_invalid:
+ entry hyp=1
+ invalid BAD_ERROR
+
+/* Traps taken in Current EL with SP_ELx */
+hyp_sync:
+ entry hyp=1
+ msr daifclr, #2 /* Unmask interrupts */
+ mov x0, sp
+ bl do_trap_hypervisor
+ b return_to_hypervisor
+
+hyp_irq:
+ entry hyp=1
+ mov x0, sp
+ bl do_trap_irq
+ b return_to_hypervisor
+
+guest_sync:
+ entry hyp=0, compat=0
+ invalid BAD_SYNC /* No AArch64 guest support yet */
+
+guest_irq:
+ entry hyp=0, compat=0
+ invalid BAD_IRQ /* No AArch64 guest support yet */
+
+guest_fiq_invalid:
+ entry hyp=0, compat=0
+ invalid BAD_FIQ
+
+guest_error_invalid:
+ entry hyp=0, compat=0
+ invalid BAD_ERROR
+
+guest_sync_compat:
+ entry hyp=0, compat=1
+ msr daifclr, #2 /* Unmask interrupts */
+ mov x0, sp
+ bl do_trap_hypervisor
+ b return_to_guest
+
+guest_irq_compat:
+ entry hyp=0, compat=1
+ mov x0, sp
+ bl do_trap_irq
+ b return_to_guest
+
+guest_fiq_invalid_compat:
+ entry hyp=0, compat=1
+ invalid BAD_FIQ
+
+guest_error_invalid_compat:
+ entry hyp=0, compat=1
+ invalid BAD_ERROR
+
+ENTRY(return_to_new_vcpu)
+ ldr x21, [sp, #UREGS_CPSR]
+ and x21, x21, #PSR_MODE_MASK
+ /* Returning to EL2? */
+ cmp x21, #PSR_MODE_EL2t
+ ccmp x21, #PSR_MODE_EL2h, #0x4, ne
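+ /*
+  * If the first comparison matched, ccmp skips its compare and forces
+  * NZCV to #0x4 (Z set); otherwise it compares against PSR_MODE_EL2h.
+  * Either way Z ends up set, and the branch below taken, exactly when
+  * the mode is EL2t or EL2h.
+  */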
+ b.eq return_to_hypervisor /* Yes */
+ /* Fall thru */
+ENTRY(return_to_guest)
+ bl leave_hypervisor_tail /* Disables interrupts on return */
+ /* Fall thru */
+ENTRY(return_to_hypervisor)
+ msr daifset, #2 /* Mask interrupts */
+
+ ldp x21, x22, [sp, #UREGS_PC] // load ELR, SPSR
+
+ pop x0, x1
+ pop x2, x3
+ pop x4, x5
+ pop x6, x7
+ pop x8, x9
+
+ msr elr_el2, x21 // set up the return data
+ msr spsr_el2, x22
+
+ pop x10, x11
+ pop x12, x13
+ pop x14, x15
+ pop x16, x17
+ pop x18, x19
+ pop x20, x21
+ pop x22, x23
+ pop x24, x25
+ pop x26, x27
+ pop x28, x29
+
+ ldr lr, [sp], #(UREGS_SPSR_el1 - UREGS_SP)
+ eret
+
+/*
+ * Exception vectors.
+ */
+ .macro ventry label
+ .align 7
+ b \label
+ .endm
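+
+/*
+ * Each vector slot is 0x80 bytes apart (.align 7) and the table base
+ * must be 2KB aligned (.align 11), as VBAR_EL2 requires.
+ */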
+
+ .align 11
+ENTRY(hyp_traps_vector)
+ ventry hyp_sync_invalid // Synchronous EL2t
+ ventry hyp_irq_invalid // IRQ EL2t
+ ventry hyp_fiq_invalid // FIQ EL2t
+ ventry hyp_error_invalid // Error EL2t
+
+ ventry hyp_sync // Synchronous EL2h
+ ventry hyp_irq // IRQ EL2h
+ ventry hyp_fiq_invalid // FIQ EL2h
+ ventry hyp_error_invalid // Error EL2h
+
+ ventry guest_sync // Synchronous 64-bit EL0/EL1
+ ventry guest_irq // IRQ 64-bit EL0/EL1
+ ventry guest_fiq_invalid // FIQ 64-bit EL0/EL1
+ ventry guest_error_invalid // Error 64-bit EL0/EL1
+
+ ventry guest_sync_compat // Synchronous 32-bit EL0/EL1
+ ventry guest_irq_compat // IRQ 32-bit EL0/EL1
+ ventry guest_fiq_invalid_compat // FIQ 32-bit EL0/EL1
+ ventry guest_error_invalid_compat // Error 32-bit EL0/EL1
+
+/*
+ * Local variables:
+ * mode: ASM
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/*
+ * xen/arch/arm/arm64/traps.c
+ *
+ * ARM AArch64 Specific Trap handlers
+ *
+ * Copyright (c) 2012 Citrix Systems.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+
+#include <asm/system.h>
+#include <asm/processor.h>
+
+#include <public/xen.h>
+
+asmlinkage void do_trap_serror(struct cpu_user_regs *regs)
+{
+ panic("Unhandled serror trap\n");
+}
+
+/* Indexed by the BAD_* reason codes defined in arm64/entry.S */
+static const char *handler[] = {
+ "Synchronous Abort",
+ "IRQ",
+ "FIQ",
+ "Error"
+};
+
+asmlinkage void do_bad_mode(struct cpu_user_regs *regs, int reason)
+{
+ uint64_t esr = READ_SYSREG64(ESR_EL2);
+ printk("Bad mode in %s handler detected, code 0x%08"PRIx64"\n",
+ handler[reason], esr);
+
+ local_irq_disable();
+ panic("bad mode");
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
set_processor_id(cpuid);
/* Setup Hyp vector base */
- WRITE_CP32((register_t) hyp_traps_vector, HVBAR);
+ WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2);
mmu_init_secondary_cpu();
enable_vfp();
}
-void dump_guest_s1_walk(struct domain *d, uint32_t addr)
+void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
{
uint32_t ttbcr = READ_CP32(TTBCR);
uint32_t ttbr0 = READ_CP32(TTBR0);
uint32_t offset;
uint32_t *first = NULL, *second = NULL;
- printk("dom%d VA 0x%08"PRIx32"\n", d->domain_id, addr);
+ printk("dom%d VA 0x%08"PRIvaddr"\n", d->domain_id, addr);
printk(" TTBCR: 0x%08"PRIx32"\n", ttbcr);
printk(" TTBR0: 0x%08"PRIx32" = 0x%"PRIpaddr"\n",
ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK));
mmio_info_t info;
info.dabt = dabt;
+#ifdef CONFIG_ARM_32
info.gva = READ_CP32(HDFAR);
+#else
+ info.gva = READ_SYSREG64(FAR_EL2);
+#endif
if (dabt.s1ptw)
goto bad_data_abort;
/* XXX inject a suitable fault into the guest */
printk("Guest data abort: %s%s%s\n"
- " gva=%"PRIx32"\n",
+ " gva=%"PRIvaddr"\n",
msg, dabt.s1ptw ? " S2 during S1" : "",
fsc_level_str(level),
info.gva);
asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs)
{
- union hsr hsr = { .bits = READ_CP32(HSR) };
+ union hsr hsr = { .bits = READ_SYSREG32(ESR_EL2) };
switch (hsr.ec) {
case HSR_EC_CP15_32:
+ if ( !is_pv32_domain(current->domain) )
+ goto bad_trap;
do_cp15_32(regs, hsr);
break;
case HSR_EC_CP15_64:
+ if ( !is_pv32_domain(current->domain) )
+ goto bad_trap;
do_cp15_64(regs, hsr);
break;
case HSR_EC_HVC:
do_trap_data_abort_guest(regs, hsr.dabt);
break;
default:
+ bad_trap:
printk("Hypervisor Trap. HSR=0x%x EC=0x%x IL=%x Syndrome=%"PRIx32"\n",
hsr.bits, hsr.ec, hsr.len, hsr.iss);
do_unexpected_trap("Hypervisor", regs);
#define CCSIDR_EL1 CCSIDR
#define CLIDR_EL1 CLIDR
#define CSSELR_EL1 CSSELR
+#define ESR_EL2 HSR
#define ID_AFR0_EL1 ID_AFR0
#define ID_DFR0_EL1 ID_DFR0
#define ID_ISAR0_EL1 ID_ISAR0
#endif
#ifndef __ASSEMBLY__
-extern uint32_t hyp_traps_vector[8];
+extern uint32_t hyp_traps_vector[];
void panic_PAR(uint64_t par);