#include <xen/lib.h>
#include <xen/kernel.h>
+#include <xen/sched.h>
#include <public/xen.h>
+#include <asm/mmio.h>
#include <asm/processor.h>
#include <asm/traps.h>
do_unexpected_trap("Data Abort", regs);
}
+/*
+ * Perform the post-emulation side effects of a decoded instruction
+ * (e.g. base register writeback for post-indexing ldr/str).
+ *
+ * 32-bit Xen does not decode post-indexing instructions at all, so seeing
+ * that state here indicates an internal inconsistency; crash the offending
+ * domain rather than continue with partially emulated state.
+ */
+void finalize_instr_emulation(const struct instr_details *instr)
+{
+ /*
+ * We have not implemented decoding of post indexing instructions for 32 bit.
+ * Thus, this should be unreachable.
+ */
+ if ( instr->state == INSTR_LDR_STR_POSTINDEXING )
+ domain_crash(current->domain);
+}
+
/*
* Local variables:
* mode: C
*/
#include <xen/lib.h>
+#include <xen/sched.h>
#include <asm/hsr.h>
#include <asm/system.h>
panic("bad mode\n");
}
+/*
+ * Complete the emulation of a decoded post-indexing ldr/str instruction by
+ * writing the incremented base register (rn += imm9) back to guest state.
+ *
+ * rn == 31 encodes the stack pointer, which is banked per exception level,
+ * so the saved PSTATE mode (M[3:0]) selects between sp_el0 and sp_el1.
+ * Other instruction states are left untouched.
+ */
+void finalize_instr_emulation(const struct instr_details *instr)
+{
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+ register_t val = 0;
+ uint8_t psr_mode = (regs->cpsr & PSR_MODE_MASK);
+
+ /* Currently, we handle only ldr/str post indexing instructions */
+ if ( instr->state != INSTR_LDR_STR_POSTINDEXING )
+ return;
+
+ /*
+ * Handle when rn = SP
+ * Refer ArmV8 ARM DDI 0487G.b, Page - D1-2463 "Stack pointer register
+ * selection"
+ * t = SP_EL0
+ * h = SP_ELx
+ * and M[3:0] (Page - C5-474 "When exception taken from AArch64 state:")
+ */
+ if ( instr->rn == 31 )
+ {
+ switch ( psr_mode )
+ {
+ case PSR_MODE_EL1h:
+ val = regs->sp_el1;
+ break;
+ case PSR_MODE_EL1t:
+ case PSR_MODE_EL0t:
+ val = regs->sp_el0;
+ break;
+
+ /* Any other mode (e.g. EL2/EL3) is unexpected for a guest abort. */
+ default:
+ domain_crash(current->domain);
+ return;
+ }
+ }
+ else
+ val = get_user_reg(regs, instr->rn);
+
+ val += instr->imm9;
+
+ /*
+ * Write the result back. For rn == 31 only EL1h vs EL1t/EL0t remain
+ * possible here, as the default case above has already returned.
+ */
+ if ( instr->rn == 31 )
+ {
+ if ( (regs->cpsr & PSR_MODE_MASK) == PSR_MODE_EL1h )
+ regs->sp_el1 = val;
+ else
+ regs->sp_el0 = val;
+ }
+ else
+ set_user_reg(regs, instr->rn, val);
+}
+
/*
* Local variables:
* mode: C
update_dabt(dabt, opcode.ldr_str.rt, opcode.ldr_str.size, false);
+ dabt_instr->state = INSTR_LDR_STR_POSTINDEXING;
dabt_instr->rn = opcode.ldr_str.rn;
dabt_instr->imm9 = opcode.ldr_str.imm9;
+ dabt->valid = 1;
return 0;
/* vPCI is not available on Arm */
#define has_vpci(d) ({ (void)(d); false; })
+/*
+ * Arm-specific per-vCPU I/O request state, preserved across an IO_RETRY
+ * round trip to the ioreq server so the instruction can be finalized later.
+ */
+struct arch_vcpu_io {
+ struct instr_details dabt_instr; /* when the instruction is decoded */
+};
+
#endif /* __ASM_DOMAIN_H__ */
/*
#define MAX_IO_HANDLER 16
+/* Progress/result of decoding the instruction that caused a data abort. */
+enum instr_decode_state
+{
+ INSTR_ERROR, /* Error encountered while decoding instr */
+ INSTR_VALID, /* ISS is valid, so no need to decode */
+ /*
+ * Instruction is decoded successfully. It is a ldr/str post indexing
+ * instruction.
+ */
+ INSTR_LDR_STR_POSTINDEXING,
+};
+
typedef struct
{
struct hsr_dabt dabt;
struct instr_details {
unsigned long rn:5;
signed int imm9:9;
+ /* Outcome of decoding; see enum instr_decode_state. */
+ enum instr_decode_state state;
} dabt_instr;
paddr_t gpa;
} mmio_info_t;
};
enum io_state try_handle_mmio(struct cpu_user_regs *regs,
- const union hsr hsr,
- paddr_t gpa);
+ mmio_info_t *info);
void register_mmio_handler(struct domain *d,
const struct mmio_handler_ops *ops,
paddr_t addr, paddr_t size, void *priv);
int domain_io_init(struct domain *d, int max_count);
void domain_io_free(struct domain *d);
+void try_decode_instruction(const struct cpu_user_regs *regs,
+ mmio_info_t *info);
#endif /* __ASM_ARM_MMIO_H__ */
return r;
}
+void finalize_instr_emulation(const struct instr_details *instr);
+
#endif /* __ASM_ARM_TRAPS__ */
/*
* Local variables:
return handler;
}
+/*
+ * Determine how the faulting instruction can be emulated and record the
+ * result in info->dabt_instr.state:
+ * - INSTR_VALID: the ISS describes the access, no software decoding needed
+ * (except re-decoding for the erratum 766422 Thumb-store workaround);
+ * - INSTR_ERROR: decoding was required but failed — the caller is expected
+ * to forward the abort to the guest;
+ * - otherwise decode_instruction() has filled in the decoded details.
+ */
+void try_decode_instruction(const struct cpu_user_regs *regs,
+ mmio_info_t *info)
+{
+ int rc;
+
+ if ( info->dabt.valid )
+ {
+ info->dabt_instr.state = INSTR_VALID;
+
+ /*
+ * Erratum 766422: Thumb store translation fault to Hypervisor may
+ * not have correct HSR Rt value.
+ */
+ if ( check_workaround_766422() && (regs->cpsr & PSR_THUMB) &&
+ info->dabt.write )
+ {
+ rc = decode_instruction(regs, info);
+ if ( rc )
+ {
+ gprintk(XENLOG_DEBUG, "Unable to decode instruction\n");
+ info->dabt_instr.state = INSTR_ERROR;
+ }
+ }
+ return;
+ }
+
+ /*
+ * Armv8 processor does not provide a valid syndrome for decoding some
+ * instructions. So in order to process these instructions, Xen must
+ * decode them.
+ */
+ rc = decode_instruction(regs, info);
+ if ( rc )
+ {
+ gprintk(XENLOG_ERR, "Unable to decode instruction\n");
+ info->dabt_instr.state = INSTR_ERROR;
+ }
+}
+
+/*
+ * Emulate the MMIO access described by @info: dispatch to the registered
+ * in-Xen handler covering info->gpa, or forward the request to an ioreq
+ * server when no such handler exists. The caller must have ensured
+ * info->dabt.valid is set (either by hardware or by a successful decode),
+ * hence the ASSERT_UNREACHABLE below.
+ */
enum io_state try_handle_mmio(struct cpu_user_regs *regs,
-                              const union hsr hsr,
-                              paddr_t gpa)
+                              mmio_info_t *info)
{
struct vcpu *v = current;
const struct mmio_handler *handler = NULL;
- const struct hsr_dabt dabt = hsr.dabt;
- mmio_info_t info = {
- .gpa = gpa,
- .dabt = dabt
- };
+ int rc;
- ASSERT(hsr.ec == HSR_EC_DATA_ABORT_LOWER_EL);
+ ASSERT(info->dabt.ec == HSR_EC_DATA_ABORT_LOWER_EL);
- handler = find_mmio_handler(v->domain, info.gpa);
- if ( !handler )
+ if ( !info->dabt.valid )
{
- int rc;
+ ASSERT_UNREACHABLE();
+ return IO_ABORT;
+ }
- rc = try_fwd_ioserv(regs, v, &info);
+ handler = find_mmio_handler(v->domain, info->gpa);
+ if ( !handler )
+ {
+ rc = try_fwd_ioserv(regs, v, info);
if ( rc == IO_HANDLED )
return handle_ioserv(regs, v);
return rc;
}
- /* All the instructions used on emulated MMIO region should be valid */
- if ( !dabt.valid )
- return IO_ABORT;
-
/*
- * Erratum 766422: Thumb store translation fault to Hypervisor may
- * not have correct HSR Rt value.
+ * At this point, we know that the instruction is either valid or has been
+ * decoded successfully. Thus, Xen should be allowed to execute the
+ * instruction on the emulated MMIO region.
*/
- if ( check_workaround_766422() && (regs->cpsr & PSR_THUMB) &&
- dabt.write )
- {
- int rc;
-
- rc = decode_instruction(regs, &info);
- if ( rc )
- {
- gprintk(XENLOG_DEBUG, "Unable to decode instruction\n");
- return IO_ABORT;
- }
- }
-
- if ( info.dabt.write )
- return handle_write(handler, v, &info);
+ if ( info->dabt.write )
+ return handle_write(handler, v, info);
else
- return handle_read(handler, v, &info);
+ return handle_read(handler, v, info);
}
void register_mmio_handler(struct domain *d,
struct vcpu *v, mmio_info_t *info)
{
struct vcpu_io *vio = &v->io;
+ struct instr_details instr = info->dabt_instr;
+ struct hsr_dabt dabt = info->dabt;
ioreq_t p = {
.type = IOREQ_TYPE_COPY,
.addr = info->gpa,
if ( !s )
return IO_UNHANDLED;
- if ( !info->dabt.valid )
- return IO_ABORT;
+ ASSERT(dabt.valid);
vio->req = p;
+ vio->info.dabt_instr = instr;
rc = ioreq_send(s, &p, 0);
if ( rc != IO_RETRY || v->domain->is_shutting_down )
bool arch_ioreq_complete_mmio(void)
{
struct vcpu *v = current;
+ struct instr_details dabt_instr = v->io.info.dabt_instr;
struct cpu_user_regs *regs = guest_cpu_user_regs();
const union hsr hsr = { .bits = regs->hsr };
if ( handle_ioserv(regs, v) == IO_HANDLED )
{
+ finalize_instr_emulation(&dabt_instr);
advance_pc(regs, hsr);
return true;
}
return !map_regions_p2mt(d, gfn, 1, mfn, p2m_mmio_direct_c);
}
+/*
+ * Try to resolve the stage-2 translation fault at @gpa without emulation.
+ *
+ * Returns true if the fault has been fixed up in the P2M — either by the
+ * generic translation-fault resolver or, for data accesses only, by mapping
+ * the MMIO region on demand — so the guest can simply retry the access.
+ */
+static inline bool check_p2m(bool is_data, paddr_t gpa)
+{
+    /*
+     * First check whether the translation fault can be resolved by the
+     * P2M subsystem. If that's the case, nothing else to do.
+     */
+    if ( p2m_resolve_translation_fault(current->domain, gaddr_to_gfn(gpa)) )
+        return true;
+
+    /* Only data accesses may trigger on-demand MMIO mapping. */
+    return is_data && try_map_mmio(gaddr_to_gfn(gpa));
+}
+
static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
const union hsr hsr)
{
paddr_t gpa;
uint8_t fsc = xabt.fsc & ~FSC_LL_MASK;
bool is_data = (hsr.ec == HSR_EC_DATA_ABORT_LOWER_EL);
+ mmio_info_t info;
+ enum io_state state;
/*
* If this bit has been set, it means that this stage-2 abort is caused
return;
}
case FSC_FLT_TRANS:
+ {
+ info.gpa = gpa;
+ info.dabt = hsr.dabt;
+
/*
- * Attempt first to emulate the MMIO as the data abort will
- * likely happen in an emulated region.
- *
- * Note that emulated region cannot be executed
+ * Assumption: in most cases where we get a data abort with an invalid
+ * ISS, or an instruction abort, the underlying cause is that the page
+ * tables have not been set up correctly.
*/
- if ( is_data )
+ if ( !is_data || !info.dabt.valid )
{
- enum io_state state = try_handle_mmio(regs, hsr, gpa);
+ if ( check_p2m(is_data, gpa) )
+ return;
- switch ( state )
- {
+ /*
+ * If the instruction abort could not be resolved by setting the
+ * appropriate bits in the translation table, then Xen should
+ * forward the abort to the guest.
+ */
+ if ( !is_data )
+ goto inject_abt;
+ }
+
+ try_decode_instruction(regs, &info);
+
+ /*
+ * If Xen could not decode the instruction or encountered an error
+ * while decoding, then it should forward the abort to the guest.
+ */
+ if ( info.dabt_instr.state == INSTR_ERROR )
+ goto inject_abt;
+
+ state = try_handle_mmio(regs, &info);
+
+ switch ( state )
+ {
case IO_ABORT:
goto inject_abt;
case IO_HANDLED:
+ /*
+ * If the instruction was decoded and has executed successfully
+ * on the MMIO region, then Xen should execute the next part of
+ * the instruction (e.g. increment rn if it is a post-indexing
+ * instruction).
+ */
+ finalize_instr_emulation(&info.dabt_instr);
advance_pc(regs, hsr);
return;
case IO_RETRY:
case IO_UNHANDLED:
/* IO unhandled, try another way to handle it. */
break;
- }
}
/*
- * First check if the translation fault can be resolved by the
- * P2M subsystem. If that's the case nothing else to do.
+ * If the instruction syndrome was invalid, then we already checked if
+ * this was due to a P2M fault. So no point to check again as the result
+ * will be the same.
*/
- if ( p2m_resolve_translation_fault(current->domain,
- gaddr_to_gfn(gpa)) )
- return;
-
- if ( is_data && try_map_mmio(gaddr_to_gfn(gpa)) )
+ if ( (info.dabt_instr.state == INSTR_VALID) && check_p2m(is_data, gpa) )
return;
break;
+ }
default:
gprintk(XENLOG_WARNING,
"Unsupported FSC: HSR=%#"PRIregister" DFSC=%#x\n",
: is_pv_32bit_domain(d) ? PV32_VM_ASSIST_MASK \
: PV64_VM_ASSIST_MASK)
+/* No architecture-specific per-vCPU I/O request state is needed here. */
+struct arch_vcpu_io {
+};
+
#endif /* __ASM_DOMAIN_H__ */
/*
/* I/O request in flight to device model. */
enum vio_completion completion;
ioreq_t req;
+ /* Arch specific info pertaining to the io request */
+ struct arch_vcpu_io info;
};
struct vcpu