.df = df,
.data = data,
.data_is_ptr = data_is_addr, /* ioreq_t field name is misleading */
+ .state = STATE_IOREQ_READY,
};
void *p_data = (void *)data;
int rc;
}
}
- switch ( vio->io_state )
+ switch ( vio->io_req.state )
{
case STATE_IOREQ_NONE:
break;
case STATE_IORESP_READY:
- vio->io_state = STATE_IOREQ_NONE;
+ vio->io_req.state = STATE_IOREQ_NONE;
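+        /* Copy out the completed request; its data is handed back below. */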
+ p = vio->io_req;
+
+ /* Verify the emulation request has been correctly re-issued */
+        if ( (p.type != (is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO)) ||
+             (p.addr != addr) ||
+             (p.size != size) ||
+             (p.count != reps) ||
+             (p.dir != dir) ||
+             (p.df != df) ||
+             (p.data_is_ptr != data_is_addr) )
+ domain_crash(curr->domain);
+
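+        /*
+         * Only reads carrying their data in the ioreq are re-issued for
+         * completion (see hvm_vcpu_io_need_completion()); anything else
+         * in this state cannot be handled here.
+         */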
if ( data_is_addr || dir == IOREQ_WRITE )
return X86EMUL_UNHANDLEABLE;
        goto finish_access;
    default:
        return X86EMUL_UNHANDLEABLE;
}
- vio->io_state = STATE_IOREQ_READY;
- vio->io_size = size;
- vio->io_dir = dir;
- vio->io_data_is_addr = data_is_addr;
-
if ( dir == IOREQ_WRITE )
{
if ( !data_is_addr )
hvmtrace_io_assist(&p);
}
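+    /*
+     * Track the request in per-vCPU state while it is in flight;
+     * hvm_vcpu_io_need_completion() and the re-issue check read it back.
+     */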
+ vio->io_req = p;
+
rc = hvm_io_intercept(&p);
switch ( rc )
{
case X86EMUL_OKAY:
- vio->io_data = p.data;
- vio->io_state = STATE_IOREQ_NONE;
+ vio->io_req.state = STATE_IOREQ_NONE;
break;
case X86EMUL_UNHANDLEABLE:
{
if ( !s )
{
rc = hvm_process_io_intercept(&null_handler, &p);
- if ( rc == X86EMUL_OKAY )
- vio->io_data = p.data;
- vio->io_state = STATE_IOREQ_NONE;
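+            /*
+             * No ioreq server claims this access, so the request is
+             * complete once the null handler has run.
+             */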
+ vio->io_req.state = STATE_IOREQ_NONE;
}
else
{
rc = hvm_send_assist_req(s, &p);
if ( rc != X86EMUL_RETRY || curr->domain->is_shutting_down )
- vio->io_state = STATE_IOREQ_NONE;
+ vio->io_req.state = STATE_IOREQ_NONE;
else if ( data_is_addr || dir == IOREQ_WRITE )
rc = X86EMUL_OKAY;
}
hvmtrace_io_assist(&p);
if ( !data_is_addr )
- memcpy(p_data, &vio->io_data, size);
+ memcpy(p_data, &p.data, size);
}
if ( is_mmio && !data_is_addr )
if ( hvm_vcpu_io_need_completion(vio) )
{
- vio->io_state = STATE_IORESP_READY;
- vio->io_data = p->data;
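+            /* Save the result so the re-issued emulation can complete from it. */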
+ vio->io_req.state = STATE_IORESP_READY;
+ vio->io_req.data = p->data;
}
else
- vio->io_state = STATE_IOREQ_NONE;
+ vio->io_req.state = STATE_IOREQ_NONE;
msix_write_completion(curr);
vcpu_end_shutdown_deferral(curr);
handle_mmio();
break;
case HVMIO_pio_completion:
- if ( vio->io_size == 4 ) /* Needs zero extension. */
- guest_cpu_user_regs()->rax = (uint32_t)vio->io_data;
- else
- memcpy(&guest_cpu_user_regs()->rax, &vio->io_data, vio->io_size);
- vio->io_state = STATE_IOREQ_NONE;
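+        /*
+         * handle_pio() re-issues the access: it completes from the data
+         * saved in vio->io_req and performs the %rax update (including
+         * the zero extension for 4-byte reads) itself.
+         */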
+ (void)handle_pio(vio->io_req.addr, vio->io_req.size,
+ vio->io_req.dir);
break;
case HVMIO_realmode_completion:
{
    /*
     * Delay the injection because this would result in delivering
     * an interrupt *within* the execution of an instruction.
     */
- if ( v->arch.hvm_vcpu.hvm_io.io_state != STATE_IOREQ_NONE )
+ if ( v->arch.hvm_vcpu.hvm_io.io_req.state != STATE_IOREQ_NONE )
return hvm_intblk_shadow;
if ( !nv->nv_vmexit_pending && n2vmcb->exitintinfo.bytes != 0 ) {
vmx_realmode_emulate_one(&hvmemul_ctxt);
- if ( vio->io_state != STATE_IOREQ_NONE || vio->mmio_retry )
+ if ( vio->io_req.state != STATE_IOREQ_NONE || vio->mmio_retry )
break;
/* Stop emulating unless our segment state is not safe */
}
/* Need to emulate next time if we've started an IO operation */
- if ( vio->io_state != STATE_IOREQ_NONE )
+ if ( vio->io_req.state != STATE_IOREQ_NONE )
curr->arch.hvm_vmx.vmx_emulate = 1;
if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
struct hvm_vcpu_io {
/* I/O request in flight to device model. */
enum hvm_io_completion io_completion;
-    unsigned long          io_data;
-    unsigned int           io_size;
-    uint8_t                io_state;
-    uint8_t                io_dir;
-    uint8_t                io_data_is_addr;
+    ioreq_t                io_req;
/*
* HVM emulation:
static inline bool_t hvm_vcpu_io_need_completion(const struct hvm_vcpu_io *vio)
{
- return (vio->io_state == STATE_IOREQ_READY) &&
- !vio->io_data_is_addr &&
- (vio->io_dir == IOREQ_READ);
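+    /*
+     * Only a read whose data is carried in the ioreq itself (rather than
+     * at a guest address) has anything to hand back on completion.
+     */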
+ return (vio->io_req.state == STATE_IOREQ_READY) &&
+ !vio->io_req.data_is_ptr &&
+ (vio->io_req.dir == IOREQ_READ);
}
#define VMCX_EADDR (~0ULL)