switch ( vio->io_state )
{
- case HVMIO_none:
+ case STATE_IOREQ_NONE:
break;
- case HVMIO_completed:
- vio->io_state = HVMIO_none;
+ case STATE_IORESP_READY:
+ vio->io_state = STATE_IOREQ_NONE;
if ( data_is_addr || dir == IOREQ_WRITE )
return X86EMUL_UNHANDLEABLE;
goto finish_access;
default:
return X86EMUL_UNHANDLEABLE;
}
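For reference, the replacement names are the in-flight request states of the shared ioreq interface, defined in xen/include/public/hvm/ioreq.h; the removed private enumeration maps onto them one-to-one:

    #define STATE_IOREQ_NONE        0 /* was HVMIO_none */
    #define STATE_IOREQ_READY       1 /* was HVMIO_awaiting_completion */
    #define STATE_IOREQ_INPROCESS   2 /* request picked up by the device model */
    #define STATE_IORESP_READY      3 /* was HVMIO_completed */

Reusing these values means the vcpu's record of an emulated I/O no longer needs translating to and from the state carried in the ioreq itself.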
- vio->io_state = HVMIO_awaiting_completion;
+ vio->io_state = STATE_IOREQ_READY;
vio->io_size = size;
vio->io_dir = dir;
vio->io_data_is_addr = data_is_addr;
switch ( rc )
{
case X86EMUL_OKAY:
vio->io_data = p.data;
- vio->io_state = HVMIO_none;
+ vio->io_state = STATE_IOREQ_NONE;
break;
case X86EMUL_UNHANDLEABLE:
{
struct hvm_ioreq_server *s =
hvm_select_ioreq_server(curr->domain, &p);

/* If there is no suitable backing DM, just ignore accesses */
if ( !s )
{
rc = hvm_process_io_intercept(&null_handler, &p);
if ( rc == X86EMUL_OKAY )
vio->io_data = p.data;
- vio->io_state = HVMIO_none;
+ vio->io_state = STATE_IOREQ_NONE;
}
else
{
rc = hvm_send_assist_req(s, &p);
if ( rc != X86EMUL_RETRY || curr->domain->is_shutting_down )
- vio->io_state = HVMIO_none;
+ vio->io_state = STATE_IOREQ_NONE;
else if ( data_is_addr || dir == IOREQ_WRITE )
rc = X86EMUL_OKAY;
}
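The branch above decides whether the vcpu stays blocked on the device model. A minimal sketch of that decision (must_wait_for_dm is a hypothetical name, not a Xen function):

    #include <stdint.h>

    #define IOREQ_READ 1 /* as in xen/include/public/hvm/ioreq.h */

    /* Only a buffered read leaves data for the emulator to collect, so
     * only then must X86EMUL_RETRY propagate; writes, and reads whose
     * destination is guest memory (data_is_addr), are completed by the
     * device model on its own and the vcpu can continue immediately. */
    static int must_wait_for_dm(uint8_t dir, uint8_t data_is_addr)
    {
        return dir == IOREQ_READ && !data_is_addr;
    }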
if ( hvm_vcpu_io_need_completion(vio) )
{
- vio->io_state = HVMIO_completed;
+ vio->io_state = STATE_IORESP_READY;
vio->io_data = p->data;
}
else
- vio->io_state = HVMIO_none;
+ vio->io_state = STATE_IOREQ_NONE;
msix_write_completion(curr);
vcpu_end_shutdown_deferral(curr);
case HVMIO_pio_completion:
if ( vio->io_size == 4 ) /* Needs zero extension. */
guest_cpu_user_regs()->rax = (uint32_t)vio->io_data;
else
memcpy(&guest_cpu_user_regs()->rax, &vio->io_data, vio->io_size);
- vio->io_state = HVMIO_none;
+ vio->io_state = STATE_IOREQ_NONE;
break;
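The size test in the restored context above exists because x86-64 zero-extends every 32-bit register write into the full 64-bit register, while 8- and 16-bit writes leave the upper bytes intact: a 4-byte PIO read may assign rax outright, but smaller reads must splice in only the low bytes. A standalone illustration (plain C; little-endian, as on x86):

    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint64_t rax = UINT64_C(0xffffffffffffffff); /* stale contents */
        uint64_t io_data = 0x11223344;       /* data returned by the DM */

        /* 4-byte read: 32-bit writes zero-extend, so assign outright. */
        rax = (uint32_t)io_data;
        printf("size 4: %#018" PRIx64 "\n", rax); /* 0x0000000011223344 */

        /* 2-byte read: only the low two bytes change, the rest survive. */
        rax = UINT64_C(0xffffffffffffffff);
        memcpy(&rax, &io_data, 2);
        printf("size 2: %#018" PRIx64 "\n", rax); /* 0xffffffffffff3344 */
        return 0;
    }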
case HVMIO_realmode_completion:
{
/*
* Delay the injection because this would result in delivering
* an interrupt *within* the execution of an instruction.
*/
- if ( v->arch.hvm_vcpu.hvm_io.io_state != HVMIO_none )
+ if ( v->arch.hvm_vcpu.hvm_io.io_state != STATE_IOREQ_NONE )
return hvm_intblk_shadow;
if ( !nv->nv_vmexit_pending && n2vmcb->exitintinfo.bytes != 0 ) {
vmx_realmode_emulate_one(&hvmemul_ctxt);
- if ( vio->io_state != HVMIO_none || vio->mmio_retry )
+ if ( vio->io_state != STATE_IOREQ_NONE || vio->mmio_retry )
break;
/* Stop emulating unless our segment state is not safe */
}
/* Need to emulate next time if we've started an IO operation */
- if ( vio->io_state != HVMIO_none )
+ if ( vio->io_state != STATE_IOREQ_NONE )
curr->arch.hvm_vmx.vmx_emulate = 1;
if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
#include <asm/hvm/svm/nestedsvm.h>
#include <asm/mtrr.h>
-enum hvm_io_state {
- HVMIO_none = 0,
- HVMIO_awaiting_completion,
- HVMIO_completed
-};
-
enum hvm_io_completion {
HVMIO_no_completion,
HVMIO_mmio_completion,
struct hvm_vcpu_io {
/* I/O request in flight to device model. */
- enum hvm_io_state io_state;
enum hvm_io_completion io_completion;
unsigned long io_data;
unsigned int io_size;
+ uint8_t io_state;
uint8_t io_dir;
uint8_t io_data_is_addr;
static inline bool_t hvm_vcpu_io_need_completion(const struct hvm_vcpu_io *vio)
{
- return (vio->io_state == HVMIO_awaiting_completion) &&
+ return (vio->io_state == STATE_IOREQ_READY) &&
!vio->io_data_is_addr &&
(vio->io_dir == IOREQ_READ);
}
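A throwaway harness makes the predicate concrete; the constants are hypothetical local copies (values matching xen/include/public/hvm/ioreq.h), not the Xen headers themselves:

    #include <stdint.h>
    #include <stdio.h>

    #define STATE_IOREQ_READY 1 /* local copy for the demo */
    #define IOREQ_WRITE       0
    #define IOREQ_READ        1

    struct demo_vcpu_io { /* just the fields the predicate reads */
        uint8_t io_state, io_dir, io_data_is_addr;
    };

    static int need_completion(const struct demo_vcpu_io *vio)
    {
        return vio->io_state == STATE_IOREQ_READY &&
               !vio->io_data_is_addr &&
               vio->io_dir == IOREQ_READ;
    }

    int main(void)
    {
        struct demo_vcpu_io buffered_read = { STATE_IOREQ_READY, IOREQ_READ, 0 };
        struct demo_vcpu_io mmio_mapped   = { STATE_IOREQ_READY, IOREQ_READ, 1 };
        struct demo_vcpu_io write_req     = { STATE_IOREQ_READY, IOREQ_WRITE, 0 };

        printf("buffered read: %d\n", need_completion(&buffered_read)); /* 1 */
        printf("data_is_addr:  %d\n", need_completion(&mmio_mapped));   /* 0 */
        printf("write:         %d\n", need_completion(&write_req));     /* 0 */
        return 0;
    }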