p.data = get_user_reg(regs, info->dabt.reg);
vio->req = p;
+ vio->suspended = false;
vio->info.dabt_instr = instr;
rc = ioreq_send(s, &p, 0);
- if ( rc != IO_RETRY || v->domain->is_shutting_down )
+ if ( rc != IO_RETRY || vio->suspended )
vio->req.state = STATE_IOREQ_NONE;
else if ( !ioreq_needs_completion(&vio->req) )
rc = IO_HANDLED;
ASSERT(p.count);
vio->req = p;
+ vio->suspended = false;
rc = hvm_io_intercept(&p);
else
{
rc = ioreq_send(s, &p, 0);
- if ( rc != X86EMUL_RETRY || currd->is_shutting_down )
+ if ( rc != X86EMUL_RETRY || vio->suspended )
vio->req.state = STATE_IOREQ_NONE;
else if ( !ioreq_needs_completion(&vio->req) )
rc = X86EMUL_OKAY;
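Both the Arm and the x86 hunks above apply the same pattern: clear vio->suspended immediately before handing the request to ioreq_send(), then treat a RETRY result on a suspended vCPU as "nothing left in flight" instead of leaving a stale request pending. Read with the +/- lines resolved (x86 shown, surrounding code elided), the resulting flow is roughly:

    vio->req = p;
    vio->suspended = false;                  /* armed before the send */
    ...
    rc = ioreq_send(s, &p, 0);
    if ( rc != X86EMUL_RETRY || vio->suspended )
        vio->req.state = STATE_IOREQ_NONE;   /* completed, failed, or refused */
    else if ( !ioreq_needs_completion(&vio->req) )
        rc = X86EMUL_OKAY;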
case X86EMUL_RETRY:
/*
- * We should not advance RIP/EIP if the domain is shutting down or
- * if X86EMUL_RETRY has been returned by an internal handler.
+ * We should not advance RIP/EIP if the vio was suspended (e.g.
+ * because the domain is shutting down) or if X86EMUL_RETRY has
+ * been returned by an internal handler.
*/
- if ( curr->domain->is_shutting_down || !vcpu_ioreq_pending(curr) )
+ if ( vio->suspended || !vcpu_ioreq_pending(curr) )
return false;
break;
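This boolean result is what gates advancing the guest instruction pointer on the caller's side. Assuming this hunk sits in the handle_pio() completion path (a natural fit for the comment), Xen's VMX exit handler retires a port I/O exit along these lines (sketch, unrelated details elided):

    case EXIT_REASON_IO_INSTRUCTION:
        ...
        if ( handle_pio(port, bytes, dir) )
            update_guest_eip(); /* Safe: IN, OUT */
        break;

Returning false for a suspended vCPU therefore leaves RIP/EIP pointing at the I/O instruction rather than pretending the access completed while the domain is being torn down.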
struct vcpu *curr = current;
struct domain *d = curr->domain;
struct ioreq_vcpu *sv;
+ struct vcpu_io *vio = &curr->io;
ASSERT(s);
return ioreq_send_buffered(s, proto_p);
if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
+ {
+ vio->suspended = true;
return IOREQ_STATUS_RETRY;
+ }
list_for_each_entry ( sv,
&s->ioreq_vcpu_list,
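The hunk above is where the flag gets set: vcpu_start_shutdown_deferral() failing means the domain is shutting down, yet ioreq_send() still returns IOREQ_STATUS_RETRY, so the status alone cannot tell the arch code whether a request was actually queued to the device model. The per-vCPU flag is the side channel that disambiguates; presumably this was preferred over adding a new status code so the existing IO_RETRY/X86EMUL_RETRY plumbing stays untouched. A minimal stand-alone model of the technique (toy names, not the Xen API):

    #include <stdbool.h>
    #include <stdio.h>

    /* RETRY is overloaded: it can mean "a request is in flight, poll me
     * again" or "the send was refused because the domain is going away".
     * The suspended flag records which one happened. */
    enum send_status { STATUS_HANDLED, STATUS_RETRY };

    struct toy_vcpu_io {
        bool suspended;        /* set when RETRY queued nothing */
        bool req_in_flight;
    };

    static enum send_status toy_send(struct toy_vcpu_io *vio, bool shutting_down)
    {
        if ( shutting_down )
        {
            vio->suspended = true;   /* refuse: nothing handed to the DM */
            return STATUS_RETRY;
        }
        vio->req_in_flight = true;   /* genuine retry: DM now owns the request */
        return STATUS_RETRY;
    }

    int main(void)
    {
        struct toy_vcpu_io vio = { 0 };

        vio.suspended = false;                       /* cleared before each send */
        enum send_status rc = toy_send(&vio, true);  /* domain is shutting down */

        /* Mirrors the arch-side check: only a genuine retry keeps state around. */
        if ( rc != STATUS_RETRY || vio.suspended )
            printf("drop the request; nothing is in flight\n");
        else
            printf("wait for the device model to complete it\n");

        return 0;
    }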
struct vcpu_io {
/* I/O request in flight to device model. */
enum vio_completion completion;
+ /*
+ * Indicates whether the I/O was not handled because the domain
+ * is about to be paused.
+ */
+ bool suspended;
ioreq_t req;
/* Arch specific info pertaining to the io request */
struct arch_vcpu_io info;
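With this hunk applied, the struct would read roughly as follows (closing brace and field spacing per the surrounding Xen style):

    struct vcpu_io {
        /* I/O request in flight to device model. */
        enum vio_completion completion;

        /*
         * Indicates whether the I/O was not handled because the domain
         * is about to be paused.
         */
        bool suspended;

        ioreq_t req;

        /* Arch specific info pertaining to the io request */
        struct arch_vcpu_io info;
    };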