This saves a VMENTRY and a VMEXIT, since we no longer retry the
I/O port read when the backing DM does not handle a given ioreq.
There are 2 cases of "no ioreq server exists for this I/O":
1) No ioreq servers at all (the PVH case)
2) No ioreq server for this particular I/O (the non-PVH case)
The routine hvm_has_dm() only checks for an empty server list,
i.e. the PVH case (#1).
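For illustration, a sketch of the two tests (the list walk and
range matching inside hvm_select_ioreq_server() are not shown):

    /* Old test: non-empty server list; only rules out case #1 (PVH). */
    if ( !hvm_has_dm(curr->domain) )
    {
        /* no ioreq servers at all */
    }

    /* New test: returns NULL when no registered server claims this
     * ioreq, which also catches case #2. */
    struct hvm_ioreq_server *s =
        hvm_select_ioreq_server(curr->domain, &p);

    if ( !s )
    {
        /* no suitable backing DM for this I/O */
    }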
Switching from hvm_has_dm() to hvm_select_ioreq_server() covers
both cases, and also allows hvm_send_assist_req() to have only 2
possible return values.
The key to skipping the retry is to set "rc = X86EMUL_OKAY", which
is what the error path of the hvm_has_dm() check in hvmemul_do_io()
(the only caller of hvm_has_dm()) already does.
Since this case is no longer handled in hvm_send_assist_req(), move
the call to hvm_complete_assist_req() into hvmemul_do_io().
As part of this change, do the work of hvm_complete_assist_req()
in the PVH case as well. Acting more like real hardware appears to
be the better behavior.
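A sketch of that behavior (the rep-move/data_is_ptr handling is
omitted): an unclaimed read completes with all ones, as a read from
unpopulated hardware would, while a write is simply dropped.

    if ( p->dir == IOREQ_READ && !p->data_is_ptr )
        p->data = ~0ul;              /* read as if from an empty bus */
    p->state = STATE_IORESP_READY;
    hvm_io_assist(p);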
Adding "rc = X86EMUL_OKAY" in the failing case of
hvm_send_assist_req() would break what was done in commit
bac0999325056a3b3a92f7622df7ffbc5388b1c3 and commit
f20f3c8ece5c10fa7626f253d28f570a43b23208. We are currently doing
the succeeding case of hvm_send_assist_req() and retying the I/O.
Since hvm_select_ioreq_server() has already been called at this
point, switch to using hvm_send_assist_req_to_ioreq_server()
directly.
Since there are no longer any callers of hvm_send_assist_req(),
drop that routine and rename hvm_send_assist_req_to_ioreq_server()
to hvm_send_assist_req().
Since hvm_send_assist_req() is an extern, add an ASSERT() that s
is non-NULL.
Signed-off-by: Don Slutz <dslutz@verizon.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
vio->io_state = HVMIO_handle_mmio_awaiting_completion;
break;
case X86EMUL_UNHANDLEABLE:
- /* If there is no backing DM, just ignore accesses */
- if ( !hvm_has_dm(curr->domain) )
+ {
+ struct hvm_ioreq_server *s =
+ hvm_select_ioreq_server(curr->domain, &p);
+
+ /* If there is no suitable backing DM, just ignore accesses */
+ if ( !s )
{
+ hvm_complete_assist_req(&p);
rc = X86EMUL_OKAY;
vio->io_state = HVMIO_none;
}
else
{
rc = X86EMUL_RETRY;
- if ( !hvm_send_assist_req(&p) )
+ if ( !hvm_send_assist_req(s, &p) )
vio->io_state = HVMIO_none;
else if ( p_data == NULL )
rc = X86EMUL_OKAY;
}
break;
+ }
default:
BUG();
}
}
}
-static struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
- ioreq_t *p)
+struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
+ ioreq_t *p)
{
#define CF8_BDF(cf8) (((cf8) & 0x00ffff00) >> 8)
#define CF8_ADDR_LO(cf8) ((cf8) & 0x000000fc)
return 1;
}
-bool_t hvm_has_dm(struct domain *d)
-{
- return !list_empty(&d->arch.hvm_domain.ioreq_server.list);
-}
-
-bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
- ioreq_t *proto_p)
+bool_t hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
{
struct vcpu *curr = current;
struct domain *d = curr->domain;
struct hvm_ioreq_vcpu *sv;
+ ASSERT(s);
if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
return 0; /* implicitly bins the i/o operation */
return 0;
}
-static bool_t hvm_complete_assist_req(ioreq_t *p)
+void hvm_complete_assist_req(ioreq_t *p)
{
switch ( p->type )
{
hvm_io_assist(p);
break;
}
-
- return 1;
-}
-
-bool_t hvm_send_assist_req(ioreq_t *p)
-{
- struct hvm_ioreq_server *s = hvm_select_ioreq_server(current->domain, p);
-
- if ( !s )
- return hvm_complete_assist_req(p);
-
- return hvm_send_assist_req_to_ioreq_server(s, p);
}
void hvm_broadcast_assist_req(ioreq_t *p)
list_for_each_entry ( s,
&d->arch.hvm_domain.ioreq_server.list,
list_entry )
- (void) hvm_send_assist_req_to_ioreq_server(s, p);
+ (void) hvm_send_assist_req(s, p);
}
void hvm_hlt(unsigned long rflags)
void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);
-bool_t hvm_send_assist_req(ioreq_t *p);
+struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
+ ioreq_t *p);
+bool_t hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
void hvm_broadcast_assist_req(ioreq_t *p);
+void hvm_complete_assist_req(ioreq_t *p);
void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
void hvm_migrate_timers(struct vcpu *v);
-bool_t hvm_has_dm(struct domain *d);
bool_t hvm_io_pending(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
void hvm_migrate_pirqs(struct vcpu *v);