* cache indexed by linear MMIO address.
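+ * The 'create' flag selects whether a lookup miss should allocate a fresh
+ * entry (space permitting) or simply return NULL.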
*/
static struct hvm_mmio_cache *hvmemul_find_mmio_cache(
- struct hvm_vcpu_io *vio, unsigned long gla, uint8_t dir)
+ struct hvm_vcpu_io *vio, unsigned long gla, uint8_t dir, bool create)
{
unsigned int i;
struct hvm_mmio_cache *cache;
return cache;
}
+ if ( !create )
+ return NULL;
+
i = vio->mmio_cache_count;
if( i == ARRAY_SIZE(vio->mmio_cache) )
return NULL;
{
struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
unsigned long offset = gla & ~PAGE_MASK;
- struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(vio, gla, dir);
+ struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(vio, gla, dir, true);
unsigned int chunk, buffer_offset = 0;
paddr_t gpa;
unsigned long one_rep = 1;
uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
{
pagefault_info_t pfinfo;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
unsigned int offset = addr & ~PAGE_MASK;
- int rc;
+ int rc = HVMTRANS_bad_gfn_to_mfn;
if ( offset + bytes > PAGE_SIZE )
{
return rc;
}
- rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+ /*
+ * If there is an MMIO cache entry for the access then we must be re-issuing
+ * an access that was previously handled as MMIO. Thus it is imperative that
+ * we handle this access in the same way to guarantee completion and hence
+ * clean up any interim state.
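+ * Leaving rc set to HVMTRANS_bad_gfn_to_mfn when the copy is skipped sends
+ * the access down the MMIO emulation path.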
+ */
+ if ( !hvmemul_find_mmio_cache(vio, addr, IOREQ_READ, false) )
+ rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
switch ( rc )
{
uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
{
pagefault_info_t pfinfo;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
unsigned int offset = addr & ~PAGE_MASK;
- int rc;
+ int rc = HVMTRANS_bad_gfn_to_mfn;
if ( offset + bytes > PAGE_SIZE )
{
return rc;
}
- rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+ /*
+ * If there is an MMIO cache entry for the access then we must be re-issuing
+ * an access that was previously handled as MMIO. Thus it is imperative that
+ * we handle this access in the same way to guarantee completion and hence
+ * clean up any interim state.
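+ * Leaving rc set to HVMTRANS_bad_gfn_to_mfn when the copy is skipped sends
+ * the access down the MMIO emulation path.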
+ */
+ if ( !hvmemul_find_mmio_cache(vio, addr, IOREQ_WRITE, false) )
+ rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
switch ( rc )
{