if ( vio->mmio_access.gla_valid )
return;
- vio->mmio_gva = gla & PAGE_MASK;
+ vio->mmio_gla = gla & PAGE_MASK;
vio->mmio_gpfn = PFN_DOWN(gpa);
vio->mmio_access = (struct npfec){ .gla_valid = 1,
.read_access = 1,
if ( ((access_type != hvm_access_insn_fetch
? vio->mmio_access.read_access
: vio->mmio_access.insn_fetch)) &&
- (vio->mmio_gva == (addr & PAGE_MASK)) )
+ (vio->mmio_gla == (addr & PAGE_MASK)) )
return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
if ( (seg != x86_seg_none) &&
return rc;
if ( vio->mmio_access.write_access &&
- (vio->mmio_gva == (addr & PAGE_MASK)) )
+ (vio->mmio_gla == (addr & PAGE_MASK)) )
return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
if ( (seg != x86_seg_none) &&
return 1;
}
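
Both fast paths above follow the same pattern: the cached translation is reused only when the latched npfec permissions cover the current access and the linear address falls in the same latched page. The following is a minimal sketch of that predicate, not code from the patch; the helper name mmio_cache_hit is hypothetical, it assumes the mmio_gla/mmio_access fields shown in this diff, and it folds the instruction-fetch case into the read check for brevity.

/* Sketch only: hypothetical helper illustrating the fast-path test. */
static bool_t mmio_cache_hit(const struct hvm_vcpu_io *vio,
                             unsigned long addr, bool_t is_write)
{
    /* The latched access kind must permit this access... */
    if ( is_write ? !vio->mmio_access.write_access
                  : !vio->mmio_access.read_access )
        return 0;

    /* ...and only the page-aligned linear address is latched,
     * so compare at page granularity. */
    return vio->mmio_gla == (addr & PAGE_MASK);
}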
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
struct npfec access)
{
    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
vio->mmio_access = access.gla_valid &&
access.kind == npfec_kind_with_gla
? access : (struct npfec){};
- vio->mmio_gva = gva & PAGE_MASK;
+ vio->mmio_gla = gla & PAGE_MASK;
vio->mmio_gpfn = gpfn;
return handle_mmio();
}
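
For context, handle_mmio_with_translation() is reached from the nested-page-fault path with the faulting linear address, the guest frame, and the fault's npfec. A rough sketch of such a call site follows; it is an approximation from memory of the surrounding tree, not verbatim code from this patch.

    /* Approximate call site in the nested page fault handler (sketch). */
    if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
        hvm_inject_hw_exception(TRAP_gp_fault, 0);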
void send_timeoffset_req(unsigned long timeoff);
void send_invalidate_req(void);
int handle_mmio(void);
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
struct npfec);
int handle_pio(uint16_t port, unsigned int size, int dir);
void hvm_interrupt_post(struct vcpu *v, int vector, int type);
/*
* HVM emulation:
- * Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
+ * Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
* The latter is known to be an MMIO frame (not RAM).
* This translation is only valid for accesses as per @mmio_access.
*/
struct npfec mmio_access;
- unsigned long mmio_gva;
+ unsigned long mmio_gla;
unsigned long mmio_gpfn;
/*