int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
{
- static const struct x86_emulate_ops hvm_intercept_ops_mmcfg = {
- .read = x86emul_unhandleable_rw,
- .insn_fetch = hvmemul_insn_fetch,
- .write = mmcfg_intercept_write,
- .validate = hvmemul_validate,
- };
static const struct x86_emulate_ops hvm_ro_emulate_ops_mmio = {
.read = x86emul_unhandleable_rw,
.insn_fetch = hvmemul_insn_fetch,
.write = mmio_ro_emulated_write,
};
struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = gla, .mfn = _mfn(mfn) };
struct hvm_emulate_ctxt ctxt;
- const struct x86_emulate_ops *ops;
unsigned int seg, bdf;
int rc;
if ( pci_ro_mmcfg_decode(mfn, &seg, &bdf) )
{
- mmio_ro_ctxt.seg = seg;
- mmio_ro_ctxt.bdf = bdf;
- ops = &hvm_intercept_ops_mmcfg;
+ /* Should always be handled by vPCI for PVH dom0. */
+ gdprintk(XENLOG_ERR, "unhandled MMCFG access for %pp\n",
+ &PCI_SBDF(seg, bdf));
+ ASSERT_UNREACHABLE();
+ return X86EMUL_UNHANDLEABLE;
}
- else
- ops = &hvm_ro_emulate_ops_mmio;
hvm_emulate_init_once(&ctxt, x86_insn_is_mem_write,
guest_cpu_user_regs());
ctxt.ctxt.data = &mmio_ro_ctxt;
- switch ( rc = _hvm_emulate_one(&ctxt, ops, VIO_no_completion) )
+ switch ( rc = _hvm_emulate_one(&ctxt, &hvm_ro_emulate_ops_mmio,
+ VIO_no_completion) )
{
case X86EMUL_UNHANDLEABLE:
case X86EMUL_UNIMPLEMENTED:
- hvm_dump_emulation_state(XENLOG_G_WARNING, "MMCFG", &ctxt, rc);
+ hvm_dump_emulation_state(XENLOG_G_WARNING, "r/o MMIO", &ctxt, rc);
break;
case X86EMUL_EXCEPTION:
hvm_inject_event(&ctxt.ctxt.event);
int cf_check mmio_ro_emulated_write(
enum x86_segment seg, unsigned long offset, void *p_data,
unsigned int bytes, struct x86_emulate_ctxt *ctxt);
-int cf_check mmcfg_intercept_write(
- enum x86_segment seg, unsigned long offset, void *p_data,
- unsigned int bytes, struct x86_emulate_ctxt *ctxt);
int audit_adjust_pgtables(struct domain *d, int dir, int noisy);
return X86EMUL_OKAY;
}
-int cf_check mmcfg_intercept_write(
- enum x86_segment seg,
- unsigned long offset,
- void *p_data,
- unsigned int bytes,
- struct x86_emulate_ctxt *ctxt)
-{
- struct mmio_ro_emulate_ctxt *mmio_ctxt = ctxt->data;
-
- /*
- * Only allow naturally-aligned stores no wider than 4 bytes to the
- * original %cr2 address.
- */
- if ( ((bytes | offset) & (bytes - 1)) || bytes > 4 || !bytes ||
- offset != mmio_ctxt->cr2 )
- {
- gdprintk(XENLOG_WARNING, "bad write (cr2=%lx, addr=%lx, bytes=%u)\n",
- mmio_ctxt->cr2, offset, bytes);
- return X86EMUL_UNHANDLEABLE;
- }
-
- offset &= 0xfff;
- if ( pci_conf_write_intercept(mmio_ctxt->seg, mmio_ctxt->bdf,
- offset, bytes, p_data) >= 0 )
- pci_mmcfg_write(mmio_ctxt->seg, PCI_BUS(mmio_ctxt->bdf),
- PCI_DEVFN(mmio_ctxt->bdf), offset, bytes,
- *(uint32_t *)p_data);
-
- return X86EMUL_OKAY;
-}
-
/*
 * fault handling for read-only MMIO pages
 */
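+/*
+ * Write intercept for r/o mapped MMCFG pages: validate the store, then let
+ * pci_conf_write_intercept() adjust or veto it before replaying it via
+ * pci_mmcfg_write().
+ */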
+static int cf_check mmcfg_intercept_write(
+ enum x86_segment seg,
+ unsigned long offset,
+ void *p_data,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
+{
+ struct mmio_ro_emulate_ctxt *mmio_ctxt = ctxt->data;
+
+ /*
+ * Only allow naturally-aligned stores no wider than 4 bytes to the
+ * original %cr2 address.
+ */
+ if ( ((bytes | offset) & (bytes - 1)) || bytes > 4 || !bytes ||
+ offset != mmio_ctxt->cr2 )
+ {
+ gdprintk(XENLOG_WARNING, "bad write (cr2=%lx, addr=%lx, bytes=%u)\n",
+ mmio_ctxt->cr2, offset, bytes);
+ return X86EMUL_UNHANDLEABLE;
+ }
+
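+ /* Each MMCFG page covers a single function's 4k config space. */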
+ offset &= 0xfff;
+ if ( pci_conf_write_intercept(mmio_ctxt->seg, mmio_ctxt->bdf,
+ offset, bytes, p_data) >= 0 )
+ pci_mmcfg_write(mmio_ctxt->seg, PCI_BUS(mmio_ctxt->bdf),
+ PCI_DEVFN(mmio_ctxt->bdf), offset, bytes,
+ *(uint32_t *)p_data);
+
+ return X86EMUL_OKAY;
+}
+
static const struct x86_emulate_ops mmio_ro_emulate_ops = {
.read = x86emul_unhandleable_rw,
.insn_fetch = ptwr_emulated_insn_fetch,