int subpage_mmio_ro_add(paddr_t start, size_t size);
bool subpage_mmio_write_accept(mfn_t mfn, unsigned long gla);
-struct mmio_ro_emulate_ctxt {
-    unsigned long cr2;
-    /* Used only for mmcfg case */
-    unsigned int seg, bdf;
-    /* Used only for non-mmcfg case */
-    mfn_t mfn;
-};
-
-int cf_check mmio_ro_emulated_write(
-    enum x86_segment seg, unsigned long offset, void *p_data,
-    unsigned int bytes, struct x86_emulate_ctxt *ctxt);
-
/* r/o MMIO subpage access handlers. */
struct subpage_ro_range {
struct list_head list;
goto write_ignored;
}
-int cf_check mmio_ro_emulated_write(
-    enum x86_segment seg,
-    unsigned long offset,
-    void *p_data,
-    unsigned int bytes,
-    struct x86_emulate_ctxt *ctxt)
-{
-    struct mmio_ro_emulate_ctxt *mmio_ro_ctxt = ctxt->data;
-    unsigned long data = 0;
-
-    /* Only allow naturally-aligned stores at the original %cr2 address. */
-    if ( ((bytes | offset) & (bytes - 1)) || !bytes ||
-         offset != mmio_ro_ctxt->cr2 )
-    {
-        gdprintk(XENLOG_WARNING, "bad access (cr2=%lx, addr=%lx, bytes=%u)\n",
-                 mmio_ro_ctxt->cr2, offset, bytes);
-        return X86EMUL_UNHANDLEABLE;
-    }
-
-    if ( bytes <= sizeof(data) )
-    {
-        memcpy(&data, p_data, bytes);
-        subpage_mmio_write_emulate(mmio_ro_ctxt->mfn, PAGE_OFFSET(offset),
-                                   data, bytes);
-    }
-    else if ( subpage_mmio_find_page(mmio_ro_ctxt->mfn) )
-        gprintk(XENLOG_WARNING,
-                "unsupported %u-byte write to R/O MMIO 0x%"PRI_mfn"%03lx\n",
-                bytes, mfn_x(mmio_ro_ctxt->mfn), PAGE_OFFSET(offset));
-
-    return X86EMUL_OKAY;
-}
-
/*
 * For these PTE APIs, the caller must follow the alloc-map-unmap-free
 * lifecycle, which means explicitly mapping the PTE pages before accessing
 * them.
 */

/*
 * fault handling for read-only MMIO pages
 */
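+/*
+ * Context for the r/o MMIO write handlers below, passed via
+ * x86_emulate_ctxt's data pointer.
+ */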
+struct mmio_ro_emulate_ctxt {
+    unsigned long cr2;
+    /* Used only for mmcfg case */
+    unsigned int seg, bdf;
+    /* Used only for non-mmcfg case */
+    mfn_t mfn;
+};
+
static int cf_check mmcfg_intercept_write(
enum x86_segment seg,
unsigned long offset,
return X86EMUL_OKAY;
}
+int cf_check mmio_ro_emulated_write(
+    enum x86_segment seg,
+    unsigned long offset,
+    void *p_data,
+    unsigned int bytes,
+    struct x86_emulate_ctxt *ctxt)
+{
+    struct mmio_ro_emulate_ctxt *mmio_ro_ctxt = ctxt->data;
+    unsigned long data = 0;
+
+    /* Only allow naturally-aligned stores at the original %cr2 address. */
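+    /*
+     * For non-zero bytes, (bytes | offset) & (bytes - 1) is zero only when
+     * bytes is a power of two and offset is a multiple of it; !bytes covers
+     * the zero-size case.
+     */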
+    if ( ((bytes | offset) & (bytes - 1)) || !bytes ||
+         offset != mmio_ro_ctxt->cr2 )
+    {
+        gdprintk(XENLOG_WARNING, "bad access (cr2=%lx, addr=%lx, bytes=%u)\n",
+                 mmio_ro_ctxt->cr2, offset, bytes);
+        return X86EMUL_UNHANDLEABLE;
+    }
+
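+    /*
+     * Writes no wider than unsigned long are handed to
+     * subpage_mmio_write_emulate(); wider ones are dropped, with a warning
+     * if the page has subpage r/o ranges registered.
+     */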
+    if ( bytes <= sizeof(data) )
+    {
+        memcpy(&data, p_data, bytes);
+        subpage_mmio_write_emulate(mmio_ro_ctxt->mfn, PAGE_OFFSET(offset),
+                                   data, bytes);
+    }
+    else if ( subpage_mmio_find_page(mmio_ro_ctxt->mfn) )
+        gprintk(XENLOG_WARNING,
+                "unsupported %u-byte write to R/O MMIO 0x%"PRI_mfn"%03lx\n",
+                bytes, mfn_x(mmio_ro_ctxt->mfn), PAGE_OFFSET(offset));
+
+    return X86EMUL_OKAY;
+}
+
static const struct x86_emulate_ops mmio_ro_emulate_ops = {
.read = x86emul_unhandleable_rw,
.insn_fetch = ptwr_emulated_insn_fetch,