obj-y += io.o
obj-y += ioreq.o
obj-y += irq.o
+obj-y += mmio.o
obj-y += monitor.o
obj-y += mtrr.o
obj-y += nestedhvm.o
/* If there is no suitable backing DM, just ignore accesses */
if ( !s )
{
- if ( is_mmio && is_hardware_domain(currd) )
+ if ( is_mmio && is_hardware_domain(currd) &&
+ /*
+ * Do not attempt to fix up write accesses to r/o MMIO regions;
+ * they are expected to be terminated by the null handler
+ * below.
+ */
+ (dir == IOREQ_READ ||
+ !rangeset_contains_singleton(mmio_ro_ranges,
+ PFN_DOWN(addr))) )
{
/*
* PVH dom0 is likely missing MMIO mappings on the p2m, due to
return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops, completion);
}
-int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
-{
- static const struct x86_emulate_ops hvm_ro_emulate_ops_mmio = {
- .read = x86emul_unhandleable_rw,
- .insn_fetch = hvmemul_insn_fetch,
- .write = mmio_ro_emulated_write,
- .validate = hvmemul_validate,
- };
- struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = gla, .mfn = _mfn(mfn) };
- struct hvm_emulate_ctxt ctxt;
- unsigned int seg, bdf;
- int rc;
-
- if ( pci_ro_mmcfg_decode(mfn, &seg, &bdf) )
- {
- /* Should be always handled by vPCI for PVH dom0. */
- gdprintk(XENLOG_ERR, "unhandled MMCFG access for %pp\n",
- &PCI_SBDF(seg, bdf));
- ASSERT_UNREACHABLE();
- return X86EMUL_UNHANDLEABLE;
- }
-
- hvm_emulate_init_once(&ctxt, x86_insn_is_mem_write,
- guest_cpu_user_regs());
- ctxt.ctxt.data = &mmio_ro_ctxt;
-
- switch ( rc = _hvm_emulate_one(&ctxt, &hvm_ro_emulate_ops_mmio,
- VIO_no_completion) )
- {
- case X86EMUL_UNHANDLEABLE:
- case X86EMUL_UNIMPLEMENTED:
- hvm_dump_emulation_state(XENLOG_G_WARNING, "r/o MMIO", &ctxt, rc);
- break;
- case X86EMUL_EXCEPTION:
- hvm_inject_event(&ctxt.ctxt.event);
- /* fallthrough */
- default:
- hvm_emulate_writeback(&ctxt);
- break;
- }
-
- return rc;
-}
-
void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
unsigned int errcode)
{
*/
#include <xen/init.h>
+#include <xen/io.h>
#include <xen/ioreq.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <asm/current.h>
#include <asm/debugreg.h>
#include <asm/e820.h>
-#include <asm/io.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
register_portio_handler(d, XEN_HVM_DEBUGCONS_IOPORT, 1, hvm_print_line);
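+ /* Register the MMIO intercept handling pages with r/o sub-page regions. */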
+ register_subpage_ro_handler(d);
+
if ( hvm_tsc_scaling_supported )
d->arch.hvm.tsc_scaling_ratio = hvm_default_tsc_scaling_ratio;
*/
if ( (p2mt == p2m_mmio_dm) ||
(npfec.write_access &&
- (p2m_is_discard_write(p2mt) || (p2mt == p2m_ioreq_server))) )
+ (p2m_is_discard_write(p2mt) || (p2mt == p2m_ioreq_server) ||
+ /* MMIO entries can be r/o if the target mfn is in mmio_ro_ranges. */
+ (p2mt == p2m_mmio_direct &&
+ rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn))))) )
{
if ( !handle_mmio_with_translation(gla, gfn, npfec) )
hvm_inject_hw_exception(X86_EXC_GP, 0);
goto out_put_gfn;
}
- if ( (p2mt == p2m_mmio_direct) && npfec.write_access && npfec.present &&
- (is_hardware_domain(currd) || subpage_mmio_write_accept(mfn, gla)) &&
- (hvm_emulate_one_mmio(mfn_x(mfn), gla) == X86EMUL_OKAY) )
- {
- rc = 1;
- goto out_put_gfn;
- }
-
/* If we fell through, the vcpu will retry now that access restrictions have
* been removed. It may fault again if the p2m entry type still requires so.
* Otherwise, this is an error condition. */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * MMIO related routines.
+ *
+ * Copyright (c) 2025 Cloud Software Group
+ */
+
+#include <xen/io.h>
+#include <xen/mm.h>
+
+#include <asm/p2m.h>
+
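+/*
+ * Intercept ->check hook: only claim accesses that target a direct MMIO
+ * page for which an r/o sub-page region has been registered.
+ */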
+static int cf_check subpage_mmio_accept(struct vcpu *v, unsigned long addr)
+{
+ p2m_type_t t;
+ mfn_t mfn = get_gfn_query_unlocked(v->domain, PFN_DOWN(addr), &t);
+
+ return !mfn_eq(mfn, INVALID_MFN) && t == p2m_mmio_direct &&
+ subpage_mmio_find_page(mfn);
+}
+
+/*
+ * The guest has read access to these regions, and consequently read accesses
+ * shouldn't fault. However, read-modify-write operations may take this path,
+ * so reads still need to be handled here.
+ */
+static int cf_check subpage_mmio_read(
+ struct vcpu *v, unsigned long addr, unsigned int len, unsigned long *data)
+{
+ struct domain *d = v->domain;
+ unsigned long gfn = PFN_DOWN(addr);
+ p2m_type_t t;
+ mfn_t mfn;
+ struct subpage_ro_range *entry;
+ volatile void __iomem *mem;
+
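+ /* Default to all bits set for the paths that don't perform the read. */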
+ *data = ~0UL;
+
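+ /* Ignore (and log) unsupported access sizes and misaligned accesses. */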
+ if ( !len || len > 8 || (len & (len - 1)) || !IS_ALIGNED(addr, len) )
+ {
+ gprintk(XENLOG_ERR, "ignoring read to r/o MMIO subpage %#lx size %u\n",
+ addr, len);
+ return X86EMUL_OKAY;
+ }
+
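+ /*
+  * The p2m entry may have changed since the ->check hook ran; if it no
+  * longer points at direct MMIO, force a retry of the access.
+  */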
+ mfn = get_gfn_query(d, gfn, &t);
+ if ( mfn_eq(mfn, INVALID_MFN) || t != p2m_mmio_direct )
+ {
+ put_gfn(d, gfn);
+ return X86EMUL_RETRY;
+ }
+
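+ /* If the page is no longer tracked, leave the all-ones default in place. */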
+ entry = subpage_mmio_find_page(mfn);
+ if ( !entry )
+ {
+ put_gfn(d, gfn);
+ return X86EMUL_OKAY;
+ }
+
+ mem = subpage_mmio_map_page(entry);
+ if ( !mem )
+ {
+ put_gfn(d, gfn);
+ gprintk(XENLOG_ERR,
+ "Failed to map page for MMIO read at %#lx -> %#lx\n",
+ addr, mfn_to_maddr(mfn) + PAGE_OFFSET(addr));
+ return X86EMUL_OKAY;
+ }
+
+ *data = read_mmio(mem + PAGE_OFFSET(addr), len);
+
+ put_gfn(d, gfn);
+ return X86EMUL_OKAY;
+}
+
+static int cf_check subpage_mmio_write(
+ struct vcpu *v, unsigned long addr, unsigned int len, unsigned long data)
+{
+ struct domain *d = v->domain;
+ unsigned long gfn = PFN_DOWN(addr);
+ p2m_type_t t;
+ mfn_t mfn;
+
+ if ( !len || len > 8 || (len & (len - 1)) || !IS_ALIGNED(addr, len) )
+ {
+ gprintk(XENLOG_ERR, "ignoring write to r/o MMIO subpage %#lx size %u\n",
+ addr, len);
+ return X86EMUL_OKAY;
+ }
+
+ mfn = get_gfn_query(d, gfn, &t);
+ if ( mfn_eq(mfn, INVALID_MFN) || t != p2m_mmio_direct )
+ {
+ put_gfn(d, gfn);
+ return X86EMUL_RETRY;
+ }
+
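+ /*
+  * Hand the write to the common helper, which only performs it if it does
+  * not hit one of the registered r/o sub-regions.
+  */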
+ subpage_mmio_write_emulate(mfn, PAGE_OFFSET(addr), data, len);
+
+ put_gfn(d, gfn);
+ return X86EMUL_OKAY;
+}
+
+void register_subpage_ro_handler(struct domain *d)
+{
+ static const struct hvm_mmio_ops subpage_mmio_ops = {
+ .check = subpage_mmio_accept,
+ .read = subpage_mmio_read,
+ .write = subpage_mmio_write,
+ };
+
+ register_mmio_handler(d, &subpage_mmio_ops);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
struct segment_register *hvmemul_get_seg_reg(
enum x86_segment seg,
struct hvm_emulate_ctxt *hvmemul_ctxt);
-int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla);
static inline bool handle_mmio(void)
{
/* Remove MMCFG regions from a domain ->iomem_caps. */
int vpci_mmcfg_deny_access(struct domain *d);
+/* Register the r/o MMIO subpage access handler. */
+void register_subpage_ro_handler(struct domain *d);
+
#endif /* __ASM_X86_HVM_IO_H__ */
enum x86_segment seg, unsigned long offset, void *p_data,
unsigned int bytes, struct x86_emulate_ctxt *ctxt);
+/* Sub-page r/o MMIO access helpers (implemented in mm.c). */
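+/*
+ * Each entry tracks one MMIO page containing r/o sub-regions: 'ro_elems'
+ * holds one bit per MMIO_RO_SUBPAGE_GRAN-sized chunk of the page, set for
+ * the chunks the guest must not write.
+ */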
+struct subpage_ro_range {
+ struct list_head list;
+ mfn_t mfn;
+ void __iomem *mapped;
+ DECLARE_BITMAP(ro_elems, PAGE_SIZE / MMIO_RO_SUBPAGE_GRAN);
+};
+struct subpage_ro_range *subpage_mmio_find_page(mfn_t mfn);
+void __iomem *subpage_mmio_map_page(struct subpage_ro_range *entry);
+void subpage_mmio_write_emulate(
+ mfn_t mfn, unsigned int offset, unsigned long data, unsigned int len);
+
int audit_adjust_pgtables(struct domain *d, int dir, int noisy);
extern int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs);
static uint32_t __ro_after_init base_disallow_mask;
/* Handling sub-page read-only MMIO regions */
-struct subpage_ro_range {
- struct list_head list;
- mfn_t mfn;
- void __iomem *mapped;
- DECLARE_BITMAP(ro_elems, PAGE_SIZE / MMIO_RO_SUBPAGE_GRAN);
-};
-
static LIST_HEAD_RO_AFTER_INIT(subpage_ro_ranges);
static DEFINE_SPINLOCK(subpage_ro_lock);
return rc;
}
-static struct subpage_ro_range *subpage_mmio_find_page(mfn_t mfn)
+struct subpage_ro_range *subpage_mmio_find_page(mfn_t mfn)
{
struct subpage_ro_range *entry;
return rc;
}
-static void __iomem *subpage_mmio_map_page(
+void __iomem *subpage_mmio_map_page(
struct subpage_ro_range *entry)
{
void __iomem *mapped_page;
return entry->mapped;
}
-static void subpage_mmio_write_emulate(
+void subpage_mmio_write_emulate(
mfn_t mfn,
unsigned int offset,
unsigned long data,
goto write_ignored;
}
-#ifdef CONFIG_HVM
-bool subpage_mmio_write_accept(mfn_t mfn, unsigned long gla)
-{
- unsigned int offset = PAGE_OFFSET(gla);
- const struct subpage_ro_range *entry;
-
- entry = subpage_mmio_find_page(mfn);
- if ( !entry )
- return false;
-
- if ( !test_bit(offset / MMIO_RO_SUBPAGE_GRAN, entry->ro_elems) )
- {
- /*
- * We don't know the write size at this point yet, so it could be
- * an unaligned write, but accept it here anyway and deal with it
- * later.
- */
- return true;
- }
-
- return false;
-}
-#endif
-
int cf_check mmio_ro_emulated_write(
enum x86_segment seg,
unsigned long offset,