static uint32_t __ro_after_init base_disallow_mask;
+/* Handling sub-page read-only MMIO regions */
+struct subpage_ro_range {
+    struct list_head list;
+    mfn_t mfn;
+    void __iomem *mapped;
+    DECLARE_BITMAP(ro_elems, PAGE_SIZE / MMIO_RO_SUBPAGE_GRAN);
+};
+
+static LIST_HEAD_RO_AFTER_INIT(subpage_ro_ranges);
+static DEFINE_SPINLOCK(subpage_ro_lock);
+
/* Global bit is allowed to be set on L1 PTEs. Intended for user mappings. */
#define L1_DISALLOW_MASK ((base_disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
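The struct above carries all of the bookkeeping: one entry per partially R/O page, with one bit per MMIO_RO_SUBPAGE_GRAN-sized granule. As a sketch of the encoding (assuming a granularity of 8 bytes; the constant itself is defined outside this hunk), a 4 KiB page gets 512 bits, and a byte offset maps to its bit the way this hypothetical helper would:

    /* Hypothetical helper, not part of the patch: is this byte offset R/O? */
    static bool subpage_is_ro(const struct subpage_ro_range *entry,
                              unsigned int offset)
    {
        return test_bit(offset / MMIO_RO_SUBPAGE_GRAN, entry->ro_elems);
    }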
    return rc;
}
+
+static struct subpage_ro_range *subpage_mmio_find_page(mfn_t mfn)
+{
+    struct subpage_ro_range *entry;
+
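+    /*
+     * No locking: the list is only changed at boot (all writers are
+     * __init) and the list head is __ro_after_init, so iterating it at
+     * runtime is safe without taking subpage_ro_lock.
+     */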
+    list_for_each_entry(entry, &subpage_ro_ranges, list)
+        if ( mfn_eq(entry->mfn, mfn) )
+            return entry;
+
+    return NULL;
+}
+
+/*
+ * Mark part of the page as R/O.
+ * Returns:
+ * - 0 on success - first range in the page
+ * - 1 on success - subsequent range in the page
+ * - <0 on error
+ */
+static int __init subpage_mmio_ro_add_page(
+    mfn_t mfn,
+    unsigned int offset_s,
+    unsigned int offset_e)
+{
+    struct subpage_ro_range *entry = NULL;
+    bool new_entry = false;
+    unsigned int i;
+
+    entry = subpage_mmio_find_page(mfn);
+    if ( !entry )
+    {
+        entry = xzalloc(struct subpage_ro_range);
+        if ( !entry )
+            return -ENOMEM;
+        entry->mfn = mfn;
+        list_add(&entry->list, &subpage_ro_ranges);
+        new_entry = true;
+    }
+
+    for ( i = offset_s; i <= offset_e; i += MMIO_RO_SUBPAGE_GRAN )
+    {
+        bool oldbit = __test_and_set_bit(i / MMIO_RO_SUBPAGE_GRAN,
+                                         entry->ro_elems);
+        ASSERT(!oldbit);
+    }
+
+    return !new_entry;
+}
+
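+/*
+ * Undo the effect of subpage_mmio_ro_add_page().  In this hunk it is only
+ * used for error handling in subpage_mmio_ro_add(), hence __init.
+ */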
+static void __init subpage_mmio_ro_remove_page(
+    mfn_t mfn,
+    unsigned int offset_s,
+    unsigned int offset_e)
+{
+    struct subpage_ro_range *entry = NULL;
+    unsigned int i;
+
+    entry = subpage_mmio_find_page(mfn);
+    if ( !entry )
+        return;
+
+    for ( i = offset_s; i <= offset_e; i += MMIO_RO_SUBPAGE_GRAN )
+        __clear_bit(i / MMIO_RO_SUBPAGE_GRAN, entry->ro_elems);
+
+    if ( !bitmap_empty(entry->ro_elems, PAGE_SIZE / MMIO_RO_SUBPAGE_GRAN) )
+        return;
+
+    list_del(&entry->list);
+    if ( entry->mapped )
+        iounmap(entry->mapped);
+    xfree(entry);
+}
+
+int __init subpage_mmio_ro_add(
+    paddr_t start,
+    size_t size)
+{
+    mfn_t mfn_start = maddr_to_mfn(start);
+    paddr_t end = start + size - 1;
+    mfn_t mfn_end = maddr_to_mfn(end);
+    unsigned int offset_end = 0;
+    int rc;
+    bool subpage_start, subpage_end;
+
+    ASSERT(IS_ALIGNED(start, MMIO_RO_SUBPAGE_GRAN));
+    ASSERT(IS_ALIGNED(size, MMIO_RO_SUBPAGE_GRAN));
+    if ( !IS_ALIGNED(start, MMIO_RO_SUBPAGE_GRAN) ||
+         !IS_ALIGNED(size, MMIO_RO_SUBPAGE_GRAN) )
+        return -EINVAL;
+
+    if ( !size )
+        return 0;
+
+    if ( mfn_eq(mfn_start, mfn_end) )
+    {
+        /* Both starting and ending parts handled at once */
+        subpage_start = PAGE_OFFSET(start) || PAGE_OFFSET(end) != PAGE_SIZE - 1;
+        subpage_end = false;
+    }
+    else
+    {
+        subpage_start = PAGE_OFFSET(start);
+        subpage_end = PAGE_OFFSET(end) != PAGE_SIZE - 1;
+    }
+
+    if ( subpage_start )
+    {
+        offset_end = mfn_eq(mfn_start, mfn_end) ?
+                     PAGE_OFFSET(end) :
+                     (PAGE_SIZE - 1);
+        rc = subpage_mmio_ro_add_page(mfn_start,
+                                      PAGE_OFFSET(start),
+                                      offset_end);
+        if ( rc < 0 )
+            goto err_out;
+        /* Make sure we don't mark part of a fully R/O page as writable. */
+        ASSERT(rc || !rangeset_contains_singleton(mmio_ro_ranges,
+                                                  mfn_x(mfn_start)));
+    }
+
+    if ( subpage_end )
+    {
+        rc = subpage_mmio_ro_add_page(mfn_end, 0, PAGE_OFFSET(end));
+        if ( rc < 0 )
+            goto err_remove_start;
+        /* Make sure we don't mark part of a fully R/O page as writable. */
+        ASSERT(rc || !rangeset_contains_singleton(mmio_ro_ranges,
+                                                  mfn_x(mfn_end)));
+    }
+
+    rc = rangeset_add_range(mmio_ro_ranges, mfn_x(mfn_start), mfn_x(mfn_end));
+    if ( rc )
+        goto err_remove_end;
+
+    return 0;
+
+ err_remove_end:
+    if ( subpage_end )
+        subpage_mmio_ro_remove_page(mfn_end, 0, PAGE_OFFSET(end));
+ err_remove_start:
+    if ( subpage_start )
+        subpage_mmio_ro_remove_page(mfn_start, PAGE_OFFSET(start), offset_end);
+ err_out:
+    return rc;
+}
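Note the error labels: no lock is taken anywhere in this function (it is __init, running before other CPUs can race), so the previous "err_unlock"-style names were stale and have been renamed to say what each label actually does. As a usage sketch (hypothetical driver and addresses, not taken from this patch), a device wanting dom0 to write everywhere in a BAR page except one 64-byte register block would call, during init:

    /* Hypothetical: write-protect 64 bytes at offset 0x100 of this page. */
    rc = subpage_mmio_ro_add(bar_paddr + 0x100, 64);
    if ( rc )
        printk(XENLOG_WARNING "cannot write-protect MMIO range: %d\n", rc);

Start and size must both be MMIO_RO_SUBPAGE_GRAN-aligned; the whole page is then added to mmio_ro_ranges, and writes to the unprotected remainder get replayed by the emulation code below.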
+
+static void __iomem *subpage_mmio_map_page(
+    struct subpage_ro_range *entry)
+{
+    void __iomem *mapped_page;
+
+    if ( entry->mapped )
+        return entry->mapped;
+
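+    /*
+     * Map outside of the lock, presumably so that ioremap()'s allocations
+     * don't happen under the spinlock; a racing mapper is resolved by the
+     * re-check below, with the losing mapping freed again.
+     */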
+    mapped_page = ioremap(mfn_to_maddr(entry->mfn), PAGE_SIZE);
+
+    spin_lock(&subpage_ro_lock);
+    /* Re-check under the lock */
+    if ( entry->mapped )
+    {
+        spin_unlock(&subpage_ro_lock);
+        if ( mapped_page )
+            iounmap(mapped_page);
+        return entry->mapped;
+    }
+
+    entry->mapped = mapped_page;
+    spin_unlock(&subpage_ro_lock);
+    return entry->mapped;
+}
+
+static void subpage_mmio_write_emulate(
+    mfn_t mfn,
+    unsigned int offset,
+    const void *data,
+    unsigned int len)
+{
+    struct subpage_ro_range *entry;
+    volatile void __iomem *addr;
+
+    entry = subpage_mmio_find_page(mfn);
+    if ( !entry )
+        /* Do not print message for pages without any writable parts. */
+        return;
+
+    if ( test_bit(offset / MMIO_RO_SUBPAGE_GRAN, entry->ro_elems) )
+    {
+ write_ignored:
+        gprintk(XENLOG_WARNING,
+                "ignoring write to R/O MMIO 0x%"PRI_mfn"%03x len %u\n",
+                mfn_x(mfn), offset, len);
+        return;
+    }
+
+    addr = subpage_mmio_map_page(entry);
+    if ( !addr )
+    {
+        gprintk(XENLOG_ERR,
+                "Failed to map page for MMIO write at 0x%"PRI_mfn"%03x\n",
+                mfn_x(mfn), offset);
+        return;
+    }
+
+    switch ( len )
+    {
+    case 1:
+        writeb(*(const uint8_t*)data, addr + offset);
+        break;
+    case 2:
+        writew(*(const uint16_t*)data, addr + offset);
+        break;
+    case 4:
+        writel(*(const uint32_t*)data, addr + offset);
+        break;
+    case 8:
+        writeq(*(const uint64_t*)data, addr + offset);
+        break;
+    default:
+        /* mmio_ro_emulated_write() already validated the size */
+        ASSERT_UNREACHABLE();
+        goto write_ignored;
+    }
+}
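The writes now target addr + offset: the lazily-created mapping covers the whole page, so dropping the page offset (as the previous revision did) would have directed every replayed write to the first bytes of the page. Only granules whose bit is clear in ro_elems ever reach the device this way; the guest-visible mapping itself stays R/O.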
+
+#ifdef CONFIG_HVM
+bool subpage_mmio_write_accept(mfn_t mfn, unsigned long gla)
+{
+    unsigned int offset = PAGE_OFFSET(gla);
+    const struct subpage_ro_range *entry;
+
+    entry = subpage_mmio_find_page(mfn);
+    if ( !entry )
+        return false;
+
+    if ( !test_bit(offset / MMIO_RO_SUBPAGE_GRAN, entry->ro_elems) )
+    {
+        /*
+         * We don't know the write size at this point yet, so it could be
+         * an unaligned write, but accept it here anyway and deal with it
+         * in subpage_mmio_write_emulate().
+         */
+        return true;
+    }
+
+    return false;
+}
+#endif
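This accept hook is presumably wired into the HVM fault path elsewhere in the patch; a sketch of such a caller, using the pre-existing hvm_emulate_one_mmio() helper (the surrounding fault-handler context is assumed, not shown here):

    /* Sketch: accept and emulate a write hitting a writable subpage. */
    if ( subpage_mmio_write_accept(mfn, gla) &&
         hvm_emulate_one_mmio(mfn_x(mfn), gla) == X86EMUL_OKAY )
        return 1;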
+
int cf_check mmio_ro_emulated_write(
    enum x86_segment seg,
    unsigned long offset,
        return X86EMUL_UNHANDLEABLE;
    }
+    subpage_mmio_write_emulate(mmio_ro_ctxt->mfn, PAGE_OFFSET(offset),
+                               p_data, bytes);
+
    return X86EMUL_OKAY;
}