option is only applicable to a PV Dom0 and is enabled by default on Intel
hardware.
+* `map-reserved`: sets up DMA remapping for all the reserved regions in the
+ memory map for Dom0. Use this to work around firmware issues that provide
+ incorrect RMRR/IVMD entries. Rather than mapping only RAM pages for IOMMU
+ accesses by Dom0, all memory regions marked as reserved in the memory map
+ that don't overlap with any MMIO region of an emulated device will be
+ identity mapped. This maps a subset of the memory that the `map-inclusive`
+ option would map. This option is available in all Dom0 modes and is enabled
+ by default on Intel hardware; see the usage example below.
+
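As an illustration (not part of the patch itself): `dom0-iommu=` takes a comma-separated list of boolean sub-options, and since the parsing below relies on `parse_boolean()`, a `no-` prefix should also be honoured. A plausible Xen command line fragment:

```
dom0-iommu=map-reserved,no-map-inclusive
```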
### dom0\_ioports\_disable (x86)
> `= List of <hex>-<hex>`
return NULL;
}
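+
+/* Return whether addr falls inside any MMCFG region registered for d. */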
+bool vpci_is_mmcfg_address(const struct domain *d, paddr_t addr)
+{
+ return vpci_mmcfg_find(d, addr);
+}
+
static unsigned int vpci_mmcfg_decode_addr(const struct hvm_mmcfg *mmcfg,
paddr_t addr, pci_sbdf_t *sbdf)
{
/* Inclusive IOMMU mappings are disabled by default on AMD hardware. */
if ( iommu_hwdom_inclusive == -1 )
iommu_hwdom_inclusive = 0;
+ /* Reserved IOMMU mappings are disabled by default on AMD hardware. */
+ if ( iommu_hwdom_reserved == -1 )
+ iommu_hwdom_reserved = 0;
if ( allocate_domain_resources(dom_iommu(d)) )
BUG();
printk(XENLOG_WARNING
"map-inclusive dom0-iommu option is not supported on ARM\n");
iommu_hwdom_inclusive = 0;
+ if ( iommu_hwdom_reserved == 1 )
+ printk(XENLOG_WARNING
+ "map-reserved dom0-iommu option is not supported on ARM\n");
+ iommu_hwdom_reserved = 0;
}
static void arm_smmu_iommu_domain_teardown(struct domain *d)
bool __hwdom_initdata iommu_hwdom_strict;
bool __read_mostly iommu_hwdom_passthrough;
int8_t __hwdom_initdata iommu_hwdom_inclusive = -1;
+int8_t __hwdom_initdata iommu_hwdom_reserved = -1;
/*
* In the current implementation of VT-d posted interrupts, in some extreme
iommu_hwdom_strict = val;
else if ( (val = parse_boolean("map-inclusive", s, ss)) >= 0 )
iommu_hwdom_inclusive = val;
+ else if ( (val = parse_boolean("map-reserved", s, ss)) >= 0 )
+            iommu_hwdom_reserved = val;
else
rc = -EINVAL;
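The `-1` initializer used for `iommu_hwdom_reserved` (and `iommu_hwdom_inclusive`) implements a tri-state: `-1` means "not set on the command line", which lets each vendor driver pick its own default at hardware-domain initialization. A minimal standalone sketch of that pattern, using hypothetical names outside the Xen tree:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* -1: unset, 0: disabled, 1: enabled (mirrors the int8_t tri-state). */
static int8_t opt_map_reserved = -1;

/* Hypothetical stand-in for Xen's parse_boolean(): 1/0 on match, -1 otherwise. */
static int parse_bool_token(const char *name, const char *tok)
{
    if ( !strcmp(tok, name) )
        return 1;
    if ( !strncmp(tok, "no-", 3) && !strcmp(tok + 3, name) )
        return 0;
    return -1;
}

int main(void)
{
    const char *cmdline_tok = "no-map-reserved"; /* example token */
    int val = parse_bool_token("map-reserved", cmdline_tok);

    if ( val >= 0 )
        opt_map_reserved = val;

    /* At hwdom init: apply the platform default only if still unset. */
    bool intel = true; /* placeholder for a real hardware check */
    if ( opt_map_reserved == -1 )
        opt_map_reserved = intel ? 1 : 0;

    printf("map-reserved: %d\n", opt_map_reserved);
    return 0;
}
```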
hd->platform_ops->hwdom_init(d);
- ASSERT(iommu_hwdom_inclusive != -1);
+    ASSERT(iommu_hwdom_inclusive != -1 && iommu_hwdom_reserved != -1);
if ( iommu_hwdom_inclusive && !is_pv_domain(d) )
{
printk(XENLOG_WARNING
/* Inclusive mappings are enabled by default on Intel hardware for PV. */
if ( iommu_hwdom_inclusive == -1 )
iommu_hwdom_inclusive = is_pv_domain(d);
+ /* Reserved IOMMU mappings are enabled by default on Intel hardware. */
+ if ( iommu_hwdom_reserved == -1 )
+ iommu_hwdom_reserved = 1;
setup_hwdom_pci_devices(d, setup_hwdom_device);
setup_hwdom_rmrr(d);
#include <xen/softirq.h>
#include <xsm/xsm.h>
+#include <asm/hvm/io.h>
#include <asm/setup.h>
void iommu_update_ire_from_apic(
unsigned long max_pfn)
{
mfn_t mfn = _mfn(pfn);
+ unsigned int i, type;
/*
* Set up 1:1 mapping for dom0. Default to include only conventional RAM
* areas and let RMRRs include needed reserved regions. When set, the
* inclusive mapping additionally maps in every pfn up to 4GB except those
- * that fall in unusable ranges.
+ * that fall in unusable ranges for PV Dom0.
*/
- if ( (pfn > max_pfn && !mfn_valid(mfn)) || xen_in_range(pfn) )
+ if ( (pfn > max_pfn && !mfn_valid(mfn)) || xen_in_range(pfn) ||
+ /*
+         * Ignore any address below 1MB; that range is already identity
+         * mapped by the Dom0 builder for HVM.
+ */
+ (!d->domain_id && is_hvm_domain(d) && pfn < PFN_DOWN(MB(1))) )
return false;
- switch ( page_get_ram_type(mfn) )
+ switch ( type = page_get_ram_type(mfn) )
{
case RAM_TYPE_UNUSABLE:
return false;
break;
default:
- if ( !iommu_hwdom_inclusive || pfn > max_pfn )
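+        /*
+         * page_get_ram_type() can return a combination of RAM_TYPE_* bits
+         * when a page spans several memory map ranges, hence the bit test
+         * below instead of an equality check.
+         */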
+ if ( type & RAM_TYPE_RESERVED )
+ {
+ if ( !iommu_hwdom_inclusive && !iommu_hwdom_reserved )
+ return false;
+ }
+ else if ( is_hvm_domain(d) || !iommu_hwdom_inclusive || pfn > max_pfn )
return false;
}
+ /*
+     * Check that the page doesn't overlap with the LAPIC.
+     * TODO: if the guest relocates the MMIO area of the LAPIC, Xen should
+     * make sure there's nothing in the new address that would prevent trapping.
+ */
+ if ( has_vlapic(d) )
+ {
+ const struct vcpu *v;
+
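+        /* The LAPIC base is a per-vCPU MSR, so check every vCPU's page. */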
+ for_each_vcpu(d, v)
+ if ( pfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) )
+ return false;
+ }
+ /* ... or the IO-APIC */
+ for ( i = 0; has_vioapic(d) && i < d->arch.hvm.nr_vioapics; i++ )
+ if ( pfn == PFN_DOWN(domain_vioapic(d, i)->base_address) )
+ return false;
+ /*
+ * ... or the PCIe MCFG regions.
+     * TODO: runtime-added MMCFG regions are not checked to make sure they
+ * don't overlap with already mapped regions, thus preventing trapping.
+ */
+ if ( has_vpci(d) && vpci_is_mmcfg_address(d, pfn_to_paddr(pfn)) )
+ return false;
+
return true;
}
BUG_ON(!is_hardware_domain(d));
- if ( iommu_hwdom_passthrough || !is_pv_domain(d) )
+ if ( iommu_hwdom_passthrough )
return;
max_pfn = (GB(4) >> PAGE_SHIFT) - 1;
if ( !hwdom_iommu_map(d, pfn, max_pfn) )
continue;
- rc = iommu_map_page(d, pfn, pfn, IOMMUF_readable|IOMMUF_writable);
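+        /*
+         * Translated domains (e.g. a PVH Dom0) need the identity mapping
+         * reflected in the p2m so the CPU and IOMMU views stay consistent;
+         * a PV Dom0 only needs the IOMMU entry.
+         */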
+ if ( paging_mode_translate(d) )
+ rc = set_identity_p2m_entry(d, pfn, p2m_access_rw, 0);
+ else
+ rc = iommu_map_page(d, pfn, pfn, IOMMUF_readable|IOMMUF_writable);
if ( rc )
printk(XENLOG_WARNING " d%d: IOMMU mapping failed: %d\n",
d->domain_id, rc);
/* Destroy tracked MMCFG areas. */
void destroy_vpci_mmcfg(struct domain *d);
+/* Check if an address falls within an MMCFG region for a domain. */
+bool vpci_is_mmcfg_address(const struct domain *d, paddr_t addr);
+
#endif /* __ASM_X86_HVM_IO_H__ */
extern bool_t amd_iommu_perdev_intremap;
extern bool iommu_hwdom_strict, iommu_hwdom_passthrough;
-extern int8_t iommu_hwdom_inclusive;
+extern int8_t iommu_hwdom_inclusive, iommu_hwdom_reserved;
extern unsigned int iommu_dev_iotlb_timeout;