enabled.
### dom0-iommu
-> `= List of [ passthrough | strict ]`
+> `= List of [ passthrough | strict | map-inclusive ]`
This list of booleans controls the IOMMU usage by Dom0:
`true` for a PVH Dom0 and any attempt to overwrite it from the command line
is ignored.
+* `map-inclusive`: sets up DMA remapping for all the non-RAM regions below 4GB
+ except for unusable ranges. Use this to work around firmware issues providing
+ incorrect RMRR/IVMD entries. Rather than only mapping RAM pages for IOMMU
+ accesses for Dom0, with this option all pages up to 4GB, not marked as
+ unusable in the E820 table, will get a mapping established. Note that this
+ option is only applicable to a PV Dom0 and is enabled by default on Intel
+ hardware.
+
### dom0\_ioports\_disable (x86)
> `= List of <hex>-<hex>`
### iommu\_inclusive\_mapping (VT-d)
> `= <boolean>`
+**WARNING: This command line option is deprecated, and superseded by
+_dom0-iommu=map-inclusive_ - using both options in combination is undefined.**
+
> Default: `true`
Use this to work around firmware issues providing incorrect RMRR entries.
unsigned long i;
const struct amd_iommu *iommu;
+ /* Inclusive IOMMU mappings are disabled by default on AMD hardware. */
+ if ( iommu_hwdom_inclusive == -1 )
+ iommu_hwdom_inclusive = 0;
+
if ( allocate_domain_resources(dom_iommu(d)) )
BUG();
/* The IOMMU shares the p2m with the CPU */
return -ENOSYS;
}
+
+/* Stub: no arch-specific hardware-domain IOMMU setup is required here. */
+void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
+{
+}
static void __hwdom_init arm_smmu_iommu_hwdom_init(struct domain *d)
{
+    /*
+     * Force-disable dom0-iommu options not supported on ARM.  Warn only
+     * when the user explicitly enabled one (value 1; -1 means the option
+     * was left unset on the command line).
+     */
+    if ( iommu_hwdom_inclusive == 1 )
+        printk(XENLOG_WARNING
+               "map-inclusive dom0-iommu option is not supported on ARM\n");
+    iommu_hwdom_inclusive = 0;
}
static void arm_smmu_iommu_domain_teardown(struct domain *d)
bool __hwdom_initdata iommu_hwdom_strict;
bool __read_mostly iommu_hwdom_passthrough;
+int8_t __hwdom_initdata iommu_hwdom_inclusive = -1;
/*
* In the current implementation of VT-d posted interrupts, in some extreme
iommu_hwdom_passthrough = val;
else if ( (val = parse_boolean("strict", s, ss)) >= 0 )
iommu_hwdom_strict = val;
+ else if ( (val = parse_boolean("map-inclusive", s, ss)) >= 0 )
+ iommu_hwdom_inclusive = val;
else
rc = -EINVAL;
}
hd->platform_ops->hwdom_init(d);
+
+ ASSERT(iommu_hwdom_inclusive != -1);
+ if ( iommu_hwdom_inclusive && !is_pv_domain(d) )
+ {
+ printk(XENLOG_WARNING
+ "IOMMU inclusive mappings are only supported on PV Dom0\n");
+ iommu_hwdom_inclusive = 0;
+ }
+
+ arch_iommu_hwdom_init(d);
}
void iommu_teardown(struct domain *d)
bool_t platform_supports_intremap(void);
bool_t platform_supports_x2apic(void);
-void vtd_set_hwdom_mapping(struct domain *d);
-
#endif // _VTD_EXTERN_H_
{
struct acpi_drhd_unit *drhd;
- if ( !iommu_hwdom_passthrough && is_pv_domain(d) )
- {
- /* Set up 1:1 page table for hardware domain. */
- vtd_set_hwdom_mapping(d);
- }
+ /* Inclusive mappings are enabled by default on Intel hardware for PV. */
+ if ( iommu_hwdom_inclusive == -1 )
+ iommu_hwdom_inclusive = is_pv_domain(d);
setup_hwdom_pci_devices(d, setup_hwdom_device);
setup_hwdom_rmrr(d);
#include <xen/irq.h>
#include <xen/numa.h>
#include <asm/fixmap.h>
-#include <asm/setup.h>
#include "../iommu.h"
#include "../dmar.h"
#include "../vtd.h"
* iommu_inclusive_mapping: when set, all memory below 4GB is included in dom0
* 1:1 iommu mappings except xen and unusable regions.
*/
-static bool_t __hwdom_initdata iommu_inclusive_mapping = 1;
-boolean_param("iommu_inclusive_mapping", iommu_inclusive_mapping);
+boolean_param("iommu_inclusive_mapping", iommu_hwdom_inclusive);
void *map_vtd_domain_page(u64 maddr)
{
wbinvd();
}
-void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
-{
- unsigned long i, top, max_pfn;
-
- BUG_ON(!is_hardware_domain(d));
-
- max_pfn = (GB(4) >> PAGE_SHIFT) - 1;
- top = max(max_pdx, pfn_to_pdx(max_pfn) + 1);
-
- for ( i = 0; i < top; i++ )
- {
- unsigned long pfn = pdx_to_pfn(i);
- bool map;
- int rc;
-
- /*
- * Set up 1:1 mapping for dom0. Default to include only
- * conventional RAM areas and let RMRRs include needed reserved
- * regions. When set, the inclusive mapping additionally maps in
- * every pfn up to 4GB except those that fall in unusable ranges.
- */
- if ( pfn > max_pfn && !mfn_valid(_mfn(pfn)) )
- continue;
-
- if ( iommu_inclusive_mapping && pfn <= max_pfn )
- map = !page_is_ram_type(pfn, RAM_TYPE_UNUSABLE);
- else
- map = page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL);
-
- if ( !map )
- continue;
-
- /* Exclude Xen bits */
- if ( xen_in_range(pfn) )
- continue;
-
- /*
- * If dom0-strict mode is enabled then exclude conventional RAM
- * and let the common code map dom0's pages.
- */
- if ( iommu_hwdom_strict &&
- page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL) )
- continue;
-
- rc = iommu_map_page(d, pfn, pfn, IOMMUF_readable|IOMMUF_writable);
- if ( rc )
- printk(XENLOG_WARNING VTDPREFIX " d%d: IOMMU mapping failed: %d\n",
- d->domain_id, rc);
-
- if (!(i & 0xfffff))
- process_pending_softirqs();
- }
-}
-
#include <xen/softirq.h>
#include <xsm/xsm.h>
+#include <asm/setup.h>
+
void iommu_update_ire_from_apic(
unsigned int apic, unsigned int reg, unsigned int value)
{
{
}
+/*
+ * Establish the hardware domain's identity IOMMU mappings (x86).
+ *
+ * By default only conventional RAM is mapped, with RMRRs covering needed
+ * reserved regions.  When iommu_hwdom_inclusive is set, every pfn below
+ * 4GB not marked unusable in the E820 map is mapped as well, to work
+ * around firmware providing incorrect RMRR/IVMD entries.  A no-op in
+ * passthrough mode or for a non-PV hardware domain.
+ */
+void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
+{
+    unsigned long i, top, max_pfn;
+
+    BUG_ON(!is_hardware_domain(d));
+
+    /* Identity mappings are only set up for a translated PV Dom0. */
+    if ( iommu_hwdom_passthrough || !is_pv_domain(d) )
+        return;
+
+    max_pfn = (GB(4) >> PAGE_SHIFT) - 1;
+    top = max(max_pdx, pfn_to_pdx(max_pfn) + 1);
+
+    for ( i = 0; i < top; i++ )
+    {
+        unsigned long pfn = pdx_to_pfn(i);
+        bool map;
+        int rc;
+
+        /*
+         * Set up 1:1 mapping for dom0. Default to include only
+         * conventional RAM areas and let RMRRs include needed reserved
+         * regions. When set, the inclusive mapping additionally maps in
+         * every pfn up to 4GB except those that fall in unusable ranges.
+         */
+        if ( pfn > max_pfn && !mfn_valid(_mfn(pfn)) )
+            continue;
+
+        if ( iommu_hwdom_inclusive && pfn <= max_pfn )
+            map = !page_is_ram_type(pfn, RAM_TYPE_UNUSABLE);
+        else
+            map = page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL);
+
+        if ( !map )
+            continue;
+
+        /* Exclude Xen bits */
+        if ( xen_in_range(pfn) )
+            continue;
+
+        /*
+         * If dom0-strict mode is enabled then exclude conventional RAM
+         * and let the common code map dom0's pages.
+         */
+        if ( iommu_hwdom_strict &&
+             page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL) )
+            continue;
+
+        rc = iommu_map_page(d, pfn, pfn, IOMMUF_readable|IOMMUF_writable);
+        if ( rc )
+            /* No stray leading space: VTDPREFIX is gone from this message. */
+            printk(XENLOG_WARNING "d%d: IOMMU mapping failed: %d\n",
+                   d->domain_id, rc);
+
+        /* Periodically break out of this (potentially long) loop. */
+        if ( !(i & 0xfffff) )
+            process_pending_softirqs();
+    }
+}
+
/*
* Local variables:
* mode: C
extern bool_t amd_iommu_perdev_intremap;
extern bool iommu_hwdom_strict, iommu_hwdom_passthrough;
+extern int8_t iommu_hwdom_inclusive;
extern unsigned int iommu_dev_iotlb_timeout;
int arch_iommu_domain_init(struct domain *d);
int arch_iommu_populate_page_table(struct domain *d);
void arch_iommu_check_autotranslated_hwdom(struct domain *d);
+void arch_iommu_hwdom_init(struct domain *d);
int iommu_construct(struct domain *d);