if ( ret )
goto destroy_m2p;
- if ( iommu_enabled && !iommu_passthrough && !need_iommu(hardware_domain) )
+ if ( iommu_enabled && !iommu_hwdom_passthrough &&
+ !need_iommu(hardware_domain) )
{
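/* Make newly hot-added RAM DMA-accessible to the hardware domain. */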
for ( i = spfn; i < epfn; i++ )
if ( iommu_map_page(hardware_domain, i, i,
                    IOMMUF_readable|IOMMUF_writable) )
    break;
radix_tree_destroy(&ivrs_maps, xfree);
iommu_enabled = 0;
- iommu_passthrough = 0;
+ iommu_hwdom_passthrough = false;
iommu_intremap = 0;
iommuv2_enabled = 0;
}
BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode ||
!iommu->dev_table.buffer );
- if ( iommu_passthrough && is_hardware_domain(domain) )
+ if ( iommu_hwdom_passthrough && is_hardware_domain(domain) )
valid = 0;
if ( ats_enabled )
if ( allocate_domain_resources(dom_iommu(d)) )
BUG();
- if ( !iommu_passthrough && !need_iommu(d) )
+ if ( !iommu_hwdom_passthrough && !need_iommu(d) )
{
int rc = 0;
bool_t __initdata iommu_enable = 1;
bool_t __read_mostly iommu_enabled;
bool_t __read_mostly force_iommu;
-bool_t __hwdom_initdata iommu_dom0_strict;
bool_t __read_mostly iommu_verbose;
bool_t __read_mostly iommu_workaround_bios_bug;
bool_t __read_mostly iommu_igfx = 1;
-bool_t __read_mostly iommu_passthrough;
bool_t __read_mostly iommu_snoop = 1;
bool_t __read_mostly iommu_qinval = 1;
bool_t __read_mostly iommu_intremap = 1;
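+/*
+ * Pass-through leaves the hardware domain's DMA untranslated, while
+ * strict mode maps only pages the hardware domain actually owns. Both
+ * flags concern the hardware domain, which need not be dom0, hence the
+ * hwdom_ prefix.
+ */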
+bool __hwdom_initdata iommu_hwdom_strict;
+bool __read_mostly iommu_hwdom_passthrough;
+
/*
* In the current implementation of VT-d posted interrupts, in some extreme
* cases, the per cpu list which saves the blocked vCPU will be very long,
else if ( !strncmp(s, "amd-iommu-perdev-intremap", ss - s) )
amd_iommu_perdev_intremap = val;
else if ( !strncmp(s, "dom0-passthrough", ss - s) )
- iommu_passthrough = val;
+ iommu_hwdom_passthrough = val;
else if ( !strncmp(s, "dom0-strict", ss - s) )
- iommu_dom0_strict = val;
+ iommu_hwdom_strict = val;
else if ( !strncmp(s, "sharept", ss - s) )
iommu_hap_pt_share = val;
else
arch_iommu_check_autotranslated_hwdom(d);
- if ( iommu_passthrough )
+ if ( iommu_hwdom_passthrough )
panic("Dom0 uses paging translated mode, dom0-passthrough must not be enabled\n");
- iommu_dom0_strict = 1;
+ iommu_hwdom_strict = true;
}
void __hwdom_init iommu_hwdom_init(struct domain *d)
return;
register_keyhandler('o', &iommu_dump_p2m_table, "dump iommu p2m table", 0);
- d->need_iommu = !!iommu_dom0_strict;
+ d->need_iommu = iommu_hwdom_strict;
if ( need_iommu(d) && !iommu_use_hap_pt(d) )
{
struct page_info *page;
int rc = -ENODEV;
bool_t force_intremap = force_iommu && iommu_intremap;
- if ( iommu_dom0_strict )
- iommu_passthrough = 0;
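+ /* Strict and pass-through are mutually exclusive; strict takes precedence. */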
+ if ( iommu_hwdom_strict )
+ iommu_hwdom_passthrough = false;
if ( iommu_enable )
{
if ( !iommu_enabled )
{
iommu_snoop = 0;
- iommu_passthrough = 0;
- iommu_dom0_strict = 0;
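+ /* Neither hardware domain mode is meaningful without the IOMMU itself. */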
+ iommu_hwdom_passthrough = false;
+ iommu_hwdom_strict = false;
}
printk("I/O virtualisation %sabled\n", iommu_enabled ? "en" : "dis");
if ( iommu_enabled )
{
printk(" - Dom0 mode: %s\n",
- iommu_passthrough ? "Passthrough" :
- iommu_dom0_strict ? "Strict" : "Relaxed");
+ iommu_hwdom_passthrough ? "Passthrough" :
+ iommu_hwdom_strict ? "Strict" : "Relaxed");
printk("Interrupt remapping %sabled\n", iommu_intremap ? "en" : "dis");
tasklet_init(&iommu_pt_cleanup_tasklet, iommu_free_pagetables, 0);
}
{
struct acpi_drhd_unit *drhd;
- if ( !iommu_passthrough && is_pv_domain(d) )
+ if ( !iommu_hwdom_passthrough && is_pv_domain(d) )
{
/* Set up 1:1 page table for hardware domain. */
vtd_set_hwdom_mapping(d);
return res;
}
- if ( iommu_passthrough && is_hardware_domain(domain) )
+ if ( iommu_hwdom_passthrough && is_hardware_domain(domain) )
{
context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
agaw = level_to_agaw(iommu->nr_pt_levels);
return 0;
/* Do nothing for the hardware domain when the IOMMU supports pass-through. */
- if ( iommu_passthrough && is_hardware_domain(d) )
+ if ( iommu_hwdom_passthrough && is_hardware_domain(d) )
return 0;
spin_lock(&hd->arch.mapping_lock);
unsigned long gfn)
{
/* Do nothing for the hardware domain when the IOMMU supports pass-through. */
- if ( iommu_passthrough && is_hardware_domain(d) )
+ if ( iommu_hwdom_passthrough && is_hardware_domain(d) )
return 0;
return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
if ( iommu_snoop && !ecap_snp_ctl(iommu->ecap) )
iommu_snoop = 0;
- if ( iommu_passthrough && !ecap_pass_thru(iommu->ecap) )
- iommu_passthrough = 0;
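+ /* Pass-through needs hardware support, advertised via the ECAP register. */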
+ if ( iommu_hwdom_passthrough && !ecap_pass_thru(iommu->ecap) )
+ iommu_hwdom_passthrough = false;
if ( iommu_qinval && !ecap_queued_inval(iommu->ecap) )
iommu_qinval = 0;
#define P(p,s) printk("Intel VT-d %s %senabled.\n", s, (p)? "" : "not ")
P(iommu_snoop, "Snoop Control");
- P(iommu_passthrough, "Dom0 DMA Passthrough");
+ P(iommu_hwdom_passthrough, "Dom0 DMA Passthrough");
P(iommu_qinval, "Queued Invalidation");
P(iommu_intremap, "Interrupt Remapping");
P(iommu_intpost, "Posted Interrupt");
error:
iommu_enabled = 0;
iommu_snoop = 0;
- iommu_passthrough = 0;
+ iommu_hwdom_passthrough = false;
iommu_qinval = 0;
iommu_intremap = 0;
iommu_intpost = 0;
* If dom0-strict mode is enabled then exclude conventional RAM
* and let the common code map dom0's pages.
*/
- if ( iommu_dom0_strict &&
+ if ( iommu_hwdom_strict &&
page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL) )
continue;
#include <asm/iommu.h>
extern bool_t iommu_enable, iommu_enabled;
-extern bool_t force_iommu, iommu_dom0_strict, iommu_verbose;
-extern bool_t iommu_workaround_bios_bug, iommu_igfx, iommu_passthrough;
+extern bool_t force_iommu, iommu_verbose;
+extern bool_t iommu_workaround_bios_bug, iommu_igfx;
extern bool_t iommu_snoop, iommu_qinval, iommu_intremap, iommu_intpost;
extern bool_t iommu_hap_pt_share;
extern bool_t iommu_debug;
extern bool_t amd_iommu_perdev_intremap;
+extern bool iommu_hwdom_strict, iommu_hwdom_passthrough;
+
extern unsigned int iommu_dev_iotlb_timeout;
int iommu_setup(void);