     iommu->enabled = 1;
     spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static void disable_iommu(struct amd_iommu *iommu)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&iommu->lock, flags);
+    if ( !iommu->enabled )
+    {
+        spin_unlock_irqrestore(&iommu->lock, flags);
+        return;
+    }
+
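+    /* Mask the IOMMU interrupt and stop command buffer and event log processing. */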
+    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
+    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_DISABLED);
+    set_iommu_event_log_control(iommu, IOMMU_CONTROL_DISABLED);
+
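+    /* The PPR log and guest translation are optional features; only touch them where supported. */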
+    if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_PPRSUP_SHIFT) )
+        set_iommu_ppr_log_control(iommu, IOMMU_CONTROL_DISABLED);
+
+    if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_GTSUP_SHIFT) )
+        set_iommu_guest_translation_control(iommu, IOMMU_CONTROL_DISABLED);
+
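+    /* Finally turn off address translation itself. */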
+    set_iommu_translation_control(iommu, IOMMU_CONTROL_DISABLED);
+
+    iommu->enabled = 0;
+
+    spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static void __init deallocate_buffer(void *buf, uint32_t sz)
@@ ... @@
         list_del(&iommu->list);
         if ( iommu->enabled )
         {
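+            /* Quiesce the hardware before its ring buffers are freed. */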
+            disable_iommu(iommu);
             deallocate_ring_buffer(&iommu->cmd_buffer);
             deallocate_ring_buffer(&iommu->event_log);
             deallocate_ring_buffer(&iommu->ppr_log);
@@ ... @@
     return rc;
 }
 
-static void disable_iommu(struct amd_iommu *iommu)
-{
-    unsigned long flags;
-
-    spin_lock_irqsave(&iommu->lock, flags);
-
-    if ( !iommu->enabled )
-    {
-        spin_unlock_irqrestore(&iommu->lock, flags);
-        return;
-    }
-
-    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
-    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_DISABLED);
-    set_iommu_event_log_control(iommu, IOMMU_CONTROL_DISABLED);
-
-    if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_PPRSUP_SHIFT) )
-        set_iommu_ppr_log_control(iommu, IOMMU_CONTROL_DISABLED);
-
-    if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_GTSUP_SHIFT) )
-        set_iommu_guest_translation_control(iommu, IOMMU_CONTROL_DISABLED);
-
-    set_iommu_translation_control(iommu, IOMMU_CONTROL_DISABLED);
-
-    iommu->enabled = 0;
-
-    spin_unlock_irqrestore(&iommu->lock, flags);
-
-}
-
 static void invalidate_all_domain_pages(void)
 {
     struct domain *d;