amd iommu: Support INVALIDATE_IOMMU_ALL command.
author    Wei Wang <wei.wang2@amd.com>
          Tue, 22 Nov 2011 13:27:19 +0000 (13:27 +0000)
committer Wei Wang <wei.wang2@amd.com>
          Tue, 22 Nov 2011 13:27:19 +0000 (13:27 +0000)
It is one of the new architectural commands supported by IOMMU v2.
It instructs the IOMMU to clear all address translation and interrupt
remapping caches for all devices and all domains.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
Committed-by: Keir Fraser <keir@xen.org>
xen/drivers/passthrough/amd/iommu_cmd.c
xen/drivers/passthrough/amd/iommu_init.c
xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
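
For reference, the new command is a single 128-bit command buffer entry in
which only the opcode field is meaningful. A minimal sketch of the encoding,
assuming Xen's field placement (opcode in bits 31:28 of the second 32-bit
word, matching IOMMU_CMD_OPCODE_SHIFT in amd-iommu-defs.h); the helper name
is illustrative, not part of the patch:

    #include <stdint.h>

    /* Sketch: build an INVALIDATE_IOMMU_ALL command buffer entry.
     * Opcode 0x8 occupies bits 63:60 of the 128-bit command; all
     * other fields are reserved and must be zero. */
    static void build_invalidate_iommu_all(uint32_t cmd[4])
    {
        cmd[0] = 0;            /* bits  31:0   - reserved */
        cmd[1] = 0x8u << 28;   /* bits  63:32  - opcode in bits 63:60 */
        cmd[2] = 0;            /* bits  95:64  - reserved */
        cmd[3] = 0;            /* bits 127:96  - reserved */
    }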

diff --git a/xen/drivers/passthrough/amd/iommu_cmd.c b/xen/drivers/passthrough/amd/iommu_cmd.c
index fdf089e28b3626ee1bdb62fb83b9dde07e384b5e..f3a6a5ed86bb2334974fb4122cf6612f2ffc96fa 100644
@@ -277,6 +277,20 @@ static void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id)
     send_iommu_command(iommu, cmd);
 }
 
+void invalidate_iommu_all(struct amd_iommu *iommu)
+{
+    u32 cmd[4], entry;
+
+    cmd[3] = cmd[2] = cmd[0] = 0;
+
+    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_ALL, 0,
+                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
+                         &entry);
+    cmd[1] = entry;
+
+    send_iommu_command(iommu, cmd);
+}
+
 void amd_iommu_flush_iotlb(struct pci_dev *pdev,
                            uint64_t gaddr, unsigned int order)
 {
@@ -380,3 +394,11 @@ void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf)
     invalidate_interrupt_table(iommu, bdf);
     flush_command_buffer(iommu);
 }
+
+void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
+{
+    ASSERT( spin_is_locked(&iommu->lock) );
+
+    invalidate_iommu_all(iommu);
+    flush_command_buffer(iommu);
+}
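
Note that amd_iommu_flush_all_caches() asserts that iommu->lock is already
held, so callers take the lock themselves (enable_iommu() below does exactly
that). A hypothetical caller, sketched for illustration only:

    /* Sketch: flush all caches on one IOMMU under its lock. */
    static void example_flush_one_iommu(struct amd_iommu *iommu)
    {
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        /* Queues INVALIDATE_IOMMU_ALL, then waits via the
         * COMPLETION_WAIT issued by flush_command_buffer(). */
        amd_iommu_flush_all_caches(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
    }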
diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index 12fe3e40e4c5a6b2a6b05c1cf529a0ff3cc6cc7a..404638d19475280bff81f63872028133e07cc1d2 100644
@@ -598,6 +598,9 @@ static void enable_iommu(struct amd_iommu *iommu)
     set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
     set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
 
+    if ( iommu_has_feature(iommu, IOMMU_EXT_FEATURE_IASUP_SHIFT) )
+        amd_iommu_flush_all_caches(iommu);
+
     iommu->enabled = 1;
     spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -970,6 +973,9 @@ void amd_iommu_resume(void)
     }
 
     /* flush all cache entries after iommu re-enabled */
-    invalidate_all_devices();
-    invalidate_all_domain_pages();
+    if ( !iommu_has_feature(iommu, IOMMU_EXT_FEATURE_IASUP_SHIFT) )
+    {
+        invalidate_all_devices();
+        invalidate_all_domain_pages();
+    }
 }
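
Both call sites gate the new flush on the IASUP ("invalidate all supported")
bit of the IOMMU extended feature register, via iommu_has_feature(). A
minimal sketch of such a bit test, assuming a cached 64-bit feature value
(the helper name and parameter are illustrative):

    #include <stdint.h>

    /* Sketch: test one advertisement bit in a cached copy of the
     * extended feature register; shift would be
     * IOMMU_EXT_FEATURE_IASUP_SHIFT for INVALIDATE_IOMMU_ALL. */
    static inline int test_feature_bit(uint64_t efr, unsigned int shift)
    {
        return (efr >> shift) & 1;
    }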
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
index c6bf6cd8ec32334849797534b0cbdb9ba9c34b78..a9325cb72405e429c473ea0d17f828bb142fc36c 100644
 #define IOMMU_CMD_INVALIDATE_IOMMU_PAGES       0x3
 #define IOMMU_CMD_INVALIDATE_IOTLB_PAGES       0x4
 #define IOMMU_CMD_INVALIDATE_INT_TABLE         0x5
+#define IOMMU_CMD_INVALIDATE_IOMMU_ALL         0x8
 
 /* COMPLETION_WAIT command */
 #define IOMMU_COMP_WAIT_DATA_BUFFER_SIZE       8
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 5a327bfad0ff99339e3c117679b798671a40a9d1..960e9ff78357e55b605c82d4ef2153e9537c0519 100644
@@ -78,6 +78,7 @@ void amd_iommu_flush_iotlb(struct pci_dev *pdev, uint64_t gaddr,
                            unsigned int order);
 void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf);
 void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf);
+void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
 
 /* find iommu for bdf */
 struct amd_iommu *find_iommu_for_device(int seg, int bdf);