x86/IOMMU: don't restrict IRQ affinities to online CPUs
author    Jan Beulich <jbeulich@suse.com>
          Thu, 25 Jul 2019 10:14:52 +0000 (12:14 +0200)
committer Jan Beulich <jbeulich@suse.com>
          Thu, 25 Jul 2019 10:14:52 +0000 (12:14 +0200)
In line with "x86/IRQ: desc->affinity should strictly represent the
requested value", the affinities of internally used IRQ(s) also
shouldn't be restricted to online CPUs. Make set_desc_affinity() cope
with a NULL mask being passed (just like assign_irq_vector() does),
which by implication makes set_msi_affinity() cope as well, and have
the IOMMU code pass NULL instead of &cpu_online_map (when, for VT-d,
there's no NUMA node information available).
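
A minimal caller sketch (illustrative only, mirroring the AMD call site
in the diff below): passing NULL now requests an unrestricted affinity
instead of pinning the IRQ to the boot-time online map:

    struct irq_desc *desc = irq_to_desc(iommu->msi.irq);

    spin_lock(&desc->lock);
    set_msi_affinity(desc, NULL);    /* previously: &cpu_online_map */
    spin_unlock(&desc->lock);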

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: Brian Woods <brian.woods@amd.com>
xen/arch/x86/irq.c
xen/drivers/passthrough/amd/iommu_init.c
xen/drivers/passthrough/vtd/iommu.c

diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 9171dbe150160a95b2494850f870d0fa02f7a920..668a1f5b367d6117c3323b9319475117991379c7 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -796,18 +796,26 @@ unsigned int set_desc_affinity(struct irq_desc *desc, const cpumask_t *mask)
     unsigned long flags;
     cpumask_t dest_mask;
 
-    if (!cpumask_intersects(mask, &cpu_online_map))
+    if ( mask && !cpumask_intersects(mask, &cpu_online_map) )
         return BAD_APICID;
 
     spin_lock_irqsave(&vector_lock, flags);
-    ret = _assign_irq_vector(desc, mask);
+    ret = _assign_irq_vector(desc, mask ?: TARGET_CPUS);
     spin_unlock_irqrestore(&vector_lock, flags);
 
-    if (ret < 0)
+    if ( ret < 0 )
         return BAD_APICID;
 
-    cpumask_copy(desc->affinity, mask);
-    cpumask_and(&dest_mask, mask, desc->arch.cpu_mask);
+    if ( mask )
+    {
+        cpumask_copy(desc->affinity, mask);
+        cpumask_and(&dest_mask, mask, desc->arch.cpu_mask);
+    }
+    else
+    {
+        cpumask_setall(desc->affinity);
+        cpumask_copy(&dest_mask, desc->arch.cpu_mask);
+    }
     cpumask_and(&dest_mask, &dest_mask, &cpu_online_map);
 
     return cpu_mask_to_apicid(&dest_mask);
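
A note on the "?:" above: this is the GNU C conditional operator with
the middle operand omitted, so the new vector-assignment line is
equivalent to the expansion below (TARGET_CPUS being Xen's default
target mask):

    /* Equivalent expansion of the GNU "?:" shorthand (sketch): */
    ret = _assign_irq_vector(desc, mask ? mask : TARGET_CPUS);

With mask == NULL, desc->affinity is set to all CPUs (a wildcard) and
the actual destination is derived solely from desc->arch.cpu_mask,
restricted to online CPUs only at the final cpumask_and().
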
diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index 98103c87eca1534690cbe7eb86a25470d827b389..94790feb31be5d189c7817e62a494522515f9d47 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -887,7 +887,7 @@ static void enable_iommu(struct amd_iommu *iommu)
 
     desc = irq_to_desc(iommu->msi.irq);
     spin_lock(&desc->lock);
-    set_msi_affinity(desc, &cpu_online_map);
+    set_msi_affinity(desc, NULL);
     spin_unlock(&desc->lock);
 
     amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index f0e8a419b3451e5180326c74b97b35c6272c45d1..4cf1e0980eeae6b755a331c0c271251dcad6c4be 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -2133,11 +2133,11 @@ static void adjust_irq_affinity(struct acpi_drhd_unit *drhd)
     const struct acpi_rhsa_unit *rhsa = drhd_to_rhsa(drhd);
     unsigned int node = rhsa ? pxm_to_node(rhsa->proximity_domain)
                              : NUMA_NO_NODE;
-    const cpumask_t *cpumask = &cpu_online_map;
+    const cpumask_t *cpumask = NULL;
     struct irq_desc *desc;
 
     if ( node < MAX_NUMNODES && node_online(node) &&
-         cpumask_intersects(&node_to_cpumask(node), cpumask) )
+         cpumask_intersects(&node_to_cpumask(node), &cpu_online_map) )
         cpumask = &node_to_cpumask(node);
 
     desc = irq_to_desc(drhd->iommu->msi.irq);
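
Read in isolation, the node-selection logic above amounts to the
standalone sketch below (hypothetical helper name; pxm_to_node(),
node_online() and node_to_cpumask() assumed to behave as in Xen; NULL
now means "don't restrict the affinity"):

    /* Sketch: choose the requested mask for an IOMMU's MSI IRQ. */
    static const cpumask_t *pick_iommu_irq_mask(unsigned int node)
    {
        /* Prefer the node's CPUs if the node is online with online CPUs. */
        if ( node < MAX_NUMNODES && node_online(node) &&
             cpumask_intersects(&node_to_cpumask(node), &cpu_online_map) )
            return &node_to_cpumask(node);

        /* Otherwise leave the IRQ's affinity unrestricted. */
        return NULL;
    }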