continue;
irq = pin_2_irq(irq_entry, ioapic, pin);
desc = irq_to_desc(irq);
- BUG_ON(cpumask_empty(desc->arch.cpu_mask));
+ BUG_ON(!cpumask_intersects(desc->arch.cpu_mask, &cpu_online_map));
set_ioapic_affinity_irq(desc, desc->arch.cpu_mask);
}
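
The hunk above relaxes the fixup-time assertion: ->arch.cpu_mask may now be non-empty yet contain offline CPUs, as long as it still intersects cpu_online_map. A minimal stand-alone C model of the difference between the two checks, not part of the patch; plain unsigned-long bitmaps stand in for cpumask_t and the bit values are made up for illustration:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long online   = 0x0fUL; /* CPUs 0-3 online */
        unsigned long cpu_mask = 0x18UL; /* CPUs 3 and 4: one online, one offline */

        /* Old check: only rejects a completely empty mask. */
        assert(cpu_mask != 0);

        /* New check: additionally rejects a non-empty mask covering only
         * offline CPUs, e.g. cpu_mask == 0x10UL would trip it. */
        assert(cpu_mask & online);

        printf("mask %#lx intersects online %#lx\n", cpu_mask, online);
        return 0;
    }
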
{
struct irq_desc *desc = irq_to_desc(irq);
struct IO_APIC_route_entry entry;
- cpumask_t mask;
unsigned long flags;
int vector;
return vector;
entry.vector = vector;
- cpumask_copy(&mask, TARGET_CPUS);
- /* Don't chance ending up with an empty mask. */
- if (cpumask_intersects(&mask, desc->arch.cpu_mask))
- cpumask_and(&mask, &mask, desc->arch.cpu_mask);
- SET_DEST(entry, logical, cpu_mask_to_apicid(&mask));
+ if (cpumask_intersects(desc->arch.cpu_mask, TARGET_CPUS)) {
+ cpumask_t *mask = this_cpu(scratch_cpumask);
+
+ cpumask_and(mask, desc->arch.cpu_mask, TARGET_CPUS);
+ SET_DEST(entry, logical, cpu_mask_to_apicid(mask));
+ } else {
+ printk(XENLOG_ERR "IRQ%d: no target CPU (%*pb vs %*pb)\n",
+ irq, nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask),
+ nr_cpu_ids, cpumask_bits(TARGET_CPUS));
+ desc->status |= IRQ_DISABLED;
+ }
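
Note the switch from an on-stack cpumask_t to the pre-allocated per-CPU scratch_cpumask. A plausible motivation, not spelled out in the hunk itself, is stack footprint: with large NR_CPUS configurations a cpumask_t is far too big for the hypervisor's small stacks. A stand-alone sketch with an illustrative NR_CPUS value:

    #include <stdio.h>

    #define NR_CPUS 4096 /* illustrative; large configs are supported */

    typedef struct {
        unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
    } cpumask_t;

    int main(void)
    {
        /* 512 bytes on LP64: too much to place in a local variable,
         * hence the pre-allocated per-CPU scratch mask instead. */
        printf("sizeof(cpumask_t) = %zu bytes\n", sizeof(cpumask_t));
        return 0;
    }
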
apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
"(%d-%d -> %#x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
/* Set the vector field to the real vector! */
rte.vector = desc->arch.vector;
- SET_DEST(rte, logical, cpu_mask_to_apicid(desc->arch.cpu_mask));
+ if ( cpumask_intersects(desc->arch.cpu_mask, TARGET_CPUS) )
+ {
+ cpumask_t *mask = this_cpu(scratch_cpumask);
+
+ cpumask_and(mask, desc->arch.cpu_mask, TARGET_CPUS);
+ SET_DEST(rte, logical, cpu_mask_to_apicid(mask));
+ }
+ else
+ {
+ gprintk(XENLOG_ERR, "IRQ%d: no target CPU (%*pb vs %*pb)\n",
+ irq, nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask),
+ nr_cpu_ids, cpumask_bits(TARGET_CPUS));
+ desc->status |= IRQ_DISABLED;
+ rte.mask = 1;
+ }
__ioapic_write_entry(apic, pin, 0, rte);
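
Both route-entry hunks above share the same shape: derive the destination from the intersection of ->arch.cpu_mask with TARGET_CPUS, and disable (and, for the RTE case, mask) the IRQ rather than program a bogus destination when the intersection is empty. A stand-alone sketch of that shape, not the patch's code; __builtin_ctzl() (GCC/Clang) serves only as a hypothetical stand-in for cpu_mask_to_apicid():

    #include <stdio.h>

    static int pick_destination(unsigned long cpu_mask, unsigned long target_cpus)
    {
        unsigned long dest = cpu_mask & target_cpus;

        if (!dest)
            return -1; /* caller disables and masks the IRQ instead */

        return __builtin_ctzl(dest); /* stand-in for cpu_mask_to_apicid() */
    }

    int main(void)
    {
        printf("%d\n", pick_destination(0x18UL, 0x0fUL)); /* 3: CPU3 usable */
        printf("%d\n", pick_destination(0x10UL, 0x0fUL)); /* -1: no target */
        return 0;
    }
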
*/
static int current_vector = FIRST_DYNAMIC_VECTOR, current_offset = 0;
int cpu, err, old_vector;
- cpumask_t tmp_mask;
vmask_t *irq_used_vectors = NULL;
old_vector = irq_to_vector(irq);
- if (old_vector > 0) {
+ if ( old_vector > 0 )
+ {
+ cpumask_t tmp_mask;
+
cpumask_and(&tmp_mask, mask, &cpu_online_map);
if (cpumask_intersects(&tmp_mask, desc->arch.cpu_mask)) {
desc->arch.vector = old_vector;
else
irq_used_vectors = irq_get_used_vector_mask(irq);
- for_each_cpu(cpu, mask) {
+ for_each_cpu(cpu, mask)
+ {
+ const cpumask_t *vec_mask;
int new_cpu;
int vector, offset;
if (!cpu_online(cpu))
continue;
- cpumask_and(&tmp_mask, vector_allocation_cpumask(cpu),
- &cpu_online_map);
+ vec_mask = vector_allocation_cpumask(cpu);
vector = current_vector;
offset = current_offset;
&& test_bit(vector, irq_used_vectors) )
goto next;
- for_each_cpu(new_cpu, &tmp_mask)
+ for_each_cpu(new_cpu, vec_mask)
if (per_cpu(vector_irq, new_cpu)[vector] >= 0)
goto next;
/* Found one! */
release_old_vec(desc);
}
- trace_irq_mask(TRC_HW_IRQ_ASSIGN_VECTOR, irq, vector, &tmp_mask);
+ trace_irq_mask(TRC_HW_IRQ_ASSIGN_VECTOR, irq, vector, vec_mask);
- for_each_cpu(new_cpu, &tmp_mask)
+ for_each_cpu(new_cpu, vec_mask)
per_cpu(vector_irq, new_cpu)[vector] = irq;
desc->arch.vector = vector;
- cpumask_copy(desc->arch.cpu_mask, &tmp_mask);
+ cpumask_copy(desc->arch.cpu_mask, vec_mask);
desc->arch.used = IRQ_USED;
ASSERT((desc->arch.used_vectors == NULL)
cpumask_copy(desc->affinity, mask);
cpumask_and(&dest_mask, mask, desc->arch.cpu_mask);
+ cpumask_and(&dest_mask, &dest_mask, &cpu_online_map);
return cpu_mask_to_apicid(&dest_mask);
}
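
The vector-assignment hunks move the online filtering from assignment time to use time: the allocation loop now records vector_allocation_cpumask() unfiltered in ->arch.cpu_mask, while the destination computation at the end additionally masks with cpu_online_map. A stand-alone model of that split, with illustrative bit values:

    #include <assert.h>

    static const unsigned long online = 0x0fUL; /* CPUs 0-3 online */

    /* Assignment time: store the allocation mask without online filtering. */
    static unsigned long assign(unsigned long alloc_mask)
    {
        return alloc_mask;
    }

    /* Use time: narrow to the requested affinity and to online CPUs. */
    static unsigned long dest(unsigned long cpu_mask, unsigned long affinity)
    {
        return cpu_mask & affinity & online;
    }

    int main(void)
    {
        unsigned long cpu_mask = assign(0x38UL); /* CPUs 3-5; 4 and 5 offline */

        assert(dest(cpu_mask, ~0UL) == 0x08UL);  /* only CPU3 is targeted */
        return 0;
    }
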
struct arch_irq_desc {
s16 vector; /* vector itself is only 8 bits, */
s16 old_vector; /* but we use -1 for unassigned */
+ /*
+ * Except for high priority interrupts, @cpu_mask may have bits set for
+ * offline CPUs. Consumers need to be careful to mask this down to
+ * online ones as necessary. The intersection with cpu_online_map is,
+ * however, always supposed to be non-empty.
+ */
cpumask_var_t cpu_mask;
cpumask_var_t old_cpu_mask;
cpumask_var_t pending_mask;
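
Given the invariant the new comment documents, a minimal consumer-side sketch, not part of the patch, assuming Xen's cpumask primitives and reusing the per-CPU scratch_cpumask seen in the hunks above:

    /*
     * Narrow ->arch.cpu_mask to online CPUs before using it as a target
     * set, relying on the invariant that the intersection is never empty.
     * The result is only valid until the scratch mask is clobbered again.
     */
    static const cpumask_t *online_targets(const struct irq_desc *desc)
    {
        cpumask_t *mask = this_cpu(scratch_cpumask);

        cpumask_and(mask, desc->arch.cpu_mask, &cpu_online_map);
        ASSERT(!cpumask_empty(mask));

        return mask;
    }
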