}
__initcall(setup_dump_irqs);
-/* A cpu has been removed from cpu_online_mask. Re-set irq affinities. */
-void fixup_irqs(void)
+/* Reset irq affinities to match the given CPU mask. */
+void fixup_irqs(const cpumask_t *mask, bool_t verbose)
{
-    unsigned int irq, sp;
+    unsigned int irq;
    static int warned;
    struct irq_desc *desc;
-    irq_guest_action_t *action;
-    struct pending_eoi *peoi;

    for ( irq = 0; irq < nr_irqs; irq++ )
    {
        vector = irq_to_vector(irq);
        if ( vector >= FIRST_HIPRIORITY_VECTOR &&
             vector <= LAST_HIPRIORITY_VECTOR )
-            cpumask_and(desc->arch.cpu_mask, desc->arch.cpu_mask,
-                        &cpu_online_map);
+            cpumask_and(desc->arch.cpu_mask, desc->arch.cpu_mask, mask);

        cpumask_copy(&affinity, desc->affinity);
-        if ( !desc->action || cpumask_subset(&affinity, &cpu_online_map) )
+        if ( !desc->action || cpumask_subset(&affinity, mask) )
        {
            spin_unlock(&desc->lock);
            continue;
        }

-        cpumask_and(&affinity, &affinity, &cpu_online_map);
+        cpumask_and(&affinity, &affinity, mask);
        if ( cpumask_empty(&affinity) )
        {
            break_affinity = 1;
-            cpumask_copy(&affinity, &cpu_online_map);
+            cpumask_copy(&affinity, mask);
        }

        if ( desc->handler->disable )
            desc->handler->disable(desc);

        if ( desc->handler->set_affinity )
            desc->handler->set_affinity(desc, &affinity);
        else if ( !(warned++) )
            set_affinity = 0;

        if ( desc->handler->enable )
            desc->handler->enable(desc);

        spin_unlock(&desc->lock);

+        if ( !verbose )
+            continue;
+
        if ( break_affinity && set_affinity )
            printk("Broke affinity for irq %i\n", irq);
        else if ( !set_affinity )
            printk("Cannot set affinity for irq %i\n", irq);
    }

    local_irq_enable();
    mdelay(1);
    local_irq_disable();
+}
+
+void fixup_eoi(void)
+{
+    unsigned int irq, sp;
+    struct irq_desc *desc;
+    irq_guest_action_t *action;
+    struct pending_eoi *peoi;

    /* Clean up cpu_eoi_map of every interrupt to exclude this CPU. */
    for ( irq = 0; irq < nr_irqs; irq++ )
int unmap_domain_pirq_emuirq(struct domain *d, int pirq);
bool_t hvm_domain_use_pirq(const struct domain *, const struct pirq *);
-/* A cpu has been removed from cpu_online_mask. Re-set irq affinities. */
-void fixup_irqs(void);
+/* Reset irq affinities to match the given CPU mask. */
+void fixup_irqs(const cpumask_t *mask, bool_t verbose);
+void fixup_eoi(void);
int init_irq_data(void);
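
For context, a minimal sketch of how callers might use the reworked interface. The two call sites below (a CPU-offline path and a quiet suspend-style path) and their function names are illustrative assumptions, not part of this patch; only fixup_irqs() and fixup_eoi() themselves come from the change above.

#include <xen/cpumask.h>
#include <asm/irq.h>

/* Illustrative only: neither call site is introduced by this patch. */
static void example_cpu_offline(void)
{
    /* Old behaviour: restrict IRQ affinities to the remaining online
     * CPUs and report any IRQ whose affinity had to be broken. */
    fixup_irqs(&cpu_online_map, 1);

    /* Remove the parting CPU from every guest IRQ's cpu_eoi_map. */
    fixup_eoi();
}

static void example_quiet_suspend(void)
{
    /* Hypothetical quiet use: funnel all IRQs onto CPU 0 without
     * printing per-IRQ messages. */
    fixup_irqs(cpumask_of(0), 0);
}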