direct-io.hg

changeset 11234:0bb18319b8a0

[XEN] Fix pirq_guest_unbind(). Remove unnecessary code
(in fact flush_all_pending_eoi() was broken!).
Based on a patch from Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Aug 23 11:27:06 2006 +0100 (2006-08-23)
parents af704c33a9a4
children 62b7b5f3029f 79afceca9065
files xen/arch/x86/irq.c
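
Context for the fix: in the removed hunk below, guest EOIs sit on a per-CPU
pending_eoi stack (peoi[sp].vector, peoi[sp].ready, pending_eoi_sp()), and
flush_ready_eoi(), by name and usage, retires only entries already marked
ready, in LIFO order. One plausible reading of why flush_all_pending_eoi()
was broken: it cleared action->in_flight and the guests' pirq_mask bits for
every non-ready entry but never set those entries' ready flags, so the
follow-up flush_ready_eoi() pass had nothing new to pop. The fix routes the
flush through set_eoi_ready() instead. Below is a minimal, self-contained
model of that stack discipline, not the Xen source; the pending_eoi fields
come from the diff, everything else (STACK_SIZE, end_vector(), the driver)
is illustrative.

/* Simplified per-CPU pending-EOI stack model (hypothetical). */
#include <assert.h>
#include <stdio.h>

struct pending_eoi {
    int vector;   /* vector awaiting its end-of-interrupt */
    int ready;    /* guest has acked; safe to EOI now     */
};

#define STACK_SIZE 8
static struct pending_eoi peoi[STACK_SIZE];
static int peoi_sp;   /* number of live entries */

/* Stand-in for desc->handler->end(vector). */
static void end_vector(int vector)
{
    printf("EOI vector 0x%x\n", vector);
}

/* Retire ready entries strictly from the top of the stack. */
static void flush_ready_eoi(void)
{
    while ( (peoi_sp > 0) && peoi[peoi_sp - 1].ready )
        end_vector(peoi[--peoi_sp].vector);
}

/* Mark one vector's entry ready wherever it sits, then try to flush. */
static void set_eoi_ready(int vector)
{
    int sp = peoi_sp;

    while ( (--sp >= 0) && (peoi[sp].vector != vector) )
        continue;
    assert(sp >= 0);
    peoi[sp].ready = 1;
    flush_ready_eoi();
}

int main(void)
{
    peoi[peoi_sp++] = (struct pending_eoi){ .vector = 0x30 };
    peoi[peoi_sp++] = (struct pending_eoi){ .vector = 0x38 };

    set_eoi_ready(0x30);  /* buried entry: nothing can unwind yet   */
    set_eoi_ready(0x38);  /* top entry ready: both unwind, in order */
    return 0;
}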
line diff
--- a/xen/arch/x86/irq.c	Tue Aug 22 18:51:02 2006 +0100
+++ b/xen/arch/x86/irq.c	Wed Aug 23 11:27:06 2006 +0100
@@ -264,40 +264,6 @@ static void set_eoi_ready(void *data)
     flush_ready_eoi(NULL);
 }
 
-/*
- * Forcibly flush all pending EOIs on this CPU by emulating end-of-ISR
- * notifications from guests. The caller of this function must ensure that
- * all CPUs execute flush_ready_eoi().
- */
-static void flush_all_pending_eoi(void *unused)
-{
-    irq_desc_t         *desc;
-    irq_guest_action_t *action;
-    struct pending_eoi *peoi = this_cpu(pending_eoi);
-    int                 i, vector, sp;
-
-    ASSERT(!local_irq_is_enabled());
-
-    sp = pending_eoi_sp(peoi);
-    while ( --sp >= 0 )
-    {
-        if ( peoi[sp].ready )
-            continue;
-        vector = peoi[sp].vector;
-        desc = &irq_desc[vector];
-        spin_lock(&desc->lock);
-        action = (irq_guest_action_t *)desc->action;
-        ASSERT(action->ack_type == ACKTYPE_EOI);
-        ASSERT(desc->status & IRQ_GUEST);
-        for ( i = 0; i < action->nr_guests; i++ )
-            clear_bit(vector_to_irq(vector), action->guest[i]->pirq_mask);
-        action->in_flight = 0;
-        spin_unlock(&desc->lock);
-    }
-
-    flush_ready_eoi(NULL);
-}
-
 static void __pirq_guest_eoi(struct domain *d, int irq)
 {
     irq_desc_t         *desc;
@@ -566,6 +532,10 @@ int pirq_guest_unbind(struct domain *d, 
         break;
     }
 
+    /*
+     * The guest cannot re-bind to this IRQ until this function returns. So,
+     * when we have flushed this IRQ from pirq_mask, it should remain flushed.
+     */
     BUG_ON(test_bit(irq, d->pirq_mask));
 
     if ( action->nr_guests != 0 )
@@ -579,17 +549,18 @@ int pirq_guest_unbind(struct domain *d, 
     desc->handler->disable(vector);
 
     /*
-     * We may have a EOI languishing anywhere in one of the per-CPU
-     * EOI stacks. Forcibly flush the stack on every CPU where this might
-     * be the case.
+     * Mark any remaining pending EOIs as ready to flush.
+     * NOTE: We will need to make this a stronger barrier if in future we allow
+     * an interrupt vector to be re-bound to a different PIC. In that case we
+     * would need to flush all ready EOIs before returning as otherwise the
+     * desc->handler could change and we would call the wrong 'end' hook.
      */
     cpu_eoi_map = action->cpu_eoi_map;
     if ( !cpus_empty(cpu_eoi_map) )
     {
         BUG_ON(action->ack_type != ACKTYPE_EOI);
         spin_unlock_irqrestore(&desc->lock, flags);
-        on_selected_cpus(cpu_eoi_map, flush_all_pending_eoi, NULL, 1, 1);
-        on_selected_cpus(cpu_online_map, flush_ready_eoi, NULL, 1, 1);
+        on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 1);
         spin_lock_irqsave(&desc->lock, flags);
     }
 
@@ -672,41 +643,3 @@ static int __init setup_dump_irqs(void)
     return 0;
 }
 __initcall(setup_dump_irqs);
-
-static DEFINE_PER_CPU(struct timer, end_irq_timer);
-
-/*
- * force_intack: Forcibly emit all pending EOIs on each CPU every second.
- * Mainly useful for debugging or poking lazy guests ISRs.
- */
-
-static void end_irq_timeout(void *unused)
-{
-    local_irq_disable();
-    flush_all_pending_eoi(NULL);
-    local_irq_enable();
-
-    on_selected_cpus(cpu_online_map, flush_ready_eoi, NULL, 1, 0);
-
-    set_timer(&this_cpu(end_irq_timer), NOW() + MILLISECS(1000));
-}
-
-static int force_intack;
-boolean_param("force_intack", force_intack);
-
-static int __init setup_irq_timeout(void)
-{
-    unsigned int cpu;
-
-    if ( !force_intack )
-        return 0;
-
-    for_each_online_cpu ( cpu )
-    {
-        init_timer(&per_cpu(end_irq_timer, cpu), end_irq_timeout, NULL, cpu);
-        set_timer(&per_cpu(end_irq_timer, cpu), NOW() + MILLISECS(1000));
-    }
-
-    return 0;
-}
-__initcall(setup_irq_timeout);
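
Two follow-on notes on the pirq_guest_unbind() hunk above. First, desc->lock
is dropped around the synchronous on_selected_cpus(..., 1, 1) call and
retaken afterwards; the unlock/lock bracket suggests the remote handler,
set_eoi_ready(), takes desc->lock itself, so holding it across a waiting
cross-CPU call would deadlock. Second, the force_intack timer machinery is
deleted here because flush_all_pending_eoi() was its only flushing primitive.
A hypothetical pthread model of the drop-call-retake pattern follows;
on_selected_cpus() with wait=1 is modelled as thread create plus join, and
all names are illustrative, not Xen code.

/* Hypothetical model: why the caller drops the lock around a waiting
 * cross-CPU call whose handler takes the same lock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Plays the role of set_eoi_ready() running on a remote CPU. */
static void *remote_set_eoi_ready(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&desc_lock);
    puts("remote: marked pending EOI ready");
    pthread_mutex_unlock(&desc_lock);
    return NULL;
}

int main(void)
{
    pthread_t cpu;

    pthread_mutex_lock(&desc_lock);
    /* ... unbind work done under the lock ... */
    pthread_mutex_unlock(&desc_lock);   /* drop before waiting */

    pthread_create(&cpu, NULL, remote_set_eoi_ready, NULL);
    pthread_join(&cpu, NULL);           /* on_selected_cpus(..., wait=1) */

    pthread_mutex_lock(&desc_lock);     /* retake, as the hunk does */
    /* ... finish teardown ... */
    pthread_mutex_unlock(&desc_lock);
    return 0;
}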