ia64/xen-unstable

changeset 18542:31f09a5e24cf

x86: Properly synchronise updates to pirq-to-vector mapping.

Per-domain irq mappings are now protected by d->evtchn_lock and by the
per-vector irq_desc lock.
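
For reference, the writer-side protocol the patch settles on (as used by
map_domain_pirq()/unmap_domain_pirq() under physdev_map_pirq()'s locking)
can be sketched as follows. This is an illustrative outline only, not code
from the changeset, and the helper name set_pirq_vector_mapping() is
hypothetical:

    /* Illustrative sketch (not part of the changeset): updates to the
     * per-domain pirq<->vector arrays are made while holding both
     * d->evtchn_lock and the irq_desc lock of the affected vector, so
     * readers can rely on either lock alone, or on the lock-and-revalidate
     * loop added below as domain_spin_lock_irq_desc(). */
    static void set_pirq_vector_mapping(struct domain *d, int pirq, int vector)
    {
        irq_desc_t *desc = &irq_desc[vector];
        unsigned long flags;

        spin_lock(&d->evtchn_lock);            /* taken by the physdev caller */
        spin_lock_irqsave(&desc->lock, flags); /* per-vector descriptor lock */

        d->arch.pirq_vector[pirq]   = vector;
        d->arch.vector_pirq[vector] = pirq;

        spin_unlock_irqrestore(&desc->lock, flags);
        spin_unlock(&d->evtchn_lock);
    }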

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Sep 24 12:36:55 2008 +0100 (2008-09-24)
parents 7750906b06b3
children b53b02976633
files xen/arch/ia64/xen/irq.c xen/arch/x86/domain.c xen/arch/x86/io_apic.c xen/arch/x86/irq.c xen/arch/x86/msi.c xen/arch/x86/physdev.c xen/common/event_channel.c xen/include/asm-x86/domain.h xen/include/asm-x86/irq.h xen/include/asm-x86/msi.h xen/include/xen/irq.h
line diff
     1.1 --- a/xen/arch/ia64/xen/irq.c	Wed Sep 24 10:23:51 2008 +0100
     1.2 +++ b/xen/arch/ia64/xen/irq.c	Wed Sep 24 12:36:55 2008 +0100
     1.3 @@ -459,20 +459,24 @@ int pirq_guest_bind(struct vcpu *v, int 
     1.4      return rc;
     1.5  }
     1.6  
     1.7 -void pirq_guest_unbind(struct domain *d, int irq)
     1.8 +int pirq_guest_unbind(struct domain *d, int irq)
     1.9  {
    1.10      irq_desc_t         *desc = &irq_desc[irq];
    1.11      irq_guest_action_t *action;
    1.12      unsigned long       flags;
    1.13 -    int                 i;
    1.14 +    int                 i, rc = 0;
    1.15  
    1.16      spin_lock_irqsave(&desc->lock, flags);
    1.17  
    1.18      action = (irq_guest_action_t *)desc->action;
    1.19  
    1.20 -    i = 0;
    1.21 -    while ( action->guest[i] && (action->guest[i] != d) )
    1.22 -        i++;
    1.23 +    for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ )
    1.24 +        continue;
    1.25 +    if ( i == action->nr_guests )
    1.26 +    {
    1.27 +        rc = -EINVAL;
    1.28 +        goto out;
    1.29 +    }
    1.30      memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
    1.31      action->nr_guests--;
    1.32  
    1.33 @@ -492,7 +496,9 @@ void pirq_guest_unbind(struct domain *d,
    1.34          desc->handler->shutdown(irq);
    1.35      }
    1.36  
    1.37 + out:
    1.38      spin_unlock_irqrestore(&desc->lock, flags);    
    1.39 +    return rc;
    1.40  }
    1.41  
    1.42  void
     2.1 --- a/xen/arch/x86/domain.c	Wed Sep 24 10:23:51 2008 +0100
     2.2 +++ b/xen/arch/x86/domain.c	Wed Sep 24 12:36:55 2008 +0100
     2.3 @@ -414,8 +414,6 @@ int arch_domain_create(struct domain *d,
     2.4              goto fail;
     2.5      }
     2.6  
     2.7 -    spin_lock_init(&d->arch.irq_lock);
     2.8 -
     2.9      if ( is_hvm_domain(d) )
    2.10      {
    2.11          if ( (rc = hvm_domain_initialise(d)) != 0 )
     3.1 --- a/xen/arch/x86/io_apic.c	Wed Sep 24 10:23:51 2008 +0100
     3.2 +++ b/xen/arch/x86/io_apic.c	Wed Sep 24 12:36:55 2008 +0100
     3.3 @@ -45,16 +45,6 @@
     3.4  int (*ioapic_renumber_irq)(int ioapic, int irq);
     3.5  atomic_t irq_mis_count;
     3.6  
     3.7 -int domain_irq_to_vector(struct domain *d, int irq)
     3.8 -{
     3.9 -    return d->arch.pirq_vector[irq];
    3.10 -}
    3.11 -
    3.12 -int domain_vector_to_irq(struct domain *d, int vector)
    3.13 -{
    3.14 -    return d->arch.vector_pirq[vector];
    3.15 -}
    3.16 -
    3.17  /* Where if anywhere is the i8259 connect in external int mode */
    3.18  static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
    3.19  
    3.20 @@ -721,7 +711,6 @@ next:
    3.21  
    3.22  static struct hw_interrupt_type ioapic_level_type;
    3.23  static struct hw_interrupt_type ioapic_edge_type;
    3.24 -struct hw_interrupt_type pci_msi_type;
    3.25  
    3.26  #define IOAPIC_AUTO	-1
    3.27  #define IOAPIC_EDGE	0
     4.1 --- a/xen/arch/x86/irq.c	Wed Sep 24 10:23:51 2008 +0100
     4.2 +++ b/xen/arch/x86/irq.c	Wed Sep 24 12:36:55 2008 +0100
     4.3 @@ -277,6 +277,35 @@ static void __do_IRQ_guest(int vector)
     4.4      }
     4.5  }
     4.6  
     4.7 +/*
     4.8 + * Retrieve Xen irq-descriptor corresponding to a domain-specific irq.
     4.9 + * The descriptor is returned locked. This function is safe against changes
    4.10 + * to the per-domain irq-to-vector mapping.
    4.11 + */
    4.12 +static irq_desc_t *domain_spin_lock_irq_desc(
    4.13 +    struct domain *d, int irq, unsigned long *pflags)
    4.14 +{
    4.15 +    unsigned int vector;
    4.16 +    unsigned long flags;
    4.17 +    irq_desc_t *desc;
    4.18 +
    4.19 +    for ( ; ; )
    4.20 +    {
    4.21 +        vector = domain_irq_to_vector(d, irq);
    4.22 +        if ( vector <= 0 )
    4.23 +            return NULL;
    4.24 +        desc = &irq_desc[vector];
    4.25 +        spin_lock_irqsave(&desc->lock, flags);
    4.26 +        if ( vector == domain_irq_to_vector(d, irq) )
    4.27 +            break;
    4.28 +        spin_unlock_irqrestore(&desc->lock, flags);
    4.29 +    }
    4.30 +
    4.31 +    if ( pflags != NULL )
    4.32 +        *pflags = flags;
    4.33 +    return desc;
    4.34 +}
    4.35 +
    4.36  /* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
    4.37  static void flush_ready_eoi(void *unused)
    4.38  {
    4.39 @@ -342,11 +371,13 @@ static void __pirq_guest_eoi(struct doma
    4.40      cpumask_t           cpu_eoi_map;
    4.41      int                 vector;
    4.42  
    4.43 -    vector = domain_irq_to_vector(d, irq);
    4.44 -    desc   = &irq_desc[vector];
    4.45 +    ASSERT(local_irq_is_enabled());
    4.46 +    desc = domain_spin_lock_irq_desc(d, irq, NULL);
    4.47 +    if ( desc == NULL )
    4.48 +        return;
    4.49 +
    4.50      action = (irq_guest_action_t *)desc->action;
    4.51 -
    4.52 -    spin_lock_irq(&desc->lock);
    4.53 +    vector = desc - irq_desc;
    4.54  
    4.55      ASSERT(!test_bit(irq, d->pirq_mask) ||
    4.56             (action->ack_type != ACKTYPE_NONE));
    4.57 @@ -418,7 +449,7 @@ int pirq_acktype(struct domain *d, int i
    4.58      unsigned int vector;
    4.59  
    4.60      vector = domain_irq_to_vector(d, irq);
    4.61 -    if ( vector == 0 )
    4.62 +    if ( vector <= 0 )
    4.63          return ACKTYPE_NONE;
    4.64  
    4.65      desc = &irq_desc[vector];
    4.66 @@ -447,13 +478,6 @@ int pirq_acktype(struct domain *d, int i
    4.67      if ( !strcmp(desc->handler->typename, "XT-PIC") )
    4.68          return ACKTYPE_UNMASK;
    4.69  
    4.70 -    if ( strstr(desc->handler->typename, "MPIC") )
    4.71 -    {
    4.72 -        if ( desc->status & IRQ_LEVEL )
    4.73 -            return (desc->status & IRQ_PER_CPU) ? ACKTYPE_EOI : ACKTYPE_UNMASK;
    4.74 -        return ACKTYPE_NONE; /* edge-triggered => no final EOI */
    4.75 -    }
    4.76 -
    4.77      printk("Unknown PIC type '%s' for IRQ %d\n", desc->handler->typename, irq);
    4.78      BUG();
    4.79  
    4.80 @@ -462,21 +486,18 @@ int pirq_acktype(struct domain *d, int i
    4.81  
    4.82  int pirq_shared(struct domain *d, int irq)
    4.83  {
    4.84 -    unsigned int        vector;
    4.85      irq_desc_t         *desc;
    4.86      irq_guest_action_t *action;
    4.87      unsigned long       flags;
    4.88      int                 shared;
    4.89  
    4.90 -    vector = domain_irq_to_vector(d, irq);
    4.91 -    if ( vector == 0 )
    4.92 +    desc = domain_spin_lock_irq_desc(d, irq, &flags);
    4.93 +    if ( desc == NULL )
    4.94          return 0;
    4.95  
    4.96 -    desc = &irq_desc[vector];
    4.97 -
    4.98 -    spin_lock_irqsave(&desc->lock, flags);
    4.99      action = (irq_guest_action_t *)desc->action;
   4.100      shared = ((desc->status & IRQ_GUEST) && (action->nr_guests > 1));
   4.101 +
   4.102      spin_unlock_irqrestore(&desc->lock, flags);
   4.103  
   4.104      return shared;
   4.105 @@ -491,16 +512,15 @@ int pirq_guest_bind(struct vcpu *v, int 
   4.106      int                 rc = 0;
   4.107      cpumask_t           cpumask = CPU_MASK_NONE;
   4.108  
   4.109 +    WARN_ON(!spin_is_locked(&v->domain->evtchn_lock));
   4.110 +
   4.111   retry:
   4.112 -    vector = domain_irq_to_vector(v->domain, irq);
   4.113 -    if ( vector == 0 )
   4.114 +    desc = domain_spin_lock_irq_desc(v->domain, irq, &flags);
   4.115 +    if ( desc == NULL )
   4.116          return -EINVAL;
   4.117  
   4.118 -    desc = &irq_desc[vector];
   4.119 -
   4.120 -    spin_lock_irqsave(&desc->lock, flags);
   4.121 -
   4.122      action = (irq_guest_action_t *)desc->action;
   4.123 +    vector = desc - irq_desc;
   4.124  
   4.125      if ( !(desc->status & IRQ_GUEST) )
   4.126      {
   4.127 @@ -575,26 +595,39 @@ int pirq_guest_bind(struct vcpu *v, int 
   4.128      return rc;
   4.129  }
   4.130  
   4.131 -void pirq_guest_unbind(struct domain *d, int irq)
   4.132 +int pirq_guest_unbind(struct domain *d, int irq)
   4.133  {
   4.134 -    unsigned int        vector;
   4.135 +    int                 vector;
   4.136      irq_desc_t         *desc;
   4.137      irq_guest_action_t *action;
   4.138      cpumask_t           cpu_eoi_map;
   4.139      unsigned long       flags;
   4.140 -    int                 i;
   4.141 +    int                 i, rc = 0;
   4.142  
   4.143 -    vector = domain_irq_to_vector(d, irq);
   4.144 -    desc = &irq_desc[vector];
   4.145 -    BUG_ON(vector == 0);
   4.146 +    WARN_ON(!spin_is_locked(&d->evtchn_lock));
   4.147  
   4.148 -    spin_lock_irqsave(&desc->lock, flags);
   4.149 +    desc = domain_spin_lock_irq_desc(d, irq, &flags);
   4.150 +    if ( unlikely(desc == NULL) )
   4.151 +    {
   4.152 +        if ( (vector = -domain_irq_to_vector(d, irq)) == 0 )
   4.153 +            return -EINVAL;
   4.154 +        BUG_ON(vector <= 0);
   4.155 +        desc = &irq_desc[vector];
   4.156 +        spin_lock_irqsave(&desc->lock, flags);
   4.157 +        d->arch.pirq_vector[irq] = d->arch.vector_pirq[vector] = 0;
   4.158 +        goto out;
   4.159 +    }
   4.160  
   4.161      action = (irq_guest_action_t *)desc->action;
   4.162 +    vector = desc - irq_desc;
   4.163  
   4.164 -    i = 0;
   4.165 -    while ( action->guest[i] && (action->guest[i] != d) )
   4.166 -        i++;
   4.167 +    for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ )
   4.168 +        continue;
   4.169 +    if ( i == action->nr_guests )
   4.170 +    {
   4.171 +        rc = -EINVAL;
   4.172 +        goto out;
   4.173 +    }
   4.174      memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
   4.175      action->nr_guests--;
   4.176  
   4.177 @@ -661,7 +694,8 @@ void pirq_guest_unbind(struct domain *d,
   4.178      desc->handler->shutdown(vector);
   4.179  
   4.180   out:
   4.181 -    spin_unlock_irqrestore(&desc->lock, flags);    
   4.182 +    spin_unlock_irqrestore(&desc->lock, flags);
   4.183 +    return rc;
   4.184  }
   4.185  
   4.186  extern void dump_ioapic_irq_info(void);
     5.1 --- a/xen/arch/x86/msi.c	Wed Sep 24 10:23:51 2008 +0100
     5.2 +++ b/xen/arch/x86/msi.c	Wed Sep 24 12:36:55 2008 +0100
     5.3 @@ -727,7 +727,6 @@ void pci_disable_msi(int vector)
     5.4          __pci_disable_msix(vector);
     5.5  }
     5.6  
     5.7 -extern struct hw_interrupt_type pci_msi_type;
     5.8  static void msi_free_vectors(struct pci_dev* dev)
     5.9  {
    5.10      struct msi_desc *entry, *tmp;
     6.1 --- a/xen/arch/x86/physdev.c	Wed Sep 24 10:23:51 2008 +0100
     6.2 +++ b/xen/arch/x86/physdev.c	Wed Sep 24 12:36:55 2008 +0100
     6.3 @@ -26,17 +26,11 @@ int
     6.4  ioapic_guest_write(
     6.5      unsigned long physbase, unsigned int reg, u32 pval);
     6.6  
     6.7 -
     6.8 -extern struct hw_interrupt_type pci_msi_type;
     6.9 -
    6.10  static int get_free_pirq(struct domain *d, int type, int index)
    6.11  {
    6.12      int i;
    6.13  
    6.14 -    if ( d == NULL )
    6.15 -        return -EINVAL;
    6.16 -
    6.17 -    ASSERT(spin_is_locked(&d->arch.irq_lock));
    6.18 +    ASSERT(spin_is_locked(&d->evtchn_lock));
    6.19  
    6.20      if ( type == MAP_PIRQ_TYPE_GSI )
    6.21      {
    6.22 @@ -64,11 +58,10 @@ static int map_domain_pirq(struct domain
    6.23      int ret = 0;
    6.24      int old_vector, old_pirq;
    6.25      struct msi_info msi;
    6.26 +    irq_desc_t *desc;
    6.27 +    unsigned long flags;
    6.28  
    6.29 -    if ( d == NULL )
    6.30 -        return -EINVAL;
    6.31 -
    6.32 -    ASSERT(spin_is_locked(&d->arch.irq_lock));
    6.33 +    ASSERT(spin_is_locked(&d->evtchn_lock));
    6.34  
    6.35      if ( !IS_PRIV(current->domain) )
    6.36          return -EPERM;
    6.37 @@ -88,8 +81,7 @@ static int map_domain_pirq(struct domain
    6.38      {
    6.39          dprintk(XENLOG_G_ERR, "dom%d: pirq %d or vector %d already mapped\n",
    6.40                  d->domain_id, pirq, vector);
    6.41 -        ret = -EINVAL;
    6.42 -        goto done;
    6.43 +        return -EINVAL;
    6.44      }
    6.45  
    6.46      ret = irq_permit_access(d, pirq);
    6.47 @@ -97,17 +89,14 @@ static int map_domain_pirq(struct domain
    6.48      {
    6.49          dprintk(XENLOG_G_ERR, "dom%d: could not permit access to irq %d\n",
    6.50                  d->domain_id, pirq);
    6.51 -        goto done;
    6.52 +        return ret;
    6.53      }
    6.54  
    6.55 +    desc = &irq_desc[vector];
    6.56 +    spin_lock_irqsave(&desc->lock, flags);
    6.57 +
    6.58      if ( map && MAP_PIRQ_TYPE_MSI == map->type )
    6.59      {
    6.60 -        irq_desc_t         *desc;
    6.61 -        unsigned long flags;
    6.62 -
    6.63 -        desc = &irq_desc[vector];
    6.64 -
    6.65 -        spin_lock_irqsave(&desc->lock, flags);
    6.66          if ( desc->handler != &no_irq_type )
    6.67              dprintk(XENLOG_G_ERR, "dom%d: vector %d in use\n",
    6.68                      d->domain_id, vector);
    6.69 @@ -120,8 +109,6 @@ static int map_domain_pirq(struct domain
    6.70          msi.vector = vector;
    6.71  
    6.72          ret = pci_enable_msi(&msi);
    6.73 -
    6.74 -        spin_unlock_irqrestore(&desc->lock, flags);
    6.75          if ( ret )
    6.76              goto done;
    6.77      }
    6.78 @@ -130,6 +117,7 @@ static int map_domain_pirq(struct domain
    6.79      d->arch.vector_pirq[vector] = pirq;
    6.80  
    6.81  done:
    6.82 +    spin_unlock_irqrestore(&desc->lock, flags);
    6.83      return ret;
    6.84  }
    6.85  
    6.86 @@ -139,18 +127,18 @@ static int unmap_domain_pirq(struct doma
    6.87      unsigned long flags;
    6.88      irq_desc_t *desc;
    6.89      int vector, ret = 0;
    6.90 +    bool_t forced_unbind;
    6.91  
    6.92 -    if ( d == NULL || pirq < 0 || pirq >= NR_PIRQS )
    6.93 +    if ( (pirq < 0) || (pirq >= NR_PIRQS) )
    6.94          return -EINVAL;
    6.95  
    6.96      if ( !IS_PRIV(current->domain) )
    6.97          return -EINVAL;
    6.98  
    6.99 -    ASSERT(spin_is_locked(&d->arch.irq_lock));
   6.100 +    ASSERT(spin_is_locked(&d->evtchn_lock));
   6.101  
   6.102      vector = d->arch.pirq_vector[pirq];
   6.103 -
   6.104 -    if ( !vector )
   6.105 +    if ( vector <= 0 )
   6.106      {
   6.107          dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n",
   6.108                  d->domain_id, pirq);
   6.109 @@ -158,21 +146,35 @@ static int unmap_domain_pirq(struct doma
   6.110          goto done;
   6.111      }
   6.112  
   6.113 +    forced_unbind = (pirq_guest_unbind(d, pirq) == 0);
   6.114 +    if ( forced_unbind )
   6.115 +        dprintk(XENLOG_G_WARNING, "dom%d: forcing unbind of pirq %d\n",
   6.116 +                d->domain_id, pirq);
   6.117 +
   6.118      desc = &irq_desc[vector];
   6.119      spin_lock_irqsave(&desc->lock, flags);
   6.120 +
   6.121 +    BUG_ON(vector != d->arch.pirq_vector[pirq]);
   6.122 +
   6.123      if ( desc->msi_desc )
   6.124          pci_disable_msi(vector);
   6.125  
   6.126      if ( desc->handler == &pci_msi_type )
   6.127 +        desc->handler = &no_irq_type;
   6.128 +
   6.129 +    if ( !forced_unbind )
   6.130      {
   6.131 -        /* MSI is not shared, so should be released already */
   6.132 -        BUG_ON(desc->status & IRQ_GUEST);
   6.133 -        irq_desc[vector].handler = &no_irq_type;
   6.134 +        d->arch.pirq_vector[pirq] = 0;
   6.135 +        d->arch.vector_pirq[vector] = 0;
   6.136      }
   6.137 +    else
   6.138 +    {
   6.139 +        d->arch.pirq_vector[pirq] = -vector;
   6.140 +        d->arch.vector_pirq[vector] = -pirq;
   6.141 +    }
   6.142 +
   6.143      spin_unlock_irqrestore(&desc->lock, flags);
   6.144  
   6.145 -    d->arch.pirq_vector[pirq] = d->arch.vector_pirq[vector] = 0;
   6.146 -
   6.147      ret = irq_deny_access(d, pirq);
   6.148      if ( ret )
   6.149          dprintk(XENLOG_G_ERR, "dom%d: could not deny access to irq %d\n",
   6.150 @@ -186,7 +188,6 @@ static int physdev_map_pirq(struct physd
   6.151  {
   6.152      struct domain *d;
   6.153      int vector, pirq, ret = 0;
   6.154 -    unsigned long flags;
   6.155  
   6.156      if ( !IS_PRIV(current->domain) )
   6.157          return -EPERM;
   6.158 @@ -243,8 +244,8 @@ static int physdev_map_pirq(struct physd
   6.159              goto free_domain;
   6.160      }
   6.161  
   6.162 -    spin_lock_irqsave(&d->arch.irq_lock, flags);
   6.163 -    if ( map->pirq == -1 )
   6.164 +    spin_lock(&d->evtchn_lock);
   6.165 +    if ( map->pirq < 0 )
   6.166      {
   6.167          if ( d->arch.vector_pirq[vector] )
   6.168          {
   6.169 @@ -252,6 +253,11 @@ static int physdev_map_pirq(struct physd
   6.170                      d->domain_id, map->index, map->pirq,
   6.171                      d->arch.vector_pirq[vector]);
   6.172              pirq = d->arch.vector_pirq[vector];
   6.173 +            if ( pirq < 0 )
   6.174 +            {
   6.175 +                ret = -EBUSY;
   6.176 +                goto done;
   6.177 +            }
   6.178          }
   6.179          else
   6.180          {
   6.181 @@ -284,7 +290,7 @@ static int physdev_map_pirq(struct physd
   6.182      if ( !ret )
   6.183          map->pirq = pirq;
   6.184  done:
   6.185 -    spin_unlock_irqrestore(&d->arch.irq_lock, flags);
   6.186 +    spin_unlock(&d->evtchn_lock);
   6.187  free_domain:
   6.188      rcu_unlock_domain(d);
   6.189      return ret;
   6.190 @@ -293,7 +299,6 @@ free_domain:
   6.191  static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
   6.192  {
   6.193      struct domain *d;
   6.194 -    unsigned long flags;
   6.195      int ret;
   6.196  
   6.197      if ( !IS_PRIV(current->domain) )
   6.198 @@ -307,9 +312,9 @@ static int physdev_unmap_pirq(struct phy
   6.199      if ( d == NULL )
   6.200          return -ESRCH;
   6.201  
   6.202 -    spin_lock_irqsave(&d->arch.irq_lock, flags);
   6.203 +    spin_lock(&d->evtchn_lock);
   6.204      ret = unmap_domain_pirq(d, unmap->pirq);
   6.205 -    spin_unlock_irqrestore(&d->arch.irq_lock, flags);
   6.206 +    spin_unlock(&d->evtchn_lock);
   6.207  
   6.208      rcu_unlock_domain(d);
   6.209  
   6.210 @@ -416,7 +421,6 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
   6.211  
   6.212      case PHYSDEVOP_alloc_irq_vector: {
   6.213          struct physdev_irq irq_op;
   6.214 -        unsigned long flags;
   6.215  
   6.216          ret = -EFAULT;
   6.217          if ( copy_from_guest(&irq_op, arg, 1) != 0 )
   6.218 @@ -437,9 +441,9 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
   6.219  
   6.220          irq_op.vector = assign_irq_vector(irq);
   6.221  
   6.222 -        spin_lock_irqsave(&dom0->arch.irq_lock, flags);
   6.223 +        spin_lock(&dom0->evtchn_lock);
   6.224          ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector, NULL);
   6.225 -        spin_unlock_irqrestore(&dom0->arch.irq_lock, flags);
   6.226 +        spin_unlock(&dom0->evtchn_lock);
   6.227  
   6.228          if ( copy_to_guest(arg, &irq_op, 1) != 0 )
   6.229              ret = -EFAULT;
     7.1 --- a/xen/common/event_channel.c	Wed Sep 24 10:23:51 2008 +0100
     7.2 +++ b/xen/common/event_channel.c	Wed Sep 24 12:36:55 2008 +0100
     7.3 @@ -387,7 +387,8 @@ static long __evtchn_close(struct domain
     7.4          break;
     7.5  
     7.6      case ECS_PIRQ:
     7.7 -        pirq_guest_unbind(d1, chn1->u.pirq);
     7.8 +        if ( pirq_guest_unbind(d1, chn1->u.pirq) != 0 )
     7.9 +            BUG();
    7.10          d1->pirq_to_evtchn[chn1->u.pirq] = 0;
    7.11          break;
    7.12  
     8.1 --- a/xen/include/asm-x86/domain.h	Wed Sep 24 10:23:51 2008 +0100
     8.2 +++ b/xen/include/asm-x86/domain.h	Wed Sep 24 12:36:55 2008 +0100
     8.3 @@ -235,7 +235,7 @@ struct arch_domain
     8.4      /* Shadow translated domain: P2M mapping */
     8.5      pagetable_t phys_table;
     8.6  
     8.7 -    spinlock_t irq_lock;
     8.8 +    /* NB. protected by d->evtchn_lock and by irq_desc[vector].lock */
     8.9      int vector_pirq[NR_VECTORS];
    8.10      int pirq_vector[NR_PIRQS];
    8.11  
     9.1 --- a/xen/include/asm-x86/irq.h	Wed Sep 24 10:23:51 2008 +0100
     9.2 +++ b/xen/include/asm-x86/irq.h	Wed Sep 24 12:36:55 2008 +0100
     9.3 @@ -52,6 +52,7 @@ extern atomic_t irq_mis_count;
     9.4  int pirq_acktype(struct domain *d, int irq);
     9.5  int pirq_shared(struct domain *d , int irq);
     9.6  
     9.7 -extern int domain_irq_to_vector(struct domain *d, int irq);
     9.8 -extern int domain_vector_to_irq(struct domain *d, int vector);
     9.9 +#define domain_irq_to_vector(d, irq) ((d)->arch.pirq_vector[(irq)])
    9.10 +#define domain_vector_to_irq(d, vec) ((d)->arch.vector_pirq[(vec)])
    9.11 +
    9.12  #endif /* _ASM_HW_IRQ_H */
    10.1 --- a/xen/include/asm-x86/msi.h	Wed Sep 24 10:23:51 2008 +0100
    10.2 +++ b/xen/include/asm-x86/msi.h	Wed Sep 24 12:36:55 2008 +0100
    10.3 @@ -106,7 +106,7 @@ struct msi_desc {
    10.4   */
    10.5  #define NR_HP_RESERVED_VECTORS 	20
    10.6  
    10.7 -extern int vector_irq[NR_VECTORS];
    10.8 +extern struct hw_interrupt_type pci_msi_type;
    10.9  
   10.10  /*
   10.11   * MSI-X Address Register
    11.1 --- a/xen/include/xen/irq.h	Wed Sep 24 10:23:51 2008 +0100
    11.2 +++ b/xen/include/xen/irq.h	Wed Sep 24 12:36:55 2008 +0100
    11.3 @@ -22,7 +22,6 @@ struct irqaction
    11.4  #define IRQ_PENDING	4	/* IRQ pending - replay on enable */
    11.5  #define IRQ_REPLAY	8	/* IRQ has been replayed but not acked yet */
    11.6  #define IRQ_GUEST       16      /* IRQ is handled by guest OS(es) */
    11.7 -#define IRQ_LEVEL       64      /* IRQ level triggered */
    11.8  #define IRQ_PER_CPU     256     /* IRQ is per CPU */
    11.9  
   11.10  /*
   11.11 @@ -78,7 +77,7 @@ struct vcpu;
   11.12  extern int pirq_guest_eoi(struct domain *d, int irq);
   11.13  extern int pirq_guest_unmask(struct domain *d);
   11.14  extern int pirq_guest_bind(struct vcpu *v, int irq, int will_share);
   11.15 -extern void pirq_guest_unbind(struct domain *d, int irq);
   11.16 +extern int pirq_guest_unbind(struct domain *d, int irq);
   11.17  
   11.18  static inline void set_native_irq_info(int irq, cpumask_t mask)
   11.19  {