ia64/xen-unstable

changeset 18602:0033c944318f

Rename evtchn_lock to event_lock, since it protects more than just
event-channel state now.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Oct 09 11:17:51 2008 +0100 (2008-10-09)
parents a11ad61bdb5b
children 69f670979660
files	xen/arch/x86/hvm/svm/intr.c
	xen/arch/x86/hvm/vmx/intr.c
	xen/arch/x86/irq.c
	xen/arch/x86/physdev.c
	xen/common/event_channel.c
	xen/drivers/passthrough/io.c
	xen/drivers/passthrough/pci.c
	xen/drivers/passthrough/vtd/x86/vtd.c
	xen/include/asm-x86/domain.h
	xen/include/asm-x86/hvm/irq.h
	xen/include/xen/sched.h
	xen/xsm/acm/acm_simple_type_enforcement_hooks.c
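
The rename is mechanical: the spinlock field in struct domain changes name, and
every locking site follows. The point of the new name is that the same
per-domain lock now serializes PIRQ bookkeeping (pirq_vector[],
pirq_to_evtchn[], the passthrough dpci state) as well as the event-channel
buckets. A minimal sketch of the convention after this changeset — the
function below is illustrative, not part of the patch:

    /* Illustrative sketch, not part of this changeset: any code that
     * touches a domain's event-channel or PIRQ state serializes on the
     * single per-domain lock, now named event_lock. */
    static void touch_event_or_pirq_state(struct domain *d)
    {
        spin_lock(&d->event_lock);   /* formerly d->evtchn_lock */
        /* ... read or update d->evtchn[], d->pirq_to_evtchn[],
         *     d->arch.pirq_vector[], dpci mappings, ... */
        spin_unlock(&d->event_lock);
    }
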
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/intr.c	Thu Oct 09 11:14:52 2008 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/intr.c	Thu Oct 09 11:17:51 2008 +0100
     1.3 @@ -124,11 +124,11 @@ static void svm_dirq_assist(struct vcpu 
     1.4          if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
     1.5              continue;
     1.6  
     1.7 -        spin_lock(&d->evtchn_lock);
     1.8 +        spin_lock(&d->event_lock);
     1.9          if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
    1.10          {
    1.11              hvm_pci_msi_assert(d, irq);
    1.12 -            spin_unlock(&d->evtchn_lock);
    1.13 +            spin_unlock(&d->event_lock);
    1.14              continue;
    1.15          }
    1.16  
    1.17 @@ -151,7 +151,7 @@ static void svm_dirq_assist(struct vcpu 
    1.18           */
    1.19          set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
    1.20                    NOW() + PT_IRQ_TIME_OUT);
    1.21 -        spin_unlock(&d->evtchn_lock);
    1.22 +        spin_unlock(&d->event_lock);
    1.23      }
    1.24  }
    1.25  
     2.1 --- a/xen/arch/x86/hvm/vmx/intr.c	Thu Oct 09 11:14:52 2008 +0100
     2.2 +++ b/xen/arch/x86/hvm/vmx/intr.c	Thu Oct 09 11:17:51 2008 +0100
     2.3 @@ -127,11 +127,11 @@ static void vmx_dirq_assist(struct vcpu 
     2.4          if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
     2.5              continue;
     2.6  
     2.7 -        spin_lock(&d->evtchn_lock);
     2.8 +        spin_lock(&d->event_lock);
     2.9          if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
    2.10          {
    2.11              hvm_pci_msi_assert(d, irq);
    2.12 -            spin_unlock(&d->evtchn_lock);
    2.13 +            spin_unlock(&d->event_lock);
    2.14              continue;
    2.15          }
    2.16  
    2.17 @@ -154,7 +154,7 @@ static void vmx_dirq_assist(struct vcpu 
    2.18           */
    2.19          set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
    2.20                    NOW() + PT_IRQ_TIME_OUT);
    2.21 -        spin_unlock(&d->evtchn_lock);
    2.22 +        spin_unlock(&d->event_lock);
    2.23      }
    2.24  }
    2.25  
     3.1 --- a/xen/arch/x86/irq.c	Thu Oct 09 11:14:52 2008 +0100
     3.2 +++ b/xen/arch/x86/irq.c	Thu Oct 09 11:17:51 2008 +0100
     3.3 @@ -514,7 +514,7 @@ int pirq_guest_bind(struct vcpu *v, int 
     3.4      int                 rc = 0;
     3.5      cpumask_t           cpumask = CPU_MASK_NONE;
     3.6  
     3.7 -    WARN_ON(!spin_is_locked(&v->domain->evtchn_lock));
     3.8 +    WARN_ON(!spin_is_locked(&v->domain->event_lock));
     3.9      BUG_ON(!local_irq_is_enabled());
    3.10  
    3.11   retry:
    3.12 @@ -684,7 +684,7 @@ void pirq_guest_unbind(struct domain *d,
    3.13      irq_desc_t *desc;
    3.14      int vector;
    3.15  
    3.16 -    WARN_ON(!spin_is_locked(&d->evtchn_lock));
    3.17 +    WARN_ON(!spin_is_locked(&d->event_lock));
    3.18  
    3.19      BUG_ON(!local_irq_is_enabled());
    3.20      desc = domain_spin_lock_irq_desc(d, irq, NULL);
    3.21 @@ -711,7 +711,7 @@ int pirq_guest_force_unbind(struct domai
    3.22      irq_guest_action_t *action;
    3.23      int i, bound = 0;
    3.24  
    3.25 -    WARN_ON(!spin_is_locked(&d->evtchn_lock));
    3.26 +    WARN_ON(!spin_is_locked(&d->event_lock));
    3.27  
    3.28      BUG_ON(!local_irq_is_enabled());
    3.29      desc = domain_spin_lock_irq_desc(d, irq, NULL);
    3.30 @@ -738,7 +738,7 @@ int get_free_pirq(struct domain *d, int 
    3.31  {
    3.32      int i;
    3.33  
    3.34 -    ASSERT(spin_is_locked(&d->evtchn_lock));
    3.35 +    ASSERT(spin_is_locked(&d->event_lock));
    3.36  
    3.37      if ( type == MAP_PIRQ_TYPE_GSI )
    3.38      {
    3.39 @@ -768,7 +768,7 @@ int map_domain_pirq(
    3.40      irq_desc_t *desc;
    3.41      unsigned long flags;
    3.42  
    3.43 -    ASSERT(spin_is_locked(&d->evtchn_lock));
    3.44 +    ASSERT(spin_is_locked(&d->event_lock));
    3.45  
    3.46      if ( !IS_PRIV(current->domain) )
    3.47          return -EPERM;
    3.48 @@ -836,7 +836,7 @@ int unmap_domain_pirq(struct domain *d, 
    3.49      if ( !IS_PRIV(current->domain) )
    3.50          return -EINVAL;
    3.51  
    3.52 -    ASSERT(spin_is_locked(&d->evtchn_lock));
    3.53 +    ASSERT(spin_is_locked(&d->event_lock));
    3.54  
    3.55      vector = d->arch.pirq_vector[pirq];
    3.56      if ( vector <= 0 )
    3.57 @@ -892,13 +892,13 @@ void free_domain_pirqs(struct domain *d)
    3.58  {
    3.59      int i;
    3.60  
    3.61 -    spin_lock(&d->evtchn_lock);
    3.62 +    spin_lock(&d->event_lock);
    3.63  
    3.64      for ( i = 0; i < NR_PIRQS; i++ )
    3.65          if ( d->arch.pirq_vector[i] > 0 )
    3.66              unmap_domain_pirq(d, i);
    3.67  
    3.68 -    spin_unlock(&d->evtchn_lock);
    3.69 +    spin_unlock(&d->event_lock);
    3.70  }
    3.71  
    3.72  extern void dump_ioapic_irq_info(void);
     4.1 --- a/xen/arch/x86/physdev.c	Thu Oct 09 11:14:52 2008 +0100
     4.2 +++ b/xen/arch/x86/physdev.c	Thu Oct 09 11:17:51 2008 +0100
     4.3 @@ -100,7 +100,7 @@ static int physdev_map_pirq(struct physd
     4.4      }
     4.5  
     4.6      /* Verify or get pirq. */
     4.7 -    spin_lock(&d->evtchn_lock);
     4.8 +    spin_lock(&d->event_lock);
     4.9      if ( map->pirq < 0 )
    4.10      {
    4.11          if ( d->arch.vector_pirq[vector] )
    4.12 @@ -145,7 +145,7 @@ static int physdev_map_pirq(struct physd
    4.13          map->pirq = pirq;
    4.14  
    4.15  done:
    4.16 -    spin_unlock(&d->evtchn_lock);
    4.17 +    spin_unlock(&d->event_lock);
    4.18      if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
    4.19          free_irq_vector(vector);
    4.20  free_domain:
    4.21 @@ -169,9 +169,9 @@ static int physdev_unmap_pirq(struct phy
    4.22      if ( d == NULL )
    4.23          return -ESRCH;
    4.24  
    4.25 -    spin_lock(&d->evtchn_lock);
    4.26 +    spin_lock(&d->event_lock);
    4.27      ret = unmap_domain_pirq(d, unmap->pirq);
    4.28 -    spin_unlock(&d->evtchn_lock);
    4.29 +    spin_unlock(&d->event_lock);
    4.30  
    4.31      rcu_unlock_domain(d);
    4.32  
    4.33 @@ -298,10 +298,10 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
    4.34  
    4.35          irq_op.vector = assign_irq_vector(irq);
    4.36  
    4.37 -        spin_lock(&dom0->evtchn_lock);
    4.38 +        spin_lock(&dom0->event_lock);
    4.39          ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
    4.40                                MAP_PIRQ_TYPE_GSI, NULL);
    4.41 -        spin_unlock(&dom0->evtchn_lock);
    4.42 +        spin_unlock(&dom0->event_lock);
    4.43  
    4.44          if ( copy_to_guest(arg, &irq_op, 1) != 0 )
    4.45              ret = -EFAULT;
     5.1 --- a/xen/common/event_channel.c	Thu Oct 09 11:14:52 2008 +0100
     5.2 +++ b/xen/common/event_channel.c	Thu Oct 09 11:17:51 2008 +0100
     5.3 @@ -133,7 +133,7 @@ static long evtchn_alloc_unbound(evtchn_
     5.4      if ( rc )
     5.5          return rc;
     5.6  
     5.7 -    spin_lock(&d->evtchn_lock);
     5.8 +    spin_lock(&d->event_lock);
     5.9  
    5.10      if ( (port = get_free_port(d)) < 0 )
    5.11          ERROR_EXIT_DOM(port, d);
    5.12 @@ -150,7 +150,7 @@ static long evtchn_alloc_unbound(evtchn_
    5.13      alloc->port = port;
    5.14  
    5.15   out:
    5.16 -    spin_unlock(&d->evtchn_lock);
    5.17 +    spin_unlock(&d->event_lock);
    5.18      rcu_unlock_domain(d);
    5.19  
    5.20      return rc;
    5.21 @@ -174,14 +174,14 @@ static long evtchn_bind_interdomain(evtc
    5.22      /* Avoid deadlock by first acquiring lock of domain with smaller id. */
    5.23      if ( ld < rd )
    5.24      {
    5.25 -        spin_lock(&ld->evtchn_lock);
    5.26 -        spin_lock(&rd->evtchn_lock);
    5.27 +        spin_lock(&ld->event_lock);
    5.28 +        spin_lock(&rd->event_lock);
    5.29      }
    5.30      else
    5.31      {
    5.32          if ( ld != rd )
    5.33 -            spin_lock(&rd->evtchn_lock);
    5.34 -        spin_lock(&ld->evtchn_lock);
    5.35 +            spin_lock(&rd->event_lock);
    5.36 +        spin_lock(&ld->event_lock);
    5.37      }
    5.38  
    5.39      if ( (lport = get_free_port(ld)) < 0 )
    5.40 @@ -216,9 +216,9 @@ static long evtchn_bind_interdomain(evtc
    5.41      bind->local_port = lport;
    5.42  
    5.43   out:
    5.44 -    spin_unlock(&ld->evtchn_lock);
    5.45 +    spin_unlock(&ld->event_lock);
    5.46      if ( ld != rd )
    5.47 -        spin_unlock(&rd->evtchn_lock);
    5.48 +        spin_unlock(&rd->event_lock);
    5.49      
    5.50      rcu_unlock_domain(rd);
    5.51  
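
The interdomain paths above take two domains' locks, so they impose a fixed
acquisition order to avoid an ABBA deadlock; __evtchn_close (below) follows
the same discipline, dropping and retrying when it finds itself holding the
locks in the wrong order. A sketch of the ordering rule, extracted from the
hunk above into a hypothetical helper:

    /* Hypothetical helper -- the ordering discipline used above.
     * Always acquire the lock of the lower-addressed domain first;
     * take a single lock when both pointers name the same domain. */
    static void double_event_lock(struct domain *ld, struct domain *rd)
    {
        if ( ld < rd )
        {
            spin_lock(&ld->event_lock);
            spin_lock(&rd->event_lock);
        }
        else
        {
            if ( ld != rd )
                spin_lock(&rd->event_lock);
            spin_lock(&ld->event_lock);
        }
    }
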
    5.52 @@ -244,7 +244,7 @@ static long evtchn_bind_virq(evtchn_bind
    5.53           ((v = d->vcpu[vcpu]) == NULL) )
    5.54          return -ENOENT;
    5.55  
    5.56 -    spin_lock(&d->evtchn_lock);
    5.57 +    spin_lock(&d->event_lock);
    5.58  
    5.59      if ( v->virq_to_evtchn[virq] != 0 )
    5.60          ERROR_EXIT(-EEXIST);
    5.61 @@ -260,7 +260,7 @@ static long evtchn_bind_virq(evtchn_bind
    5.62      v->virq_to_evtchn[virq] = bind->port = port;
    5.63  
    5.64   out:
    5.65 -    spin_unlock(&d->evtchn_lock);
    5.66 +    spin_unlock(&d->event_lock);
    5.67  
    5.68      return rc;
    5.69  }
    5.70 @@ -277,7 +277,7 @@ static long evtchn_bind_ipi(evtchn_bind_
    5.71           (d->vcpu[vcpu] == NULL) )
    5.72          return -ENOENT;
    5.73  
    5.74 -    spin_lock(&d->evtchn_lock);
    5.75 +    spin_lock(&d->event_lock);
    5.76  
    5.77      if ( (port = get_free_port(d)) < 0 )
    5.78          ERROR_EXIT(port);
    5.79 @@ -289,7 +289,7 @@ static long evtchn_bind_ipi(evtchn_bind_
    5.80      bind->port = port;
    5.81  
    5.82   out:
    5.83 -    spin_unlock(&d->evtchn_lock);
    5.84 +    spin_unlock(&d->event_lock);
    5.85  
    5.86      return rc;
    5.87  }
    5.88 @@ -308,7 +308,7 @@ static long evtchn_bind_pirq(evtchn_bind
    5.89      if ( !irq_access_permitted(d, pirq) )
    5.90          return -EPERM;
    5.91  
    5.92 -    spin_lock(&d->evtchn_lock);
    5.93 +    spin_lock(&d->event_lock);
    5.94  
    5.95      if ( d->pirq_to_evtchn[pirq] != 0 )
    5.96          ERROR_EXIT(-EEXIST);
    5.97 @@ -333,7 +333,7 @@ static long evtchn_bind_pirq(evtchn_bind
    5.98      bind->port = port;
    5.99  
   5.100   out:
   5.101 -    spin_unlock(&d->evtchn_lock);
   5.102 +    spin_unlock(&d->event_lock);
   5.103  
   5.104      return rc;
   5.105  }
   5.106 @@ -348,7 +348,7 @@ static long __evtchn_close(struct domain
   5.107      long           rc = 0;
   5.108  
   5.109   again:
   5.110 -    spin_lock(&d1->evtchn_lock);
   5.111 +    spin_lock(&d1->event_lock);
   5.112  
   5.113      if ( !port_is_valid(d1, port1) )
   5.114      {
   5.115 @@ -404,12 +404,12 @@ static long __evtchn_close(struct domain
   5.116  
   5.117              if ( d1 < d2 )
   5.118              {
   5.119 -                spin_lock(&d2->evtchn_lock);
   5.120 +                spin_lock(&d2->event_lock);
   5.121              }
   5.122              else if ( d1 != d2 )
   5.123              {
   5.124 -                spin_unlock(&d1->evtchn_lock);
   5.125 -                spin_lock(&d2->evtchn_lock);
   5.126 +                spin_unlock(&d1->event_lock);
   5.127 +                spin_lock(&d2->event_lock);
   5.128                  goto again;
   5.129              }
   5.130          }
   5.131 @@ -454,11 +454,11 @@ static long __evtchn_close(struct domain
   5.132      if ( d2 != NULL )
   5.133      {
   5.134          if ( d1 != d2 )
   5.135 -            spin_unlock(&d2->evtchn_lock);
   5.136 +            spin_unlock(&d2->event_lock);
   5.137          put_domain(d2);
   5.138      }
   5.139  
   5.140 -    spin_unlock(&d1->evtchn_lock);
   5.141 +    spin_unlock(&d1->event_lock);
   5.142  
   5.143      return rc;
   5.144  }
   5.145 @@ -476,11 +476,11 @@ int evtchn_send(struct domain *d, unsign
   5.146      struct vcpu   *rvcpu;
   5.147      int            rport, ret = 0;
   5.148  
   5.149 -    spin_lock(&ld->evtchn_lock);
   5.150 +    spin_lock(&ld->event_lock);
   5.151  
   5.152      if ( unlikely(!port_is_valid(ld, lport)) )
   5.153      {
   5.154 -        spin_unlock(&ld->evtchn_lock);
   5.155 +        spin_unlock(&ld->event_lock);
   5.156          return -EINVAL;
   5.157      }
   5.158  
   5.159 @@ -489,7 +489,7 @@ int evtchn_send(struct domain *d, unsign
   5.160      /* Guest cannot send via a Xen-attached event channel. */
   5.161      if ( unlikely(lchn->consumer_is_xen) )
   5.162      {
   5.163 -        spin_unlock(&ld->evtchn_lock);
   5.164 +        spin_unlock(&ld->event_lock);
   5.165          return -EINVAL;
   5.166      }
   5.167  
   5.168 @@ -527,7 +527,7 @@ int evtchn_send(struct domain *d, unsign
   5.169      }
   5.170  
   5.171  out:
   5.172 -    spin_unlock(&ld->evtchn_lock);
   5.173 +    spin_unlock(&ld->event_lock);
   5.174  
   5.175      return ret;
   5.176  }
   5.177 @@ -656,7 +656,7 @@ static long evtchn_status(evtchn_status_
   5.178      if ( rc )
   5.179          return rc;
   5.180  
   5.181 -    spin_lock(&d->evtchn_lock);
   5.182 +    spin_lock(&d->event_lock);
   5.183  
   5.184      if ( !port_is_valid(d, port) )
   5.185      {
   5.186 @@ -704,7 +704,7 @@ static long evtchn_status(evtchn_status_
   5.187      status->vcpu = chn->notify_vcpu_id;
   5.188  
   5.189   out:
   5.190 -    spin_unlock(&d->evtchn_lock);
   5.191 +    spin_unlock(&d->event_lock);
   5.192      rcu_unlock_domain(d);
   5.193  
   5.194      return rc;
   5.195 @@ -720,7 +720,7 @@ long evtchn_bind_vcpu(unsigned int port,
   5.196      if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
   5.197          return -ENOENT;
   5.198  
   5.199 -    spin_lock(&d->evtchn_lock);
   5.200 +    spin_lock(&d->event_lock);
   5.201  
   5.202      if ( !port_is_valid(d, port) )
   5.203      {
   5.204 @@ -756,7 +756,7 @@ long evtchn_bind_vcpu(unsigned int port,
   5.205      }
   5.206  
   5.207   out:
   5.208 -    spin_unlock(&d->evtchn_lock);
   5.209 +    spin_unlock(&d->event_lock);
   5.210  
   5.211      return rc;
   5.212  }
   5.213 @@ -768,11 +768,11 @@ static long evtchn_unmask(evtchn_unmask_
   5.214      int            port = unmask->port;
   5.215      struct vcpu   *v;
   5.216  
   5.217 -    spin_lock(&d->evtchn_lock);
   5.218 +    spin_lock(&d->event_lock);
   5.219  
   5.220      if ( unlikely(!port_is_valid(d, port)) )
   5.221      {
   5.222 -        spin_unlock(&d->evtchn_lock);
   5.223 +        spin_unlock(&d->event_lock);
   5.224          return -EINVAL;
   5.225      }
   5.226  
   5.227 @@ -790,7 +790,7 @@ static long evtchn_unmask(evtchn_unmask_
   5.228          vcpu_mark_events_pending(v);
   5.229      }
   5.230  
   5.231 -    spin_unlock(&d->evtchn_lock);
   5.232 +    spin_unlock(&d->event_lock);
   5.233  
   5.234      return 0;
   5.235  }
   5.236 @@ -944,7 +944,7 @@ int alloc_unbound_xen_event_channel(
   5.237      struct domain *d = local_vcpu->domain;
   5.238      int            port;
   5.239  
   5.240 -    spin_lock(&d->evtchn_lock);
   5.241 +    spin_lock(&d->event_lock);
   5.242  
   5.243      if ( (port = get_free_port(d)) < 0 )
   5.244          goto out;
   5.245 @@ -956,7 +956,7 @@ int alloc_unbound_xen_event_channel(
   5.246      chn->u.unbound.remote_domid = remote_domid;
   5.247  
   5.248   out:
   5.249 -    spin_unlock(&d->evtchn_lock);
   5.250 +    spin_unlock(&d->event_lock);
   5.251  
   5.252      return port;
   5.253  }
   5.254 @@ -968,11 +968,11 @@ void free_xen_event_channel(
   5.255      struct evtchn *chn;
   5.256      struct domain *d = local_vcpu->domain;
   5.257  
   5.258 -    spin_lock(&d->evtchn_lock);
   5.259 +    spin_lock(&d->event_lock);
   5.260  
   5.261      if ( unlikely(d->is_dying) )
   5.262      {
   5.263 -        spin_unlock(&d->evtchn_lock);
   5.264 +        spin_unlock(&d->event_lock);
   5.265          return;
   5.266      }
   5.267  
   5.268 @@ -981,7 +981,7 @@ void free_xen_event_channel(
   5.269      BUG_ON(!chn->consumer_is_xen);
   5.270      chn->consumer_is_xen = 0;
   5.271  
   5.272 -    spin_unlock(&d->evtchn_lock);
   5.273 +    spin_unlock(&d->event_lock);
   5.274  
   5.275      (void)__evtchn_close(d, port);
   5.276  }
   5.277 @@ -993,7 +993,7 @@ void notify_via_xen_event_channel(int lp
   5.278      struct domain *ld = current->domain, *rd;
   5.279      int            rport;
   5.280  
   5.281 -    spin_lock(&ld->evtchn_lock);
   5.282 +    spin_lock(&ld->event_lock);
   5.283  
   5.284      ASSERT(port_is_valid(ld, lport));
   5.285      lchn = evtchn_from_port(ld, lport);
   5.286 @@ -1007,13 +1007,13 @@ void notify_via_xen_event_channel(int lp
   5.287          evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
   5.288      }
   5.289  
   5.290 -    spin_unlock(&ld->evtchn_lock);
   5.291 +    spin_unlock(&ld->event_lock);
   5.292  }
   5.293  
   5.294  
   5.295  int evtchn_init(struct domain *d)
   5.296  {
   5.297 -    spin_lock_init(&d->evtchn_lock);
   5.298 +    spin_lock_init(&d->event_lock);
   5.299      if ( get_free_port(d) != 0 )
   5.300          return -EINVAL;
   5.301      evtchn_from_port(d, 0)->state = ECS_RESERVED;
   5.302 @@ -1027,7 +1027,7 @@ void evtchn_destroy(struct domain *d)
   5.303  
   5.304      /* After this barrier no new event-channel allocations can occur. */
   5.305      BUG_ON(!d->is_dying);
   5.306 -    spin_barrier(&d->evtchn_lock);
   5.307 +    spin_barrier(&d->event_lock);
   5.308  
   5.309      /* Close all existing event channels. */
   5.310      for ( i = 0; port_is_valid(d, i); i++ )
   5.311 @@ -1037,14 +1037,14 @@ void evtchn_destroy(struct domain *d)
   5.312      }
   5.313  
   5.314      /* Free all event-channel buckets. */
   5.315 -    spin_lock(&d->evtchn_lock);
   5.316 +    spin_lock(&d->event_lock);
   5.317      for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
   5.318      {
   5.319          xsm_free_security_evtchn(d->evtchn[i]);
   5.320          xfree(d->evtchn[i]);
   5.321          d->evtchn[i] = NULL;
   5.322      }
   5.323 -    spin_unlock(&d->evtchn_lock);
   5.324 +    spin_unlock(&d->event_lock);
   5.325  }
   5.326  
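
evtchn_destroy (above) can free the buckets only because nothing can still be
allocating from them: d->is_dying is already set, and spin_barrier() then
waits out any critical section that entered d->event_lock before the flag was
visible. A sketch of what the barrier guarantees — hypothetical, with the
semantics inferred from this call site:

    /* Hypothetical sketch of the barrier's effect: once this returns,
     * every critical section that held the lock when it was called has
     * exited, so all later lockers observe d->is_dying. The real
     * primitive may be implemented differently. */
    static void example_event_lock_barrier(struct domain *d)
    {
        spin_lock(&d->event_lock);
        spin_unlock(&d->event_lock);
    }
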
   5.327  static void domain_dump_evtchn_info(struct domain *d)
   5.328 @@ -1053,7 +1053,7 @@ static void domain_dump_evtchn_info(stru
   5.329  
   5.330      printk("Domain %d polling vCPUs: %08lx\n", d->domain_id, d->poll_mask[0]);
   5.331  
   5.332 -    if ( !spin_trylock(&d->evtchn_lock) )
   5.333 +    if ( !spin_trylock(&d->event_lock) )
   5.334          return;
   5.335  
   5.336      printk("Event channel information for domain %d:\n",
   5.337 @@ -1094,7 +1094,7 @@ static void domain_dump_evtchn_info(stru
   5.338          printk(" x=%d\n", chn->consumer_is_xen);
   5.339      }
   5.340  
   5.341 -    spin_unlock(&d->evtchn_lock);
   5.342 +    spin_unlock(&d->event_lock);
   5.343  }
   5.344  
   5.345  static void dump_evtchn_info(unsigned char key)
     6.1 --- a/xen/drivers/passthrough/io.c	Thu Oct 09 11:14:52 2008 +0100
     6.2 +++ b/xen/drivers/passthrough/io.c	Thu Oct 09 11:17:51 2008 +0100
     6.3 @@ -30,7 +30,7 @@ static void pt_irq_time_out(void *data)
     6.4      struct dev_intx_gsi_link *digl;
     6.5      uint32_t device, intx;
     6.6  
     6.7 -    spin_lock(&irq_map->dom->evtchn_lock);
     6.8 +    spin_lock(&irq_map->dom->event_lock);
     6.9  
    6.10      dpci = domain_get_irq_dpci(irq_map->dom);
    6.11      ASSERT(dpci);
    6.12 @@ -46,7 +46,7 @@ static void pt_irq_time_out(void *data)
    6.13      clear_bit(machine_gsi, dpci->dirq_mask);
    6.14      vector = domain_irq_to_vector(irq_map->dom, machine_gsi);
    6.15      dpci->mirq[machine_gsi].pending = 0;
    6.16 -    spin_unlock(&irq_map->dom->evtchn_lock);
    6.17 +    spin_unlock(&irq_map->dom->event_lock);
    6.18      pirq_guest_eoi(irq_map->dom, machine_gsi);
    6.19  }
    6.20  
    6.21 @@ -62,7 +62,7 @@ int pt_irq_create_bind_vtd(
    6.22      if ( pirq < 0 || pirq >= NR_PIRQS )
    6.23          return -EINVAL;
    6.24  
    6.25 -    spin_lock(&d->evtchn_lock);
    6.26 +    spin_lock(&d->event_lock);
    6.27  
    6.28      hvm_irq_dpci = domain_get_irq_dpci(d);
    6.29      if ( hvm_irq_dpci == NULL )
    6.30 @@ -70,7 +70,7 @@ int pt_irq_create_bind_vtd(
    6.31          hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
    6.32          if ( hvm_irq_dpci == NULL )
    6.33          {
    6.34 -            spin_unlock(&d->evtchn_lock);
    6.35 +            spin_unlock(&d->event_lock);
    6.36              return -ENOMEM;
    6.37          }
    6.38          memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
    6.39 @@ -81,7 +81,7 @@ int pt_irq_create_bind_vtd(
    6.40      if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
    6.41      {
    6.42          xfree(hvm_irq_dpci);
    6.43 -        spin_unlock(&d->evtchn_lock);
    6.44 +        spin_unlock(&d->event_lock);
    6.45          return -EINVAL;
    6.46      }
    6.47  
    6.48 @@ -101,7 +101,7 @@ int pt_irq_create_bind_vtd(
    6.49                  ||hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] != pirq)
    6.50  
    6.51          {
    6.52 -            spin_unlock(&d->evtchn_lock);
    6.53 +            spin_unlock(&d->event_lock);
    6.54              return -EBUSY;
    6.55          }
    6.56      }
    6.57 @@ -117,7 +117,7 @@ int pt_irq_create_bind_vtd(
    6.58          digl = xmalloc(struct dev_intx_gsi_link);
    6.59          if ( !digl )
    6.60          {
    6.61 -            spin_unlock(&d->evtchn_lock);
    6.62 +            spin_unlock(&d->event_lock);
    6.63              return -ENOMEM;
    6.64          }
    6.65  
    6.66 @@ -149,7 +149,7 @@ int pt_irq_create_bind_vtd(
    6.67                   "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
    6.68                   machine_gsi, device, intx);
    6.69      }
    6.70 -    spin_unlock(&d->evtchn_lock);
    6.71 +    spin_unlock(&d->event_lock);
    6.72      return 0;
    6.73  }
    6.74  
    6.75 @@ -172,13 +172,13 @@ int pt_irq_destroy_bind_vtd(
    6.76               "pt_irq_destroy_bind_vtd: machine_gsi=%d "
    6.77               "guest_gsi=%d, device=%d, intx=%d.\n",
    6.78               machine_gsi, guest_gsi, device, intx);
    6.79 -    spin_lock(&d->evtchn_lock);
    6.80 +    spin_lock(&d->event_lock);
    6.81  
    6.82      hvm_irq_dpci = domain_get_irq_dpci(d);
    6.83  
    6.84      if ( hvm_irq_dpci == NULL )
    6.85      {
    6.86 -        spin_unlock(&d->evtchn_lock);
    6.87 +        spin_unlock(&d->event_lock);
    6.88          return -EINVAL;
    6.89      }
    6.90  
    6.91 @@ -213,7 +213,7 @@ int pt_irq_destroy_bind_vtd(
    6.92              clear_bit(machine_gsi, hvm_irq_dpci->mapping);
    6.93          }
    6.94      }
    6.95 -    spin_unlock(&d->evtchn_lock);
    6.96 +    spin_unlock(&d->event_lock);
    6.97      gdprintk(XENLOG_INFO,
    6.98               "XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
    6.99               machine_gsi, device, intx);
   6.100 @@ -254,7 +254,7 @@ void hvm_dpci_msi_eoi(struct domain *d, 
   6.101      if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
   6.102         return;
   6.103  
   6.104 -    spin_lock(&d->evtchn_lock);
   6.105 +    spin_lock(&d->event_lock);
   6.106      pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
   6.107  
   6.108      if ( ( pirq >= 0 ) && (pirq < NR_PIRQS) &&
   6.109 @@ -265,7 +265,7 @@ void hvm_dpci_msi_eoi(struct domain *d, 
   6.110           desc = domain_spin_lock_irq_desc(d, pirq, NULL);
   6.111           if (!desc)
   6.112           {
   6.113 -            spin_unlock(&d->evtchn_lock);
   6.114 +            spin_unlock(&d->event_lock);
   6.115              return;
   6.116           }
   6.117  
   6.118 @@ -275,7 +275,7 @@ void hvm_dpci_msi_eoi(struct domain *d, 
   6.119           pirq_guest_eoi(d, pirq);
   6.120       }
   6.121  
   6.122 -    spin_unlock(&d->evtchn_lock);
   6.123 +    spin_unlock(&d->event_lock);
   6.124  }
   6.125  
   6.126  void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
   6.127 @@ -293,14 +293,14 @@ void hvm_dpci_eoi(struct domain *d, unsi
   6.128          return;
   6.129      }
   6.130  
   6.131 -    spin_lock(&d->evtchn_lock);
   6.132 +    spin_lock(&d->event_lock);
   6.133      hvm_irq_dpci = domain_get_irq_dpci(d);
   6.134  
   6.135      if((hvm_irq_dpci == NULL) ||
   6.136           (guest_gsi >= NR_ISAIRQS &&
   6.137            !hvm_irq_dpci->girq[guest_gsi].valid) )
   6.138      {
   6.139 -        spin_unlock(&d->evtchn_lock);
   6.140 +        spin_unlock(&d->event_lock);
   6.141          return;
   6.142      }
   6.143  
   6.144 @@ -322,5 +322,5 @@ void hvm_dpci_eoi(struct domain *d, unsi
   6.145              pirq_guest_eoi(d, machine_gsi);
   6.146          }
   6.147      }
   6.148 -    spin_unlock(&d->evtchn_lock);
   6.149 +    spin_unlock(&d->event_lock);
   6.150  }
     7.1 --- a/xen/drivers/passthrough/pci.c	Thu Oct 09 11:14:52 2008 +0100
     7.2 +++ b/xen/drivers/passthrough/pci.c	Thu Oct 09 11:17:51 2008 +0100
     7.3 @@ -165,7 +165,7 @@ static void pci_clean_dpci_irqs(struct d
     7.4      if ( !is_hvm_domain(d) && !need_iommu(d) )
     7.5          return;
     7.6  
     7.7 -    spin_lock(&d->evtchn_lock);
     7.8 +    spin_lock(&d->event_lock);
     7.9      hvm_irq_dpci = domain_get_irq_dpci(d);
    7.10      if ( hvm_irq_dpci != NULL )
    7.11      {
    7.12 @@ -189,7 +189,7 @@ static void pci_clean_dpci_irqs(struct d
    7.13          d->arch.hvm_domain.irq.dpci = NULL;
    7.14          xfree(hvm_irq_dpci);
    7.15      }
    7.16 -    spin_unlock(&d->evtchn_lock);
    7.17 +    spin_unlock(&d->event_lock);
    7.18  }
    7.19  
    7.20  void pci_release_devices(struct domain *d)
     8.1 --- a/xen/drivers/passthrough/vtd/x86/vtd.c	Thu Oct 09 11:14:52 2008 +0100
     8.2 +++ b/xen/drivers/passthrough/vtd/x86/vtd.c	Thu Oct 09 11:17:51 2008 +0100
     8.3 @@ -93,13 +93,13 @@ void hvm_dpci_isairq_eoi(struct domain *
     8.4      if ( !vtd_enabled)
     8.5          return;
     8.6  
     8.7 -    spin_lock(&d->evtchn_lock);
     8.8 +    spin_lock(&d->event_lock);
     8.9  
    8.10      dpci = domain_get_irq_dpci(d);
    8.11  
    8.12      if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
    8.13      {
    8.14 -        spin_unlock(&d->evtchn_lock);
    8.15 +        spin_unlock(&d->event_lock);
    8.16          return;
    8.17      }
    8.18      /* Multiple mirq may be mapped to one isa irq */
    8.19 @@ -121,5 +121,5 @@ void hvm_dpci_isairq_eoi(struct domain *
    8.20              }
    8.21          }
    8.22      }
    8.23 -    spin_unlock(&d->evtchn_lock);
    8.24 +    spin_unlock(&d->event_lock);
    8.25  }
     9.1 --- a/xen/include/asm-x86/domain.h	Thu Oct 09 11:14:52 2008 +0100
     9.2 +++ b/xen/include/asm-x86/domain.h	Thu Oct 09 11:17:51 2008 +0100
     9.3 @@ -235,7 +235,7 @@ struct arch_domain
     9.4      /* Shadow translated domain: P2M mapping */
     9.5      pagetable_t phys_table;
     9.6  
     9.7 -    /* NB. protected by d->evtchn_lock and by irq_desc[vector].lock */
     9.8 +    /* NB. protected by d->event_lock and by irq_desc[vector].lock */
     9.9      int vector_pirq[NR_VECTORS];
    9.10      int pirq_vector[NR_PIRQS];
    9.11  
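
The comment above records a dual-lock rule for the two translation arrays:
they are protected by both d->event_lock and the per-vector irq_desc lock.
The usual reading of such a rule — an assumption here, not spelled out in the
patch — is that writers must hold both locks while readers may rely on either
one. A sketch under that assumption:

    /* Assumed discipline for vector_pirq[]/pirq_vector[], not stated
     * explicitly in the patch: writers take both locks, so a reader
     * holding either lock sees a consistent mapping. */
    static void set_pirq_vector(struct domain *d, int pirq, int vector,
                                irq_desc_t *desc)
    {
        unsigned long flags;

        spin_lock(&d->event_lock);
        spin_lock_irqsave(&desc->lock, flags);
        d->arch.pirq_vector[pirq]   = vector;
        d->arch.vector_pirq[vector] = pirq;
        spin_unlock_irqrestore(&desc->lock, flags);
        spin_unlock(&d->event_lock);
    }
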
    10.1 --- a/xen/include/asm-x86/hvm/irq.h	Thu Oct 09 11:14:52 2008 +0100
    10.2 +++ b/xen/include/asm-x86/hvm/irq.h	Thu Oct 09 11:17:51 2008 +0100
    10.3 @@ -63,7 +63,7 @@ struct hvm_girq_dpci_mapping {
    10.4  
    10.5  #define NR_ISAIRQS  16
    10.6  #define NR_LINK     4
    10.7 -/* Protected by domain's evtchn_lock */
    10.8 +/* Protected by domain's event_lock */
    10.9  struct hvm_irq_dpci {
   10.10      /* Machine IRQ to guest device/intx mapping. */
   10.11      DECLARE_BITMAP(mapping, NR_PIRQS);
    11.1 --- a/xen/include/xen/sched.h	Thu Oct 09 11:14:52 2008 +0100
    11.2 +++ b/xen/include/xen/sched.h	Thu Oct 09 11:17:51 2008 +0100
    11.3 @@ -188,7 +188,7 @@ struct domain
    11.4  
    11.5      /* Event channel information. */
    11.6      struct evtchn   *evtchn[NR_EVTCHN_BUCKETS];
    11.7 -    spinlock_t       evtchn_lock;
    11.8 +    spinlock_t       event_lock;
    11.9  
   11.10      struct grant_table *grant_table;
   11.11  
    12.1 --- a/xen/xsm/acm/acm_simple_type_enforcement_hooks.c	Thu Oct 09 11:14:52 2008 +0100
    12.2 +++ b/xen/xsm/acm/acm_simple_type_enforcement_hooks.c	Thu Oct 09 11:17:51 2008 +0100
    12.3 @@ -248,11 +248,11 @@ ste_init_state(struct acm_sized_buffer *
    12.4          /* a) check for event channel conflicts */
    12.5          for ( bucket = 0; bucket < NR_EVTCHN_BUCKETS; bucket++ )
    12.6          {
    12.7 -            spin_lock(&d->evtchn_lock);
    12.8 +            spin_lock(&d->event_lock);
    12.9              ports = d->evtchn[bucket];
   12.10              if ( ports == NULL)
   12.11              {
   12.12 -                spin_unlock(&d->evtchn_lock);
   12.13 +                spin_unlock(&d->event_lock);
   12.14                  break;
   12.15              }
   12.16  
   12.17 @@ -280,7 +280,7 @@ ste_init_state(struct acm_sized_buffer *
   12.18                      printkd("%s: Policy violation in event channel domain "
   12.19                              "%x -> domain %x.\n",
   12.20                              __func__, d->domain_id, rdomid);
   12.21 -                    spin_unlock(&d->evtchn_lock);
   12.22 +                    spin_unlock(&d->event_lock);
   12.23  
   12.24                      acm_array_append_tuple(errors,
   12.25                                             ACM_EVTCHN_SHARING_VIOLATION,
   12.26 @@ -288,7 +288,7 @@ ste_init_state(struct acm_sized_buffer *
   12.27                      goto out;
   12.28                  }
   12.29              }
   12.30 -            spin_unlock(&d->evtchn_lock);
   12.31 +            spin_unlock(&d->event_lock);
   12.32          } 
   12.33  
   12.34