ia64/xen-unstable

changeset 19308:6357628c678f

passthrough: allow pass-through devices to share virtual GSI

Allow multiple pass-through devices to use the same guest_gsi.

The motivation for this is:

* Allow multi-function devices to be passed through as multi-function
  devices.
* Allow more than two pass-through devices.
  - This will increase contention for the GSI space, and allocation
    becomes a lot simpler if GSI sharing is allowed.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Mar 11 10:05:00 2009 +0000
parents b5d074255c38
children 1282561a2bf2
files xen/drivers/passthrough/io.c xen/include/xen/hvm/irq.h
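The heart of the change is in irq.h: the per-guest-GSI mapping goes from a single inline slot guarded by a valid flag to a list head anchoring any number of dynamically allocated mappings. A before/after sketch of the structures, condensed from the diff below:

    /* Before: at most one pass-through binding per guest GSI. */
    struct hvm_girq_dpci_mapping {
        uint8_t valid;
        uint8_t device;
        uint8_t intx;
        uint8_t machine_gsi;
    };
    /* ... in struct hvm_irq_dpci ... */
    struct hvm_girq_dpci_mapping girq[NR_IRQS];

    /* After: each guest GSI heads a chain of bindings, xmalloc()ed at
     * bind time and freed on unbind, so several devices can share one
     * virtual GSI. */
    struct hvm_girq_dpci_mapping {
        struct list_head list;
        uint8_t device;
        uint8_t intx;
        uint8_t machine_gsi;
    };
    /* ... in struct hvm_irq_dpci ... */
    struct list_head girq[NR_IRQS];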
line diff
--- a/xen/drivers/passthrough/io.c	Wed Mar 11 10:03:54 2009 +0000
+++ b/xen/drivers/passthrough/io.c	Wed Mar 11 10:05:00 2009 +0000
@@ -36,6 +36,7 @@ static void pt_irq_time_out(void *data)
     int vector;
     struct hvm_irq_dpci *dpci = NULL;
     struct dev_intx_gsi_link *digl;
+    struct hvm_girq_dpci_mapping *girq;
     uint32_t device, intx;
     DECLARE_BITMAP(machine_gsi_map, NR_IRQS);
 
@@ -48,8 +49,11 @@ static void pt_irq_time_out(void *data)
     list_for_each_entry ( digl, &irq_map->digl_list, list )
     {
         guest_gsi = digl->gsi;
-        machine_gsi = dpci->girq[guest_gsi].machine_gsi;
-        set_bit(machine_gsi, machine_gsi_map);
+        list_for_each_entry ( girq, &dpci->girq[guest_gsi], list )
+        {
+            machine_gsi = girq->machine_gsi;
+            set_bit(machine_gsi, machine_gsi_map);
+        }
         device = digl->device;
         intx = digl->intx;
         hvm_pci_intx_deassert(irq_map->dom, device, intx);
@@ -83,6 +87,7 @@ int pt_irq_create_bind_vtd(
     uint32_t machine_gsi, guest_gsi;
     uint32_t device, intx, link;
     struct dev_intx_gsi_link *digl;
+    struct hvm_girq_dpci_mapping *girq;
     int rc, pirq = pt_irq_bind->machine_irq;
 
     if ( pirq < 0 || pirq >= NR_IRQS )
@@ -101,7 +106,10 @@ int pt_irq_create_bind_vtd(
         }
         memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
         for ( int i = 0; i < NR_IRQS; i++ )
+        {
             INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
+            INIT_LIST_HEAD(&hvm_irq_dpci->girq[i]);
+        }
 
         if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
         {
@@ -164,6 +172,14 @@ int pt_irq_create_bind_vtd(
             return -ENOMEM;
         }
 
+        girq = xmalloc(struct hvm_girq_dpci_mapping);
+        if ( !girq )
+        {
+            xfree(digl);
+            spin_unlock(&d->event_lock);
+            return -ENOMEM;
+        }
+
         digl->device = device;
         digl->intx = intx;
         digl->gsi = guest_gsi;
@@ -171,10 +187,10 @@ int pt_irq_create_bind_vtd(
         list_add_tail(&digl->list,
                       &hvm_irq_dpci->mirq[machine_gsi].digl_list);
 
-        hvm_irq_dpci->girq[guest_gsi].valid = 1;
-        hvm_irq_dpci->girq[guest_gsi].device = device;
-        hvm_irq_dpci->girq[guest_gsi].intx = intx;
-        hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
+        girq->device = device;
+        girq->intx = intx;
+        girq->machine_gsi = machine_gsi;
+        list_add_tail(&girq->list, &hvm_irq_dpci->girq[guest_gsi]);
 
         /* Bind the same mirq once in the same domain */
         if ( !test_and_set_bit(machine_gsi, hvm_irq_dpci->mapping))
@@ -209,10 +225,8 @@ int pt_irq_create_bind_vtd(
                     kill_timer(&hvm_irq_dpci->hvm_timer[vector]);
                 hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
                 clear_bit(machine_gsi, hvm_irq_dpci->mapping);
-                hvm_irq_dpci->girq[guest_gsi].machine_gsi = 0;
-                hvm_irq_dpci->girq[guest_gsi].intx = 0;
-                hvm_irq_dpci->girq[guest_gsi].device = 0;
-                hvm_irq_dpci->girq[guest_gsi].valid = 0;
+                list_del(&girq->list);
+                xfree(girq);
                 list_del(&digl->list);
                 hvm_irq_dpci->link_cnt[link]--;
                 spin_unlock(&d->event_lock);
@@ -237,6 +251,7 @@ int pt_irq_destroy_bind_vtd(
     uint32_t device, intx, link;
     struct list_head *digl_list, *tmp;
     struct dev_intx_gsi_link *digl;
+    struct hvm_girq_dpci_mapping *girq;
 
     machine_gsi = pt_irq_bind->machine_irq;
     device = pt_irq_bind->u.pci.device;
@@ -259,8 +274,16 @@ int pt_irq_destroy_bind_vtd(
     }
 
     hvm_irq_dpci->link_cnt[link]--;
-    memset(&hvm_irq_dpci->girq[guest_gsi], 0,
-           sizeof(struct hvm_girq_dpci_mapping));
+
+    list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list )
+    {
+        if ( girq->machine_gsi == machine_gsi )
+        {
+            list_del(&girq->list);
+            xfree(girq);
+            break;
+        }
+    }
 
     /* clear the mirq info */
     if ( test_bit(machine_gsi, hvm_irq_dpci->mapping))
@@ -429,13 +452,39 @@ void hvm_dirq_assist(struct vcpu *v)
     }
 }
 
+static void __hvm_dpci_eoi(struct domain *d,
+                           struct hvm_irq_dpci *hvm_irq_dpci,
+                           struct hvm_girq_dpci_mapping *girq,
+                           union vioapic_redir_entry *ent)
+{
+    uint32_t device, intx, machine_gsi;
+
+    device = girq->device;
+    intx = girq->intx;
+    hvm_pci_intx_deassert(d, device, intx);
+
+    machine_gsi = girq->machine_gsi;
+
+    /*
+     * No need to get vector lock for timer
+     * since interrupt is still not EOIed
+     */
+    if ( --hvm_irq_dpci->mirq[machine_gsi].pending ||
+         ( ent && ent->fields.mask ) ||
+         !pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
+        return;
+
+    stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
+    pirq_guest_eoi(d, machine_gsi);
+}
+
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
                   union vioapic_redir_entry *ent)
 {
-    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
-    uint32_t device, intx, machine_gsi;
+    struct hvm_irq_dpci *hvm_irq_dpci;
+    struct hvm_girq_dpci_mapping *girq;
 
-    if ( !iommu_enabled)
+    if ( !iommu_enabled )
         return;
 
     if ( guest_gsi < NR_ISAIRQS )
@@ -447,34 +496,12 @@ void hvm_dpci_eoi(struct domain *d, unsi
     spin_lock(&d->event_lock);
     hvm_irq_dpci = domain_get_irq_dpci(d);
 
-    if((hvm_irq_dpci == NULL) ||
-         (guest_gsi >= NR_ISAIRQS &&
-          !hvm_irq_dpci->girq[guest_gsi].valid) )
-    {
-        spin_unlock(&d->event_lock);
-        return;
-    }
-
-    device = hvm_irq_dpci->girq[guest_gsi].device;
-    intx = hvm_irq_dpci->girq[guest_gsi].intx;
-    hvm_pci_intx_deassert(d, device, intx);
+    if ( !hvm_irq_dpci )
+        goto unlock;
 
-    machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
-    if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
-    {
-        if ( (ent == NULL) || !ent->fields.mask )
-        {
-            /*
-             * No need to get vector lock for timer
-             * since interrupt is still not EOIed
-             */
-            if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
-            {
-                stop_timer(&hvm_irq_dpci->hvm_timer[
-                    domain_irq_to_vector(d, machine_gsi)]);
-                pirq_guest_eoi(d, machine_gsi);
-            }
-        }
-    }
+    list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list )
+        __hvm_dpci_eoi(d, hvm_irq_dpci, girq, ent);
+
+unlock:
     spin_unlock(&d->event_lock);
 }
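A note on the hvm_dpci_eoi() rewrite above: the old nested conditions collapse, via De Morgan, into the single early return in __hvm_dpci_eoi(). The self-contained checker below (plain C, nothing Xen-specific; "masked" stands in for ( ent && ent->fields.mask ), "pending" for the zero/nonzero count after the decrement) confirms the two forms act in exactly the same cases:

    #include <stdio.h>
    #include <stdbool.h>

    int main(void)
    {
        for ( int pending = 0; pending <= 1; pending++ )
            for ( int masked = 0; masked <= 1; masked++ )
                for ( int need_timer = 0; need_timer <= 1; need_timer++ )
                {
                    /* Old: stop the timer and EOI only when every test passes. */
                    bool old_acts = (pending == 0) && !masked && need_timer;
                    /* New: bail out early when any test fails. */
                    bool new_acts = !(pending || masked || !need_timer);
                    if ( old_acts != new_acts )
                        printf("mismatch: %d %d %d\n",
                               pending, masked, need_timer);
                }
        printf("old and new EOI conditions agree on all inputs\n");
        return 0;
    }

Factoring the body into __hvm_dpci_eoi() is what lets hvm_dpci_eoi() apply it once per binding as it walks the guest GSI's list.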
--- a/xen/include/xen/hvm/irq.h	Wed Mar 11 10:03:54 2009 +0000
+++ b/xen/include/xen/hvm/irq.h	Wed Mar 11 10:05:00 2009 +0000
@@ -60,7 +60,7 @@ struct hvm_mirq_dpci_mapping {
 };
 
 struct hvm_girq_dpci_mapping {
-    uint8_t valid;
+    struct list_head list;
     uint8_t device;
     uint8_t intx;
     uint8_t machine_gsi;
@@ -75,7 +75,7 @@ struct hvm_irq_dpci {
     DECLARE_BITMAP(mapping, NR_IRQS);
     struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
     /* Guest IRQ to guest device/intx mapping. */
-    struct hvm_girq_dpci_mapping girq[NR_IRQS];
+    struct list_head girq[NR_IRQS];
     uint8_t msi_gvec_pirq[NR_VECTORS];
     DECLARE_BITMAP(dirq_mask, NR_IRQS);
     /* Record of mapped ISA IRQs */
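Finally, a self-contained user-space sketch of the sharing pattern the patch adopts. The list primitives below are minimal stand-ins for the xen/list.h macros the patch relies on (GCC-style __typeof__ is assumed); NR_GUEST_GSIS and the device/GSI numbers are made-up values for illustration, not Xen's:

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal stand-ins for the intrusive doubly-linked list
     * primitives used by the patch. */
    struct list_head {
        struct list_head *next, *prev;
    };

    static void INIT_LIST_HEAD(struct list_head *h)
    {
        h->next = h->prev = h;
    }

    static void list_add_tail(struct list_head *n, struct list_head *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    #define list_for_each_entry(pos, head, member)                          \
        for ( pos = container_of((head)->next, __typeof__(*pos), member);   \
              &pos->member != (head);                                       \
              pos = container_of(pos->member.next, __typeof__(*pos), member) )

    /* Shape of the mapping node after the patch. */
    struct hvm_girq_dpci_mapping {
        struct list_head list;
        unsigned char device, intx, machine_gsi;
    };

    #define NR_GUEST_GSIS 48 /* illustrative; Xen sizes the array with NR_IRQS */

    int main(void)
    {
        struct list_head girq[NR_GUEST_GSIS];
        /* Two functions of a multi-function device, bound to the same
         * virtual GSI but distinct machine GSIs (made-up numbers). */
        struct hvm_girq_dpci_mapping a = { .device = 5, .intx = 0, .machine_gsi = 16 };
        struct hvm_girq_dpci_mapping b = { .device = 5, .intx = 1, .machine_gsi = 17 };
        struct hvm_girq_dpci_mapping *g;
        int guest_gsi = 36;

        for ( int i = 0; i < NR_GUEST_GSIS; i++ )
            INIT_LIST_HEAD(&girq[i]);

        list_add_tail(&a.list, &girq[guest_gsi]);
        list_add_tail(&b.list, &girq[guest_gsi]);

        /* On EOI of guest_gsi every binding is visited, mirroring the
         * loop hvm_dpci_eoi() now runs over __hvm_dpci_eoi(). */
        list_for_each_entry ( g, &girq[guest_gsi], list )
            printf("deassert device %u INT%c -> machine GSI %u\n",
                   g->device, 'A' + g->intx, g->machine_gsi);

        return 0;
    }

In the hypervisor proper the same walk runs under d->event_lock and the nodes come from xmalloc()/xfree() rather than the stack, but the list mechanics are identical.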