ia64/xen-unstable

changeset 16048:c2871913c5c2

vtd: Dynamically allocate IRQ-tracking structures, only for those
domains that actually have PCI-passthru devices. Greatly reduces size
of 'struct domain'.
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Tue Oct 02 16:59:07 2007 +0100
parents e1b574bc36b5
children 385b9b6bb61f
files xen/arch/x86/hvm/vmx/intr.c xen/arch/x86/hvm/vmx/vtd/io.c xen/include/asm-x86/hvm/irq.h
--- a/xen/arch/x86/hvm/vmx/intr.c	Tue Oct 02 16:28:58 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c	Tue Oct 02 16:59:07 2007 +0100
@@ -107,19 +107,23 @@ static void enable_intr_window(struct vc
     }
 }
 
-static void vmx_dirq_assist(struct domain *d)
+static void vmx_dirq_assist(struct vcpu *v)
 {
     unsigned int irq;
     uint32_t device, intx;
-    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+    struct domain *d = v->domain;
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
 
-    for ( irq = find_first_bit(hvm_irq->dirq_mask, NR_IRQS);
+    if ( !vtd_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
+        return;
+
+    for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, NR_IRQS);
           irq < NR_IRQS;
-          irq = find_next_bit(hvm_irq->dirq_mask, NR_IRQS, irq + 1) )
+          irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) )
     {
-        test_and_clear_bit(irq, &hvm_irq->dirq_mask);
-        device = hvm_irq->mirq[irq].device;
-        intx = hvm_irq->mirq[irq].intx;
+        test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask);
+        device = hvm_irq_dpci->mirq[irq].device;
+        intx = hvm_irq_dpci->mirq[irq].intx;
         hvm_pci_intx_assert(d, device, intx);
     }
 }
@@ -134,8 +138,7 @@ asmlinkage void vmx_intr_assist(void)
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
 
-    if ( vtd_enabled && (v->vcpu_id == 0) )
-        vmx_dirq_assist(v->domain);
+    vmx_dirq_assist(v);
 
     hvm_set_callback_irq_level();
 
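The rewritten vmx_dirq_assist() takes the vcpu, bails out early when the domain has no dpci state, and drains the dirq_mask bitmap. The scan is a standard deferred-dispatch consumer: hvm_do_IRQ_dpci() (in io.c below) sets a bit and kicks VCPU0, and this loop later clears each pending bit and asserts the corresponding guest INTx. A minimal userspace sketch of that consumer loop follows; the bitmap helpers are toy, non-atomic stand-ins for Xen's primitives, not the hypervisor's implementations.

    #include <stdio.h>

    #define NR_IRQS 256
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Toy, non-atomic stand-ins for Xen's bitmap primitives. */
    static int test_and_clear_bit(unsigned int nr, unsigned long *addr)
    {
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        int old = (addr[nr / BITS_PER_LONG] & mask) != 0;
        addr[nr / BITS_PER_LONG] &= ~mask;
        return old;
    }

    static unsigned int find_next_bit(const unsigned long *addr,
                                      unsigned int size, unsigned int start)
    {
        for ( unsigned int i = start; i < size; i++ )
            if ( addr[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)) )
                return i;
        return size;                      /* 'size' means no bit found */
    }

    #define find_first_bit(addr, size) find_next_bit(addr, size, 0)

    int main(void)
    {
        unsigned long dirq_mask[NR_IRQS / BITS_PER_LONG] = { 0 };

        /* Producer side (hvm_do_IRQ_dpci) would test_and_set_bit + kick. */
        dirq_mask[0] |= (1UL << 3) | (1UL << 17);

        /* Consumer side: the vmx_dirq_assist() drain loop. */
        for ( unsigned int irq = find_first_bit(dirq_mask, NR_IRQS);
              irq < NR_IRQS;
              irq = find_next_bit(dirq_mask, NR_IRQS, irq + 1) )
        {
            test_and_clear_bit(irq, dirq_mask);
            printf("assert guest INTx for machine IRQ %u\n", irq);
        }
        return 0;
    }

Folding the vtd_enabled and vcpu_id checks into the callee keeps the caller in vmx_intr_assist() trivial for domains without pass-through devices.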
--- a/xen/arch/x86/hvm/vmx/vtd/io.c	Tue Oct 02 16:28:58 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vtd/io.c	Tue Oct 02 16:59:07 2007 +0100
@@ -46,27 +46,41 @@
 #include <public/domctl.h>
 
 int pt_irq_create_bind_vtd(
-    struct domain *d,
-    xen_domctl_bind_pt_irq_t * pt_irq_bind)
+    struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
 {
-    struct hvm_domain *hd = &d->arch.hvm_domain;
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     uint32_t machine_gsi, guest_gsi;
     uint32_t device, intx;
 
+    if ( hvm_irq_dpci == NULL )
+    {
+        hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
+        if ( hvm_irq_dpci == NULL )
+            return -ENOMEM;
+
+        memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
+
+        if ( cmpxchg((unsigned long *)&d->arch.hvm_domain.irq.dpci,
+                     0, (unsigned long)hvm_irq_dpci) != 0 )
+            xfree(hvm_irq_dpci);
+
+        hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    }
+
     machine_gsi = pt_irq_bind->machine_irq;
     device = pt_irq_bind->u.pci.device;
     intx = pt_irq_bind->u.pci.intx;
     guest_gsi = hvm_pci_intx_gsi(device, intx);
 
-    hd->irq.mirq[machine_gsi].valid = 1;
-    hd->irq.mirq[machine_gsi].device = device;
-    hd->irq.mirq[machine_gsi].intx = intx;
-    hd->irq.mirq[machine_gsi].guest_gsi = guest_gsi;
+    hvm_irq_dpci->mirq[machine_gsi].valid = 1;
+    hvm_irq_dpci->mirq[machine_gsi].device = device;
+    hvm_irq_dpci->mirq[machine_gsi].intx = intx;
+    hvm_irq_dpci->mirq[machine_gsi].guest_gsi = guest_gsi;
 
-    hd->irq.girq[guest_gsi].valid = 1;
-    hd->irq.girq[guest_gsi].device = device;
-    hd->irq.girq[guest_gsi].intx = intx;
-    hd->irq.girq[guest_gsi].machine_gsi = machine_gsi;
+    hvm_irq_dpci->girq[guest_gsi].valid = 1;
+    hvm_irq_dpci->girq[guest_gsi].device = device;
+    hvm_irq_dpci->girq[guest_gsi].intx = intx;
+    hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
 
     /* Deal with gsi for legacy devices */
     pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
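This hunk carries the core of the change: struct hvm_irq_dpci is created lazily on the first bind request, and cmpxchg() publishes the pointer so that two concurrent callers cannot both install an allocation; the loser frees its copy and re-reads the winner's pointer. A portable C11 sketch of the same publish-once idiom, using malloc/free and stdatomic.h in place of Xen's xmalloc/xfree and raw cmpxchg (all names in the sketch are illustrative):

    #include <stdlib.h>
    #include <string.h>
    #include <stdatomic.h>

    struct dpci_sketch {                  /* stand-in for struct hvm_irq_dpci */
        unsigned long dirq_mask[4];
    };

    static _Atomic(struct dpci_sketch *) dpci;   /* the published pointer */

    /* Return the (possibly freshly allocated) tracking structure, or NULL. */
    static struct dpci_sketch *get_dpci(void)
    {
        struct dpci_sketch *cur = atomic_load(&dpci);

        if ( cur == NULL )
        {
            struct dpci_sketch *fresh = malloc(sizeof(*fresh));
            if ( fresh == NULL )
                return NULL;
            memset(fresh, 0, sizeof(*fresh));

            /* Publish: only one caller's compare-exchange can succeed. */
            struct dpci_sketch *expected = NULL;
            if ( !atomic_compare_exchange_strong(&dpci, &expected, fresh) )
                free(fresh);          /* lost the race; use the winner's copy */

            cur = atomic_load(&dpci);
        }
        return cur;
    }

    int main(void)
    {
        struct dpci_sketch *a = get_dpci();
        struct dpci_sketch *b = get_dpci();
        return (a != NULL && a == b) ? 0 : 1;   /* same structure both times */
    }

Either way the race resolves, both callers proceed with the single installed structure, so no lock is needed on this path.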
@@ -76,31 +90,31 @@ int pt_irq_create_bind_vtd(
 
     return 0;
 }
+
 int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
 {
     uint32_t device, intx;
     uint32_t link, isa_irq;
-    struct hvm_irq *hvm_irq;
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
 
-    if ( !vtd_enabled || (d == dom0) ||
-         !d->arch.hvm_domain.irq.mirq[mirq].valid )
+    if ( !vtd_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
+         !hvm_irq->dpci->mirq[mirq].valid )
         return 0;
 
-    device = d->arch.hvm_domain.irq.mirq[mirq].device;
-    intx = d->arch.hvm_domain.irq.mirq[mirq].intx;
+    device = hvm_irq->dpci->mirq[mirq].device;
+    intx = hvm_irq->dpci->mirq[mirq].intx;
     link = hvm_pci_intx_link(device, intx);
-    hvm_irq = &d->arch.hvm_domain.irq;
     isa_irq = hvm_irq->pci_link.route[link];
 
-    if ( !d->arch.hvm_domain.irq.girq[isa_irq].valid )
+    if ( !hvm_irq->dpci->girq[isa_irq].valid )
     {
-        d->arch.hvm_domain.irq.girq[isa_irq].valid = 1;
-        d->arch.hvm_domain.irq.girq[isa_irq].device = device;
-        d->arch.hvm_domain.irq.girq[isa_irq].intx = intx;
-        d->arch.hvm_domain.irq.girq[isa_irq].machine_gsi = mirq;
+        hvm_irq->dpci->girq[isa_irq].valid = 1;
+        hvm_irq->dpci->girq[isa_irq].device = device;
+        hvm_irq->dpci->girq[isa_irq].intx = intx;
+        hvm_irq->dpci->girq[isa_irq].machine_gsi = mirq;
    }
 
-    if ( !test_and_set_bit(mirq, d->arch.hvm_domain.irq.dirq_mask) )
+    if ( !test_and_set_bit(mirq, hvm_irq->dpci->dirq_mask) )
     {
         vcpu_kick(d->vcpu[0]);
         return 1;
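hvm_do_IRQ_dpci() is the producer half of the dirq_mask hand-off: test_and_set_bit() returning zero means this caller flipped the bit from clear to set, so only that caller kicks VCPU0; if the bit was already pending, a kick is already in flight. A self-contained toy demonstrating that kick-once behaviour (the bit primitive below is a non-atomic stand-in, not Xen's):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Toy, non-atomic stand-in for Xen's test_and_set_bit(). */
    static int test_and_set_bit(unsigned int nr, unsigned long *addr)
    {
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        int old = (addr[nr / BITS_PER_LONG] & mask) != 0;
        addr[nr / BITS_PER_LONG] |= mask;
        return old;
    }

    int main(void)
    {
        unsigned long dirq_mask[4] = { 0 };

        /* First assertion of machine IRQ 17: bit flips 0->1, kick VCPU0. */
        if ( !test_and_set_bit(17, dirq_mask) )
            printf("kick vcpu0\n");

        /* Re-assertion before the mask is drained: pending, no extra kick. */
        if ( !test_and_set_bit(17, dirq_mask) )
            printf("kick vcpu0\n");   /* not reached */
        return 0;
    }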
@@ -113,17 +127,19 @@ int hvm_do_IRQ_dpci(struct domain *d, un
 void hvm_dpci_eoi(unsigned int guest_gsi, union vioapic_redir_entry *ent)
 {
     struct domain *d = current->domain;
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     uint32_t device, intx, machine_gsi;
     irq_desc_t *desc;
 
     ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));
 
-    if ( !vtd_enabled || !d->arch.hvm_domain.irq.girq[guest_gsi].valid )
+    if ( !vtd_enabled || (hvm_irq_dpci == NULL) ||
+         !hvm_irq_dpci->girq[guest_gsi].valid )
         return;
 
-    device = d->arch.hvm_domain.irq.girq[guest_gsi].device;
-    intx = d->arch.hvm_domain.irq.girq[guest_gsi].intx;
-    machine_gsi = d->arch.hvm_domain.irq.girq[guest_gsi].machine_gsi;
+    device = hvm_irq_dpci->girq[guest_gsi].device;
+    intx = hvm_irq_dpci->girq[guest_gsi].intx;
+    machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
     gdprintk(XENLOG_INFO, "hvm_dpci_eoi:: device %x intx %x\n",
              device, intx);
     __hvm_pci_intx_deassert(d, device, intx);
@@ -136,15 +152,20 @@ void hvm_dpci_eoi(unsigned int guest_gsi
 
 void iommu_domain_destroy(struct domain *d)
 {
-    struct hvm_domain *hd = &d->arch.hvm_domain;
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     uint32_t i;
 
     if ( !vtd_enabled )
         return;
 
-    for ( i = 0; i < NR_IRQS; i++ )
-        if ( hd->irq.mirq[i].valid )
-            pirq_guest_unbind(d, i);
+    if ( hvm_irq_dpci != NULL )
+    {
+        for ( i = 0; i < NR_IRQS; i++ )
+            if ( hvm_irq_dpci->mirq[i].valid )
+                pirq_guest_unbind(d, i);
+        d->arch.hvm_domain.irq.dpci = NULL;
+        xfree(hvm_irq_dpci);
+    }
 
     iommu_domain_teardown(d);
 }
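iommu_domain_destroy() shows the matching teardown: unbind every still-valid machine IRQ, unlink the pointer from the domain before freeing it so that later NULL checks (as in hvm_do_IRQ_dpci() above) fail cleanly, then xfree(). The same shape in a self-contained sketch; the types and the unbind helper here are illustrative stand-ins, not Xen's:

    #include <stdlib.h>

    #define NR_IRQS 256

    struct mapping { unsigned char valid; };
    struct dpci_sketch { struct mapping mirq[NR_IRQS]; };

    /* Hypothetical stand-in for pirq_guest_unbind(). */
    static void unbind_machine_irq(unsigned int irq) { (void)irq; }

    static void destroy_dpci(struct dpci_sketch **dpci_ptr)
    {
        struct dpci_sketch *dpci = *dpci_ptr;

        if ( dpci == NULL )
            return;                   /* domain never bound a pt device */

        for ( unsigned int i = 0; i < NR_IRQS; i++ )
            if ( dpci->mirq[i].valid )
                unbind_machine_irq(i);

        *dpci_ptr = NULL;             /* unlink first, then free */
        free(dpci);
    }

    int main(void)
    {
        struct dpci_sketch *dpci = calloc(1, sizeof(*dpci));
        if ( dpci == NULL )
            return 1;
        dpci->mirq[5].valid = 1;
        destroy_dpci(&dpci);          /* unbinds IRQ 5; dpci is now NULL */
        destroy_dpci(&dpci);          /* second call is a safe no-op */
        return 0;
    }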
--- a/xen/include/asm-x86/hvm/irq.h	Tue Oct 02 16:28:58 2007 +0100
+++ b/xen/include/asm-x86/hvm/irq.h	Tue Oct 02 16:59:07 2007 +0100
@@ -29,7 +29,7 @@
 #include <asm/hvm/vioapic.h>
 #include <public/hvm/save.h>
 
-struct hvm_irq_mapping {
+struct hvm_irq_dpci_mapping {
     uint8_t valid;
     uint8_t device;
     uint8_t intx;
@@ -39,6 +39,14 @@ struct hvm_irq_mapping {
     };
 };
 
+struct hvm_irq_dpci {
+    /* Machine IRQ to guest device/intx mapping. */
+    struct hvm_irq_dpci_mapping mirq[NR_IRQS];
+    /* Guest IRQ to guest device/intx mapping. */
+    struct hvm_irq_dpci_mapping girq[NR_IRQS];
+    DECLARE_BITMAP(dirq_mask, NR_IRQS);
+};
+
 struct hvm_irq {
     /*
      * Virtual interrupt wires for a single PCI bus.
@@ -99,11 +107,7 @@ struct hvm_irq {
     /* Last VCPU that was delivered a LowestPrio interrupt. */
     u8 round_robin_prev_vcpu;
 
-    /* machine irq to guest device/intx mapping */
-    struct hvm_irq_mapping mirq[NR_IRQS];
-    /* guest irq to guest device/intx mapping */
-    struct hvm_irq_mapping girq[NR_IRQS];
-    DECLARE_BITMAP(dirq_mask, NR_IRQS);
+    struct hvm_irq_dpci *dpci;
};
 
 #define hvm_pci_intx_gsi(dev, intx)  \
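The header change makes the saving concrete: struct hvm_irq_dpci_mapping packs into four bytes (three uint8_t fields plus a one-byte union), so the two NR_IRQS-sized arrays plus the bitmap that used to live inline in struct hvm_irq, and hence in every struct domain, collapse to a single pointer. Assuming NR_IRQS is 256 on this tree (an assumption; the patch does not state the value), that is roughly 2 KB per domain. A standalone replica to check the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed value of NR_IRQS; the patch itself does not state it. */
    #define NR_IRQS 256
    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define DECLARE_BITMAP(name, bits) \
        unsigned long name[((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG]

    struct hvm_irq_dpci_mapping {
        uint8_t valid;
        uint8_t device;
        uint8_t intx;
        union {
            uint8_t guest_gsi;
            uint8_t machine_gsi;
        };
    };

    struct hvm_irq_dpci {
        struct hvm_irq_dpci_mapping mirq[NR_IRQS];
        struct hvm_irq_dpci_mapping girq[NR_IRQS];
        DECLARE_BITMAP(dirq_mask, NR_IRQS);
    };

    int main(void)
    {
        /* Bytes that used to sit inline in every struct domain and are
         * now behind a lazily allocated pointer: 2*256*4 + 32 = 2080. */
        printf("sizeof(struct hvm_irq_dpci) = %zu bytes\n",
               sizeof(struct hvm_irq_dpci));
        printf("replaced in struct hvm_irq by one pointer: %zu bytes\n",
               sizeof(void *));
        return 0;
    }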