ia64/xen-unstable

changeset 17411:a8ce3e934abd

Share VT-d code between x86 and IA64

Declare the arch-dependent functions in vtd.h and implement them for x86; an IA64 port can then reuse the common VT-d code by supplying its own implementations.

Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Apr 09 13:35:44 2008 +0100 (2008-04-09)
parents cb1f41538756
children 13cc6b2b8b61
files xen/drivers/passthrough/io.c xen/drivers/passthrough/iommu.c xen/drivers/passthrough/vtd/Makefile xen/drivers/passthrough/vtd/vtd.h xen/drivers/passthrough/vtd/x86/Makefile xen/drivers/passthrough/vtd/x86/vtd.c
line diff
     1.1 --- a/xen/drivers/passthrough/io.c	Wed Apr 09 13:32:21 2008 +0100
     1.2 +++ b/xen/drivers/passthrough/io.c	Wed Apr 09 13:35:44 2008 +0100
     1.3 @@ -25,7 +25,7 @@ static void pt_irq_time_out(void *data)
     1.4  {
     1.5      struct hvm_mirq_dpci_mapping *irq_map = data;
     1.6      unsigned int guest_gsi, machine_gsi = 0;
     1.7 -    struct hvm_irq_dpci *dpci = irq_map->dom->arch.hvm_domain.irq.dpci;
     1.8 +    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(irq_map->dom);
     1.9      struct dev_intx_gsi_link *digl;
    1.10      uint32_t device, intx;
    1.11  
    1.12 @@ -49,7 +49,7 @@ static void pt_irq_time_out(void *data)
    1.13  int pt_irq_create_bind_vtd(
    1.14      struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
    1.15  {
    1.16 -    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
    1.17 +    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
    1.18      uint32_t machine_gsi, guest_gsi;
    1.19      uint32_t device, intx, link;
    1.20      struct dev_intx_gsi_link *digl;
    1.21 @@ -65,11 +65,8 @@ int pt_irq_create_bind_vtd(
    1.22          for ( int i = 0; i < NR_IRQS; i++ )
    1.23              INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
    1.24  
    1.25 -        if ( cmpxchg((unsigned long *)&d->arch.hvm_domain.irq.dpci,
    1.26 -                     0, (unsigned long)hvm_irq_dpci) != 0 )
    1.27 +        if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
    1.28              xfree(hvm_irq_dpci);
    1.29 -
    1.30 -        hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
    1.31      }
    1.32  
    1.33      machine_gsi = pt_irq_bind->machine_irq;
    1.34 @@ -116,7 +113,7 @@ int pt_irq_create_bind_vtd(
    1.35  int pt_irq_destroy_bind_vtd(
    1.36      struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
    1.37  {
    1.38 -    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
    1.39 +    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
    1.40      uint32_t machine_gsi, guest_gsi;
    1.41      uint32_t device, intx, link;
    1.42      struct list_head *digl_list, *tmp;
    1.43 @@ -133,14 +130,15 @@ int pt_irq_destroy_bind_vtd(
    1.44      hvm_irq_dpci->link_cnt[link]--;
    1.45  
    1.46      gdprintk(XENLOG_INFO,
    1.47 -            "pt_irq_destroy_bind_vtd: machine_gsi=%d, guest_gsi=%d, device=%d, intx=%d.\n",
    1.48 -            machine_gsi, guest_gsi, device, intx);
    1.49 -    memset(&hvm_irq_dpci->girq[guest_gsi], 0, sizeof(struct hvm_girq_dpci_mapping));
    1.50 +             "pt_irq_destroy_bind_vtd: machine_gsi=%d "
    1.51 +             "guest_gsi=%d, device=%d, intx=%d.\n",
    1.52 +             machine_gsi, guest_gsi, device, intx);
    1.53 +    memset(&hvm_irq_dpci->girq[guest_gsi], 0,
    1.54 +           sizeof(struct hvm_girq_dpci_mapping));
    1.55  
    1.56      /* clear the mirq info */
    1.57      if ( hvm_irq_dpci->mirq[machine_gsi].valid )
    1.58      {
    1.59 -
    1.60          list_for_each_safe ( digl_list, tmp,
    1.61                  &hvm_irq_dpci->mirq[machine_gsi].digl_list )
    1.62          {
    1.63 @@ -174,10 +172,10 @@ int pt_irq_destroy_bind_vtd(
    1.64  
    1.65  int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
    1.66  {
    1.67 -    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    1.68 +    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
    1.69  
    1.70 -    if ( !iommu_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
    1.71 -         !hvm_irq->dpci->mirq[mirq].valid )
    1.72 +    if ( !iommu_enabled || (d == dom0) || !dpci ||
    1.73 +         !dpci->mirq[mirq].valid )
    1.74          return 0;
    1.75  
    1.76      /*
    1.77 @@ -186,58 +184,18 @@ int hvm_do_IRQ_dpci(struct domain *d, un
    1.78       * this case the guest may not pick up the interrupt (e.g., masked at the
    1.79       * PIC) and we need to detect that.
    1.80       */
    1.81 -    set_bit(mirq, hvm_irq->dpci->dirq_mask);
    1.82 -    set_timer(&hvm_irq->dpci->hvm_timer[irq_to_vector(mirq)],
    1.83 +    set_bit(mirq, dpci->dirq_mask);
    1.84 +    set_timer(&dpci->hvm_timer[irq_to_vector(mirq)],
    1.85                NOW() + PT_IRQ_TIME_OUT);
    1.86      vcpu_kick(d->vcpu[0]);
    1.87  
    1.88      return 1;
    1.89  }
    1.90  
    1.91 -static void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
    1.92 -{
    1.93 -    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    1.94 -    struct hvm_irq_dpci *dpci = hvm_irq->dpci;
    1.95 -    struct dev_intx_gsi_link *digl, *tmp;
    1.96 -    int i;
    1.97 -
    1.98 -    ASSERT(isairq < NR_ISAIRQS);
    1.99 -    if ( !iommu_enabled || !dpci ||
   1.100 -         !test_bit(isairq, dpci->isairq_map) )
   1.101 -        return;
   1.102 -
   1.103 -    /* Multiple mirq may be mapped to one isa irq */
   1.104 -    for ( i = 0; i < NR_IRQS; i++ )
   1.105 -    {
   1.106 -        if ( !dpci->mirq[i].valid )
   1.107 -            continue;
   1.108 -
   1.109 -        list_for_each_entry_safe ( digl, tmp,
   1.110 -            &dpci->mirq[i].digl_list, list )
   1.111 -        {
   1.112 -            if ( hvm_irq->pci_link.route[digl->link] == isairq )
   1.113 -            {
   1.114 -                hvm_pci_intx_deassert(d, digl->device, digl->intx);
   1.115 -                spin_lock(&dpci->dirq_lock);
   1.116 -                if ( --dpci->mirq[i].pending == 0 )
   1.117 -                {
   1.118 -                    spin_unlock(&dpci->dirq_lock);
   1.119 -                    gdprintk(XENLOG_INFO VTDPREFIX,
   1.120 -                             "hvm_dpci_isairq_eoi:: mirq = %x\n", i);
   1.121 -                    stop_timer(&dpci->hvm_timer[irq_to_vector(i)]);
   1.122 -                    pirq_guest_eoi(d, i);
   1.123 -                }
   1.124 -                else
   1.125 -                    spin_unlock(&dpci->dirq_lock);
   1.126 -            }
   1.127 -        }
   1.128 -    }
   1.129 -}
   1.130 -
   1.131  void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
   1.132                    union vioapic_redir_entry *ent)
   1.133  {
   1.134 -    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
   1.135 +    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
   1.136      uint32_t device, intx, machine_gsi;
   1.137  
   1.138      if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
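
The io.c hunks above replace every direct d->arch.hvm_domain.irq.dpci dereference with the new accessor. A minimal sketch of the resulting caller pattern, using only names introduced by this patch (the wrapper function itself is illustrative, not from the changeset):

    /* Illustrative only: read the dpci state through the accessor and
     * bail out when no passthrough state is attached, exactly as
     * hvm_do_IRQ_dpci() now does. */
    static int example_dpci_reader(struct domain *d, unsigned int mirq)
    {
        struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);

        if ( !iommu_enabled || (d == dom0) || !dpci ||
             !dpci->mirq[mirq].valid )
            return 0;

        /* ... operate on dpci->mirq[mirq] ... */
        return 1;
    }

Note the setter's convention: domain_set_irq_dpci() returns 0 on failure, which is why pt_irq_create_bind_vtd() above frees its freshly allocated hvm_irq_dpci when it sees 0.
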
     2.1 --- a/xen/drivers/passthrough/iommu.c	Wed Apr 09 13:32:21 2008 +0100
     2.2 +++ b/xen/drivers/passthrough/iommu.c	Wed Apr 09 13:35:44 2008 +0100
     2.3 @@ -58,7 +58,7 @@ int assign_device(struct domain *d, u8 b
     2.4  
     2.5  void iommu_domain_destroy(struct domain *d)
     2.6  {
     2.7 -    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     2.8 +    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
     2.9      uint32_t i;
    2.10      struct hvm_iommu *hd  = domain_hvm_iommu(d);
    2.11      struct list_head *ioport_list, *digl_list, *tmp;
     3.1 --- a/xen/drivers/passthrough/vtd/Makefile	Wed Apr 09 13:32:21 2008 +0100
     3.2 +++ b/xen/drivers/passthrough/vtd/Makefile	Wed Apr 09 13:35:44 2008 +0100
     3.3 @@ -1,3 +1,5 @@
     3.4 +subdir-$(x86) += x86
     3.5 +
     3.6  obj-y += iommu.o
     3.7  obj-y += dmar.o
     3.8  obj-y += utils.o
     4.1 --- a/xen/drivers/passthrough/vtd/vtd.h	Wed Apr 09 13:32:21 2008 +0100
     4.2 +++ b/xen/drivers/passthrough/vtd/vtd.h	Wed Apr 09 13:35:44 2008 +0100
     4.3 @@ -42,4 +42,13 @@ struct IO_APIC_route_remap_entry {
     4.4      };
     4.5  };
     4.6  
     4.7 +unsigned int get_clflush_size(void);
     4.8 +u64 alloc_pgtable_maddr(void);
     4.9 +void free_pgtable_maddr(u64 maddr);
    4.10 +void *map_vtd_domain_page(u64 maddr);
    4.11 +void unmap_vtd_domain_page(void *va);
    4.12 +
    4.13 +void iommu_flush_cache_entry(struct iommu *iommu, void *addr);
    4.14 +void iommu_flush_cache_page(struct iommu *iommu, void *addr);
    4.15 +
    4.16  #endif // _VTD_H_
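
These declarations are the arch-dependent surface the shared VT-d code programs against: page-table page allocation, transient mappings of those pages, and cache flushes for IOMMUs that do not snoop. A hedged sketch of how a common-code path might string them together (the function below is hypothetical; only the helper names come from the header above):

    /* Hypothetical common-code usage: allocate a page-table page, edit
     * an entry through a transient mapping, and flush the entry so a
     * non-coherent IOMMU observes the update. */
    static u64 example_pgtable_setup(struct iommu *iommu, u64 target_maddr)
    {
        u64 maddr = alloc_pgtable_maddr();   /* zeroed and pre-flushed */
        struct dma_pte *pte;

        if ( maddr == 0 )
            return 0;

        pte = map_vtd_domain_page(maddr);    /* arch-specific mapping */
        dma_set_pte_addr(pte[0], target_maddr);
        dma_set_pte_readable(pte[0]);
        iommu_flush_cache_entry(iommu, &pte[0]);
        unmap_vtd_domain_page(pte);

        return maddr;
    }

On x86 these resolve to map_domain_page() and the domheap allocator, as the new x86/vtd.c below shows; an IA64 build would link against its own definitions instead.
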
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/drivers/passthrough/vtd/x86/Makefile	Wed Apr 09 13:35:44 2008 +0100
     5.3 @@ -0,0 +1,1 @@
     5.4 +obj-y += vtd.o
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/drivers/passthrough/vtd/x86/vtd.c	Wed Apr 09 13:35:44 2008 +0100
     6.3 @@ -0,0 +1,282 @@
     6.4 +/*
     6.5 + * Copyright (c) 2008, Intel Corporation.
     6.6 + *
     6.7 + * This program is free software; you can redistribute it and/or modify it
     6.8 + * under the terms and conditions of the GNU General Public License,
     6.9 + * version 2, as published by the Free Software Foundation.
    6.10 + *
    6.11 + * This program is distributed in the hope it will be useful, but WITHOUT
    6.12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    6.13 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    6.14 + * more details.
    6.15 + *
    6.16 + * You should have received a copy of the GNU General Public License along with
    6.17 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    6.18 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    6.19 + *
    6.20 + * Copyright (C) Allen Kay <allen.m.kay@intel.com>
    6.21 + * Copyright (C) Weidong Han <weidong.han@intel.com>
    6.22 + */
    6.23 +
    6.24 +#include <xen/sched.h>
    6.25 +#include <xen/domain_page.h>
    6.26 +#include <xen/iommu.h>
    6.27 +#include "../iommu.h"
    6.28 +#include "../dmar.h"
    6.29 +#include "../vtd.h"
    6.30 +
    6.31 +void *map_vtd_domain_page(u64 maddr)
    6.32 +{
    6.33 +    return map_domain_page(maddr >> PAGE_SHIFT_4K);
    6.34 +}
    6.35 +
    6.36 +void unmap_vtd_domain_page(void *va)
    6.37 +{
    6.38 +    unmap_domain_page(va);
    6.39 +}
    6.40 +
    6.41 +void iommu_set_pgd(struct domain *d)
    6.42 +{
    6.43 +    struct hvm_iommu *hd  = domain_hvm_iommu(d);
    6.44 +    unsigned long p2m_table;
    6.45 +    int level = agaw_to_level(hd->agaw);
    6.46 +    l3_pgentry_t *l3e;
    6.47 +    unsigned long flags;
    6.48 +
    6.49 +    p2m_table = mfn_x(pagetable_get_mfn(d->arch.phys_table));
    6.50 +
    6.51 +#if CONFIG_PAGING_LEVELS == 3
    6.52 +    {
    6.53 +        struct dma_pte *pte = NULL, *pgd_vaddr = NULL, *pmd_vaddr = NULL;
    6.54 +        int i;
    6.55 +        u64 pmd_maddr;
    6.56 +
    6.57 +        spin_lock_irqsave(&hd->mapping_lock, flags);
    6.58 +        hd->pgd_maddr = alloc_pgtable_maddr();
    6.59 +        if ( hd->pgd_maddr == 0 )
    6.60 +        {
    6.61 +            spin_unlock_irqrestore(&hd->mapping_lock, flags);
    6.62 +            gdprintk(XENLOG_ERR VTDPREFIX,
    6.63 +                     "Allocate pgd memory failed!\n");
    6.64 +            return;
    6.65 +        }
    6.66 +
    6.67 +        pgd_vaddr = map_vtd_domain_page(hd->pgd_maddr);
    6.68 +        l3e = map_domain_page(p2m_table);
    6.69 +        switch ( level )
    6.70 +        {
    6.71 +        case VTD_PAGE_TABLE_LEVEL_3:        /* Weybridge */
    6.72 +            /* We only support 8 entries for the PAE L3 p2m table */
    6.73 +            for ( i = 0; i < 8 ; i++ )
    6.74 +            {
    6.75 +                /* Don't create new L2 entry, use ones from p2m table */
    6.76 +                pgd_vaddr[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW;
    6.77 +            }
    6.78 +            break;
    6.79 +
    6.80 +        case VTD_PAGE_TABLE_LEVEL_4:        /* Stoakley */
    6.81 +            /* We allocate one more page for the top vtd page table. */
    6.82 +            pmd_maddr = alloc_pgtable_maddr();
    6.83 +            if ( pmd_maddr == 0 )
    6.84 +            {
    6.85 +                unmap_vtd_domain_page(pgd_vaddr);
    6.86 +                unmap_domain_page(l3e);
    6.87 +                spin_unlock_irqrestore(&hd->mapping_lock, flags);
    6.88 +                gdprintk(XENLOG_ERR VTDPREFIX,
    6.89 +                         "Allocate pmd memory failed!\n");
    6.90 +                return;
    6.91 +            }
    6.92 +
    6.93 +            pte = &pgd_vaddr[0];
    6.94 +            dma_set_pte_addr(*pte, pmd_maddr);
    6.95 +            dma_set_pte_readable(*pte);
    6.96 +            dma_set_pte_writable(*pte);
    6.97 +
    6.98 +            pmd_vaddr = map_vtd_domain_page(pmd_maddr);
    6.99 +            for ( i = 0; i < 8; i++ )
   6.100 +            {
   6.101 +                /* Don't create new L2 entry, use ones from p2m table */
   6.102 +                pmd_vaddr[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW;
   6.103 +            }
   6.104 +
   6.105 +            unmap_vtd_domain_page(pmd_vaddr);
   6.106 +            break;
   6.107 +        default:
   6.108 +            gdprintk(XENLOG_ERR VTDPREFIX,
   6.109 +                     "iommu_set_pgd:Unsupported p2m table sharing level!\n");
   6.110 +            break;
   6.111 +        }
   6.112 +
   6.113 +        unmap_vtd_domain_page(pgd_vaddr);
   6.114 +        unmap_domain_page(l3e);
   6.115 +        spin_unlock_irqrestore(&hd->mapping_lock, flags);
   6.116 +    }
   6.117 +#elif CONFIG_PAGING_LEVELS == 4
   6.118 +    {
   6.119 +        mfn_t pgd_mfn;
   6.120 +
   6.121 +        spin_lock_irqsave(&hd->mapping_lock, flags);
   6.122 +        hd->pgd_maddr = alloc_pgtable_maddr();
   6.123 +        if ( hd->pgd_maddr == 0 )
   6.124 +        {
   6.125 +            spin_unlock_irqrestore(&hd->mapping_lock, flags);
   6.126 +            gdprintk(XENLOG_ERR VTDPREFIX,
   6.127 +                     "Allocate pgd memory failed!\n");
   6.128 +            return;
   6.129 +        }
   6.130 +
   6.131 +        switch ( level )
   6.132 +        {
   6.133 +        case VTD_PAGE_TABLE_LEVEL_3:
   6.134 +            l3e = map_domain_page(p2m_table);
   6.135 +            if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
   6.136 +            {
   6.137 +                spin_unlock_irqrestore(&hd->mapping_lock, flags);
   6.138 +                gdprintk(XENLOG_ERR VTDPREFIX,
   6.139 +                         "iommu_set_pgd: second level wasn't there\n");
   6.140 +                unmap_domain_page(l3e);
   6.141 +                return;
   6.142 +            }
   6.143 +
   6.144 +            pgd_mfn = _mfn(l3e_get_pfn(*l3e));
   6.145 +            hd->pgd_maddr = mfn_x(pgd_mfn) << PAGE_SHIFT_4K;
   6.146 +            unmap_domain_page(l3e);
   6.147 +            break;
   6.148 +        case VTD_PAGE_TABLE_LEVEL_4:
   6.149 +            pgd_mfn = _mfn(p2m_table);
   6.150 +            hd->pgd_maddr = mfn_x(pgd_mfn) << PAGE_SHIFT_4K;
   6.151 +            break;
   6.152 +        default:
   6.153 +            gdprintk(XENLOG_ERR VTDPREFIX,
   6.154 +                     "iommu_set_pgd:Unsupported p2m table sharing level!\n");
   6.155 +            break;
   6.156 +        }
   6.157 +        spin_unlock_irqrestore(&hd->mapping_lock, flags);
   6.158 +    }
   6.159 +#endif
   6.160 +}
   6.161 +
   6.162 +void iommu_free_pgd(struct domain *d)
   6.163 +{
   6.164 +#if CONFIG_PAGING_LEVELS == 3
   6.165 +    struct hvm_iommu *hd  = domain_hvm_iommu(d);
   6.166 +    int level = agaw_to_level(hd->agaw);
   6.167 +    struct dma_pte *pgd_vaddr = NULL;
   6.168 +
   6.169 +    switch ( level )
   6.170 +    {
   6.171 +    case VTD_PAGE_TABLE_LEVEL_3:
   6.172 +        if ( hd->pgd_maddr != 0 )
   6.173 +        {
   6.174 +            free_pgtable_maddr(hd->pgd_maddr);
   6.175 +            hd->pgd_maddr = 0;
   6.176 +        }
   6.177 +        break;
   6.178 +    case VTD_PAGE_TABLE_LEVEL_4:
   6.179 +        if ( hd->pgd_maddr != 0 )
   6.180 +        {
   6.181 +            pgd_vaddr = (struct dma_pte*)map_vtd_domain_page(hd->pgd_maddr);
   6.182 +            if ( pgd_vaddr[0].val != 0 )
   6.183 +                free_pgtable_maddr(pgd_vaddr[0].val);
   6.184 +            unmap_vtd_domain_page(pgd_vaddr);
   6.185 +            free_pgtable_maddr(hd->pgd_maddr);
   6.186 +            hd->pgd_maddr = 0;
   6.187 +        }
   6.188 +        break;
   6.189 +    default:
   6.190 +        gdprintk(XENLOG_ERR VTDPREFIX,
   6.191 +                 "Unsupported p2m table sharing level!\n");
   6.192 +        break;
   6.193 +    }
   6.194 +#endif
   6.195 +}
   6.196 +
   6.197 +/* Allocate page table, return its machine address */
   6.198 +u64 alloc_pgtable_maddr(void)
   6.199 +{
   6.200 +    struct page_info *pg;
   6.201 +    u64 *vaddr;
   6.202 +    struct acpi_drhd_unit *drhd;
   6.203 +    struct iommu *iommu;
   6.204 +
   6.205 +    pg = alloc_domheap_page(NULL, 0);
   6.206 +    vaddr = map_domain_page(page_to_mfn(pg));
   6.207 +    if ( !vaddr )
   6.208 +        return 0;
   6.209 +    memset(vaddr, 0, PAGE_SIZE);
   6.210 +
   6.211 +    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
   6.212 +    iommu = drhd->iommu;
   6.213 +    iommu_flush_cache_page(iommu, vaddr);
   6.214 +    unmap_domain_page(vaddr);
   6.215 +
   6.216 +    return page_to_maddr(pg);
   6.217 +}
   6.218 +
   6.219 +void free_pgtable_maddr(u64 maddr)
   6.220 +{
   6.221 +    if ( maddr != 0 )
   6.222 +        free_domheap_page(maddr_to_page(maddr));
   6.223 +}
   6.224 +
   6.225 +unsigned int get_clflush_size(void)
   6.226 +{
   6.227 +    return ((cpuid_ebx(1) >> 8) & 0xff) * 8;
   6.228 +}
   6.229 +
   6.230 +struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
   6.231 +{
   6.232 +    if ( !domain )
   6.233 +        return NULL;
   6.234 +
   6.235 +    return domain->arch.hvm_domain.irq.dpci;
   6.236 +}
   6.237 +
   6.238 +int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
   6.239 +{
   6.240 +    if ( !domain || !dpci )
   6.241 +        return 0;
   6.242 +
   6.243 +    domain->arch.hvm_domain.irq.dpci = dpci;
   6.244 +    return 1;
   6.245 +}
   6.246 +
   6.247 +void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
   6.248 +{
   6.249 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
   6.250 +    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
   6.251 +    struct dev_intx_gsi_link *digl, *tmp;
   6.252 +    int i;
   6.253 +
   6.254 +    ASSERT(isairq < NR_ISAIRQS);
   6.255 +    if ( !vtd_enabled || !dpci ||
   6.256 +         !test_bit(isairq, dpci->isairq_map) )
   6.257 +        return;
   6.258 +
   6.259 +    /* Multiple mirq may be mapped to one isa irq */
   6.260 +    for ( i = 0; i < NR_IRQS; i++ )
   6.261 +    {
   6.262 +        if ( !dpci->mirq[i].valid )
   6.263 +            continue;
   6.264 +
   6.265 +        list_for_each_entry_safe ( digl, tmp,
   6.266 +            &dpci->mirq[i].digl_list, list )
   6.267 +        {
   6.268 +            if ( hvm_irq->pci_link.route[digl->link] == isairq )
   6.269 +            {
   6.270 +                hvm_pci_intx_deassert(d, digl->device, digl->intx);
   6.271 +                spin_lock(&dpci->dirq_lock);
   6.272 +                if ( --dpci->mirq[i].pending == 0 )
   6.273 +                {
   6.274 +                    spin_unlock(&dpci->dirq_lock);
   6.275 +                    gdprintk(XENLOG_INFO VTDPREFIX,
   6.276 +                             "hvm_dpci_isairq_eoi:: mirq = %x\n", i);
   6.277 +                    stop_timer(&dpci->hvm_timer[irq_to_vector(i)]);
   6.278 +                    pirq_guest_eoi(d, i);
   6.279 +                }
   6.280 +                else
   6.281 +                    spin_unlock(&dpci->dirq_lock);
   6.282 +            }
   6.283 +        }
   6.284 +    }
   6.285 +}
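
The IA64 half that this interface enables is not part of the changeset, but its shape follows directly: one more vtd.c under an ia64 subdirectory defining the same functions. A hypothetical skeleton of the two dpci accessors, purely to show the shape (the file path and the per-domain field layout are assumptions, not from this patch):

    /* Hypothetical ia64 counterpart (e.g. passthrough/vtd/ia64/vtd.c);
     * not part of this changeset.  Assumes ia64 keeps its dpci pointer
     * in an equivalent per-domain field -- the real name may differ. */
    struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
    {
        if ( !domain )
            return NULL;

        return domain->arch.hvm_domain.irq.dpci;   /* assumed field */
    }

    int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
    {
        if ( !domain || !dpci )
            return 0;

        domain->arch.hvm_domain.irq.dpci = dpci;
        return 1;
    }
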