direct-io.hg

changeset 15529:9ce39dc1425f

allen: added IOAPIC mask checking before unmasking the physical interrupt, renamed iommu_found() to vtd_enabled(), and moved the VT-d interrupt functions into vtd/io.c (see the sketch below the file list)
author root@lweybridge0-64-fc6.sc.intel.com
date Thu Aug 23 13:42:03 2007 -0700 (2007-08-23)
parents 2665a74a1351
children 69ec2ef3d132
files xen/arch/x86/domain.c xen/arch/x86/domctl.c xen/arch/x86/hvm/io.c xen/arch/x86/hvm/vioapic.c xen/arch/x86/hvm/vmx/intr.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vmx/vtd/Makefile xen/arch/x86/hvm/vmx/vtd/io.c xen/arch/x86/hvm/vpic.c xen/arch/x86/io_apic.c xen/arch/x86/irq.c xen/arch/x86/mm.c xen/arch/x86/mm/p2m.c xen/common/grant_table.c xen/common/page_alloc.c xen/include/asm-x86/hvm/io.h xen/include/asm-x86/iommu.h
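
The behavioural core of this changeset is the IOAPIC mask check added to the relocated hvm_dpci_eoi() in vtd/io.c: the physical (machine) interrupt is only ended at guest EOI when the guest's vIOAPIC redirection entry is unmasked, or when the EOI arrives via the vPIC path, which passes no redirection entry. A minimal sketch of that check, reusing the irq_desc/irq_to_vector symbols that appear in this diff (the helper name below is hypothetical):

    /* Sketch only: end the machine IRQ on guest EOI unless the guest has
     * masked the corresponding vIOAPIC redirection entry.  ent == NULL
     * stands for the vPIC EOI path, which carries no redirection entry. */
    static void end_machine_irq_on_eoi(union vioapic_redir_entry *ent,
                                       unsigned int machine_gsi)
    {
        if ( (ent == NULL) || (ent->fields.mask == 0) )
        {
            irq_desc_t *desc = &irq_desc[irq_to_vector(machine_gsi)];
            desc->handler->end(irq_to_vector(machine_gsi));
        }
    }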
line diff
     1.1 --- a/xen/arch/x86/domain.c	Fri Aug 17 17:05:09 2007 -0700
     1.2 +++ b/xen/arch/x86/domain.c	Thu Aug 23 13:42:03 2007 -0700
     1.3 @@ -486,7 +486,7 @@ int arch_domain_create(struct domain *d)
     1.4              virt_to_page(d->shared_info), d, XENSHARE_writable);
     1.5      }
     1.6  
     1.7 -    if ( iommu_found() )
     1.8 +    if ( vtd_enabled() )
     1.9          iommu_domain_init(d);
    1.10  
    1.11      if ( is_hvm_domain(d) )
     2.1 --- a/xen/arch/x86/domctl.c	Fri Aug 17 17:05:09 2007 -0700
     2.2 +++ b/xen/arch/x86/domctl.c	Thu Aug 23 13:42:03 2007 -0700
     2.3 @@ -448,7 +448,7 @@ long arch_do_domctl(
     2.4          bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
     2.5          devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;
     2.6  
     2.7 -        if (iommu_found())
     2.8 +        if (vtd_enabled())
     2.9              ret = assign_device(d, bus, devfn);
    2.10          else {
    2.11              pdev = xmalloc(struct pci_dev);
    2.12 @@ -474,7 +474,7 @@ long arch_do_domctl(
    2.13  
    2.14          bind = &(domctl->u.bind_pt_irq);
    2.15          
    2.16 -        if ( iommu_found() )
    2.17 +        if ( vtd_enabled() )
    2.18              ret = pt_irq_create_bind_vtd(d, bind);
    2.19          else
    2.20              ret = pt_irq_create_bind_neo(bind);
     3.1 --- a/xen/arch/x86/hvm/io.c	Fri Aug 17 17:05:09 2007 -0700
     3.2 +++ b/xen/arch/x86/hvm/io.c	Thu Aug 23 13:42:03 2007 -0700
     3.3 @@ -1003,87 +1003,6 @@ int dpci_ioport_intercept(ioreq_t *p)
     3.4      return ret;
     3.5  }
     3.6  
     3.7 -int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
     3.8 -{
     3.9 -    uint32_t device, intx;
    3.10 -    uint32_t link, isa_irq;
    3.11 -    struct hvm_irq *hvm_irq;
    3.12 -
    3.13 -    if ((d == dom0) || !iommu_found())
    3.14 -        return 0;
    3.15 -
    3.16 -    if (d->arch.hvm_domain.irq.mirq[mirq].valid)
    3.17 -    {
    3.18 -        device = d->arch.hvm_domain.irq.mirq[mirq].device;
    3.19 -        intx = d->arch.hvm_domain.irq.mirq[mirq].intx;
    3.20 -        link = hvm_pci_intx_link(device, intx);
    3.21 -        hvm_irq = &d->arch.hvm_domain.irq;
    3.22 -        isa_irq = hvm_irq->pci_link.route[link];
    3.23 -
    3.24 -        if ( !d->arch.hvm_domain.irq.girq[isa_irq].valid )
    3.25 -        {
    3.26 -            d->arch.hvm_domain.irq.girq[isa_irq].valid = 1;
    3.27 -            d->arch.hvm_domain.irq.girq[isa_irq].device = device;
    3.28 -            d->arch.hvm_domain.irq.girq[isa_irq].intx = intx;
    3.29 -            d->arch.hvm_domain.irq.girq[isa_irq].machine_gsi = mirq;
    3.30 -        }
    3.31 -
    3.32 -        if ( !test_and_set_bit(mirq, d->arch.hvm_domain.irq.dirq_mask) )
    3.33 -        {
    3.34 -            vcpu_kick(d->vcpu[0]);
    3.35 -            return 1;
    3.36 -        }
    3.37 -        else
    3.38 -            dprintk(XENLOG_INFO, "Want to pending mirq, but failed\n");
    3.39 -    }
    3.40 -    return 0;
    3.41 -}
    3.42 -
    3.43 -void hvm_dpci_eoi(unsigned int guest_gsi)
    3.44 -{
    3.45 -    struct domain *d = current->domain;
    3.46 -    uint32_t device, intx, machine_gsi;
    3.47 -    irq_desc_t *desc;
    3.48 -
    3.49 -    if (d->arch.hvm_domain.irq.girq[guest_gsi].valid)
    3.50 -    {
    3.51 -        device = d->arch.hvm_domain.irq.girq[guest_gsi].device;
    3.52 -        intx = d->arch.hvm_domain.irq.girq[guest_gsi].intx;
    3.53 -        machine_gsi = d->arch.hvm_domain.irq.girq[guest_gsi].machine_gsi;
    3.54 -        gdprintk(XENLOG_INFO, "hvm_dpci_eoi:: device %x intx %x\n",
    3.55 -            device, intx);
    3.56 -        hvm_pci_intx_deassert(d, device, intx);
    3.57 -        desc = &irq_desc[irq_to_vector(machine_gsi)];
    3.58 -        desc->handler->end(irq_to_vector(machine_gsi));
    3.59 -    }
    3.60 -}
    3.61 -
    3.62 -int release_devices(struct vcpu *v)
    3.63 -{
    3.64 -    int ret = 0;
    3.65 -    struct domain *d = v->domain;
    3.66 -    struct hvm_domain *hd = &d->arch.hvm_domain;
    3.67 -    uint32_t i;
    3.68 -
    3.69 -    /* unbind irq */
    3.70 -    for (i = 0; i < NR_IRQS; i++) {
    3.71 -        if (hd->irq.mirq[i].valid)
    3.72 -            ret = pirq_guest_unbind(d, i);
    3.73 -    }
    3.74 -    if (iommu_found())
    3.75 -        iommu_domain_teardown(d);
    3.76 -    else {
    3.77 -        struct pci_dev *pdev;
    3.78 -        struct hvm_iommu *iommu = &d->arch.hvm_domain.hvm_iommu;
    3.79 -
    3.80 -        list_for_each_entry(pdev, &(iommu->pdev_list), list) {
    3.81 -            list_del(&(pdev->list));
    3.82 -            xfree(pdev);
    3.83 -        }
    3.84 -    }
    3.85 -    return ret;
    3.86 -}
    3.87 -
    3.88  /*
    3.89   * Local variables:
    3.90   * mode: C
     4.1 --- a/xen/arch/x86/hvm/vioapic.c	Fri Aug 17 17:05:09 2007 -0700
     4.2 +++ b/xen/arch/x86/hvm/vioapic.c	Thu Aug 23 13:42:03 2007 -0700
     4.3 @@ -463,9 +463,10 @@ void vioapic_update_EOI(struct domain *d
     4.4  
     4.5      ent->fields.remote_irr = 0;
     4.6  
     4.7 -    if (iommu_found()) {
     4.8 +    if (vtd_enabled())
     4.9 +    {
    4.10          spin_unlock(&d->arch.hvm_domain.irq_lock);
    4.11 -        hvm_dpci_eoi(gsi);
    4.12 +        hvm_dpci_eoi(gsi, ent);
    4.13          return;
    4.14      }
    4.15  
     5.1 --- a/xen/arch/x86/hvm/vmx/intr.c	Fri Aug 17 17:05:09 2007 -0700
     5.2 +++ b/xen/arch/x86/hvm/vmx/intr.c	Thu Aug 23 13:42:03 2007 -0700
     5.3 @@ -159,7 +159,7 @@ asmlinkage void vmx_intr_assist(void)
     5.4      pt_update_irq(v);
     5.5  
     5.6      /* Pass-through interrupts handling */
     5.7 -    if ( iommu_found() )
     5.8 +    if ( vtd_enabled() )
     5.9      {
    5.10          if  (v->vcpu_id == 0)
    5.11              vmx_dirq_assist(v->domain);
     6.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Aug 17 17:05:09 2007 -0700
     6.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Aug 23 13:42:03 2007 -0700
     6.3 @@ -71,6 +71,7 @@ static int vmx_domain_initialise(struct 
     6.4  static void vmx_domain_destroy(struct domain *d)
     6.5  {
     6.6      vmx_free_vlapic_mapping(d);
     6.7 +    release_devices(d);
     6.8  }
     6.9  
    6.10  static int vmx_vcpu_initialise(struct vcpu *v)
    6.11 @@ -99,8 +100,6 @@ static int vmx_vcpu_initialise(struct vc
    6.12  static void vmx_vcpu_destroy(struct vcpu *v)
    6.13  {
    6.14      vmx_destroy_vmcs(v);
    6.15 -    if (iommu_found())
    6.16 -        release_devices(v);
    6.17  }
    6.18  
    6.19  static int vmx_paging_enabled(struct vcpu *v)
     7.1 --- a/xen/arch/x86/hvm/vmx/vtd/Makefile	Fri Aug 17 17:05:09 2007 -0700
     7.2 +++ b/xen/arch/x86/hvm/vmx/vtd/Makefile	Thu Aug 23 13:42:03 2007 -0700
     7.3 @@ -1,3 +1,4 @@
     7.4  obj-y += intel-iommu.o
     7.5  obj-y += dmar.o
     7.6  obj-y += utils.o
     7.7 +obj-y += io.o
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/xen/arch/x86/hvm/vmx/vtd/io.c	Thu Aug 23 13:42:03 2007 -0700
     8.3 @@ -0,0 +1,130 @@
     8.4 +/*
     8.5 + * Copyright (c) 2006, Intel Corporation.
     8.6 + *
     8.7 + * This program is free software; you can redistribute it and/or modify it
     8.8 + * under the terms and conditions of the GNU General Public License,
     8.9 + * version 2, as published by the Free Software Foundation.
    8.10 + *
    8.11 + * This program is distributed in the hope it will be useful, but WITHOUT
    8.12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    8.13 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    8.14 + * more details.
    8.15 + *
    8.16 + * You should have received a copy of the GNU General Public License along with
    8.17 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    8.18 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    8.19 + *
    8.20 + * Copyright (C) Allen Kay <allen.m.kay@intel.com>
    8.21 + */
    8.22 +
    8.23 +#include <xen/init.h>
    8.24 +#include <xen/config.h>
    8.25 +#include <xen/init.h>
    8.26 +#include <xen/mm.h>
    8.27 +#include <xen/lib.h>
    8.28 +#include <xen/errno.h>
    8.29 +#include <xen/trace.h>
    8.30 +#include <xen/event.h>
    8.31 +#include <xen/hypercall.h>
    8.32 +#include <asm/current.h>
    8.33 +#include <asm/cpufeature.h>
    8.34 +#include <asm/processor.h>
    8.35 +#include <asm/msr.h>
    8.36 +#include <asm/apic.h>
    8.37 +#include <asm/paging.h>
    8.38 +#include <asm/shadow.h>
    8.39 +#include <asm/p2m.h>
    8.40 +#include <asm/hvm/hvm.h>
    8.41 +#include <asm/hvm/support.h>
    8.42 +#include <asm/hvm/vpt.h>
    8.43 +#include <asm/hvm/vpic.h>
    8.44 +#include <asm/hvm/vlapic.h>
    8.45 +#include <public/sched.h>
    8.46 +#include <xen/iocap.h>
    8.47 +#include <public/hvm/ioreq.h>
    8.48 +
    8.49 +int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
    8.50 +{
    8.51 +    uint32_t device, intx;
    8.52 +    uint32_t link, isa_irq;
    8.53 +    struct hvm_irq *hvm_irq;
    8.54 +
    8.55 +    if ((d == dom0) || !vtd_enabled())
    8.56 +        return 0;
    8.57 +
    8.58 +    if (d->arch.hvm_domain.irq.mirq[mirq].valid)
    8.59 +    {
    8.60 +        device = d->arch.hvm_domain.irq.mirq[mirq].device;
    8.61 +        intx = d->arch.hvm_domain.irq.mirq[mirq].intx;
    8.62 +        link = hvm_pci_intx_link(device, intx);
    8.63 +        hvm_irq = &d->arch.hvm_domain.irq;
    8.64 +        isa_irq = hvm_irq->pci_link.route[link];
    8.65 +
    8.66 +        if ( !d->arch.hvm_domain.irq.girq[isa_irq].valid )
    8.67 +        {
    8.68 +            d->arch.hvm_domain.irq.girq[isa_irq].valid = 1;
    8.69 +            d->arch.hvm_domain.irq.girq[isa_irq].device = device;
    8.70 +            d->arch.hvm_domain.irq.girq[isa_irq].intx = intx;
    8.71 +            d->arch.hvm_domain.irq.girq[isa_irq].machine_gsi = mirq;
    8.72 +        }
    8.73 +
    8.74 +        if ( !test_and_set_bit(mirq, d->arch.hvm_domain.irq.dirq_mask) )
    8.75 +        {
    8.76 +            vcpu_kick(d->vcpu[0]);
    8.77 +            return 1;
    8.78 +        }
    8.79 +        else
    8.80 +            dprintk(XENLOG_INFO, "Want to pending mirq, but failed\n");
    8.81 +    }
    8.82 +    return 0;
    8.83 +}
    8.84 +
    8.85 +void hvm_dpci_eoi(unsigned int guest_gsi, union vioapic_redir_entry *ent)
    8.86 +{
    8.87 +    struct domain *d = current->domain;
    8.88 +    uint32_t device, intx, machine_gsi;
    8.89 +    irq_desc_t *desc;
    8.90 +
    8.91 +    if (d->arch.hvm_domain.irq.girq[guest_gsi].valid)
    8.92 +    {
    8.93 +        device = d->arch.hvm_domain.irq.girq[guest_gsi].device;
    8.94 +        intx = d->arch.hvm_domain.irq.girq[guest_gsi].intx;
    8.95 +        machine_gsi = d->arch.hvm_domain.irq.girq[guest_gsi].machine_gsi;
    8.96 +        gdprintk(XENLOG_INFO, "hvm_dpci_eoi:: device %x intx %x\n",
    8.97 +            device, intx);
    8.98 +        hvm_pci_intx_deassert(d, device, intx);
    8.99 +        if ( (ent == NULL) || (ent && ent->fields.mask == 0) ) {
   8.100 +            desc = &irq_desc[irq_to_vector(machine_gsi)];
   8.101 +            desc->handler->end(irq_to_vector(machine_gsi));
   8.102 +        }
   8.103 +    }
   8.104 +}
   8.105 +
   8.106 +int release_devices(struct domain *d)
   8.107 +{
   8.108 +    int ret = 0;
   8.109 +    struct hvm_domain *hd = &d->arch.hvm_domain;
   8.110 +    uint32_t i;
   8.111 +
   8.112 +    if (!vtd_enabled())
   8.113 +        return ret;
   8.114 +
   8.115 +    /* unbind irq */
   8.116 +    for (i = 0; i < NR_IRQS; i++) {
   8.117 +        if (hd->irq.mirq[i].valid)
   8.118 +            ret = pirq_guest_unbind(d, i);
   8.119 +    }
   8.120 +    if (vtd_enabled())
   8.121 +        iommu_domain_teardown(d);
   8.122 +    else {
   8.123 +        struct pci_dev *pdev;
   8.124 +        struct hvm_iommu *iommu = &d->arch.hvm_domain.hvm_iommu;
   8.125 +
   8.126 +        list_for_each_entry(pdev, &(iommu->pdev_list), list) {
   8.127 +            list_del(&(pdev->list));
   8.128 +            xfree(pdev);
   8.129 +        }
   8.130 +    }
   8.131 +    return ret;
   8.132 +}
   8.133 +
     9.1 --- a/xen/arch/x86/hvm/vpic.c	Fri Aug 17 17:05:09 2007 -0700
     9.2 +++ b/xen/arch/x86/hvm/vpic.c	Thu Aug 23 13:42:03 2007 -0700
     9.3 @@ -251,7 +251,7 @@ static void vpic_ioport_write(
     9.4                  vpic->isr &= ~(1 << irq);
     9.5                  if ( cmd == 7 )
     9.6                      vpic->priority_add = (irq + 1) & 7;
     9.7 -                if (iommu_found()) {
     9.8 +                if (vtd_enabled()) {
     9.9                      /* chipset register 0xa0 is i8259 EOI register */
    9.10                      if (( old_addr & 0xa0) == 0xa0 )
    9.11                          irq = irq | 0x8;
    9.12 @@ -304,9 +304,9 @@ static void vpic_ioport_write(
    9.13  
    9.14      vpic_unlock(vpic);
    9.15  
    9.16 -    if (iommu_found() && pic_eoi && irq != 0)
    9.17 +    if (vtd_enabled() && pic_eoi && irq != 0)
    9.18      {
    9.19 -        hvm_dpci_eoi(irq);
    9.20 +        hvm_dpci_eoi(irq, NULL);
    9.21          pic_eoi = 0;
    9.22      }
    9.23  }
    10.1 --- a/xen/arch/x86/io_apic.c	Fri Aug 17 17:05:09 2007 -0700
    10.2 +++ b/xen/arch/x86/io_apic.c	Thu Aug 23 13:42:03 2007 -0700
    10.3 @@ -1421,7 +1421,7 @@ static void mask_and_ack_level_ioapic_ir
    10.4      if ( ioapic_ack_new )
    10.5          return;
    10.6  
    10.7 -    if (iommu_found())
    10.8 +    if (vtd_enabled())
    10.9          write_fake_IO_APIC_vector(irq);
   10.10      else
   10.11          mask_IO_APIC_irq(irq);
   10.12 @@ -1467,7 +1467,7 @@ static void end_level_ioapic_irq (unsign
   10.13      if ( !ioapic_ack_new )
   10.14      {
   10.15          if ( !(irq_desc[IO_APIC_VECTOR(irq)].status & IRQ_DISABLED) ) {
   10.16 -            if (iommu_found())
   10.17 +            if (vtd_enabled())
   10.18                  restore_real_IO_APIC_vector(irq);
   10.19              else
   10.20                  unmask_IO_APIC_irq(irq);
    11.1 --- a/xen/arch/x86/irq.c	Fri Aug 17 17:05:09 2007 -0700
    11.2 +++ b/xen/arch/x86/irq.c	Thu Aug 23 13:42:03 2007 -0700
    11.3 @@ -93,7 +93,7 @@ asmlinkage void do_IRQ(struct cpu_user_r
    11.4      spin_lock(&desc->lock);
    11.5      desc->handler->ack(vector);
    11.6  
    11.7 -    if ( !iommu_found() && do_IRQ_pt(vector) )
    11.8 +    if ( !vtd_enabled() && do_IRQ_pt(vector) )
    11.9          goto out; 
   11.10  
   11.11      if ( likely(desc->status & IRQ_GUEST) )
    12.1 --- a/xen/arch/x86/mm.c	Fri Aug 17 17:05:09 2007 -0700
    12.2 +++ b/xen/arch/x86/mm.c	Thu Aug 23 13:42:03 2007 -0700
    12.3 @@ -2619,7 +2619,7 @@ static int create_grant_va_mapping(
    12.4      if ( !okay )
    12.5              return GNTST_general_error;
    12.6  
    12.7 -    if ( iommu_found() )
    12.8 +    if ( vtd_enabled() )
    12.9          iommu_map_page(d, l1e_get_pfn(nl1e), l1e_get_pfn(nl1e));
   12.10  
   12.11      if ( !paging_mode_refcounts(d) )
   12.12 @@ -2661,7 +2661,7 @@ static int replace_grant_va_mapping(
   12.13          goto out;
   12.14      }
   12.15  
   12.16 -    if ( iommu_found() )
   12.17 +    if ( vtd_enabled() )
   12.18          iommu_unmap_page(v->domain, mfn_to_gfn(d, _mfn(l1e_get_pfn(ol1e))));
   12.19  
   12.20   out:
   12.21 @@ -2775,7 +2775,7 @@ int steal_page(
   12.22      if ( !(memflags & MEMF_no_refcount) )
   12.23          d->tot_pages--;
   12.24      list_del(&page->list);
   12.25 -    if (iommu_found())
   12.26 +    if (vtd_enabled())
   12.27          iommu_unmap_page(d, page_to_mfn(page));
   12.28  
   12.29      spin_unlock(&d->page_alloc_lock);
    13.1 --- a/xen/arch/x86/mm/p2m.c	Fri Aug 17 17:05:09 2007 -0700
    13.2 +++ b/xen/arch/x86/mm/p2m.c	Thu Aug 23 13:42:03 2007 -0700
    13.3 @@ -235,7 +235,7 @@ set_p2m_entry(struct domain *d, unsigned
    13.4      /* Success */
    13.5      rv = 1;
    13.6   
    13.7 -    if (iommu_found() && is_hvm_domain(d) && mfn_valid(mfn))
    13.8 +    if (vtd_enabled() && is_hvm_domain(d) && mfn_valid(mfn))
    13.9          iommu_flush(d, gfn, (u64*)p2m_entry);
   13.10  
   13.11   out:
   13.12 @@ -374,7 +374,7 @@ int p2m_alloc_table(struct domain *d,
   13.13      }
   13.14  
   13.15  #if CONFIG_PAGING_LEVELS >= 3
   13.16 -    if (iommu_found() && is_hvm_domain(d))
   13.17 +    if (vtd_enabled() && is_hvm_domain(d))
   13.18          iommu_set_pgd(d);
   13.19  #endif
   13.20  
    14.1 --- a/xen/common/grant_table.c	Fri Aug 17 17:05:09 2007 -0700
    14.2 +++ b/xen/common/grant_table.c	Thu Aug 23 13:42:03 2007 -0700
    14.3 @@ -949,7 +949,7 @@ gnttab_transfer(
    14.4          if ( unlikely(e->tot_pages++ == 0) )
    14.5              get_knownalive_domain(e);
    14.6          list_add_tail(&page->list, &e->page_list);
    14.7 -        if (iommu_found())
    14.8 +        if (vtd_enabled())
    14.9              iommu_map_page(e, mfn, mfn);
   14.10          page_set_owner(page, e);
   14.11  
    15.1 --- a/xen/common/page_alloc.c	Fri Aug 17 17:05:09 2007 -0700
    15.2 +++ b/xen/common/page_alloc.c	Thu Aug 23 13:42:03 2007 -0700
    15.3 @@ -810,7 +810,7 @@ int assign_pages(
    15.4          pg[i].count_info = PGC_allocated | 1;
    15.5          list_add_tail(&pg[i].list, &d->page_list);
    15.6  
    15.7 -        if (iommu_found() && !is_hvm_domain(d) && (dom0 != NULL))
    15.8 +        if (vtd_enabled() && !is_hvm_domain(d) && (dom0 != NULL))
    15.9              iommu_map_page(d, page_to_mfn(&pg[i]), page_to_mfn(&pg[i]));
   15.10      }
   15.11  
   15.12 @@ -905,7 +905,7 @@ void free_domheap_pages(struct page_info
   15.13          {
   15.14              BUG_ON((pg[i].u.inuse.type_info & PGT_count_mask) != 0);
   15.15              list_del(&pg[i].list);
   15.16 -            if ( iommu_found() && !is_hvm_domain(d) )
   15.17 +            if ( vtd_enabled() && !is_hvm_domain(d) )
   15.18                  iommu_unmap_page(d, page_to_mfn(&pg[i]));
   15.19          }
   15.20  
    16.1 --- a/xen/include/asm-x86/hvm/io.h	Fri Aug 17 17:05:09 2007 -0700
    16.2 +++ b/xen/include/asm-x86/hvm/io.h	Thu Aug 23 13:42:03 2007 -0700
    16.3 @@ -151,7 +151,7 @@ void send_invalidate_req(void);
    16.4  extern void handle_mmio(unsigned long gpa);
    16.5  extern void hvm_interrupt_post(struct vcpu *v, int vector, int type);
    16.6  extern void hvm_io_assist(void);
    16.7 -extern void hvm_dpci_eoi(unsigned int guest_irq);
    16.8 +extern void hvm_dpci_eoi(unsigned int guest_irq, union vioapic_redir_entry *ent);
    16.9  
   16.10  #endif /* __ASM_X86_HVM_IO_H__ */
   16.11  
    17.1 --- a/xen/include/asm-x86/iommu.h	Fri Aug 17 17:05:09 2007 -0700
    17.2 +++ b/xen/include/asm-x86/iommu.h	Thu Aug 23 13:42:03 2007 -0700
    17.3 @@ -29,7 +29,7 @@
    17.4  #include <asm/hvm/vmx/intel-iommu.h>
    17.5  #include <public/hvm/ioreq.h>
    17.6  
    17.7 -#define iommu_found()    (!list_empty(&acpi_drhd_units))
    17.8 +#define vtd_enabled()    (!list_empty(&acpi_drhd_units))
    17.9  #define dev_assigned(d)  (!list_empty(&d->arch.hvm_domain.hvm_iommu.pdev_list))
   17.10  #define domain_hvm_iommu(d)     (&d->arch.hvm_domain.hvm_iommu)
   17.11  #define domain_vmx_iommu(d)     (&d->arch.hvm_domain.hvm_iommu.vmx_iommu)
   17.12 @@ -68,7 +68,7 @@ struct iommu {
   17.13  int iommu_setup(void);
   17.14  int iommu_domain_init(struct domain *d);
   17.15  int assign_device(struct domain *d, u8 bus, u8 devfn);
   17.16 -int release_devices(struct vcpu *v);
   17.17 +int release_devices(struct domain *d);
   17.18  int iommu_map_page(struct domain *d, dma_addr_t gfn, dma_addr_t mfn);
   17.19  int iommu_unmap_page(struct domain *d, dma_addr_t gfn);
   17.20  void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry);
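
With vtd_enabled() exported from this header and release_devices() now taking a struct domain *, callers gate IOMMU work on VT-d presence and perform device/IRQ cleanup at domain (rather than vcpu) destruction, as in the vmx.c hunk above. A sketch of the resulting call pattern, with a hypothetical wrapper name:

    /* Sketch: domain-scoped cleanup after this changeset.  release_devices()
     * returns immediately when vtd_enabled() is false, so the call is safe
     * on systems without VT-d. */
    static void example_domain_destroy(struct domain *d)
    {
        release_devices(d);
    }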