ia64/xen-unstable

changeset 17212:bf8a3fc79093

Move iommu code to arch-generic locations, and also clean up some VT-d code.
Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Mar 17 10:45:24 2008 +0000 (2008-03-17)
parents af33f2054f47
children fea44c1d3e41
files xen/arch/x86/domain.c xen/arch/x86/domctl.c xen/arch/x86/hvm/Makefile xen/arch/x86/hvm/intercept.c xen/arch/x86/hvm/iommu.c xen/arch/x86/irq.c xen/arch/x86/mm/p2m.c xen/drivers/passthrough/Makefile xen/drivers/passthrough/amd/iommu_detect.c xen/drivers/passthrough/amd/iommu_map.c xen/drivers/passthrough/io.c xen/drivers/passthrough/iommu.c xen/drivers/passthrough/vtd/Makefile xen/drivers/passthrough/vtd/dmar.h xen/drivers/passthrough/vtd/extern.h xen/drivers/passthrough/vtd/intremap.c xen/drivers/passthrough/vtd/io.c xen/drivers/passthrough/vtd/iommu.c xen/drivers/passthrough/vtd/iommu.h xen/drivers/passthrough/vtd/qinval.c xen/drivers/passthrough/vtd/utils.c xen/drivers/passthrough/vtd/vtd.h xen/include/asm-x86/fixmap.h xen/include/asm-x86/hvm/domain.h xen/include/asm-x86/hvm/iommu.h xen/include/asm-x86/hvm/vmx/intel-iommu.h xen/include/asm-x86/io_apic.h xen/include/asm-x86/iommu.h xen/include/xen/hvm/iommu.h xen/include/xen/iommu.h
line diff
     1.1 --- a/xen/arch/x86/domain.c	Sun Mar 16 14:11:34 2008 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Mon Mar 17 10:45:24 2008 +0000
     1.3 @@ -46,7 +46,7 @@
     1.4  #include <asm/debugreg.h>
     1.5  #include <asm/msr.h>
     1.6  #include <asm/nmi.h>
     1.7 -#include <asm/iommu.h>
     1.8 +#include <xen/iommu.h>
     1.9  #ifdef CONFIG_COMPAT
    1.10  #include <compat/vcpu.h>
    1.11  #endif
     2.1 --- a/xen/arch/x86/domctl.c	Sun Mar 16 14:11:34 2008 +0000
     2.2 +++ b/xen/arch/x86/domctl.c	Mon Mar 17 10:45:24 2008 +0000
     2.3 @@ -26,7 +26,7 @@
     2.4  #include <asm/hvm/cacheattr.h>
     2.5  #include <asm/processor.h>
     2.6  #include <xsm/xsm.h>
     2.7 -#include <asm/iommu.h>
     2.8 +#include <xen/iommu.h>
     2.9  
    2.10  long arch_do_domctl(
    2.11      struct xen_domctl *domctl,
     3.1 --- a/xen/arch/x86/hvm/Makefile	Sun Mar 16 14:11:34 2008 +0000
     3.2 +++ b/xen/arch/x86/hvm/Makefile	Mon Mar 17 10:45:24 2008 +0000
     3.3 @@ -6,7 +6,6 @@ obj-y += hvm.o
     3.4  obj-y += i8254.o
     3.5  obj-y += intercept.o
     3.6  obj-y += io.o
     3.7 -obj-y += iommu.o
     3.8  obj-y += irq.o
     3.9  obj-y += mtrr.o
    3.10  obj-y += pmtimer.o
     4.1 --- a/xen/arch/x86/hvm/intercept.c	Sun Mar 16 14:11:34 2008 +0000
     4.2 +++ b/xen/arch/x86/hvm/intercept.c	Mon Mar 17 10:45:24 2008 +0000
     4.3 @@ -30,7 +30,7 @@
     4.4  #include <asm/current.h>
     4.5  #include <io_ports.h>
     4.6  #include <xen/event.h>
     4.7 -#include <asm/iommu.h>
     4.8 +#include <xen/iommu.h>
     4.9  
    4.10  extern struct hvm_mmio_handler hpet_mmio_handler;
    4.11  extern struct hvm_mmio_handler vlapic_mmio_handler;
     5.1 --- a/xen/arch/x86/hvm/iommu.c	Sun Mar 16 14:11:34 2008 +0000
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,145 +0,0 @@
     5.4 -/*
     5.5 - * This program is free software; you can redistribute it and/or modify it
     5.6 - * under the terms and conditions of the GNU General Public License,
     5.7 - * version 2, as published by the Free Software Foundation.
     5.8 - *
     5.9 - * This program is distributed in the hope it will be useful, but WITHOUT
    5.10 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    5.11 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    5.12 - * more details.
    5.13 - *
    5.14 - * You should have received a copy of the GNU General Public License along with
    5.15 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    5.16 - * Place - Suite 330, Boston, MA 02111-1307 USA.
    5.17 - */
    5.18 -
    5.19 -#include <xen/init.h>
    5.20 -#include <xen/irq.h>
    5.21 -#include <xen/spinlock.h>
    5.22 -#include <xen/sched.h>
    5.23 -#include <xen/xmalloc.h>
    5.24 -#include <xen/domain_page.h>
    5.25 -#include <asm/delay.h>
    5.26 -#include <asm/string.h>
    5.27 -#include <asm/mm.h>
    5.28 -#include <asm/iommu.h>
    5.29 -#include <asm/hvm/vmx/intel-iommu.h>
    5.30 -
    5.31 -extern struct iommu_ops intel_iommu_ops;
    5.32 -extern struct iommu_ops amd_iommu_ops;
    5.33 -
    5.34 -int iommu_domain_init(struct domain *domain)
    5.35 -{
    5.36 -    struct hvm_iommu *hd = domain_hvm_iommu(domain);
    5.37 -
    5.38 -    spin_lock_init(&hd->mapping_lock);
    5.39 -    spin_lock_init(&hd->iommu_list_lock);
    5.40 -    INIT_LIST_HEAD(&hd->pdev_list);
    5.41 -    INIT_LIST_HEAD(&hd->g2m_ioport_list);
    5.42 -
    5.43 -    if ( !iommu_enabled )
    5.44 -        return 0;
    5.45 -
    5.46 -    switch ( boot_cpu_data.x86_vendor )
    5.47 -    {
    5.48 -    case X86_VENDOR_INTEL:
    5.49 -        hd->platform_ops = &intel_iommu_ops;
    5.50 -        break;
    5.51 -    case X86_VENDOR_AMD:
    5.52 -        hd->platform_ops = &amd_iommu_ops;
    5.53 -        break;
    5.54 -    default:
    5.55 -        BUG();
    5.56 -    }
    5.57 -
    5.58 -    return hd->platform_ops->init(domain);
    5.59 -}
    5.60 -
    5.61 -int assign_device(struct domain *d, u8 bus, u8 devfn)
    5.62 -{
    5.63 -    struct hvm_iommu *hd = domain_hvm_iommu(d);
    5.64 -
    5.65 -    if ( !iommu_enabled || !hd->platform_ops)
    5.66 -        return 0;
    5.67 -
    5.68 -    return hd->platform_ops->assign_device(d, bus, devfn);
    5.69 -}
    5.70 -
    5.71 -void iommu_domain_destroy(struct domain *d)
    5.72 -{
    5.73 -    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
    5.74 -    uint32_t i;
    5.75 -    struct hvm_iommu *hd  = domain_hvm_iommu(d);
    5.76 -    struct list_head *ioport_list, *digl_list, *tmp;
    5.77 -    struct g2m_ioport *ioport;
    5.78 -    struct dev_intx_gsi_link *digl;
    5.79 -
    5.80 -    if ( !iommu_enabled || !hd->platform_ops)
    5.81 -        return;
    5.82 -
    5.83 -    if ( hvm_irq_dpci != NULL )
    5.84 -    {
    5.85 -        for ( i = 0; i < NR_IRQS; i++ )
    5.86 -        {
    5.87 -            if ( !hvm_irq_dpci->mirq[i].valid )
    5.88 -                continue;
    5.89 -
    5.90 -            pirq_guest_unbind(d, i);
    5.91 -            kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
    5.92 -
    5.93 -            list_for_each_safe ( digl_list, tmp,
    5.94 -                                 &hvm_irq_dpci->mirq[i].digl_list )
    5.95 -            {
    5.96 -                digl = list_entry(digl_list,
    5.97 -                                  struct dev_intx_gsi_link, list);
    5.98 -                list_del(&digl->list);
    5.99 -                xfree(digl);
   5.100 -            }
   5.101 -        }
   5.102 -
   5.103 -        d->arch.hvm_domain.irq.dpci = NULL;
   5.104 -        xfree(hvm_irq_dpci);
   5.105 -    }
   5.106 -
   5.107 -    if ( hd )
   5.108 -    {
   5.109 -        list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
   5.110 -        {
   5.111 -            ioport = list_entry(ioport_list, struct g2m_ioport, list);
   5.112 -            list_del(&ioport->list);
   5.113 -            xfree(ioport);
   5.114 -        }
   5.115 -    }
   5.116 -
   5.117 -    return hd->platform_ops->teardown(d);
   5.118 -}
   5.119 -
   5.120 -int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
   5.121 -{
   5.122 -    struct hvm_iommu *hd = domain_hvm_iommu(d);
   5.123 -
   5.124 -    if ( !iommu_enabled || !hd->platform_ops)
   5.125 -        return 0;
   5.126 -
   5.127 -    return hd->platform_ops->map_page(d, gfn, mfn);
   5.128 -}
   5.129 -
   5.130 -int iommu_unmap_page(struct domain *d, unsigned long gfn)
   5.131 -{
   5.132 -    struct hvm_iommu *hd = domain_hvm_iommu(d);
   5.133 -
   5.134 -    if ( !iommu_enabled || !hd->platform_ops)
   5.135 -        return 0;
   5.136 -
   5.137 -    return hd->platform_ops->unmap_page(d, gfn);
   5.138 -}
   5.139 -
   5.140 -void deassign_device(struct domain *d, u8 bus, u8 devfn)
   5.141 -{
   5.142 -    struct hvm_iommu *hd = domain_hvm_iommu(d);
   5.143 -
   5.144 -    if ( !iommu_enabled || !hd->platform_ops)
   5.145 -        return;
   5.146 -
   5.147 -    return hd->platform_ops->reassign_device(d, dom0, bus, devfn);
   5.148 -}
     6.1 --- a/xen/arch/x86/irq.c	Sun Mar 16 14:11:34 2008 +0000
     6.2 +++ b/xen/arch/x86/irq.c	Mon Mar 17 10:45:24 2008 +0000
     6.3 @@ -15,7 +15,7 @@
     6.4  #include <xen/keyhandler.h>
     6.5  #include <xen/compat.h>
     6.6  #include <asm/current.h>
     6.7 -#include <asm/iommu.h>
     6.8 +#include <xen/iommu.h>
     6.9  
    6.10  /* opt_noirqbalance: If true, software IRQ balancing/affinity is disabled. */
    6.11  int opt_noirqbalance = 0;
     7.1 --- a/xen/arch/x86/mm/p2m.c	Sun Mar 16 14:11:34 2008 +0000
     7.2 +++ b/xen/arch/x86/mm/p2m.c	Mon Mar 17 10:45:24 2008 +0000
     7.3 @@ -27,7 +27,7 @@
     7.4  #include <asm/page.h>
     7.5  #include <asm/paging.h>
     7.6  #include <asm/p2m.h>
     7.7 -#include <asm/iommu.h>
     7.8 +#include <xen/iommu.h>
     7.9  
    7.10  /* Debugging and auditing of the P2M code? */
    7.11  #define P2M_AUDIT     0
     8.1 --- a/xen/drivers/passthrough/Makefile	Sun Mar 16 14:11:34 2008 +0000
     8.2 +++ b/xen/drivers/passthrough/Makefile	Mon Mar 17 10:45:24 2008 +0000
     8.3 @@ -1,2 +1,5 @@
     8.4  subdir-$(x86) += vtd
     8.5  subdir-$(x86) += amd
     8.6 +
     8.7 +obj-y += iommu.o
     8.8 +obj-y += io.o
     9.1 --- a/xen/drivers/passthrough/amd/iommu_detect.c	Sun Mar 16 14:11:34 2008 +0000
     9.2 +++ b/xen/drivers/passthrough/amd/iommu_detect.c	Mon Mar 17 10:45:24 2008 +0000
     9.3 @@ -20,7 +20,7 @@
     9.4  
     9.5  #include <xen/config.h>
     9.6  #include <xen/errno.h>
     9.7 -#include <asm/iommu.h>
     9.8 +#include <xen/iommu.h>
     9.9  #include <asm/amd-iommu.h>
    9.10  #include <asm/hvm/svm/amd-iommu-proto.h>
    9.11  #include "../pci-direct.h"
    10.1 --- a/xen/drivers/passthrough/amd/iommu_map.c	Sun Mar 16 14:11:34 2008 +0000
    10.2 +++ b/xen/drivers/passthrough/amd/iommu_map.c	Mon Mar 17 10:45:24 2008 +0000
    10.3 @@ -19,7 +19,7 @@
    10.4   */
    10.5  
    10.6  #include <xen/sched.h>
    10.7 -#include <asm/hvm/iommu.h>
    10.8 +#include <xen/hvm/iommu.h>
    10.9  #include <asm/amd-iommu.h>
   10.10  #include <asm/hvm/svm/amd-iommu-proto.h>
   10.11  
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/xen/drivers/passthrough/io.c	Mon Mar 17 10:45:24 2008 +0000
    11.3 @@ -0,0 +1,272 @@
    11.4 +/*
    11.5 + * Copyright (c) 2006, Intel Corporation.
    11.6 + *
    11.7 + * This program is free software; you can redistribute it and/or modify it
    11.8 + * under the terms and conditions of the GNU General Public License,
    11.9 + * version 2, as published by the Free Software Foundation.
   11.10 + *
   11.11 + * This program is distributed in the hope it will be useful, but WITHOUT
   11.12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11.13 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   11.14 + * more details.
   11.15 + *
   11.16 + * You should have received a copy of the GNU General Public License along with
   11.17 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   11.18 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   11.19 + *
   11.20 + * Copyright (C) Allen Kay <allen.m.kay@intel.com>
   11.21 + * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
   11.22 + */
   11.23 +
   11.24 +#include <xen/event.h>
   11.25 +#include <xen/iommu.h>
   11.26 +
   11.27 +static void pt_irq_time_out(void *data)
   11.28 +{
   11.29 +    struct hvm_mirq_dpci_mapping *irq_map = data;
   11.30 +    unsigned int guest_gsi, machine_gsi = 0;
   11.31 +    struct hvm_irq_dpci *dpci = irq_map->dom->arch.hvm_domain.irq.dpci;
   11.32 +    struct dev_intx_gsi_link *digl;
   11.33 +    uint32_t device, intx;
   11.34 +
   11.35 +    list_for_each_entry ( digl, &irq_map->digl_list, list )
   11.36 +    {
   11.37 +        guest_gsi = digl->gsi;
   11.38 +        machine_gsi = dpci->girq[guest_gsi].machine_gsi;
   11.39 +        device = digl->device;
   11.40 +        intx = digl->intx;
   11.41 +        hvm_pci_intx_deassert(irq_map->dom, device, intx);
   11.42 +    }
   11.43 +
   11.44 +    clear_bit(machine_gsi, dpci->dirq_mask);
   11.45 +    stop_timer(&dpci->hvm_timer[irq_to_vector(machine_gsi)]);
   11.46 +    spin_lock(&dpci->dirq_lock);
   11.47 +    dpci->mirq[machine_gsi].pending = 0;
   11.48 +    spin_unlock(&dpci->dirq_lock);
   11.49 +    pirq_guest_eoi(irq_map->dom, machine_gsi);
   11.50 +}
   11.51 +
   11.52 +int pt_irq_create_bind_vtd(
   11.53 +    struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
   11.54 +{
   11.55 +    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
   11.56 +    uint32_t machine_gsi, guest_gsi;
   11.57 +    uint32_t device, intx, link;
   11.58 +    struct dev_intx_gsi_link *digl;
   11.59 +
   11.60 +    if ( hvm_irq_dpci == NULL )
   11.61 +    {
   11.62 +        hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
   11.63 +        if ( hvm_irq_dpci == NULL )
   11.64 +            return -ENOMEM;
   11.65 +
   11.66 +        memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
   11.67 +        spin_lock_init(&hvm_irq_dpci->dirq_lock);
   11.68 +        for ( int i = 0; i < NR_IRQS; i++ )
   11.69 +            INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
   11.70 +
   11.71 +        if ( cmpxchg((unsigned long *)&d->arch.hvm_domain.irq.dpci,
   11.72 +                     0, (unsigned long)hvm_irq_dpci) != 0 )
   11.73 +            xfree(hvm_irq_dpci);
   11.74 +
   11.75 +        hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
   11.76 +    }
   11.77 +
   11.78 +    machine_gsi = pt_irq_bind->machine_irq;
   11.79 +    device = pt_irq_bind->u.pci.device;
   11.80 +    intx = pt_irq_bind->u.pci.intx;
   11.81 +    guest_gsi = hvm_pci_intx_gsi(device, intx);
   11.82 +    link = hvm_pci_intx_link(device, intx);
   11.83 +    hvm_irq_dpci->link_cnt[link]++;
   11.84 +
   11.85 +    digl = xmalloc(struct dev_intx_gsi_link);
   11.86 +    if ( !digl )
   11.87 +        return -ENOMEM;
   11.88 +
   11.89 +    digl->device = device;
   11.90 +    digl->intx = intx;
   11.91 +    digl->gsi = guest_gsi;
   11.92 +    digl->link = link;
   11.93 +    list_add_tail(&digl->list,
   11.94 +                  &hvm_irq_dpci->mirq[machine_gsi].digl_list);
   11.95 +
   11.96 +    hvm_irq_dpci->girq[guest_gsi].valid = 1;
   11.97 +    hvm_irq_dpci->girq[guest_gsi].device = device;
   11.98 +    hvm_irq_dpci->girq[guest_gsi].intx = intx;
   11.99 +    hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
  11.100 +
  11.101 +    /* Bind the same mirq once in the same domain */
  11.102 +    if ( !hvm_irq_dpci->mirq[machine_gsi].valid )
  11.103 +    {
  11.104 +        hvm_irq_dpci->mirq[machine_gsi].valid = 1;
  11.105 +        hvm_irq_dpci->mirq[machine_gsi].dom = d;
  11.106 +
  11.107 +        init_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)],
  11.108 +                   pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
  11.109 +        /* Deal with gsi for legacy devices */
  11.110 +        pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
  11.111 +    }
  11.112 +
  11.113 +    gdprintk(XENLOG_INFO VTDPREFIX,
  11.114 +             "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
  11.115 +             machine_gsi, device, intx);
  11.116 +    return 0;
  11.117 +}
  11.118 +
  11.119 +int pt_irq_destroy_bind_vtd(
  11.120 +    struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
  11.121 +{
  11.122 +    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
  11.123 +    uint32_t machine_gsi, guest_gsi;
  11.124 +    uint32_t device, intx, link;
  11.125 +    struct list_head *digl_list, *tmp;
  11.126 +    struct dev_intx_gsi_link *digl;
  11.127 +
  11.128 +    if ( hvm_irq_dpci == NULL )
  11.129 +        return 0;
  11.130 +
  11.131 +    machine_gsi = pt_irq_bind->machine_irq;
  11.132 +    device = pt_irq_bind->u.pci.device;
  11.133 +    intx = pt_irq_bind->u.pci.intx;
  11.134 +    guest_gsi = hvm_pci_intx_gsi(device, intx);
  11.135 +    link = hvm_pci_intx_link(device, intx);
  11.136 +    hvm_irq_dpci->link_cnt[link]--;
  11.137 +
  11.138 +    gdprintk(XENLOG_INFO,
  11.139 +            "pt_irq_destroy_bind_vtd: machine_gsi=%d, guest_gsi=%d, device=%d, intx=%d.\n",
  11.140 +            machine_gsi, guest_gsi, device, intx);
  11.141 +    memset(&hvm_irq_dpci->girq[guest_gsi], 0, sizeof(struct hvm_girq_dpci_mapping));
  11.142 +
  11.143 +    /* clear the mirq info */
  11.144 +    if ( hvm_irq_dpci->mirq[machine_gsi].valid )
  11.145 +    {
  11.146 +
  11.147 +        list_for_each_safe ( digl_list, tmp,
  11.148 +                &hvm_irq_dpci->mirq[machine_gsi].digl_list )
  11.149 +        {
  11.150 +            digl = list_entry(digl_list,
  11.151 +                    struct dev_intx_gsi_link, list);
  11.152 +            if ( digl->device == device &&
  11.153 +                 digl->intx   == intx &&
  11.154 +                 digl->link   == link &&
  11.155 +                 digl->gsi    == guest_gsi )
  11.156 +            {
  11.157 +                list_del(&digl->list);
  11.158 +                xfree(digl);
  11.159 +            }
  11.160 +        }
  11.161 +
  11.162 +        if ( list_empty(&hvm_irq_dpci->mirq[machine_gsi].digl_list) )
  11.163 +        {
  11.164 +            pirq_guest_unbind(d, machine_gsi);
  11.165 +            kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
  11.166 +            hvm_irq_dpci->mirq[machine_gsi].dom   = NULL;
  11.167 +            hvm_irq_dpci->mirq[machine_gsi].valid = 0;
  11.168 +        }
  11.169 +    }
  11.170 +
  11.171 +    gdprintk(XENLOG_INFO,
  11.172 +             "XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
  11.173 +             machine_gsi, device, intx);
  11.174 +
  11.175 +    return 0;
  11.176 +}
  11.177 +
  11.178 +int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
  11.179 +{
  11.180 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  11.181 +
  11.182 +    if ( !iommu_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
  11.183 +         !hvm_irq->dpci->mirq[mirq].valid )
  11.184 +        return 0;
  11.185 +
  11.186 +    /*
  11.187 +     * Set a timer here to avoid situations where the IRQ line is shared, and
  11.188 +     * the device belonging to the pass-through guest is not yet active. In
  11.189 +     * this case the guest may not pick up the interrupt (e.g., masked at the
  11.190 +     * PIC) and we need to detect that.
  11.191 +     */
  11.192 +    set_bit(mirq, hvm_irq->dpci->dirq_mask);
  11.193 +    set_timer(&hvm_irq->dpci->hvm_timer[irq_to_vector(mirq)],
  11.194 +              NOW() + PT_IRQ_TIME_OUT);
  11.195 +    vcpu_kick(d->vcpu[0]);
  11.196 +
  11.197 +    return 1;
  11.198 +}
  11.199 +
  11.200 +static void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
  11.201 +{
  11.202 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  11.203 +    struct hvm_irq_dpci *dpci = hvm_irq->dpci;
  11.204 +    struct dev_intx_gsi_link *digl, *tmp;
  11.205 +    int i;
  11.206 +
  11.207 +    ASSERT(isairq < NR_ISAIRQS);
  11.208 +    if ( !iommu_enabled || !dpci ||
  11.209 +         !test_bit(isairq, dpci->isairq_map) )
  11.210 +        return;
  11.211 +
  11.212 +    /* Multiple mirq may be mapped to one isa irq */
  11.213 +    for ( i = 0; i < NR_IRQS; i++ )
  11.214 +    {
  11.215 +        if ( !dpci->mirq[i].valid )
  11.216 +            continue;
  11.217 +
  11.218 +        list_for_each_entry_safe ( digl, tmp,
  11.219 +            &dpci->mirq[i].digl_list, list )
  11.220 +        {
  11.221 +            if ( hvm_irq->pci_link.route[digl->link] == isairq )
  11.222 +            {
  11.223 +                hvm_pci_intx_deassert(d, digl->device, digl->intx);
  11.224 +                spin_lock(&dpci->dirq_lock);
  11.225 +                if ( --dpci->mirq[i].pending == 0 )
  11.226 +                {
  11.227 +                    spin_unlock(&dpci->dirq_lock);
  11.228 +                    gdprintk(XENLOG_INFO VTDPREFIX,
  11.229 +                             "hvm_dpci_isairq_eoi:: mirq = %x\n", i);
  11.230 +                    stop_timer(&dpci->hvm_timer[irq_to_vector(i)]);
  11.231 +                    pirq_guest_eoi(d, i);
  11.232 +                }
  11.233 +                else
  11.234 +                    spin_unlock(&dpci->dirq_lock);
  11.235 +            }
  11.236 +        }
  11.237 +    }
  11.238 +}
  11.239 +
  11.240 +void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
  11.241 +                  union vioapic_redir_entry *ent)
  11.242 +{
  11.243 +    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
  11.244 +    uint32_t device, intx, machine_gsi;
  11.245 +
  11.246 +    if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
  11.247 +         (guest_gsi >= NR_ISAIRQS &&
  11.248 +          !hvm_irq_dpci->girq[guest_gsi].valid) )
  11.249 +        return;
  11.250 +
  11.251 +    if ( guest_gsi < NR_ISAIRQS )
  11.252 +    {
  11.253 +        hvm_dpci_isairq_eoi(d, guest_gsi);
  11.254 +        return;
  11.255 +    }
  11.256 +
  11.257 +    machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
  11.258 +    device = hvm_irq_dpci->girq[guest_gsi].device;
  11.259 +    intx = hvm_irq_dpci->girq[guest_gsi].intx;
  11.260 +    hvm_pci_intx_deassert(d, device, intx);
  11.261 +
  11.262 +    spin_lock(&hvm_irq_dpci->dirq_lock);
  11.263 +    if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
  11.264 +    {
  11.265 +        spin_unlock(&hvm_irq_dpci->dirq_lock);
  11.266 +
  11.267 +        gdprintk(XENLOG_INFO VTDPREFIX,
  11.268 +                 "hvm_dpci_eoi:: mirq = %x\n", machine_gsi);
  11.269 +        stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
  11.270 +        if ( (ent == NULL) || !ent->fields.mask )
  11.271 +            pirq_guest_eoi(d, machine_gsi);
  11.272 +    }
  11.273 +    else
  11.274 +        spin_unlock(&hvm_irq_dpci->dirq_lock);
  11.275 +}
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/xen/drivers/passthrough/iommu.c	Mon Mar 17 10:45:24 2008 +0000
    12.3 @@ -0,0 +1,136 @@
    12.4 +/*
    12.5 + * This program is free software; you can redistribute it and/or modify it
    12.6 + * under the terms and conditions of the GNU General Public License,
    12.7 + * version 2, as published by the Free Software Foundation.
    12.8 + *
    12.9 + * This program is distributed in the hope it will be useful, but WITHOUT
   12.10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12.11 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   12.12 + * more details.
   12.13 + *
   12.14 + * You should have received a copy of the GNU General Public License along with
   12.15 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   12.16 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   12.17 + */
   12.18 +
   12.19 +#include <xen/sched.h>
   12.20 +#include <xen/iommu.h>
   12.21 +
   12.22 +extern struct iommu_ops intel_iommu_ops;
   12.23 +extern struct iommu_ops amd_iommu_ops;
   12.24 +
   12.25 +int iommu_domain_init(struct domain *domain)
   12.26 +{
   12.27 +    struct hvm_iommu *hd = domain_hvm_iommu(domain);
   12.28 +
   12.29 +    spin_lock_init(&hd->mapping_lock);
   12.30 +    spin_lock_init(&hd->iommu_list_lock);
   12.31 +    INIT_LIST_HEAD(&hd->pdev_list);
   12.32 +    INIT_LIST_HEAD(&hd->g2m_ioport_list);
   12.33 +
   12.34 +    if ( !iommu_enabled )
   12.35 +        return 0;
   12.36 +
   12.37 +    switch ( boot_cpu_data.x86_vendor )
   12.38 +    {
   12.39 +    case X86_VENDOR_INTEL:
   12.40 +        hd->platform_ops = &intel_iommu_ops;
   12.41 +        break;
   12.42 +    case X86_VENDOR_AMD:
   12.43 +        hd->platform_ops = &amd_iommu_ops;
   12.44 +        break;
   12.45 +    default:
   12.46 +        BUG();
   12.47 +    }
   12.48 +
   12.49 +    return hd->platform_ops->init(domain);
   12.50 +}
   12.51 +
   12.52 +int assign_device(struct domain *d, u8 bus, u8 devfn)
   12.53 +{
   12.54 +    struct hvm_iommu *hd = domain_hvm_iommu(d);
   12.55 +
   12.56 +    if ( !iommu_enabled || !hd->platform_ops)
   12.57 +        return 0;
   12.58 +
   12.59 +    return hd->platform_ops->assign_device(d, bus, devfn);
   12.60 +}
   12.61 +
   12.62 +void iommu_domain_destroy(struct domain *d)
   12.63 +{
   12.64 +    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
   12.65 +    uint32_t i;
   12.66 +    struct hvm_iommu *hd  = domain_hvm_iommu(d);
   12.67 +    struct list_head *ioport_list, *digl_list, *tmp;
   12.68 +    struct g2m_ioport *ioport;
   12.69 +    struct dev_intx_gsi_link *digl;
   12.70 +
   12.71 +    if ( !iommu_enabled || !hd->platform_ops)
   12.72 +        return;
   12.73 +
   12.74 +    if ( hvm_irq_dpci != NULL )
   12.75 +    {
   12.76 +        for ( i = 0; i < NR_IRQS; i++ )
   12.77 +        {
   12.78 +            if ( !hvm_irq_dpci->mirq[i].valid )
   12.79 +                continue;
   12.80 +
   12.81 +            pirq_guest_unbind(d, i);
   12.82 +            kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
   12.83 +
   12.84 +            list_for_each_safe ( digl_list, tmp,
   12.85 +                                 &hvm_irq_dpci->mirq[i].digl_list )
   12.86 +            {
   12.87 +                digl = list_entry(digl_list,
   12.88 +                                  struct dev_intx_gsi_link, list);
   12.89 +                list_del(&digl->list);
   12.90 +                xfree(digl);
   12.91 +            }
   12.92 +        }
   12.93 +
   12.94 +        d->arch.hvm_domain.irq.dpci = NULL;
   12.95 +        xfree(hvm_irq_dpci);
   12.96 +    }
   12.97 +
   12.98 +    if ( hd )
   12.99 +    {
  12.100 +        list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
  12.101 +        {
  12.102 +            ioport = list_entry(ioport_list, struct g2m_ioport, list);
  12.103 +            list_del(&ioport->list);
  12.104 +            xfree(ioport);
  12.105 +        }
  12.106 +    }
  12.107 +
  12.108 +    return hd->platform_ops->teardown(d);
  12.109 +}
  12.110 +
  12.111 +int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
  12.112 +{
  12.113 +    struct hvm_iommu *hd = domain_hvm_iommu(d);
  12.114 +
  12.115 +    if ( !iommu_enabled || !hd->platform_ops)
  12.116 +        return 0;
  12.117 +
  12.118 +    return hd->platform_ops->map_page(d, gfn, mfn);
  12.119 +}
  12.120 +
  12.121 +int iommu_unmap_page(struct domain *d, unsigned long gfn)
  12.122 +{
  12.123 +    struct hvm_iommu *hd = domain_hvm_iommu(d);
  12.124 +
  12.125 +    if ( !iommu_enabled || !hd->platform_ops)
  12.126 +        return 0;
  12.127 +
  12.128 +    return hd->platform_ops->unmap_page(d, gfn);
  12.129 +}
  12.130 +
  12.131 +void deassign_device(struct domain *d, u8 bus, u8 devfn)
  12.132 +{
  12.133 +    struct hvm_iommu *hd = domain_hvm_iommu(d);
  12.134 +
  12.135 +    if ( !iommu_enabled || !hd->platform_ops)
  12.136 +        return;
  12.137 +
  12.138 +    return hd->platform_ops->reassign_device(d, dom0, bus, devfn);
  12.139 +}
    13.1 --- a/xen/drivers/passthrough/vtd/Makefile	Sun Mar 16 14:11:34 2008 +0000
    13.2 +++ b/xen/drivers/passthrough/vtd/Makefile	Mon Mar 17 10:45:24 2008 +0000
    13.3 @@ -1,6 +1,5 @@
    13.4  obj-y += iommu.o
    13.5  obj-y += dmar.o
    13.6  obj-y += utils.o
    13.7 -obj-y += io.o
    13.8  obj-y += qinval.o
    13.9  obj-y += intremap.o
    14.1 --- a/xen/drivers/passthrough/vtd/dmar.h	Sun Mar 16 14:11:34 2008 +0000
    14.2 +++ b/xen/drivers/passthrough/vtd/dmar.h	Mon Mar 17 10:45:24 2008 +0000
    14.3 @@ -22,7 +22,7 @@
    14.4  #define _DMAR_H_
    14.5  
    14.6  #include <xen/list.h>
    14.7 -#include <asm/iommu.h>
    14.8 +#include <xen/iommu.h>
    14.9  
   14.10  extern u8 dmar_host_address_width;
   14.11  
    15.1 --- a/xen/drivers/passthrough/vtd/extern.h	Sun Mar 16 14:11:34 2008 +0000
    15.2 +++ b/xen/drivers/passthrough/vtd/extern.h	Mon Mar 17 10:45:24 2008 +0000
    15.3 @@ -42,8 +42,6 @@ int queue_invalidate_iec(struct iommu *i
    15.4  int invalidate_sync(struct iommu *iommu);
    15.5  int iommu_flush_iec_global(struct iommu *iommu);
    15.6  int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx);
    15.7 -void print_iommu_regs(struct acpi_drhd_unit *drhd);
    15.8 -int vtd_hw_check(void);
    15.9  struct iommu * ioapic_to_iommu(unsigned int apic_id);
   15.10  struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id);
   15.11  void clear_fault_bits(struct iommu *iommu);
    16.1 --- a/xen/drivers/passthrough/vtd/intremap.c	Sun Mar 16 14:11:34 2008 +0000
    16.2 +++ b/xen/drivers/passthrough/vtd/intremap.c	Mon Mar 17 10:45:24 2008 +0000
    16.3 @@ -18,28 +18,10 @@
    16.4   * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
    16.5   */
    16.6  
    16.7 -#include <xen/config.h>
    16.8 -#include <xen/lib.h>
    16.9 -#include <xen/init.h>
   16.10  #include <xen/irq.h>
   16.11 -#include <xen/delay.h>
   16.12  #include <xen/sched.h>
   16.13 -#include <xen/acpi.h>
   16.14 -#include <xen/keyhandler.h>
   16.15 -#include <xen/spinlock.h>
   16.16 -#include <asm/io.h>
   16.17 -#include <asm/mc146818rtc.h>
   16.18 -#include <asm/smp.h>
   16.19 -#include <asm/desc.h>
   16.20 -#include <mach_apic.h>
   16.21 -#include <io_ports.h>
   16.22 -
   16.23 -#include <xen/spinlock.h>
   16.24 -#include <xen/xmalloc.h>
   16.25 -#include <xen/domain_page.h>
   16.26 -#include <asm/delay.h>
   16.27 -#include <asm/string.h>
   16.28 -#include <asm/iommu.h>
   16.29 +#include <xen/iommu.h>
   16.30 +#include "iommu.h"
   16.31  #include "dmar.h"
   16.32  #include "vtd.h"
   16.33  #include "../pci-direct.h"
   16.34 @@ -172,7 +154,7 @@ io_apic_read_remap_rte(
   16.35      struct iommu *iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
   16.36      struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
   16.37  
   16.38 -    if ( !iommu || !(ir_ctrl->iremap) )
   16.39 +    if ( !iommu || !ir_ctrl || !(ir_ctrl->iremap) )
   16.40      {
   16.41          *IO_APIC_BASE(apic) = reg;
   16.42          return *(IO_APIC_BASE(apic)+4);
   16.43 @@ -218,7 +200,7 @@ io_apic_write_remap_rte(
   16.44      struct iommu *iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
   16.45      struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
   16.46  
   16.47 -    if ( !iommu || !(ir_ctrl->iremap) )
   16.48 +    if ( !iommu || !ir_ctrl || !(ir_ctrl->iremap) )
   16.49      {
   16.50          *IO_APIC_BASE(apic) = reg;
   16.51          *(IO_APIC_BASE(apic)+4) = value;
    17.1 --- a/xen/drivers/passthrough/vtd/io.c	Sun Mar 16 14:11:34 2008 +0000
    17.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.3 @@ -1,296 +0,0 @@
    17.4 -/*
    17.5 - * Copyright (c) 2006, Intel Corporation.
    17.6 - *
    17.7 - * This program is free software; you can redistribute it and/or modify it
    17.8 - * under the terms and conditions of the GNU General Public License,
    17.9 - * version 2, as published by the Free Software Foundation.
   17.10 - *
   17.11 - * This program is distributed in the hope it will be useful, but WITHOUT
   17.12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   17.13 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   17.14 - * more details.
   17.15 - *
   17.16 - * You should have received a copy of the GNU General Public License along with
   17.17 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   17.18 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   17.19 - *
   17.20 - * Copyright (C) Allen Kay <allen.m.kay@intel.com>
   17.21 - * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
   17.22 - */
   17.23 -
   17.24 -#include <xen/init.h>
   17.25 -#include <xen/config.h>
   17.26 -#include <xen/init.h>
   17.27 -#include <xen/mm.h>
   17.28 -#include <xen/lib.h>
   17.29 -#include <xen/errno.h>
   17.30 -#include <xen/trace.h>
   17.31 -#include <xen/event.h>
   17.32 -#include <xen/hypercall.h>
   17.33 -#include <asm/current.h>
   17.34 -#include <asm/cpufeature.h>
   17.35 -#include <asm/processor.h>
   17.36 -#include <asm/msr.h>
   17.37 -#include <asm/apic.h>
   17.38 -#include <asm/paging.h>
   17.39 -#include <asm/shadow.h>
   17.40 -#include <asm/p2m.h>
   17.41 -#include <asm/hvm/hvm.h>
   17.42 -#include <asm/hvm/support.h>
   17.43 -#include <asm/hvm/vpt.h>
   17.44 -#include <asm/hvm/vpic.h>
   17.45 -#include <asm/hvm/vlapic.h>
   17.46 -#include <public/sched.h>
   17.47 -#include <xen/iocap.h>
   17.48 -#include <public/hvm/ioreq.h>
   17.49 -#include <public/domctl.h>
   17.50 -
   17.51 -static void pt_irq_time_out(void *data)
   17.52 -{
   17.53 -    struct hvm_mirq_dpci_mapping *irq_map = data;
   17.54 -    unsigned int guest_gsi, machine_gsi = 0;
   17.55 -    struct hvm_irq_dpci *dpci = irq_map->dom->arch.hvm_domain.irq.dpci;
   17.56 -    struct dev_intx_gsi_link *digl;
   17.57 -    uint32_t device, intx;
   17.58 -
   17.59 -    list_for_each_entry ( digl, &irq_map->digl_list, list )
   17.60 -    {
   17.61 -        guest_gsi = digl->gsi;
   17.62 -        machine_gsi = dpci->girq[guest_gsi].machine_gsi;
   17.63 -        device = digl->device;
   17.64 -        intx = digl->intx;
   17.65 -        hvm_pci_intx_deassert(irq_map->dom, device, intx);
   17.66 -    }
   17.67 -
   17.68 -    clear_bit(machine_gsi, dpci->dirq_mask);
   17.69 -    stop_timer(&dpci->hvm_timer[irq_to_vector(machine_gsi)]);
   17.70 -    spin_lock(&dpci->dirq_lock);
   17.71 -    dpci->mirq[machine_gsi].pending = 0;
   17.72 -    spin_unlock(&dpci->dirq_lock);
   17.73 -    pirq_guest_eoi(irq_map->dom, machine_gsi);
   17.74 -}
   17.75 -
   17.76 -int pt_irq_create_bind_vtd(
   17.77 -    struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
   17.78 -{
   17.79 -    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
   17.80 -    uint32_t machine_gsi, guest_gsi;
   17.81 -    uint32_t device, intx, link;
   17.82 -    struct dev_intx_gsi_link *digl;
   17.83 -
   17.84 -    if ( hvm_irq_dpci == NULL )
   17.85 -    {
   17.86 -        hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
   17.87 -        if ( hvm_irq_dpci == NULL )
   17.88 -            return -ENOMEM;
   17.89 -
   17.90 -        memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
   17.91 -        spin_lock_init(&hvm_irq_dpci->dirq_lock);
   17.92 -        for ( int i = 0; i < NR_IRQS; i++ )
   17.93 -            INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
   17.94 -
   17.95 -        if ( cmpxchg((unsigned long *)&d->arch.hvm_domain.irq.dpci,
   17.96 -                     0, (unsigned long)hvm_irq_dpci) != 0 )
   17.97 -            xfree(hvm_irq_dpci);
   17.98 -
   17.99 -        hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
  17.100 -    }
  17.101 -
  17.102 -    machine_gsi = pt_irq_bind->machine_irq;
  17.103 -    device = pt_irq_bind->u.pci.device;
  17.104 -    intx = pt_irq_bind->u.pci.intx;
  17.105 -    guest_gsi = hvm_pci_intx_gsi(device, intx);
  17.106 -    link = hvm_pci_intx_link(device, intx);
  17.107 -    hvm_irq_dpci->link_cnt[link]++;
  17.108 -
  17.109 -    digl = xmalloc(struct dev_intx_gsi_link);
  17.110 -    if ( !digl )
  17.111 -        return -ENOMEM;
  17.112 -
  17.113 -    digl->device = device;
  17.114 -    digl->intx = intx;
  17.115 -    digl->gsi = guest_gsi;
  17.116 -    digl->link = link;
  17.117 -    list_add_tail(&digl->list,
  17.118 -                  &hvm_irq_dpci->mirq[machine_gsi].digl_list);
  17.119 -
  17.120 -    hvm_irq_dpci->girq[guest_gsi].valid = 1;
  17.121 -    hvm_irq_dpci->girq[guest_gsi].device = device;
  17.122 -    hvm_irq_dpci->girq[guest_gsi].intx = intx;
  17.123 -    hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
  17.124 -
  17.125 -    /* Bind the same mirq once in the same domain */
  17.126 -    if ( !hvm_irq_dpci->mirq[machine_gsi].valid )
  17.127 -    {
  17.128 -        hvm_irq_dpci->mirq[machine_gsi].valid = 1;
  17.129 -        hvm_irq_dpci->mirq[machine_gsi].dom = d;
  17.130 -
  17.131 -        init_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)],
  17.132 -                   pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
  17.133 -        /* Deal with gsi for legacy devices */
  17.134 -        pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
  17.135 -    }
  17.136 -
  17.137 -    gdprintk(XENLOG_INFO VTDPREFIX,
  17.138 -             "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
  17.139 -             machine_gsi, device, intx);
  17.140 -    return 0;
  17.141 -}
  17.142 -
  17.143 -int pt_irq_destroy_bind_vtd(
  17.144 -    struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
  17.145 -{
  17.146 -    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
  17.147 -    uint32_t machine_gsi, guest_gsi;
  17.148 -    uint32_t device, intx, link;
  17.149 -    struct list_head *digl_list, *tmp;
  17.150 -    struct dev_intx_gsi_link *digl;
  17.151 -
  17.152 -    if ( hvm_irq_dpci == NULL )
  17.153 -        return 0;
  17.154 -
  17.155 -    machine_gsi = pt_irq_bind->machine_irq;
  17.156 -    device = pt_irq_bind->u.pci.device;
  17.157 -    intx = pt_irq_bind->u.pci.intx;
  17.158 -    guest_gsi = hvm_pci_intx_gsi(device, intx);
  17.159 -    link = hvm_pci_intx_link(device, intx);
  17.160 -    hvm_irq_dpci->link_cnt[link]--;
  17.161 -
  17.162 -    gdprintk(XENLOG_INFO,
  17.163 -            "pt_irq_destroy_bind_vtd: machine_gsi=%d, guest_gsi=%d, device=%d, intx=%d.\n",
  17.164 -            machine_gsi, guest_gsi, device, intx);
  17.165 -    memset(&hvm_irq_dpci->girq[guest_gsi], 0, sizeof(struct hvm_girq_dpci_mapping));
  17.166 -
  17.167 -    /* clear the mirq info */
  17.168 -    if ( hvm_irq_dpci->mirq[machine_gsi].valid )
  17.169 -    {
  17.170 -
  17.171 -        list_for_each_safe ( digl_list, tmp,
  17.172 -                &hvm_irq_dpci->mirq[machine_gsi].digl_list )
  17.173 -        {
  17.174 -            digl = list_entry(digl_list,
  17.175 -                    struct dev_intx_gsi_link, list);
  17.176 -            if ( digl->device == device &&
  17.177 -                 digl->intx   == intx &&
  17.178 -                 digl->link   == link &&
  17.179 -                 digl->gsi    == guest_gsi )
  17.180 -            {
  17.181 -                list_del(&digl->list);
  17.182 -                xfree(digl);
  17.183 -            }
  17.184 -        }
  17.185 -
  17.186 -        if ( list_empty(&hvm_irq_dpci->mirq[machine_gsi].digl_list) )
  17.187 -        {
  17.188 -            pirq_guest_unbind(d, machine_gsi);
  17.189 -            kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
  17.190 -            hvm_irq_dpci->mirq[machine_gsi].dom   = NULL;
  17.191 -            hvm_irq_dpci->mirq[machine_gsi].valid = 0;
  17.192 -        }
  17.193 -    }
  17.194 -
  17.195 -    gdprintk(XENLOG_INFO,
  17.196 -             "XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
  17.197 -             machine_gsi, device, intx);
  17.198 -
  17.199 -    return 0;
  17.200 -}
  17.201 -
  17.202 -int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
  17.203 -{
  17.204 -    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  17.205 -
  17.206 -    if ( !iommu_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
  17.207 -         !hvm_irq->dpci->mirq[mirq].valid )
  17.208 -        return 0;
  17.209 -
  17.210 -    /*
  17.211 -     * Set a timer here to avoid situations where the IRQ line is shared, and
  17.212 -     * the device belonging to the pass-through guest is not yet active. In
  17.213 -     * this case the guest may not pick up the interrupt (e.g., masked at the
  17.214 -     * PIC) and we need to detect that.
  17.215 -     */
  17.216 -    set_bit(mirq, hvm_irq->dpci->dirq_mask);
  17.217 -    set_timer(&hvm_irq->dpci->hvm_timer[irq_to_vector(mirq)],
  17.218 -              NOW() + PT_IRQ_TIME_OUT);
  17.219 -    vcpu_kick(d->vcpu[0]);
  17.220 -
  17.221 -    return 1;
  17.222 -}
  17.223 -
  17.224 -static void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
  17.225 -{
  17.226 -    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  17.227 -    struct hvm_irq_dpci *dpci = hvm_irq->dpci;
  17.228 -    struct dev_intx_gsi_link *digl, *tmp;
  17.229 -    int i;
  17.230 -
  17.231 -    ASSERT(isairq < NR_ISAIRQS);
  17.232 -    if ( !iommu_enabled || !dpci ||
  17.233 -         !test_bit(isairq, dpci->isairq_map) )
  17.234 -        return;
  17.235 -
  17.236 -    /* Multiple mirq may be mapped to one isa irq */
  17.237 -    for ( i = 0; i < NR_IRQS; i++ )
  17.238 -    {
  17.239 -        if ( !dpci->mirq[i].valid )
  17.240 -            continue;
  17.241 -
  17.242 -        list_for_each_entry_safe ( digl, tmp,
  17.243 -            &dpci->mirq[i].digl_list, list )
  17.244 -        {
  17.245 -            if ( hvm_irq->pci_link.route[digl->link] == isairq )
  17.246 -            {
  17.247 -                hvm_pci_intx_deassert(d, digl->device, digl->intx);
  17.248 -                spin_lock(&dpci->dirq_lock);
  17.249 -                if ( --dpci->mirq[i].pending == 0 )
  17.250 -                {
  17.251 -                    spin_unlock(&dpci->dirq_lock);
  17.252 -                    gdprintk(XENLOG_INFO VTDPREFIX,
  17.253 -                             "hvm_dpci_isairq_eoi:: mirq = %x\n", i);
  17.254 -                    stop_timer(&dpci->hvm_timer[irq_to_vector(i)]);
  17.255 -                    pirq_guest_eoi(d, i);
  17.256 -                }
  17.257 -                else
  17.258 -                    spin_unlock(&dpci->dirq_lock);
  17.259 -            }
  17.260 -        }
  17.261 -    }
  17.262 -}
  17.263 -
  17.264 -void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
  17.265 -                  union vioapic_redir_entry *ent)
  17.266 -{
  17.267 -    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
  17.268 -    uint32_t device, intx, machine_gsi;
  17.269 -
  17.270 -    if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
  17.271 -         (guest_gsi >= NR_ISAIRQS &&
  17.272 -          !hvm_irq_dpci->girq[guest_gsi].valid) )
  17.273 -        return;
  17.274 -
  17.275 -    if ( guest_gsi < NR_ISAIRQS )
  17.276 -    {
  17.277 -        hvm_dpci_isairq_eoi(d, guest_gsi);
  17.278 -        return;
  17.279 -    }
  17.280 -
  17.281 -    machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
  17.282 -    device = hvm_irq_dpci->girq[guest_gsi].device;
  17.283 -    intx = hvm_irq_dpci->girq[guest_gsi].intx;
  17.284 -    hvm_pci_intx_deassert(d, device, intx);
  17.285 -
  17.286 -    spin_lock(&hvm_irq_dpci->dirq_lock);
  17.287 -    if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
  17.288 -    {
  17.289 -        spin_unlock(&hvm_irq_dpci->dirq_lock);
  17.290 -
  17.291 -        gdprintk(XENLOG_INFO VTDPREFIX,
  17.292 -                 "hvm_dpci_eoi:: mirq = %x\n", machine_gsi);
  17.293 -        stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
  17.294 -        if ( (ent == NULL) || !ent->fields.mask )
  17.295 -            pirq_guest_eoi(d, machine_gsi);
  17.296 -    }
  17.297 -    else
  17.298 -        spin_unlock(&hvm_irq_dpci->dirq_lock);
  17.299 -}
    18.1 --- a/xen/drivers/passthrough/vtd/iommu.c	Sun Mar 16 14:11:34 2008 +0000
    18.2 +++ b/xen/drivers/passthrough/vtd/iommu.c	Mon Mar 17 10:45:24 2008 +0000
    18.3 @@ -19,17 +19,12 @@
    18.4   * Copyright (C) Allen Kay <allen.m.kay@intel.com> - adapted to xen
    18.5   */
    18.6  
    18.7 -#include <xen/init.h>
    18.8  #include <xen/irq.h>
    18.9 -#include <xen/spinlock.h>
   18.10  #include <xen/sched.h>
   18.11  #include <xen/xmalloc.h>
   18.12  #include <xen/domain_page.h>
   18.13 -#include <asm/delay.h>
   18.14 -#include <asm/string.h>
   18.15 -#include <asm/mm.h>
   18.16 -#include <asm/iommu.h>
   18.17 -#include <asm/hvm/vmx/intel-iommu.h>
   18.18 +#include <xen/iommu.h>
   18.19 +#include "iommu.h"
   18.20  #include "dmar.h"
   18.21  #include "../pci-direct.h"
   18.22  #include "../pci_regs.h"
   18.23 @@ -74,6 +69,93 @@ static void iommu_domid_release(struct d
   18.24      }
   18.25  }
   18.26  
   18.27 +static struct intel_iommu *alloc_intel_iommu(void)
   18.28 +{
   18.29 +    struct intel_iommu *intel;
   18.30 +
   18.31 +    intel = xmalloc(struct intel_iommu);
   18.32 +    if ( !intel )
   18.33 +    {
   18.34 +        gdprintk(XENLOG_ERR VTDPREFIX,
   18.35 +                 "Allocate intel_iommu failed.\n");
   18.36 +        return NULL;
   18.37 +    }
   18.38 +    memset(intel, 0, sizeof(struct intel_iommu));
   18.39 +
   18.40 +    spin_lock_init(&intel->qi_ctrl.qinval_lock);
   18.41 +    spin_lock_init(&intel->qi_ctrl.qinval_poll_lock);
   18.42 +
   18.43 +    spin_lock_init(&intel->ir_ctrl.iremap_lock);
   18.44 +
   18.45 +    return intel;
   18.46 +}
   18.47 +
   18.48 +static void free_intel_iommu(struct intel_iommu *intel)
   18.49 +{
   18.50 +    if ( intel )
   18.51 +    {
   18.52 +        xfree(intel);
   18.53 +        intel = NULL;
   18.54 +    }
   18.55 +}
   18.56 +
   18.57 +struct qi_ctrl *iommu_qi_ctrl(struct iommu *iommu)
   18.58 +{
   18.59 +    if ( !iommu )
   18.60 +        return NULL;
   18.61 +
   18.62 +    if ( !iommu->intel )
   18.63 +    {
   18.64 +        iommu->intel = alloc_intel_iommu();
   18.65 +        if ( !iommu->intel )
   18.66 +        {
   18.67 +            dprintk(XENLOG_ERR VTDPREFIX,
   18.68 +                    "iommu_qi_ctrl: Allocate iommu->intel failed.\n");
   18.69 +            return NULL;
   18.70 +        }
   18.71 +    }
   18.72 +
   18.73 +    return &(iommu->intel->qi_ctrl);
   18.74 +}
   18.75 +
   18.76 +struct ir_ctrl *iommu_ir_ctrl(struct iommu *iommu)
   18.77 +{
   18.78 +    if ( !iommu )
   18.79 +        return NULL;
   18.80 +
   18.81 +    if ( !iommu->intel )
   18.82 +    {
   18.83 +        iommu->intel = alloc_intel_iommu();
   18.84 +        if ( !iommu->intel )
   18.85 +        {
   18.86 +            dprintk(XENLOG_ERR VTDPREFIX,
   18.87 +                    "iommu_ir_ctrl: Allocate iommu->intel failed.\n");
   18.88 +            return NULL;
   18.89 +        }
   18.90 +    }
   18.91 +
   18.92 +    return &(iommu->intel->ir_ctrl);
   18.93 +}
   18.94 +
   18.95 +struct iommu_flush *iommu_get_flush(struct iommu *iommu)
   18.96 +{
   18.97 +    if ( !iommu )
   18.98 +        return NULL;
   18.99 +
  18.100 +    if ( !iommu->intel )
  18.101 +    {
  18.102 +        iommu->intel = alloc_intel_iommu();
  18.103 +        if ( !iommu->intel )
  18.104 +        {
  18.105 +            dprintk(XENLOG_ERR VTDPREFIX,
  18.106 +                    "iommu_get_flush: Allocate iommu->intel failed.\n");
  18.107 +            return NULL;
  18.108 +        }
  18.109 +    }
  18.110 +
  18.111 +    return &(iommu->intel->flush);
  18.112 +}
  18.113 +
  18.114  unsigned int x86_clflush_size;
  18.115  void clflush_cache_range(void *adr, int size)
  18.116  {
  18.117 @@ -756,40 +838,34 @@ static int iommu_page_fault_do_one(struc
  18.118              PCI_SLOT(source_id & 0xFF), PCI_FUNC(source_id & 0xFF), addr,
  18.119              fault_reason, iommu->reg);
  18.120  
  18.121 -    if (fault_reason < 0x20) 
  18.122 +    if ( fault_reason < 0x20 )
  18.123          print_vtd_entries(current->domain, iommu, (source_id >> 8),
  18.124 -                          (source_id & 0xff), (addr >> PAGE_SHIFT)); 
  18.125 +                          (source_id & 0xff), (addr >> PAGE_SHIFT));
  18.126  
  18.127      return 0;
  18.128  }
  18.129  
  18.130  static void iommu_fault_status(u32 fault_status)
  18.131  {
  18.132 -    if (fault_status & DMA_FSTS_PFO)
  18.133 +    if ( fault_status & DMA_FSTS_PFO )
  18.134          dprintk(XENLOG_ERR VTDPREFIX,
  18.135              "iommu_fault_status: Fault Overflow\n");
  18.136 -    else
  18.137 -    if (fault_status & DMA_FSTS_PPF)
  18.138 +    else if ( fault_status & DMA_FSTS_PPF )
  18.139          dprintk(XENLOG_ERR VTDPREFIX,
  18.140              "iommu_fault_status: Primary Pending Fault\n");
  18.141 -    else
  18.142 -    if (fault_status & DMA_FSTS_AFO)
  18.143 +    else if ( fault_status & DMA_FSTS_AFO )
  18.144          dprintk(XENLOG_ERR VTDPREFIX,
  18.145              "iommu_fault_status: Advanced Fault Overflow\n");
  18.146 -    else
  18.147 -    if (fault_status & DMA_FSTS_APF)
  18.148 +    else if ( fault_status & DMA_FSTS_APF )
  18.149          dprintk(XENLOG_ERR VTDPREFIX,
  18.150              "iommu_fault_status: Advanced Pending Fault\n");
  18.151 -    else
  18.152 -    if (fault_status & DMA_FSTS_IQE)
  18.153 +    else if ( fault_status & DMA_FSTS_IQE )
  18.154          dprintk(XENLOG_ERR VTDPREFIX,
  18.155              "iommu_fault_status: Invalidation Queue Error\n");
  18.156 -    else
  18.157 -    if (fault_status & DMA_FSTS_ICE)
  18.158 +    else if ( fault_status & DMA_FSTS_ICE )
  18.159          dprintk(XENLOG_ERR VTDPREFIX,
  18.160              "iommu_fault_status: Invalidation Completion Error\n");
  18.161 -    else
  18.162 -    if (fault_status & DMA_FSTS_ITE)
  18.163 +    else if ( fault_status & DMA_FSTS_ITE )
  18.164          dprintk(XENLOG_ERR VTDPREFIX,
  18.165              "iommu_fault_status: Invalidation Time-out Error\n");
  18.166  }
  18.167 @@ -976,8 +1052,6 @@ struct iommu *iommu_alloc(void *hw_data)
  18.168  {
  18.169      struct acpi_drhd_unit *drhd = (struct acpi_drhd_unit *) hw_data;
  18.170      struct iommu *iommu;
  18.171 -    struct qi_ctrl *qi_ctrl;
  18.172 -    struct ir_ctrl *ir_ctrl;
  18.173  
  18.174      if ( nr_iommus > MAX_IOMMUS )
  18.175      {
  18.176 @@ -1014,12 +1088,7 @@ struct iommu *iommu_alloc(void *hw_data)
  18.177      spin_lock_init(&iommu->lock);
  18.178      spin_lock_init(&iommu->register_lock);
  18.179  
  18.180 -    qi_ctrl = iommu_qi_ctrl(iommu);
  18.181 -    spin_lock_init(&qi_ctrl->qinval_lock);
  18.182 -    spin_lock_init(&qi_ctrl->qinval_poll_lock);
  18.183 -
  18.184 -    ir_ctrl = iommu_ir_ctrl(iommu);
  18.185 -    spin_lock_init(&ir_ctrl->iremap_lock);
  18.186 +    iommu->intel = alloc_intel_iommu();
  18.187  
  18.188      drhd->iommu = iommu;
  18.189      return iommu;
  18.190 @@ -1036,6 +1105,7 @@ static void free_iommu(struct iommu *iom
  18.191          free_xenheap_page((void *)iommu->root_entry);
  18.192      if ( iommu->reg )
  18.193          iounmap(iommu->reg);
  18.194 +    free_intel_iommu(iommu->intel);
  18.195      free_irq(iommu->vector);
  18.196      xfree(iommu);
  18.197  }
  18.198 @@ -1063,7 +1133,7 @@ int intel_iommu_domain_init(struct domai
  18.199          iommu = drhd->iommu ? : iommu_alloc(drhd);
  18.200  
  18.201      /* calculate AGAW */
  18.202 -    if (guest_width > cap_mgaw(iommu->cap))
  18.203 +    if ( guest_width > cap_mgaw(iommu->cap) )
  18.204          guest_width = cap_mgaw(iommu->cap);
  18.205      adjust_width = guestwidth_to_adjustwidth(guest_width);
  18.206      agaw = width_to_agaw(adjust_width);
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/xen/drivers/passthrough/vtd/iommu.h	Mon Mar 17 10:45:24 2008 +0000
    19.3 @@ -0,0 +1,454 @@
    19.4 +/*
    19.5 + * Copyright (c) 2006, Intel Corporation.
    19.6 + *
    19.7 + * This program is free software; you can redistribute it and/or modify it
    19.8 + * under the terms and conditions of the GNU General Public License,
    19.9 + * version 2, as published by the Free Software Foundation.
   19.10 + *
   19.11 + * This program is distributed in the hope it will be useful, but WITHOUT
   19.12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   19.13 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   19.14 + * more details.
   19.15 + *
   19.16 + * You should have received a copy of the GNU General Public License along with
   19.17 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   19.18 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   19.19 + *
   19.20 + * Copyright (C) Ashok Raj <ashok.raj@intel.com>
   19.21 + */
   19.22 +
   19.23 +#ifndef _INTEL_IOMMU_H_
   19.24 +#define _INTEL_IOMMU_H_
   19.25 +
   19.26 +#include <xen/types.h>
   19.27 +
   19.28 +/*
   19.29 + * Intel IOMMU register specification per version 1.0 public spec.
   19.30 + */
   19.31 +
   19.32 +#define    DMAR_VER_REG    0x0    /* Arch version supported by this IOMMU */
   19.33 +#define    DMAR_CAP_REG    0x8    /* Hardware supported capabilities */
   19.34 +#define    DMAR_ECAP_REG    0x10    /* Extended capabilities supported */
   19.35 +#define    DMAR_GCMD_REG    0x18    /* Global command register */
   19.36 +#define    DMAR_GSTS_REG    0x1c    /* Global status register */
   19.37 +#define    DMAR_RTADDR_REG    0x20    /* Root entry table */
   19.38 +#define    DMAR_CCMD_REG    0x28    /* Context command reg */
   19.39 +#define    DMAR_FSTS_REG    0x34    /* Fault Status register */
   19.40 +#define    DMAR_FECTL_REG    0x38    /* Fault control register */
   19.41 +#define    DMAR_FEDATA_REG    0x3c    /* Fault event interrupt data register */
   19.42 +#define    DMAR_FEADDR_REG    0x40    /* Fault event interrupt addr register */
   19.43 +#define    DMAR_FEUADDR_REG 0x44    /* Upper address register */
   19.44 +#define    DMAR_AFLOG_REG    0x58    /* Advanced Fault control */
   19.45 +#define    DMAR_PMEN_REG    0x64    /* Enable Protected Memory Region */
   19.46 +#define    DMAR_PLMBASE_REG 0x68    /* PMRR Low addr */
   19.47 +#define    DMAR_PLMLIMIT_REG 0x6c    /* PMRR low limit */
   19.48 +#define    DMAR_PHMBASE_REG 0x70    /* pmrr high base addr */
   19.49 +#define    DMAR_PHMLIMIT_REG 0x78    /* pmrr high limit */
   19.50 +#define    DMAR_IQH_REG    0x80    /* invalidation queue head */
   19.51 +#define    DMAR_IQT_REG    0x88    /* invalidation queue tail */
   19.52 +#define    DMAR_IQA_REG    0x90    /* invalidation queue addr */
   19.53 +#define    DMAR_IRTA_REG   0xB8    /* intr remap */
   19.54 +
   19.55 +#define OFFSET_STRIDE        (9)
   19.56 +#define dmar_readl(dmar, reg) readl(dmar + reg)
   19.57 +#define dmar_writel(dmar, reg, val) writel(val, dmar + reg)
   19.58 +#define dmar_readq(dmar, reg) ({ \
   19.59 +        u32 lo, hi; \
   19.60 +        lo = dmar_readl(dmar, reg); \
   19.61 +        hi = dmar_readl(dmar, reg + 4); \
   19.62 +        (((u64) hi) << 32) + lo; })
   19.63 +#define dmar_writeq(dmar, reg, val) do {\
   19.64 +        dmar_writel(dmar, reg, (u32)val); \
   19.65 +        dmar_writel(dmar, reg + 4, (u32)((u64) val >> 32)); \
   19.66 +    } while (0)
   19.67 +
   19.68 +#define VER_MAJOR(v)        (((v) & 0xf0) >> 4)
   19.69 +#define VER_MINOR(v)        ((v) & 0x0f)
   19.70 +
   19.71 +/*
   19.72 + * Decoding Capability Register
   19.73 + */
   19.74 +#define cap_read_drain(c)    (((c) >> 55) & 1)
   19.75 +#define cap_write_drain(c)    (((c) >> 54) & 1)
   19.76 +#define cap_max_amask_val(c)    (((c) >> 48) & 0x3f)
   19.77 +#define cap_num_fault_regs(c)    ((((c) >> 40) & 0xff) + 1)
   19.78 +#define cap_pgsel_inv(c)       (((c) >> 39) & 1)
   19.79 +
   19.80 +#define cap_super_page_val(c)    (((c) >> 34) & 0xf)
   19.81 +#define cap_super_offset(c)    (((find_first_bit(&cap_super_page_val(c), 4)) \
   19.82 +                    * OFFSET_STRIDE) + 21)
   19.83 +
   19.84 +#define cap_fault_reg_offset(c)    ((((c) >> 24) & 0x3ff) * 16)
   19.85 +
   19.86 +#define cap_isoch(c)        (((c) >> 23) & 1)
   19.87 +#define cap_qos(c)        (((c) >> 22) & 1)
   19.88 +#define cap_mgaw(c)        ((((c) >> 16) & 0x3f) + 1)
   19.89 +#define cap_sagaw(c)        (((c) >> 8) & 0x1f)
   19.90 +#define cap_caching_mode(c)    (((c) >> 7) & 1)
   19.91 +#define cap_phmr(c)        (((c) >> 6) & 1)
   19.92 +#define cap_plmr(c)        (((c) >> 5) & 1)
   19.93 +#define cap_rwbf(c)        (((c) >> 4) & 1)
   19.94 +#define cap_afl(c)        (((c) >> 3) & 1)
   19.95 +#define cap_ndoms(c)        (1 << (4 + 2 * ((c) & 0x7)))
   19.96 +
   19.97 +/*
   19.98 + * Extended Capability Register
   19.99 + */
  19.100 +
  19.101 +#define ecap_niotlb_iunits(e)    ((((e) >> 24) & 0xff) + 1)
  19.102 +#define ecap_iotlb_offset(e)     ((((e) >> 8) & 0x3ff) * 16)
  19.103 +#define ecap_coherent(e)         ((e >> 0) & 0x1)
  19.104 +#define ecap_queued_inval(e)     ((e >> 1) & 0x1)
  19.105 +#define ecap_dev_iotlb(e)        ((e >> 2) & 0x1)
  19.106 +#define ecap_intr_remap(e)       ((e >> 3) & 0x1)
  19.107 +#define ecap_ext_intr(e)         ((e >> 4) & 0x1)
  19.108 +#define ecap_cache_hints(e)      ((e >> 5) & 0x1)
  19.109 +#define ecap_pass_thru(e)        ((e >> 6) & 0x1)
  19.110 +
  19.111 +/* IOTLB_REG */
  19.112 +#define DMA_TLB_FLUSH_GRANU_OFFSET  60
  19.113 +#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
  19.114 +#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
  19.115 +#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
  19.116 +#define DMA_TLB_IIRG(x) (((x) >> 60) & 7) 
  19.117 +#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
  19.118 +#define DMA_TLB_DID(x) (((u64)(x & 0xffff)) << 32)
  19.119 +
  19.120 +#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
  19.121 +#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
  19.122 +#define DMA_TLB_IVT (((u64)1) << 63)
  19.123 +
  19.124 +#define DMA_TLB_IVA_ADDR(x) ((((u64)x) >> 12) << 12)
  19.125 +#define DMA_TLB_IVA_HINT(x) ((((u64)x) & 1) << 6)
  19.126 +
  19.127 +/* GCMD_REG */
  19.128 +#define DMA_GCMD_TE     (((u64)1) << 31)
  19.129 +#define DMA_GCMD_SRTP   (((u64)1) << 30)
  19.130 +#define DMA_GCMD_SFL    (((u64)1) << 29)
  19.131 +#define DMA_GCMD_EAFL   (((u64)1) << 28)
  19.132 +#define DMA_GCMD_WBF    (((u64)1) << 27)
  19.133 +#define DMA_GCMD_QIE    (((u64)1) << 26)
  19.134 +#define DMA_GCMD_IRE    (((u64)1) << 25)
  19.135 +#define DMA_GCMD_SIRTP  (((u64)1) << 24)
  19.136 +#define DMA_GCMD_CFI    (((u64)1) << 23)
  19.137 +
  19.138 +/* GSTS_REG */
  19.139 +#define DMA_GSTS_TES    (((u64)1) << 31)
  19.140 +#define DMA_GSTS_RTPS   (((u64)1) << 30)
  19.141 +#define DMA_GSTS_FLS    (((u64)1) << 29)
  19.142 +#define DMA_GSTS_AFLS   (((u64)1) << 28)
  19.143 +#define DMA_GSTS_WBFS   (((u64)1) << 27)
  19.144 +#define DMA_GSTS_QIES   (((u64)1) <<26)
  19.145 +#define DMA_GSTS_IRES   (((u64)1) <<25)
  19.146 +#define DMA_GSTS_SIRTPS (((u64)1) << 24)
  19.147 +#define DMA_GSTS_CFIS   (((u64)1) <<23)
  19.148 +
  19.149 +/* PMEN_REG */
  19.150 +#define DMA_PMEN_EPM    (((u32)1) << 31)
  19.151 +#define DMA_PMEN_PRS    (((u32)1) << 0)
  19.152 +
  19.153 +/* CCMD_REG */
  19.154 +#define DMA_CCMD_INVL_GRANU_OFFSET  61
  19.155 +#define DMA_CCMD_ICC   (((u64)1) << 63)
  19.156 +#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
  19.157 +#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
  19.158 +#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
  19.159 +#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
  19.160 +#define DMA_CCMD_CIRG(x) ((((u64)3) << 61) & x)
  19.161 +#define DMA_CCMD_MASK_NOBIT 0
  19.162 +#define DMA_CCMD_MASK_1BIT 1
  19.163 +#define DMA_CCMD_MASK_2BIT 2
  19.164 +#define DMA_CCMD_MASK_3BIT 3
  19.165 +#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
  19.166 +#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
  19.167 +
  19.168 +#define DMA_CCMD_CAIG_MASK(x) (((u64)x) & ((u64) 0x3 << 59))
  19.169 +
  19.170 +/* FECTL_REG */
  19.171 +#define DMA_FECTL_IM (((u64)1) << 31)
  19.172 +
  19.173 +/* FSTS_REG */
  19.174 +#define DMA_FSTS_PFO ((u64)1 << 0)
  19.175 +#define DMA_FSTS_PPF ((u64)1 << 1)
  19.176 +#define DMA_FSTS_AFO ((u64)1 << 2)
  19.177 +#define DMA_FSTS_APF ((u64)1 << 3)
  19.178 +#define DMA_FSTS_IQE ((u64)1 << 4)
  19.179 +#define DMA_FSTS_ICE ((u64)1 << 5)
  19.180 +#define DMA_FSTS_ITE ((u64)1 << 6)
  19.181 +#define DMA_FSTS_FAULTS    DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE
  19.182 +#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
  19.183 +
  19.184 +/* FRCD_REG, 32 bits access */
  19.185 +#define DMA_FRCD_F (((u64)1) << 31)
  19.186 +#define dma_frcd_type(d) ((d >> 30) & 1)
  19.187 +#define dma_frcd_fault_reason(c) (c & 0xff)
  19.188 +#define dma_frcd_source_id(c) (c & 0xffff)
  19.189 +#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
  19.190 +
  19.191 +/*
  19.192 + * 0: Present
  19.193 + * 1-11: Reserved
  19.194 + * 12-63: Context Ptr (12 - (haw-1))
  19.195 + * 64-127: Reserved
  19.196 + */
  19.197 +struct root_entry {
  19.198 +    u64    val;
  19.199 +    u64    rsvd1;
  19.200 +};
  19.201 +#define root_present(root)    ((root).val & 1)
  19.202 +#define set_root_present(root) do {(root).val |= 1;} while(0)
  19.203 +#define get_context_addr(root) ((root).val & PAGE_MASK_4K)
  19.204 +#define set_root_value(root, value) \
  19.205 +    do {(root).val |= ((value) & PAGE_MASK_4K);} while(0)
  19.206 +
  19.207 +struct context_entry {
  19.208 +    u64 lo;
  19.209 +    u64 hi;
  19.210 +};
  19.211 +#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
  19.212 +#define context_present(c) ((c).lo & 1)
  19.213 +#define context_fault_disable(c) (((c).lo >> 1) & 1)
  19.214 +#define context_translation_type(c) (((c).lo >> 2) & 3)
  19.215 +#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
  19.216 +#define context_address_width(c) ((c).hi &  7)
  19.217 +#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
  19.218 +
  19.219 +#define context_set_present(c) do {(c).lo |= 1;} while(0)
  19.220 +#define context_clear_present(c) do {(c).lo &= ~1;} while(0)
  19.221 +#define context_set_fault_enable(c) \
  19.222 +    do {(c).lo &= (((u64)-1) << 2) | 1;} while(0)
  19.223 +
  19.224 +#define context_set_translation_type(c, val) do { \
  19.225 +        (c).lo &= (((u64)-1) << 4) | 3; \
  19.226 +        (c).lo |= (val & 3) << 2; \
  19.227 +    } while(0)
  19.228 +#define CONTEXT_TT_MULTI_LEVEL 0
  19.229 +#define CONTEXT_TT_DEV_IOTLB   1
  19.230 +#define CONTEXT_TT_PASS_THRU   2
  19.231 +
  19.232 +#define context_set_address_root(c, val) \
  19.233 +    do {(c).lo &= 0xfff; (c).lo |= (val) & PAGE_MASK_4K ;} while(0)
  19.234 +#define context_set_address_width(c, val) \
  19.235 +    do {(c).hi &= 0xfffffff8; (c).hi |= (val) & 7;} while(0)
  19.236 +#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while(0)
  19.237 +
  19.238 +/* page table handling */
  19.239 +#define LEVEL_STRIDE       (9)
  19.240 +#define LEVEL_MASK         ((1 << LEVEL_STRIDE) - 1)
  19.241 +#define agaw_to_level(val) ((val) + 2)
  19.242 +#define agaw_to_width(val) (30 + val * LEVEL_STRIDE)
  19.243 +#define width_to_agaw(w)   ((w - 30)/LEVEL_STRIDE)
  19.244 +#define level_to_offset_bits(l) (12 + (l - 1) * LEVEL_STRIDE)
  19.245 +#define address_level_offset(addr, level) \
  19.246 +            ((addr >> level_to_offset_bits(level)) & LEVEL_MASK)
  19.247 +#define level_mask(l) (((u64)(-1)) << level_to_offset_bits(l))
  19.248 +#define level_size(l) (1 << level_to_offset_bits(l))
  19.249 +#define align_to_level(addr, l) ((addr + level_size(l) - 1) & level_mask(l))
  19.250 +
  19.251 +/*
  19.252 + * 0: readable
  19.253 + * 1: writable
  19.254 + * 2-6: reserved
  19.255 + * 7: super page
  19.256 + * 8-11: available
  19.257 + * 12-63: Host physcial address
  19.258 + */
  19.259 +struct dma_pte {
  19.260 +    u64 val;
  19.261 +};
  19.262 +#define dma_clear_pte(p)    do {(p).val = 0;} while(0)
  19.263 +#define dma_set_pte_readable(p) do {(p).val |= 1;} while(0)
  19.264 +#define dma_set_pte_writable(p) do {(p).val |= 2;} while(0)
  19.265 +#define dma_set_pte_superpage(p) do {(p).val |= 8;} while(0)
  19.266 +#define dma_set_pte_prot(p, prot) do { (p).val = (((p).val >> 2) << 2) | ((prot) & 3);} while (0)
  19.267 +#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
  19.268 +#define dma_set_pte_addr(p, addr) do {(p).val |= ((addr) >> PAGE_SHIFT_4K) << PAGE_SHIFT_4K;} while(0)
  19.269 +#define DMA_PTE_READ (1)
  19.270 +#define DMA_PTE_WRITE (2)
  19.271 +#define dma_pte_present(p) (((p).val & 3) != 0)
  19.272 +
  19.273 +/* interrupt remap entry */
  19.274 +struct iremap_entry {
  19.275 +  union {
  19.276 +    u64 lo_val;
  19.277 +    struct {
  19.278 +        u64 p       : 1,
  19.279 +            fpd     : 1,
  19.280 +            dm      : 1,
  19.281 +            rh      : 1,
  19.282 +            tm      : 1,
  19.283 +            dlm     : 3,
  19.284 +            avail   : 4,
  19.285 +            res_1   : 4,
  19.286 +            vector  : 8,
  19.287 +            res_2   : 8,
  19.288 +            dst     : 32;
  19.289 +    }lo;
  19.290 +  };
  19.291 +  union {
  19.292 +    u64 hi_val;
  19.293 +    struct {
  19.294 +        u64 sid     : 16,
  19.295 +            sq      : 2,
  19.296 +            svt     : 2,
  19.297 +            res_1   : 44;
  19.298 +    }hi;
  19.299 +  };
  19.300 +};
  19.301 +#define IREMAP_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct iremap_entry))
  19.302 +#define iremap_present(v) ((v).lo & 1)
  19.303 +#define iremap_fault_disable(v) (((v).lo >> 1) & 1)
  19.304 +
  19.305 +#define iremap_set_present(v) do {(v).lo |= 1;} while(0)
  19.306 +#define iremap_clear_present(v) do {(v).lo &= ~1;} while(0)
  19.307 +
  19.308 +/* queue invalidation entry */
  19.309 +struct qinval_entry {
  19.310 +    union {
  19.311 +        struct {
  19.312 +            struct {
  19.313 +                u64 type    : 4,
  19.314 +                    granu   : 2,
  19.315 +                    res_1   : 10,
  19.316 +                    did     : 16,
  19.317 +                    sid     : 16,
  19.318 +                    fm      : 2,
  19.319 +                    res_2   : 14;
  19.320 +            }lo;
  19.321 +            struct {
  19.322 +                u64 res;
  19.323 +            }hi;
  19.324 +        }cc_inv_dsc;
  19.325 +        struct {
  19.326 +            struct {
  19.327 +                u64 type    : 4,
  19.328 +                    granu   : 2,
  19.329 +                    dw      : 1,
  19.330 +                    dr      : 1,
  19.331 +                    res_1   : 8,
  19.332 +                    did     : 16,
  19.333 +                    res_2   : 32;
  19.334 +            }lo;
  19.335 +            struct {
  19.336 +                u64 am      : 6,
  19.337 +                    ih      : 1,
  19.338 +                    res_1   : 5,
  19.339 +                    addr    : 52;
  19.340 +            }hi;
  19.341 +        }iotlb_inv_dsc;
  19.342 +        struct {
  19.343 +            struct {
  19.344 +                u64 type    : 4,
  19.345 +                    res_1   : 12,
  19.346 +                    max_invs_pend: 5,
  19.347 +                    res_2   : 11,
  19.348 +                    sid     : 16,
  19.349 +                    res_3   : 16;
  19.350 +            }lo;
  19.351 +            struct {
  19.352 +                u64 size    : 1,
  19.353 +                    res_1   : 11,
  19.354 +                    addr    : 52;
  19.355 +            }hi;
  19.356 +        }dev_iotlb_inv_dsc;
  19.357 +        struct {
  19.358 +            struct {
  19.359 +                u64 type    : 4,
  19.360 +                    granu   : 1,
  19.361 +                    res_1   : 22,
  19.362 +                    im      : 5,
  19.363 +                    iidx    : 16,
  19.364 +                    res_2   : 16;
  19.365 +            }lo;
  19.366 +            struct {
  19.367 +                u64 res;
  19.368 +            }hi;
  19.369 +        }iec_inv_dsc;
  19.370 +        struct {
  19.371 +            struct {
  19.372 +                u64 type    : 4,
  19.373 +                    iflag   : 1,
  19.374 +                    sw      : 1,
  19.375 +                    fn      : 1,
  19.376 +                    res_1   : 25,
  19.377 +                    sdata   : 32;
  19.378 +            }lo;
  19.379 +            struct {
  19.380 +                u64 res_1   : 2,
  19.381 +                    saddr   : 62;
  19.382 +            }hi;
  19.383 +        }inv_wait_dsc;
  19.384 +    }q;
  19.385 +};
  19.386 +
  19.387 +struct poll_info {
  19.388 +    u64 saddr;
  19.389 +    u32 udata;
  19.390 +};
  19.391 +
  19.392 +#define QINVAL_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct qinval_entry))
  19.393 +#define qinval_present(v) ((v).lo & 1)
  19.394 +#define qinval_fault_disable(v) (((v).lo >> 1) & 1)
  19.395 +
  19.396 +#define qinval_set_present(v) do {(v).lo |= 1;} while(0)
  19.397 +#define qinval_clear_present(v) do {(v).lo &= ~1;} while(0)
  19.398 +
  19.399 +#define RESERVED_VAL        0
  19.400 +
  19.401 +#define TYPE_INVAL_CONTEXT      0x1
  19.402 +#define TYPE_INVAL_IOTLB        0x2
  19.403 +#define TYPE_INVAL_DEVICE_IOTLB 0x3
  19.404 +#define TYPE_INVAL_IEC          0x4
  19.405 +#define TYPE_INVAL_WAIT         0x5
  19.406 +
  19.407 +#define NOTIFY_TYPE_POLL        1
  19.408 +#define NOTIFY_TYPE_INTR        1
  19.409 +#define INTERRUTP_FLAG          1
  19.410 +#define STATUS_WRITE            1
  19.411 +#define FENCE_FLAG              1
  19.412 +
  19.413 +#define IEC_GLOBAL_INVL         0
  19.414 +#define IEC_INDEX_INVL          1
  19.415 +#define IRTA_REG_EIME_SHIFT     11
  19.416 +#define IRTA_REG_TABLE_SIZE     7    // 4k page = 256 * 16 byte entries
  19.417 +                                     // 2^^(IRTA_REG_TABLE_SIZE + 1) = 256
  19.418 +                                     // IRTA_REG_TABLE_SIZE = 7
  19.419 +
  19.420 +#define VTD_PAGE_TABLE_LEVEL_3  3
  19.421 +#define VTD_PAGE_TABLE_LEVEL_4  4
  19.422 +
  19.423 +#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
  19.424 +#define MAX_IOMMU_REGS 0xc0
  19.425 +
  19.426 +extern struct list_head acpi_drhd_units;
  19.427 +extern struct list_head acpi_rmrr_units;
  19.428 +extern struct list_head acpi_ioapic_units;
  19.429 +
  19.430 +struct qi_ctrl {
  19.431 +    struct qinval_entry *qinval;         /* queue invalidation page */
  19.432 +    int qinval_index;                    /* queue invalidation index */
  19.433 +    spinlock_t qinval_lock;      /* lock for queue invalidation page */
  19.434 +    spinlock_t qinval_poll_lock; /* lock for queue invalidation poll addr */
   19.435 +    volatile u32 qinval_poll_status;     /* used by poll method to sync */
  19.436 +};
  19.437 +
  19.438 +struct ir_ctrl {
  19.439 +    struct iremap_entry *iremap; /* interrupt remap table */
  19.440 +    int iremap_index;            /* interrupt remap index */
   19.441 +    spinlock_t iremap_lock;      /* lock for irq remapping table */
  19.442 +};
  19.443 +
  19.444 +struct iommu_flush {
  19.445 +    int (*context)(void *iommu, u16 did, u16 source_id,
  19.446 +                   u8 function_mask, u64 type, int non_present_entry_flush);
  19.447 +    int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order,
  19.448 +                 u64 type, int non_present_entry_flush);
  19.449 +};
  19.450 +
  19.451 +struct intel_iommu {
  19.452 +    struct qi_ctrl qi_ctrl;
  19.453 +    struct ir_ctrl ir_ctrl;
  19.454 +    struct iommu_flush flush;
  19.455 +};
  19.456 +
  19.457 +#endif
    20.1 --- a/xen/drivers/passthrough/vtd/qinval.c	Sun Mar 16 14:11:34 2008 +0000
    20.2 +++ b/xen/drivers/passthrough/vtd/qinval.c	Mon Mar 17 10:45:24 2008 +0000
    20.3 @@ -19,15 +19,9 @@
    20.4   */
    20.5  
    20.6  
    20.7 -#include <xen/init.h>
    20.8 -#include <xen/irq.h>
    20.9 -#include <xen/spinlock.h>
   20.10  #include <xen/sched.h>
   20.11 -#include <xen/xmalloc.h>
   20.12 -#include <xen/domain_page.h>
   20.13 -#include <asm/delay.h>
   20.14 -#include <asm/string.h>
   20.15 -#include <asm/iommu.h>
   20.16 +#include <xen/iommu.h>
   20.17 +#include "iommu.h"
   20.18  #include "dmar.h"
   20.19  #include "vtd.h"
   20.20  #include "../pci-direct.h"
    21.1 --- a/xen/drivers/passthrough/vtd/utils.c	Sun Mar 16 14:11:34 2008 +0000
    21.2 +++ b/xen/drivers/passthrough/vtd/utils.c	Mon Mar 17 10:45:24 2008 +0000
    21.3 @@ -17,22 +17,15 @@
    21.4   * Copyright (C) Allen Kay <allen.m.kay@intel.com>
    21.5   */
    21.6  
    21.7 -#include <xen/init.h>
    21.8 -#include <xen/bitmap.h>
    21.9 -#include <xen/irq.h>
   21.10 -#include <xen/spinlock.h>
   21.11  #include <xen/sched.h>
   21.12  #include <xen/delay.h>
   21.13 -#include <asm/iommu.h>
   21.14 +#include <xen/iommu.h>
   21.15 +#include "iommu.h"
   21.16  #include "dmar.h"
   21.17  #include "../pci-direct.h"
   21.18  #include "../pci_regs.h"
   21.19  #include "msi.h"
   21.20  
   21.21 -#include <xen/mm.h>
   21.22 -#include <xen/xmalloc.h>
   21.23 -#include <xen/inttypes.h>
   21.24 -
   21.25  #define INTEL   0x8086
   21.26  #define SEABURG 0x4000
   21.27  #define C_STEP  2
    22.1 --- a/xen/drivers/passthrough/vtd/vtd.h	Sun Mar 16 14:11:34 2008 +0000
    22.2 +++ b/xen/drivers/passthrough/vtd/vtd.h	Mon Mar 17 10:45:24 2008 +0000
    22.3 @@ -21,16 +21,7 @@
    22.4  #ifndef _VTD_H_
    22.5  #define _VTD_H_
    22.6  
    22.7 -#include <xen/list.h>
    22.8 -#include <asm/iommu.h>
    22.9 -
   22.10 -#define VTDPREFIX "[VT-D]"
   22.11 -
   22.12 -#define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1m */
   22.13 -#define time_after(a,b)         \
   22.14 -        (typecheck(unsigned long, a) && \
   22.15 -         typecheck(unsigned long, b) && \
   22.16 -         ((long)(b) - (long)(a) < 0))
   22.17 +#include <xen/iommu.h>
   22.18  
   22.19  struct IO_APIC_route_remap_entry {
   22.20      union {
    23.1 --- a/xen/include/asm-x86/fixmap.h	Sun Mar 16 14:11:34 2008 +0000
    23.2 +++ b/xen/include/asm-x86/fixmap.h	Mon Mar 17 10:45:24 2008 +0000
    23.3 @@ -17,7 +17,7 @@
    23.4  #include <asm/acpi.h>
    23.5  #include <asm/page.h>
    23.6  #include <xen/kexec.h>
    23.7 -#include <asm/iommu.h>
    23.8 +#include <xen/iommu.h>
    23.9  #include <asm/amd-iommu.h>
   23.10  
   23.11  /*
    24.1 --- a/xen/include/asm-x86/hvm/domain.h	Sun Mar 16 14:11:34 2008 +0000
    24.2 +++ b/xen/include/asm-x86/hvm/domain.h	Mon Mar 17 10:45:24 2008 +0000
    24.3 @@ -21,13 +21,13 @@
    24.4  #ifndef __ASM_X86_HVM_DOMAIN_H__
    24.5  #define __ASM_X86_HVM_DOMAIN_H__
    24.6  
    24.7 -#include <asm/iommu.h>
    24.8 +#include <xen/iommu.h>
    24.9  #include <asm/hvm/irq.h>
   24.10  #include <asm/hvm/vpt.h>
   24.11  #include <asm/hvm/vlapic.h>
   24.12  #include <asm/hvm/vioapic.h>
   24.13  #include <asm/hvm/io.h>
   24.14 -#include <asm/hvm/iommu.h>
   24.15 +#include <xen/hvm/iommu.h>
   24.16  #include <public/hvm/params.h>
   24.17  #include <public/hvm/save.h>
   24.18  
    25.1 --- a/xen/include/asm-x86/hvm/iommu.h	Sun Mar 16 14:11:34 2008 +0000
    25.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.3 @@ -1,56 +0,0 @@
    25.4 -/*
    25.5 - * Copyright (c) 2006, Intel Corporation.
    25.6 - *
    25.7 - * This program is free software; you can redistribute it and/or modify it
    25.8 - * under the terms and conditions of the GNU General Public License,
    25.9 - * version 2, as published by the Free Software Foundation.
   25.10 - *
   25.11 - * This program is distributed in the hope it will be useful, but WITHOUT
   25.12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   25.13 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   25.14 - * more details.
   25.15 - *
   25.16 - * You should have received a copy of the GNU General Public License along with
   25.17 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   25.18 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   25.19 - *
   25.20 - * Copyright (C) Allen Kay <allen.m.kay@intel.com>
   25.21 - */
   25.22 -
   25.23 -#ifndef __ASM_X86_HVM_IOMMU_H__
   25.24 -#define __ASM_X86_HVM_IOMMU_H__
   25.25 -
   25.26 -#include <asm/iommu.h>
   25.27 -#include <asm/hvm/irq.h>
   25.28 -#include <asm/hvm/vpt.h>
   25.29 -#include <asm/hvm/vlapic.h>
   25.30 -#include <asm/hvm/io.h>
   25.31 -#include <public/hvm/params.h>
   25.32 -#include <public/hvm/save.h>
   25.33 -
   25.34 -struct g2m_ioport {
   25.35 -    struct list_head list;
   25.36 -    unsigned int gport;
   25.37 -    unsigned int mport;
   25.38 -    unsigned int np;
   25.39 -};
   25.40 -
   25.41 -struct hvm_iommu {
   25.42 -    spinlock_t iommu_list_lock;    /* protect iommu specific lists */
   25.43 -    struct list_head pdev_list;    /* direct accessed pci devices */
   25.44 -    struct dma_pte *pgd;           /* io page directory root */
   25.45 -    spinlock_t mapping_lock;       /* io page table lock */
   25.46 -    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
   25.47 -    struct list_head g2m_ioport_list;  /* guest to machine ioport mapping */
   25.48 -    domid_t iommu_domid;           /* domain id stored in iommu */
   25.49 -
   25.50 -    /* amd iommu support */
   25.51 -    int domain_id;
   25.52 -    int paging_mode;
   25.53 -    void *root_table;
   25.54 -
   25.55 -    /* iommu_ops */
   25.56 -    struct iommu_ops *platform_ops;
   25.57 -};
   25.58 -
   25.59 -#endif // __ASM_X86_HVM_IOMMU_H__
    26.1 --- a/xen/include/asm-x86/hvm/vmx/intel-iommu.h	Sun Mar 16 14:11:34 2008 +0000
    26.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.3 @@ -1,460 +0,0 @@
    26.4 -/*
    26.5 - * Copyright (c) 2006, Intel Corporation.
    26.6 - *
    26.7 - * This program is free software; you can redistribute it and/or modify it
    26.8 - * under the terms and conditions of the GNU General Public License,
    26.9 - * version 2, as published by the Free Software Foundation.
   26.10 - *
   26.11 - * This program is distributed in the hope it will be useful, but WITHOUT
   26.12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   26.13 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   26.14 - * more details.
   26.15 - *
   26.16 - * You should have received a copy of the GNU General Public License along with
   26.17 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   26.18 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   26.19 - *
   26.20 - * Copyright (C) Ashok Raj <ashok.raj@intel.com>
   26.21 - */
   26.22 -
   26.23 -#ifndef _INTEL_IOMMU_H_
   26.24 -#define _INTEL_IOMMU_H_
   26.25 -
   26.26 -#include <xen/types.h>
   26.27 -
   26.28 -/*
   26.29 - * Intel IOMMU register specification per version 1.0 public spec.
   26.30 - */
   26.31 -
   26.32 -#define    DMAR_VER_REG    0x0    /* Arch version supported by this IOMMU */
   26.33 -#define    DMAR_CAP_REG    0x8    /* Hardware supported capabilities */
   26.34 -#define    DMAR_ECAP_REG    0x10    /* Extended capabilities supported */
   26.35 -#define    DMAR_GCMD_REG    0x18    /* Global command register */
   26.36 -#define    DMAR_GSTS_REG    0x1c    /* Global status register */
   26.37 -#define    DMAR_RTADDR_REG    0x20    /* Root entry table */
   26.38 -#define    DMAR_CCMD_REG    0x28    /* Context command reg */
   26.39 -#define    DMAR_FSTS_REG    0x34    /* Fault Status register */
   26.40 -#define    DMAR_FECTL_REG    0x38    /* Fault control register */
   26.41 -#define    DMAR_FEDATA_REG    0x3c    /* Fault event interrupt data register */
   26.42 -#define    DMAR_FEADDR_REG    0x40    /* Fault event interrupt addr register */
   26.43 -#define    DMAR_FEUADDR_REG 0x44    /* Upper address register */
   26.44 -#define    DMAR_AFLOG_REG    0x58    /* Advanced Fault control */
   26.45 -#define    DMAR_PMEN_REG    0x64    /* Enable Protected Memory Region */
   26.46 -#define    DMAR_PLMBASE_REG 0x68    /* PMRR Low addr */
   26.47 -#define    DMAR_PLMLIMIT_REG 0x6c    /* PMRR low limit */
   26.48 -#define    DMAR_PHMBASE_REG 0x70    /* pmrr high base addr */
   26.49 -#define    DMAR_PHMLIMIT_REG 0x78    /* pmrr high limit */
   26.50 -#define    DMAR_IQH_REG    0x80    /* invalidation queue head */
   26.51 -#define    DMAR_IQT_REG    0x88    /* invalidation queue tail */
   26.52 -#define    DMAR_IQA_REG    0x90    /* invalidation queue addr */
   26.53 -#define    DMAR_IRTA_REG   0xB8    /* intr remap */
   26.54 -
   26.55 -#define OFFSET_STRIDE        (9)
   26.56 -#define dmar_readl(dmar, reg) readl(dmar + reg)
   26.57 -#define dmar_writel(dmar, reg, val) writel(val, dmar + reg)
   26.58 -#define dmar_readq(dmar, reg) ({ \
   26.59 -        u32 lo, hi; \
   26.60 -        lo = dmar_readl(dmar, reg); \
   26.61 -        hi = dmar_readl(dmar, reg + 4); \
   26.62 -        (((u64) hi) << 32) + lo; })
   26.63 -#define dmar_writeq(dmar, reg, val) do {\
   26.64 -        dmar_writel(dmar, reg, (u32)val); \
   26.65 -        dmar_writel(dmar, reg + 4, (u32)((u64) val >> 32)); \
   26.66 -    } while (0)
   26.67 -
   26.68 -#define VER_MAJOR(v)        (((v) & 0xf0) >> 4)
   26.69 -#define VER_MINOR(v)        ((v) & 0x0f)
   26.70 -
   26.71 -/*
   26.72 - * Decoding Capability Register
   26.73 - */
   26.74 -#define cap_read_drain(c)    (((c) >> 55) & 1)
   26.75 -#define cap_write_drain(c)    (((c) >> 54) & 1)
   26.76 -#define cap_max_amask_val(c)    (((c) >> 48) & 0x3f)
   26.77 -#define cap_num_fault_regs(c)    ((((c) >> 40) & 0xff) + 1)
   26.78 -#define cap_pgsel_inv(c)       (((c) >> 39) & 1)
   26.79 -
   26.80 -#define cap_super_page_val(c)    (((c) >> 34) & 0xf)
   26.81 -#define cap_super_offset(c)    (((find_first_bit(&cap_super_page_val(c), 4)) \
   26.82 -                    * OFFSET_STRIDE) + 21)
   26.83 -
   26.84 -#define cap_fault_reg_offset(c)    ((((c) >> 24) & 0x3ff) * 16)
   26.85 -
   26.86 -#define cap_isoch(c)        (((c) >> 23) & 1)
   26.87 -#define cap_qos(c)        (((c) >> 22) & 1)
   26.88 -#define cap_mgaw(c)        ((((c) >> 16) & 0x3f) + 1)
   26.89 -#define cap_sagaw(c)        (((c) >> 8) & 0x1f)
   26.90 -#define cap_caching_mode(c)    (((c) >> 7) & 1)
   26.91 -#define cap_phmr(c)        (((c) >> 6) & 1)
   26.92 -#define cap_plmr(c)        (((c) >> 5) & 1)
   26.93 -#define cap_rwbf(c)        (((c) >> 4) & 1)
   26.94 -#define cap_afl(c)        (((c) >> 3) & 1)
   26.95 -#define cap_ndoms(c)        (1 << (4 + 2 * ((c) & 0x7)))
   26.96 -
   26.97 -/*
   26.98 - * Extended Capability Register
   26.99 - */
  26.100 -
  26.101 -#define ecap_niotlb_iunits(e)    ((((e) >> 24) & 0xff) + 1)
  26.102 -#define ecap_iotlb_offset(e)     ((((e) >> 8) & 0x3ff) * 16)
  26.103 -#define ecap_coherent(e)         ((e >> 0) & 0x1)
  26.104 -#define ecap_queued_inval(e)     ((e >> 1) & 0x1)
  26.105 -#define ecap_dev_iotlb(e)        ((e >> 2) & 0x1)
  26.106 -#define ecap_intr_remap(e)       ((e >> 3) & 0x1)
  26.107 -#define ecap_ext_intr(e)         ((e >> 4) & 0x1)
  26.108 -#define ecap_cache_hints(e)      ((e >> 5) & 0x1)
  26.109 -#define ecap_pass_thru(e)        ((e >> 6) & 0x1)
  26.110 -
  26.111 -#define PAGE_SHIFT_4K        (12)
  26.112 -#define PAGE_SIZE_4K        (1UL << PAGE_SHIFT_4K)
  26.113 -#define PAGE_MASK_4K        (((u64)-1) << PAGE_SHIFT_4K)
  26.114 -#define PAGE_ALIGN_4K(addr)    (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
  26.115 -
  26.116 -/* IOTLB_REG */
  26.117 -#define DMA_TLB_FLUSH_GRANU_OFFSET  60
  26.118 -#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
  26.119 -#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
  26.120 -#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
  26.121 -#define DMA_TLB_IIRG(x) (((x) >> 60) & 7) 
  26.122 -#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
  26.123 -#define DMA_TLB_DID(x) (((u64)(x & 0xffff)) << 32)
  26.124 -
  26.125 -#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
  26.126 -#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
  26.127 -#define DMA_TLB_IVT (((u64)1) << 63)
  26.128 -
  26.129 -#define DMA_TLB_IVA_ADDR(x) ((((u64)x) >> 12) << 12)
  26.130 -#define DMA_TLB_IVA_HINT(x) ((((u64)x) & 1) << 6)
  26.131 -
  26.132 -/* GCMD_REG */
  26.133 -#define DMA_GCMD_TE     (((u64)1) << 31)
  26.134 -#define DMA_GCMD_SRTP   (((u64)1) << 30)
  26.135 -#define DMA_GCMD_SFL    (((u64)1) << 29)
  26.136 -#define DMA_GCMD_EAFL   (((u64)1) << 28)
  26.137 -#define DMA_GCMD_WBF    (((u64)1) << 27)
  26.138 -#define DMA_GCMD_QIE    (((u64)1) << 26)
  26.139 -#define DMA_GCMD_IRE    (((u64)1) << 25)
  26.140 -#define DMA_GCMD_SIRTP  (((u64)1) << 24)
  26.141 -#define DMA_GCMD_CFI    (((u64)1) << 23)
  26.142 -
  26.143 -/* GSTS_REG */
  26.144 -#define DMA_GSTS_TES    (((u64)1) << 31)
  26.145 -#define DMA_GSTS_RTPS   (((u64)1) << 30)
  26.146 -#define DMA_GSTS_FLS    (((u64)1) << 29)
  26.147 -#define DMA_GSTS_AFLS   (((u64)1) << 28)
  26.148 -#define DMA_GSTS_WBFS   (((u64)1) << 27)
  26.149 -#define DMA_GSTS_QIES   (((u64)1) <<26)
  26.150 -#define DMA_GSTS_IRES   (((u64)1) <<25)
  26.151 -#define DMA_GSTS_SIRTPS (((u64)1) << 24)
  26.152 -#define DMA_GSTS_CFIS   (((u64)1) <<23)
  26.153 -
  26.154 -/* PMEN_REG */
  26.155 -#define DMA_PMEN_EPM    (((u32)1) << 31)
  26.156 -#define DMA_PMEN_PRS    (((u32)1) << 0)
  26.157 -
  26.158 -/* CCMD_REG */
  26.159 -#define DMA_CCMD_INVL_GRANU_OFFSET  61
  26.160 -#define DMA_CCMD_ICC   (((u64)1) << 63)
  26.161 -#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
  26.162 -#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
  26.163 -#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
  26.164 -#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
  26.165 -#define DMA_CCMD_CIRG(x) ((((u64)3) << 61) & x)
  26.166 -#define DMA_CCMD_MASK_NOBIT 0
  26.167 -#define DMA_CCMD_MASK_1BIT 1
  26.168 -#define DMA_CCMD_MASK_2BIT 2
  26.169 -#define DMA_CCMD_MASK_3BIT 3
  26.170 -#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
  26.171 -#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
  26.172 -
  26.173 -#define DMA_CCMD_CAIG_MASK(x) (((u64)x) & ((u64) 0x3 << 59))
  26.174 -
  26.175 -/* FECTL_REG */
  26.176 -#define DMA_FECTL_IM (((u64)1) << 31)
  26.177 -
  26.178 -/* FSTS_REG */
  26.179 -#define DMA_FSTS_PFO ((u64)1 << 0)
  26.180 -#define DMA_FSTS_PPF ((u64)1 << 1)
  26.181 -#define DMA_FSTS_AFO ((u64)1 << 2)
  26.182 -#define DMA_FSTS_APF ((u64)1 << 3)
  26.183 -#define DMA_FSTS_IQE ((u64)1 << 4)
  26.184 -#define DMA_FSTS_ICE ((u64)1 << 5)
  26.185 -#define DMA_FSTS_ITE ((u64)1 << 6)
  26.186 -#define DMA_FSTS_FAULTS    DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE
  26.187 -#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
  26.188 -
  26.189 -/* FRCD_REG, 32 bits access */
  26.190 -#define DMA_FRCD_F (((u64)1) << 31)
  26.191 -#define dma_frcd_type(d) ((d >> 30) & 1)
  26.192 -#define dma_frcd_fault_reason(c) (c & 0xff)
  26.193 -#define dma_frcd_source_id(c) (c & 0xffff)
  26.194 -#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
  26.195 -
  26.196 -/*
  26.197 - * 0: Present
  26.198 - * 1-11: Reserved
  26.199 - * 12-63: Context Ptr (12 - (haw-1))
  26.200 - * 64-127: Reserved
  26.201 - */
  26.202 -struct root_entry {
  26.203 -    u64    val;
  26.204 -    u64    rsvd1;
  26.205 -};
  26.206 -#define root_present(root)    ((root).val & 1)
  26.207 -#define set_root_present(root) do {(root).val |= 1;} while(0)
  26.208 -#define get_context_addr(root) ((root).val & PAGE_MASK_4K)
  26.209 -#define set_root_value(root, value) \
  26.210 -    do {(root).val |= ((value) & PAGE_MASK_4K);} while(0)
  26.211 -
  26.212 -struct context_entry {
  26.213 -    u64 lo;
  26.214 -    u64 hi;
  26.215 -};
  26.216 -#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
  26.217 -#define context_present(c) ((c).lo & 1)
  26.218 -#define context_fault_disable(c) (((c).lo >> 1) & 1)
  26.219 -#define context_translation_type(c) (((c).lo >> 2) & 3)
  26.220 -#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
  26.221 -#define context_address_width(c) ((c).hi &  7)
  26.222 -#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
  26.223 -
  26.224 -#define context_set_present(c) do {(c).lo |= 1;} while(0)
  26.225 -#define context_clear_present(c) do {(c).lo &= ~1;} while(0)
  26.226 -#define context_set_fault_enable(c) \
  26.227 -    do {(c).lo &= (((u64)-1) << 2) | 1;} while(0)
  26.228 -
  26.229 -#define context_set_translation_type(c, val) do { \
  26.230 -        (c).lo &= (((u64)-1) << 4) | 3; \
  26.231 -        (c).lo |= (val & 3) << 2; \
  26.232 -    } while(0)
  26.233 -#define CONTEXT_TT_MULTI_LEVEL 0
  26.234 -#define CONTEXT_TT_DEV_IOTLB   1
  26.235 -#define CONTEXT_TT_PASS_THRU   2
  26.236 -
  26.237 -#define context_set_address_root(c, val) \
  26.238 -    do {(c).lo &= 0xfff; (c).lo |= (val) & PAGE_MASK_4K ;} while(0)
  26.239 -#define context_set_address_width(c, val) \
  26.240 -    do {(c).hi &= 0xfffffff8; (c).hi |= (val) & 7;} while(0)
  26.241 -#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while(0)
  26.242 -
  26.243 -/* page table handling */
  26.244 -#define LEVEL_STRIDE       (9)
  26.245 -#define LEVEL_MASK         ((1 << LEVEL_STRIDE) - 1)
  26.246 -#define agaw_to_level(val) ((val) + 2)
  26.247 -#define agaw_to_width(val) (30 + val * LEVEL_STRIDE)
  26.248 -#define width_to_agaw(w)   ((w - 30)/LEVEL_STRIDE)
  26.249 -#define level_to_offset_bits(l) (12 + (l - 1) * LEVEL_STRIDE)
  26.250 -#define address_level_offset(addr, level) \
  26.251 -            ((addr >> level_to_offset_bits(level)) & LEVEL_MASK)
  26.252 -#define level_mask(l) (((u64)(-1)) << level_to_offset_bits(l))
  26.253 -#define level_size(l) (1 << level_to_offset_bits(l))
  26.254 -#define align_to_level(addr, l) ((addr + level_size(l) - 1) & level_mask(l))
  26.255 -
  26.256 -/*
  26.257 - * 0: readable
  26.258 - * 1: writable
  26.259 - * 2-6: reserved
  26.260 - * 7: super page
  26.261 - * 8-11: available
  26.262 - * 12-63: Host physcial address
  26.263 - */
  26.264 -struct dma_pte {
  26.265 -    u64 val;
  26.266 -};
  26.267 -#define dma_clear_pte(p)    do {(p).val = 0;} while(0)
  26.268 -#define dma_set_pte_readable(p) do {(p).val |= 1;} while(0)
  26.269 -#define dma_set_pte_writable(p) do {(p).val |= 2;} while(0)
  26.270 -#define dma_set_pte_superpage(p) do {(p).val |= 8;} while(0)
  26.271 -#define dma_set_pte_prot(p, prot) do { (p).val = (((p).val >> 2) << 2) | ((prot) & 3);} while (0)
  26.272 -#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
  26.273 -#define dma_set_pte_addr(p, addr) do {(p).val |= ((addr) >> PAGE_SHIFT_4K) << PAGE_SHIFT_4K;} while(0)
  26.274 -#define DMA_PTE_READ (1)
  26.275 -#define DMA_PTE_WRITE (2)
  26.276 -#define dma_pte_present(p) (((p).val & 3) != 0)
  26.277 -
  26.278 -/* interrupt remap entry */
  26.279 -struct iremap_entry {
  26.280 -  union {
  26.281 -    u64 lo_val;
  26.282 -    struct {
  26.283 -        u64 p       : 1,
  26.284 -            fpd     : 1,
  26.285 -            dm      : 1,
  26.286 -            rh      : 1,
  26.287 -            tm      : 1,
  26.288 -            dlm     : 3,
  26.289 -            avail   : 4,
  26.290 -            res_1   : 4,
  26.291 -            vector  : 8,
  26.292 -            res_2   : 8,
  26.293 -            dst     : 32;
  26.294 -    }lo;
  26.295 -  };
  26.296 -  union {
  26.297 -    u64 hi_val;
  26.298 -    struct {
  26.299 -        u64 sid     : 16,
  26.300 -            sq      : 2,
  26.301 -            svt     : 2,
  26.302 -            res_1   : 44;
  26.303 -    }hi;
  26.304 -  };
  26.305 -};
  26.306 -#define IREMAP_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct iremap_entry))
  26.307 -#define iremap_present(v) ((v).lo & 1)
  26.308 -#define iremap_fault_disable(v) (((v).lo >> 1) & 1)
  26.309 -
  26.310 -#define iremap_set_present(v) do {(v).lo |= 1;} while(0)
  26.311 -#define iremap_clear_present(v) do {(v).lo &= ~1;} while(0)
  26.312 -
  26.313 -/* queue invalidation entry */
  26.314 -struct qinval_entry {
  26.315 -    union {
  26.316 -        struct {
  26.317 -            struct {
  26.318 -                u64 type    : 4,
  26.319 -                    granu   : 2,
  26.320 -                    res_1   : 10,
  26.321 -                    did     : 16,
  26.322 -                    sid     : 16,
  26.323 -                    fm      : 2,
  26.324 -                    res_2   : 14;
  26.325 -            }lo;
  26.326 -            struct {
  26.327 -                u64 res;
  26.328 -            }hi;
  26.329 -        }cc_inv_dsc;
  26.330 -        struct {
  26.331 -            struct {
  26.332 -                u64 type    : 4,
  26.333 -                    granu   : 2,
  26.334 -                    dw      : 1,
  26.335 -                    dr      : 1,
  26.336 -                    res_1   : 8,
  26.337 -                    did     : 16,
  26.338 -                    res_2   : 32;
  26.339 -            }lo;
  26.340 -            struct {
  26.341 -                u64 am      : 6,
  26.342 -                    ih      : 1,
  26.343 -                    res_1   : 5,
  26.344 -                    addr    : 52;
  26.345 -            }hi;
  26.346 -        }iotlb_inv_dsc;
  26.347 -        struct {
  26.348 -            struct {
  26.349 -                u64 type    : 4,
  26.350 -                    res_1   : 12,
  26.351 -                    max_invs_pend: 5,
  26.352 -                    res_2   : 11,
  26.353 -                    sid     : 16,
  26.354 -                    res_3   : 16;
  26.355 -            }lo;
  26.356 -            struct {
  26.357 -                u64 size    : 1,
  26.358 -                    res_1   : 11,
  26.359 -                    addr    : 52;
  26.360 -            }hi;
  26.361 -        }dev_iotlb_inv_dsc;
  26.362 -        struct {
  26.363 -            struct {
  26.364 -                u64 type    : 4,
  26.365 -                    granu   : 1,
  26.366 -                    res_1   : 22,
  26.367 -                    im      : 5,
  26.368 -                    iidx    : 16,
  26.369 -                    res_2   : 16;
  26.370 -            }lo;
  26.371 -            struct {
  26.372 -                u64 res;
  26.373 -            }hi;
  26.374 -        }iec_inv_dsc;
  26.375 -        struct {
  26.376 -            struct {
  26.377 -                u64 type    : 4,
  26.378 -                    iflag   : 1,
  26.379 -                    sw      : 1,
  26.380 -                    fn      : 1,
  26.381 -                    res_1   : 25,
  26.382 -                    sdata   : 32;
  26.383 -            }lo;
  26.384 -            struct {
  26.385 -                u64 res_1   : 2,
  26.386 -                    saddr   : 62;
  26.387 -            }hi;
  26.388 -        }inv_wait_dsc;
  26.389 -    }q;
  26.390 -};
  26.391 -
  26.392 -struct poll_info {
  26.393 -    u64 saddr;
  26.394 -    u32 udata;
  26.395 -};
  26.396 -
  26.397 -#define QINVAL_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct qinval_entry))
  26.398 -#define qinval_present(v) ((v).lo & 1)
  26.399 -#define qinval_fault_disable(v) (((v).lo >> 1) & 1)
  26.400 -
  26.401 -#define qinval_set_present(v) do {(v).lo |= 1;} while(0)
  26.402 -#define qinval_clear_present(v) do {(v).lo &= ~1;} while(0)
  26.403 -
  26.404 -#define RESERVED_VAL        0
  26.405 -
  26.406 -#define TYPE_INVAL_CONTEXT      0x1
  26.407 -#define TYPE_INVAL_IOTLB        0x2
  26.408 -#define TYPE_INVAL_DEVICE_IOTLB 0x3
  26.409 -#define TYPE_INVAL_IEC          0x4
  26.410 -#define TYPE_INVAL_WAIT         0x5
  26.411 -
  26.412 -#define NOTIFY_TYPE_POLL        1
  26.413 -#define NOTIFY_TYPE_INTR        1
  26.414 -#define INTERRUTP_FLAG          1
  26.415 -#define STATUS_WRITE            1
  26.416 -#define FENCE_FLAG              1
  26.417 -
  26.418 -#define IEC_GLOBAL_INVL         0
  26.419 -#define IEC_INDEX_INVL          1
  26.420 -#define IRTA_REG_EIME_SHIFT     11
  26.421 -#define IRTA_REG_TABLE_SIZE     7    // 4k page = 256 * 16 byte entries
  26.422 -                                     // 2^^(IRTA_REG_TABLE_SIZE + 1) = 256
  26.423 -                                     // IRTA_REG_TABLE_SIZE = 7
  26.424 -
  26.425 -#define VTD_PAGE_TABLE_LEVEL_3  3
  26.426 -#define VTD_PAGE_TABLE_LEVEL_4  4
  26.427 -
  26.428 -#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
  26.429 -#define MAX_IOMMUS 32
  26.430 -#define MAX_IOMMU_REGS 0xc0
  26.431 -
  26.432 -extern struct list_head acpi_drhd_units;
  26.433 -extern struct list_head acpi_rmrr_units;
  26.434 -extern struct list_head acpi_ioapic_units;
  26.435 -
  26.436 -struct qi_ctrl {
  26.437 -    struct qinval_entry *qinval;         /* queue invalidation page */
  26.438 -    int qinval_index;                    /* queue invalidation index */
  26.439 -    spinlock_t qinval_lock;      /* lock for queue invalidation page */
  26.440 -    spinlock_t qinval_poll_lock; /* lock for queue invalidation poll addr */
  26.441 -    volatile u32 qinval_poll_status;     /* used by poll methord to sync */
  26.442 -};
  26.443 -
  26.444 -struct ir_ctrl {
  26.445 -    struct iremap_entry *iremap;         /* interrupt remap table */
  26.446 -    int iremap_index;                    /* interrupt remap index */
  26.447 -    spinlock_t iremap_lock;      /* lock for irq remappping table */
  26.448 -};
  26.449 -
  26.450 -struct iommu_flush {
  26.451 -    int (*context)(void *iommu, u16 did, u16 source_id,
  26.452 -                   u8 function_mask, u64 type, int non_present_entry_flush);
  26.453 -    int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order,
  26.454 -                 u64 type, int non_present_entry_flush);
  26.455 -};
  26.456 -
  26.457 -struct intel_iommu {
  26.458 -    struct qi_ctrl qi_ctrl;
  26.459 -    struct ir_ctrl ir_ctrl;
  26.460 -    struct iommu_flush flush; 
  26.461 -};
  26.462 -
  26.463 -#endif
    27.1 --- a/xen/include/asm-x86/io_apic.h	Sun Mar 16 14:11:34 2008 +0000
    27.2 +++ b/xen/include/asm-x86/io_apic.h	Mon Mar 17 10:45:24 2008 +0000
    27.3 @@ -6,7 +6,7 @@
    27.4  #include <asm/mpspec.h>
    27.5  #include <asm/apicdef.h>
    27.6  #include <asm/fixmap.h>
    27.7 -#include <asm/iommu.h>
    27.8 +#include <xen/iommu.h>
    27.9  
   27.10  /*
   27.11   * Intel IO-APIC support for SMP and UP systems.
    28.1 --- a/xen/include/asm-x86/iommu.h	Sun Mar 16 14:11:34 2008 +0000
    28.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.3 @@ -1,109 +0,0 @@
    28.4 -/*
    28.5 - * Copyright (c) 2006, Intel Corporation.
    28.6 - *
    28.7 - * This program is free software; you can redistribute it and/or modify it
    28.8 - * under the terms and conditions of the GNU General Public License,
    28.9 - * version 2, as published by the Free Software Foundation.
   28.10 - *
   28.11 - * This program is distributed in the hope it will be useful, but WITHOUT
   28.12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   28.13 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   28.14 - * more details.
   28.15 - *
   28.16 - * You should have received a copy of the GNU General Public License along with
   28.17 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   28.18 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   28.19 - *
   28.20 - * Copyright (C) Allen Kay <allen.m.kay@intel.com>
   28.21 - */
   28.22 -
   28.23 -#ifndef _IOMMU_H_
   28.24 -#define _IOMMU_H_
   28.25 -
   28.26 -#include <xen/init.h>
   28.27 -#include <xen/list.h>
   28.28 -#include <xen/spinlock.h>
   28.29 -#include <asm/hvm/vmx/intel-iommu.h>
   28.30 -#include <public/hvm/ioreq.h>
   28.31 -#include <public/domctl.h>
   28.32 -
   28.33 -extern int vtd_enabled;
   28.34 -extern int amd_iommu_enabled;
   28.35 -
   28.36 -#define iommu_enabled ( amd_iommu_enabled || vtd_enabled )
   28.37 -#define domain_hvm_iommu(d)     (&d->arch.hvm_domain.hvm_iommu)
   28.38 -#define domain_vmx_iommu(d)     (&d->arch.hvm_domain.hvm_iommu.vmx_iommu)
   28.39 -#define iommu_qi_ctrl(iommu)    (&(iommu->intel.qi_ctrl));
   28.40 -#define iommu_ir_ctrl(iommu)    (&(iommu->intel.ir_ctrl));
   28.41 -#define iommu_get_flush(iommu)  (&(iommu->intel.flush));
   28.42 -
   28.43 -/*
   28.44 - * The PCI interface treats multi-function devices as independent
   28.45 - * devices.  The slot/function address of each device is encoded
   28.46 - * in a single byte as follows:
   28.47 - *
   28.48 - * 15:8 = bus
   28.49 - *  7:3 = slot
   28.50 - *  2:0 = function
   28.51 - */
   28.52 -#define PCI_DEVFN(slot,func)  (((slot & 0x1f) << 3) | (func & 0x07))
   28.53 -#define PCI_SLOT(devfn)       (((devfn) >> 3) & 0x1f)
   28.54 -#define PCI_FUNC(devfn)       ((devfn) & 0x07)
   28.55 -
   28.56 -struct pci_dev {
   28.57 -    struct list_head list;
   28.58 -    u8 bus;
   28.59 -    u8 devfn;
   28.60 -};
   28.61 -
   28.62 -struct iommu {
   28.63 -    struct list_head list;
   28.64 -    void __iomem *reg; /* Pointer to hardware regs, virtual addr */
   28.65 -    u32	gcmd;          /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
   28.66 -    u64	cap;
   28.67 -    u64	ecap;
   28.68 -    spinlock_t lock; /* protect context, domain ids */
   28.69 -    spinlock_t register_lock; /* protect iommu register handling */
   28.70 -    struct root_entry *root_entry; /* virtual address */
   28.71 -    unsigned int vector;
   28.72 -    struct intel_iommu intel;
   28.73 -};
   28.74 -
   28.75 -int iommu_setup(void);
   28.76 -int iommu_domain_init(struct domain *d);
   28.77 -void iommu_domain_destroy(struct domain *d);
   28.78 -int device_assigned(u8 bus, u8 devfn);
   28.79 -int assign_device(struct domain *d, u8 bus, u8 devfn);
   28.80 -void deassign_device(struct domain *d, u8 bus, u8 devfn);
   28.81 -void reassign_device_ownership(struct domain *source,
   28.82 -                               struct domain *target,
   28.83 -                               u8 bus, u8 devfn);
   28.84 -int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
   28.85 -int iommu_unmap_page(struct domain *d, unsigned long gfn);
   28.86 -void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry);
   28.87 -void iommu_set_pgd(struct domain *d);
   28.88 -void iommu_domain_teardown(struct domain *d);
   28.89 -int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq);
   28.90 -int dpci_ioport_intercept(ioreq_t *p);
   28.91 -int pt_irq_create_bind_vtd(struct domain *d,
   28.92 -                           xen_domctl_bind_pt_irq_t *pt_irq_bind);
   28.93 -int pt_irq_destroy_bind_vtd(struct domain *d,
   28.94 -                            xen_domctl_bind_pt_irq_t *pt_irq_bind);
   28.95 -unsigned int io_apic_read_remap_rte(
   28.96 -    unsigned int apic, unsigned int reg);
   28.97 -void io_apic_write_remap_rte(unsigned int apic,
   28.98 -    unsigned int reg, unsigned int value);
   28.99 -
  28.100 -#define PT_IRQ_TIME_OUT MILLISECS(8)
  28.101 -#define VTDPREFIX "[VT-D]"
  28.102 -
  28.103 -struct iommu_ops {
  28.104 -    int (*init)(struct domain *d);
  28.105 -    int (*assign_device)(struct domain *d, u8 bus, u8 devfn);
  28.106 -    void (*teardown)(struct domain *d);
  28.107 -    int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn);
  28.108 -    int (*unmap_page)(struct domain *d, unsigned long gfn);
  28.109 -    void (*reassign_device)(struct domain *s, struct domain *t, u8 bus, u8 devfn);
  28.110 -};
  28.111 -
  28.112 -#endif /* _IOMMU_H_ */
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/xen/include/xen/hvm/iommu.h	Mon Mar 17 10:45:24 2008 +0000
    29.3 @@ -0,0 +1,56 @@
    29.4 +/*
    29.5 + * Copyright (c) 2006, Intel Corporation.
    29.6 + *
    29.7 + * This program is free software; you can redistribute it and/or modify it
    29.8 + * under the terms and conditions of the GNU General Public License,
    29.9 + * version 2, as published by the Free Software Foundation.
   29.10 + *
   29.11 + * This program is distributed in the hope it will be useful, but WITHOUT
   29.12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   29.13 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   29.14 + * more details.
   29.15 + *
   29.16 + * You should have received a copy of the GNU General Public License along with
   29.17 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   29.18 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   29.19 + *
   29.20 + * Copyright (C) Allen Kay <allen.m.kay@intel.com>
   29.21 + */
   29.22 +
   29.23 +#ifndef __ASM_X86_HVM_IOMMU_H__
   29.24 +#define __ASM_X86_HVM_IOMMU_H__
   29.25 +
   29.26 +#include <xen/iommu.h>
   29.27 +#include <asm/hvm/irq.h>
   29.28 +#include <asm/hvm/vpt.h>
   29.29 +#include <asm/hvm/vlapic.h>
   29.30 +#include <asm/hvm/io.h>
   29.31 +#include <public/hvm/params.h>
   29.32 +#include <public/hvm/save.h>
   29.33 +
   29.34 +struct g2m_ioport {
   29.35 +    struct list_head list;
   29.36 +    unsigned int gport;
   29.37 +    unsigned int mport;
   29.38 +    unsigned int np;
   29.39 +};
   29.40 +
   29.41 +struct hvm_iommu {
   29.42 +    spinlock_t iommu_list_lock;    /* protect iommu specific lists */
   29.43 +    struct list_head pdev_list;    /* direct accessed pci devices */
   29.44 +    struct dma_pte *pgd;           /* io page directory root */
   29.45 +    spinlock_t mapping_lock;       /* io page table lock */
   29.46 +    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
   29.47 +    struct list_head g2m_ioport_list;  /* guest to machine ioport mapping */
   29.48 +    domid_t iommu_domid;           /* domain id stored in iommu */
   29.49 +
   29.50 +    /* amd iommu support */
   29.51 +    int domain_id;
   29.52 +    int paging_mode;
   29.53 +    void *root_table;
   29.54 +
   29.55 +    /* iommu_ops */
   29.56 +    struct iommu_ops *platform_ops;
   29.57 +};
   29.58 +
   29.59 +#endif // __ASM_X86_HVM_IOMMU_H__
    30.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.2 +++ b/xen/include/xen/iommu.h	Mon Mar 17 10:45:24 2008 +0000
    30.3 @@ -0,0 +1,115 @@
    30.4 +/*
    30.5 + * Copyright (c) 2006, Intel Corporation.
    30.6 + *
    30.7 + * This program is free software; you can redistribute it and/or modify it
    30.8 + * under the terms and conditions of the GNU General Public License,
    30.9 + * version 2, as published by the Free Software Foundation.
   30.10 + *
   30.11 + * This program is distributed in the hope it will be useful, but WITHOUT
   30.12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   30.13 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   30.14 + * more details.
   30.15 + *
   30.16 + * You should have received a copy of the GNU General Public License along with
   30.17 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   30.18 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   30.19 + *
   30.20 + * Copyright (C) Allen Kay <allen.m.kay@intel.com>
   30.21 + */
   30.22 +
   30.23 +#ifndef _IOMMU_H_
   30.24 +#define _IOMMU_H_
   30.25 +
   30.26 +#include <xen/init.h>
   30.27 +#include <xen/list.h>
   30.28 +#include <xen/spinlock.h>
   30.29 +#include <public/hvm/ioreq.h>
   30.30 +#include <public/domctl.h>
   30.31 +
   30.32 +extern int vtd_enabled;
   30.33 +extern int amd_iommu_enabled;
   30.34 +
   30.35 +#define iommu_enabled ( amd_iommu_enabled || vtd_enabled )
   30.36 +#define domain_hvm_iommu(d)     (&d->arch.hvm_domain.hvm_iommu)
   30.37 +#define domain_vmx_iommu(d)     (&d->arch.hvm_domain.hvm_iommu.vmx_iommu)
   30.38 +
   30.39 +#define MAX_IOMMUS 32
   30.40 +
   30.41 +#define PAGE_SHIFT_4K       (12)
   30.42 +#define PAGE_SIZE_4K        (1UL << PAGE_SHIFT_4K)
   30.43 +#define PAGE_MASK_4K        (((u64)-1) << PAGE_SHIFT_4K)
   30.44 +#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
   30.45 +
   30.46 +/*
   30.47 + * The PCI interface treats multi-function devices as independent
   30.48 + * devices.  The slot/function address of each device is encoded
   30.49 + * in a single byte as follows:
   30.50 + *
   30.51 + * 15:8 = bus
   30.52 + *  7:3 = slot
   30.53 + *  2:0 = function
   30.54 + */
   30.55 +#define PCI_DEVFN(slot,func)  (((slot & 0x1f) << 3) | (func & 0x07))
   30.56 +#define PCI_SLOT(devfn)       (((devfn) >> 3) & 0x1f)
   30.57 +#define PCI_FUNC(devfn)       ((devfn) & 0x07)
   30.58 +
   30.59 +struct pci_dev {
   30.60 +    struct list_head list;
   30.61 +    u8 bus;
   30.62 +    u8 devfn;
   30.63 +};
   30.64 +
   30.65 +struct iommu {
   30.66 +    struct list_head list;
   30.67 +    void __iomem *reg; /* Pointer to hardware regs, virtual addr */
   30.68 +    u32	gcmd;          /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
   30.69 +    u64	cap;
   30.70 +    u64	ecap;
   30.71 +    spinlock_t lock; /* protect context, domain ids */
   30.72 +    spinlock_t register_lock; /* protect iommu register handling */
   30.73 +    struct root_entry *root_entry; /* virtual address */
   30.74 +    unsigned int vector;
   30.75 +    struct intel_iommu *intel;
   30.76 +};
   30.77 +
   30.78 +int iommu_setup(void);
   30.79 +int iommu_domain_init(struct domain *d);
   30.80 +void iommu_domain_destroy(struct domain *d);
   30.81 +int device_assigned(u8 bus, u8 devfn);
   30.82 +int assign_device(struct domain *d, u8 bus, u8 devfn);
   30.83 +void deassign_device(struct domain *d, u8 bus, u8 devfn);
   30.84 +void reassign_device_ownership(struct domain *source,
   30.85 +                               struct domain *target,
   30.86 +                               u8 bus, u8 devfn);
   30.87 +int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
   30.88 +int iommu_unmap_page(struct domain *d, unsigned long gfn);
   30.89 +void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry);
   30.90 +void iommu_set_pgd(struct domain *d);
   30.91 +void iommu_domain_teardown(struct domain *d);
   30.92 +int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq);
   30.93 +int dpci_ioport_intercept(ioreq_t *p);
   30.94 +int pt_irq_create_bind_vtd(struct domain *d,
   30.95 +                           xen_domctl_bind_pt_irq_t *pt_irq_bind);
   30.96 +int pt_irq_destroy_bind_vtd(struct domain *d,
   30.97 +                            xen_domctl_bind_pt_irq_t *pt_irq_bind);
   30.98 +unsigned int io_apic_read_remap_rte(unsigned int apic, unsigned int reg);
   30.99 +void io_apic_write_remap_rte(unsigned int apic,
  30.100 +                             unsigned int reg, unsigned int value);
  30.101 +struct qi_ctrl *iommu_qi_ctrl(struct iommu *iommu);
  30.102 +struct ir_ctrl *iommu_ir_ctrl(struct iommu *iommu);
  30.103 +struct iommu_flush *iommu_get_flush(struct iommu *iommu);
  30.104 +
  30.105 +#define PT_IRQ_TIME_OUT MILLISECS(8)
  30.106 +#define VTDPREFIX "[VT-D]"
  30.107 +
  30.108 +struct iommu_ops {
  30.109 +    int (*init)(struct domain *d);
  30.110 +    int (*assign_device)(struct domain *d, u8 bus, u8 devfn);
  30.111 +    void (*teardown)(struct domain *d);
  30.112 +    int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn);
  30.113 +    int (*unmap_page)(struct domain *d, unsigned long gfn);
  30.114 +    void (*reassign_device)(struct domain *s, struct domain *t,
  30.115 +                            u8 bus, u8 devfn);
  30.116 +};
  30.117 +
  30.118 +#endif /* _IOMMU_H_ */