ia64/xen-unstable

changeset 18657:e57ca7937ae8

iommu: make some functions (mainly MSI-related) dummy on IA64 for now.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
Signed-off-by: Dexuan Cui <dexuan.cui@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Oct 20 15:13:02 2008 +0100 (2008-10-20)
parents d8a156bdef14
children 10a2069a1edb
files xen/drivers/passthrough/io.c xen/drivers/passthrough/pci.c xen/drivers/passthrough/vtd/iommu.c xen/drivers/passthrough/vtd/utils.c xen/include/asm-x86/hvm/irq.h
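Note on the idiom used below: the change keys everything off plain conditional compilation with SUPPORT_MSI_REMAPPING, which only the x86 asm/hvm/irq.h defines (last hunk of the diff). A minimal sketch of the dummy-stub variant, as applied to dma_msi_addr_init() in vtd/iommu.c, might look like this (msi_only_init() is a made-up name for illustration only):

	/*
	 * Illustrative sketch only -- msi_only_init() is hypothetical; the real
	 * examples are dma_msi_addr_init() in vtd/iommu.c and hvm_dpci_msi_eoi()
	 * in io.c below.
	 */
	#include <asm/hvm/irq.h>        /* x86 defines SUPPORT_MSI_REMAPPING here */

	#ifdef SUPPORT_MSI_REMAPPING
	static void msi_only_init(struct iommu *iommu)
	{
	    /* real MSI programming, reachable only on x86 */
	}
	#else
	static void msi_only_init(struct iommu *iommu)
	{
	    /* ia64: TODO -- MSI not supported yet; keep the symbol but do nothing */
	}
	#endif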
line diff
     1.1 --- a/xen/drivers/passthrough/io.c	Mon Oct 20 15:11:41 2008 +0100
     1.2 +++ b/xen/drivers/passthrough/io.c	Mon Oct 20 15:13:02 2008 +0100
     1.3 @@ -20,6 +20,9 @@
     1.4  
     1.5  #include <xen/event.h>
     1.6  #include <xen/iommu.h>
     1.7 +#include <asm/hvm/irq.h>
     1.8 +#include <asm/hvm/iommu.h>
     1.9 +#include <xen/hvm/irq.h>
    1.10  
    1.11  static void pt_irq_time_out(void *data)
    1.12  {
    1.13 @@ -245,6 +248,7 @@ int hvm_do_IRQ_dpci(struct domain *d, un
    1.14      return 1;
    1.15  }
    1.16  
    1.17 +#ifdef SUPPORT_MSI_REMAPPING
    1.18  void hvm_dpci_msi_eoi(struct domain *d, int vector)
    1.19  {
    1.20      struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
    1.21 @@ -277,6 +281,7 @@ void hvm_dpci_msi_eoi(struct domain *d, 
    1.22  
    1.23      spin_unlock(&d->event_lock);
    1.24  }
    1.25 +#endif
    1.26  
    1.27  void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
    1.28                    union vioapic_redir_entry *ent)
     2.1 --- a/xen/drivers/passthrough/pci.c	Mon Oct 20 15:11:41 2008 +0100
     2.2 +++ b/xen/drivers/passthrough/pci.c	Mon Oct 20 15:13:02 2008 +0100
     2.3 @@ -21,6 +21,8 @@
     2.4  #include <xen/list.h>
     2.5  #include <xen/prefetch.h>
     2.6  #include <xen/iommu.h>
     2.7 +#include <asm/hvm/iommu.h>
     2.8 +#include <asm/hvm/irq.h>
     2.9  #include <xen/delay.h>
    2.10  #include <xen/keyhandler.h>
    2.11  
    2.12 @@ -207,6 +209,7 @@ void pci_release_devices(struct domain *
    2.13      }
    2.14  }
    2.15  
    2.16 +#ifdef SUPPORT_MSI_REMAPPING
    2.17  static void dump_pci_devices(unsigned char ch)
    2.18  {
    2.19      struct pci_dev *pdev;
    2.20 @@ -236,7 +239,7 @@ static int __init setup_dump_pcidevs(voi
    2.21      return 0;
    2.22  }
    2.23  __initcall(setup_dump_pcidevs);
    2.24 -
    2.25 +#endif
    2.26  
    2.27  
    2.28  /*
     3.1 --- a/xen/drivers/passthrough/vtd/iommu.c	Mon Oct 20 15:11:41 2008 +0100
     3.2 +++ b/xen/drivers/passthrough/vtd/iommu.c	Mon Oct 20 15:13:02 2008 +0100
     3.3 @@ -24,6 +24,7 @@
     3.4  #include <xen/xmalloc.h>
     3.5  #include <xen/domain_page.h>
     3.6  #include <xen/iommu.h>
     3.7 +#include <asm/hvm/iommu.h>
     3.8  #include <xen/numa.h>
     3.9  #include <xen/time.h>
    3.10  #include <xen/pci.h>
    3.11 @@ -857,6 +858,7 @@ static void dma_msi_data_init(struct iom
    3.12      spin_unlock_irqrestore(&iommu->register_lock, flags);
    3.13  }
    3.14  
    3.15 +#ifdef SUPPORT_MSI_REMAPPING
    3.16  static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu)
    3.17  {
    3.18      u64 msi_address;
    3.19 @@ -873,6 +875,12 @@ static void dma_msi_addr_init(struct iom
    3.20      dmar_writel(iommu->reg, DMAR_FEUADDR_REG, (u32)(msi_address >> 32));
    3.21      spin_unlock_irqrestore(&iommu->register_lock, flags);
    3.22  }
    3.23 +#else
    3.24 +static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu)
    3.25 +{
    3.26 +    /* ia64: TODO */
    3.27 +}
    3.28 +#endif
    3.29  
    3.30  static void dma_msi_set_affinity(unsigned int vector, cpumask_t dest)
    3.31  {
     4.1 --- a/xen/drivers/passthrough/vtd/utils.c	Mon Oct 20 15:11:41 2008 +0100
     4.2 +++ b/xen/drivers/passthrough/vtd/utils.c	Mon Oct 20 15:13:02 2008 +0100
     4.3 @@ -204,6 +204,7 @@ void print_vtd_entries(struct iommu *iom
     4.4  
     4.5  void dump_iommu_info(unsigned char key)
     4.6  {
     4.7 +#if defined(__i386__) || defined(__x86_64__)
     4.8      struct acpi_drhd_unit *drhd;
     4.9      struct iommu *iommu;
    4.10      int i;
    4.11 @@ -305,6 +306,10 @@ void dump_iommu_info(unsigned char key)
    4.12              }
    4.13          }
    4.14      }
    4.15 +#else
     4.16 +    printk("%s: not implemented on IA64 for now.\n", __func__);
    4.17 +    /* ia64: TODO */
    4.18 +#endif
    4.19  }
    4.20  
    4.21  /*
     5.1 --- a/xen/include/asm-x86/hvm/irq.h	Mon Oct 20 15:11:41 2008 +0100
     5.2 +++ b/xen/include/asm-x86/hvm/irq.h	Mon Oct 20 15:13:02 2008 +0100
     5.3 @@ -102,4 +102,12 @@ struct hvm_irq {
     5.4  struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v);
     5.5  struct hvm_intack hvm_vcpu_ack_pending_irq(struct vcpu *v,
     5.6                                             struct hvm_intack intack);
     5.7 +
     5.8 +/*
      5.9 + * IA64 Xen does not support MSI yet, so this macro is defined for x86 only
     5.10 + * and controls the conditional compilation of some MSI-related functions.
     5.11 + * It will be removed once IA64 gains MSI support.
    5.12 + */
    5.13 +#define SUPPORT_MSI_REMAPPING 1
    5.14 +
    5.15  #endif /* __ASM_X86_HVM_IRQ_H__ */
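Two variants of the guard appear above: dma_msi_addr_init() keeps an empty IA64 stub so its caller needs no change, while hvm_dpci_msi_eoi() and dump_pci_devices() are compiled out entirely, so any caller must be guarded the same way. A hedged sketch of such a caller-side guard (the wrapper name is hypothetical and not part of this changeset; only hvm_dpci_msi_eoi() is real):

	/* Hypothetical caller-side guard; shown for illustration only. */
	static void forward_msi_eoi(struct domain *d, int vector)
	{
	#ifdef SUPPORT_MSI_REMAPPING
	    hvm_dpci_msi_eoi(d, vector);    /* x86: hand the EOI to the dpci layer */
	#else
	    /* ia64: hvm_dpci_msi_eoi() is not built here; nothing to do */
	    (void)d;
	    (void)vector;
	#endif
	}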