ia64/xen-unstable

changeset 17974:1e9df5cb885f

PCI device register/unregister + pci_dev cleanups

Move pci_dev lists from hvm to arch_domain

Move the pci_dev list from hvm to arch_domain since PCI devs are no
longer hvm-specific. Also removed the locking for the pci_dev lists;
the locks will be reintroduced later.

Signed-off-by: Espen Skoglund <espen.skoglund@netronome.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jul 04 17:51:16 2008 +0100 (2008-07-04)
parents e42135b61dc6
children bd7f2a120f94
files xen/arch/x86/domain.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/mm/shadow/multi.c xen/drivers/passthrough/amd/pci_amd_iommu.c xen/drivers/passthrough/iommu.c xen/drivers/passthrough/vtd/dmar.h xen/drivers/passthrough/vtd/iommu.c xen/include/asm-x86/domain.h xen/include/asm-x86/hvm/svm/amd-iommu-proto.h xen/include/xen/hvm/iommu.h xen/include/xen/pci.h
line diff
     1.1 --- a/xen/arch/x86/domain.c	Fri Jul 04 17:50:31 2008 +0100
     1.2 +++ b/xen/arch/x86/domain.c	Fri Jul 04 17:51:16 2008 +0100
     1.3 @@ -350,6 +350,8 @@ int arch_domain_create(struct domain *d,
     1.4          hvm_funcs.hap_supported &&
     1.5          (domcr_flags & DOMCRF_hap);
     1.6  
     1.7 +    INIT_LIST_HEAD(&d->arch.pdev_list);
     1.8 +
     1.9      d->arch.relmem = RELMEM_not_started;
    1.10      INIT_LIST_HEAD(&d->arch.relmem_list);
    1.11  
     2.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Jul 04 17:50:31 2008 +0100
     2.2 +++ b/xen/arch/x86/hvm/hvm.c	Fri Jul 04 17:51:16 2008 +0100
     2.3 @@ -911,7 +911,7 @@ int hvm_set_cr0(unsigned long value)
     2.4          }
     2.5      }
     2.6  
     2.7 -    if ( !list_empty(&domain_hvm_iommu(v->domain)->pdev_list) )
     2.8 +    if ( has_arch_pdevs(v->domain) )
     2.9      {
    2.10          if ( (value & X86_CR0_CD) && !(value & X86_CR0_NW) )
    2.11          {
     3.1 --- a/xen/arch/x86/hvm/svm/svm.c	Fri Jul 04 17:50:31 2008 +0100
     3.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Fri Jul 04 17:51:16 2008 +0100
     3.3 @@ -1132,7 +1132,7 @@ static void wbinvd_ipi(void *info)
     3.4  
     3.5  static void svm_wbinvd_intercept(void)
     3.6  {
     3.7 -    if ( !list_empty(&(domain_hvm_iommu(current->domain)->pdev_list)) )
     3.8 +    if ( has_arch_pdevs(current->domain) )
     3.9          on_each_cpu(wbinvd_ipi, NULL, 1, 1);
    3.10  }
    3.11  
     4.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Fri Jul 04 17:50:31 2008 +0100
     4.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Fri Jul 04 17:51:16 2008 +0100
     4.3 @@ -849,8 +849,7 @@ void vmx_do_resume(struct vcpu *v)
     4.4           *     there is no wbinvd exit, or
     4.5           *  2: execute wbinvd on all dirty pCPUs when guest wbinvd exits.
     4.6           */
     4.7 -        if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) &&
     4.8 -             !cpu_has_wbinvd_exiting )
     4.9 +        if ( has_arch_pdevs(v->domain) && !cpu_has_wbinvd_exiting )
    4.10          {
    4.11              int cpu = v->arch.hvm_vmx.active_cpu;
    4.12              if ( cpu != -1 )
     5.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Jul 04 17:50:31 2008 +0100
     5.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Jul 04 17:51:16 2008 +0100
     5.3 @@ -1926,7 +1926,7 @@ static void wbinvd_ipi(void *info)
     5.4  
     5.5  static void vmx_wbinvd_intercept(void)
     5.6  {
     5.7 -    if ( list_empty(&(domain_hvm_iommu(current->domain)->pdev_list)) )
     5.8 +    if ( !has_arch_pdevs(current->domain) )
     5.9          return;
    5.10  
    5.11      if ( cpu_has_wbinvd_exiting )
     6.1 --- a/xen/arch/x86/mm/shadow/multi.c	Fri Jul 04 17:50:31 2008 +0100
     6.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Fri Jul 04 17:51:16 2008 +0100
     6.3 @@ -840,8 +840,7 @@ static always_inline void
     6.4       * For HVM domains with direct access to MMIO areas, set the correct
     6.5       * caching attributes in the shadows to match what was asked for.
     6.6       */
     6.7 -    if ( (level == 1) && is_hvm_domain(d) &&
     6.8 -         !list_empty(&(domain_hvm_iommu(d)->pdev_list)) &&
     6.9 +    if ( (level == 1) && is_hvm_domain(d) && has_arch_pdevs(d) &&
    6.10           !is_xen_heap_mfn(mfn_x(target_mfn)) )
    6.11      {
    6.12          unsigned int type;
     7.1 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Jul 04 17:50:31 2008 +0100
     7.2 +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Jul 04 17:51:16 2008 +0100
     7.3 @@ -292,7 +292,6 @@ static void amd_iommu_setup_domain_devic
     7.4  
     7.5  static void amd_iommu_setup_dom0_devices(struct domain *d)
     7.6  {
     7.7 -    struct hvm_iommu *hd = domain_hvm_iommu(d);
     7.8      struct amd_iommu *iommu;
     7.9      struct pci_dev *pdev;
    7.10      int bus, dev, func;
    7.11 @@ -314,7 +313,7 @@ static void amd_iommu_setup_dom0_devices
    7.12                  pdev = xmalloc(struct pci_dev);
    7.13                  pdev->bus = bus;
    7.14                  pdev->devfn = PCI_DEVFN(dev, func);
    7.15 -                list_add_tail(&pdev->list, &hd->pdev_list);
    7.16 +                list_add_tail(&pdev->domain_list, &d->arch.pdev_list);
    7.17  
    7.18                  bdf = (bus << 8) | pdev->devfn;
    7.19                  /* supported device? */
    7.20 @@ -490,12 +489,9 @@ extern void pdev_flr(u8 bus, u8 devfn);
    7.21  static int reassign_device( struct domain *source, struct domain *target,
    7.22                              u8 bus, u8 devfn)
    7.23  {
    7.24 -    struct hvm_iommu *source_hd = domain_hvm_iommu(source);
    7.25 -    struct hvm_iommu *target_hd = domain_hvm_iommu(target);
    7.26      struct pci_dev *pdev;
    7.27      struct amd_iommu *iommu;
    7.28      int bdf;
    7.29 -    unsigned long flags;
    7.30  
    7.31      for_each_pdev ( source, pdev )
    7.32      {
    7.33 @@ -520,11 +516,7 @@ static int reassign_device( struct domai
    7.34  
    7.35          amd_iommu_disable_domain_device(source, iommu, bdf);
    7.36          /* Move pci device from the source domain to target domain. */
    7.37 -        spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
    7.38 -        spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
    7.39 -        list_move(&pdev->list, &target_hd->pdev_list);
    7.40 -        spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
    7.41 -        spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
    7.42 +        list_move(&pdev->domain_list, &target->arch.pdev_list);
    7.43  
    7.44          amd_iommu_setup_domain_device(target, iommu, bdf);
    7.45          amd_iov_info("reassign %x:%x.%x domain %d -> domain %d\n",
    7.46 @@ -559,12 +551,11 @@ static int amd_iommu_assign_device(struc
    7.47  
    7.48  static void release_domain_devices(struct domain *d)
    7.49  {
    7.50 -    struct hvm_iommu *hd  = domain_hvm_iommu(d);
    7.51      struct pci_dev *pdev;
    7.52  
    7.53 -    while ( !list_empty(&hd->pdev_list) )
    7.54 +    while ( has_arch_pdevs(d) )
    7.55      {
    7.56 -        pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
    7.57 +        pdev = list_entry(d->arch.pdev_list.next, typeof(*pdev), domain_list);
    7.58          pdev_flr(pdev->bus, pdev->devfn);
    7.59          amd_iov_info("release domain %d devices %x:%x.%x\n", d->domain_id,
    7.60                   pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
     8.1 --- a/xen/drivers/passthrough/iommu.c	Fri Jul 04 17:50:31 2008 +0100
     8.2 +++ b/xen/drivers/passthrough/iommu.c	Fri Jul 04 17:51:16 2008 +0100
     8.3 @@ -35,8 +35,6 @@ int iommu_domain_init(struct domain *dom
     8.4      struct hvm_iommu *hd = domain_hvm_iommu(domain);
     8.5  
     8.6      spin_lock_init(&hd->mapping_lock);
     8.7 -    spin_lock_init(&hd->iommu_list_lock);
     8.8 -    INIT_LIST_HEAD(&hd->pdev_list);
     8.9      INIT_LIST_HEAD(&hd->g2m_ioport_list);
    8.10  
    8.11      if ( !iommu_enabled )
    8.12 @@ -68,7 +66,7 @@ int assign_device(struct domain *d, u8 b
    8.13      if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) )
    8.14          return rc;
    8.15  
    8.16 -    if ( has_iommu_pdevs(d) && !is_hvm_domain(d) && !need_iommu(d) )
    8.17 +    if ( has_arch_pdevs(d) && !is_hvm_domain(d) && !need_iommu(d) )
    8.18      {
    8.19          d->need_iommu = 1;
    8.20          return iommu_populate_page_table(d);
    8.21 @@ -190,7 +188,7 @@ void deassign_device(struct domain *d, u
    8.22  
    8.23      hd->platform_ops->reassign_device(d, dom0, bus, devfn);
    8.24  
    8.25 -    if ( !has_iommu_pdevs(d) && need_iommu(d) )
    8.26 +    if ( !has_arch_pdevs(d) && need_iommu(d) )
    8.27      {
    8.28          d->need_iommu = 0;
    8.29          hd->platform_ops->teardown(d);
    8.30 @@ -242,8 +240,7 @@ int iommu_get_device_group(struct domain
    8.31  
    8.32      group_id = ops->get_device_group_id(bus, devfn);
    8.33  
    8.34 -    list_for_each_entry(pdev,
    8.35 -        &(dom0->arch.hvm_domain.hvm_iommu.pdev_list), list)
    8.36 +    for_each_pdev( d, pdev )
    8.37      {
    8.38          if ( (pdev->bus == bus) && (pdev->devfn == devfn) )
    8.39              continue;
     9.1 --- a/xen/drivers/passthrough/vtd/dmar.h	Fri Jul 04 17:50:31 2008 +0100
     9.2 +++ b/xen/drivers/passthrough/vtd/dmar.h	Fri Jul 04 17:51:16 2008 +0100
     9.3 @@ -70,10 +70,6 @@ struct acpi_atsr_unit {
     9.4      list_for_each_entry(iommu, \
     9.5          &(domain->arch.hvm_domain.hvm_iommu.iommu_list), list)
     9.6  
     9.7 -#define for_each_pdev(domain, pdev) \
     9.8 -    list_for_each_entry(pdev, \
     9.9 -         &(domain->arch.hvm_domain.hvm_iommu.pdev_list), list)
    9.10 -
    9.11  #define for_each_drhd_unit(drhd) \
    9.12      list_for_each_entry(drhd, &acpi_drhd_units, list)
    9.13  #define for_each_rmrr_device(rmrr, pdev) \
    10.1 --- a/xen/drivers/passthrough/vtd/iommu.c	Fri Jul 04 17:50:31 2008 +0100
    10.2 +++ b/xen/drivers/passthrough/vtd/iommu.c	Fri Jul 04 17:51:16 2008 +0100
    10.3 @@ -1023,8 +1023,6 @@ static int intel_iommu_domain_init(struc
    10.4      u64 i;
    10.5      struct acpi_drhd_unit *drhd;
    10.6  
    10.7 -    INIT_LIST_HEAD(&hd->pdev_list);
    10.8 -
    10.9      drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
   10.10      iommu = drhd->iommu;
   10.11  
   10.12 @@ -1366,12 +1364,10 @@ void reassign_device_ownership(
   10.13      u8 bus, u8 devfn)
   10.14  {
   10.15      struct hvm_iommu *source_hd = domain_hvm_iommu(source);
   10.16 -    struct hvm_iommu *target_hd = domain_hvm_iommu(target);
   10.17      struct pci_dev *pdev, *pdev2;
   10.18      struct acpi_drhd_unit *drhd;
   10.19      struct iommu *iommu;
   10.20      int status;
   10.21 -    unsigned long flags;
   10.22      int found = 0;
   10.23  
   10.24      pdev_flr(bus, devfn);
   10.25 @@ -1388,11 +1384,7 @@ void reassign_device_ownership(
   10.26      domain_context_unmap(iommu, pdev);
   10.27  
   10.28      /* Move pci device from the source domain to target domain. */
   10.29 -    spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
   10.30 -    spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
   10.31 -    list_move(&pdev->list, &target_hd->pdev_list);
   10.32 -    spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
   10.33 -    spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
   10.34 +    list_move(&pdev->domain_list, &target->arch.pdev_list);
   10.35  
   10.36      for_each_pdev ( source, pdev2 )
   10.37      {
   10.38 @@ -1413,12 +1405,11 @@ void reassign_device_ownership(
   10.39  
   10.40  void return_devices_to_dom0(struct domain *d)
   10.41  {
   10.42 -    struct hvm_iommu *hd  = domain_hvm_iommu(d);
   10.43      struct pci_dev *pdev;
   10.44  
   10.45 -    while ( !list_empty(&hd->pdev_list) )
   10.46 +    while ( has_arch_pdevs(d) )
   10.47      {
   10.48 -        pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
   10.49 +        pdev = list_entry(d->arch.pdev_list.next, typeof(*pdev), domain_list);
   10.50          pci_cleanup_msi(pdev->bus, pdev->devfn);
   10.51          reassign_device_ownership(d, dom0, pdev->bus, pdev->devfn);
   10.52      }
   10.53 @@ -1631,7 +1622,7 @@ static void setup_dom0_devices(struct do
   10.54                  pdev = xmalloc(struct pci_dev);
   10.55                  pdev->bus = bus;
   10.56                  pdev->devfn = PCI_DEVFN(dev, func);
   10.57 -                list_add_tail(&pdev->list, &hd->pdev_list);
   10.58 +                list_add_tail(&pdev->domain_list, &d->arch.pdev_list);
   10.59  
   10.60                  drhd = acpi_find_matched_drhd_unit(pdev);
   10.61                  ret = domain_context_mapping(d, drhd->iommu, pdev);
    11.1 --- a/xen/include/asm-x86/domain.h	Fri Jul 04 17:50:31 2008 +0100
    11.2 +++ b/xen/include/asm-x86/domain.h	Fri Jul 04 17:51:16 2008 +0100
    11.3 @@ -228,6 +228,7 @@ struct arch_domain
    11.4      struct rangeset *ioport_caps;
    11.5      uint32_t pci_cf8;
    11.6  
    11.7 +    struct list_head pdev_list;
    11.8      struct hvm_domain hvm_domain;
    11.9  
   11.10      struct paging_domain paging;
   11.11 @@ -266,6 +267,9 @@ struct arch_domain
   11.12      cpuid_input_t cpuids[MAX_CPUID_INPUT];
   11.13  } __cacheline_aligned;
   11.14  
   11.15 +#define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
   11.16 +
   11.17 +
   11.18  #ifdef __i386__
   11.19  struct pae_l3_cache {
   11.20      /*
    12.1 --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Jul 04 17:50:31 2008 +0100
    12.2 +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Jul 04 17:51:16 2008 +0100
    12.3 @@ -28,10 +28,6 @@
    12.4      list_for_each_entry(amd_iommu, \
    12.5          &amd_iommu_head, list)
    12.6  
    12.7 -#define for_each_pdev(domain, pdev) \
    12.8 -    list_for_each_entry(pdev, \
    12.9 -         &(domain->arch.hvm_domain.hvm_iommu.pdev_list), list)
   12.10 -
   12.11  #define DMA_32BIT_MASK  0x00000000ffffffffULL
   12.12  #define PAGE_ALIGN(addr)    (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
   12.13  
    13.1 --- a/xen/include/xen/hvm/iommu.h	Fri Jul 04 17:50:31 2008 +0100
    13.2 +++ b/xen/include/xen/hvm/iommu.h	Fri Jul 04 17:51:16 2008 +0100
    13.3 @@ -36,8 +36,6 @@ struct g2m_ioport {
    13.4  };
    13.5  
    13.6  struct hvm_iommu {
    13.7 -    spinlock_t iommu_list_lock;    /* protect iommu specific lists */
    13.8 -    struct list_head pdev_list;    /* direct accessed pci devices */
    13.9      u64 pgd_maddr;                 /* io page directory machine address */
   13.10      spinlock_t mapping_lock;       /* io page table lock */
   13.11      int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
   13.12 @@ -55,7 +53,4 @@ struct hvm_iommu {
   13.13      struct iommu_ops *platform_ops;
   13.14  };
   13.15  
   13.16 -#define has_iommu_pdevs(domain) \
   13.17 -    (!list_empty(&(domain->arch.hvm_domain.hvm_iommu.pdev_list)))
   13.18 -
   13.19  #endif /* __ASM_X86_HVM_IOMMU_H__ */
    14.1 --- a/xen/include/xen/pci.h	Fri Jul 04 17:50:31 2008 +0100
    14.2 +++ b/xen/include/xen/pci.h	Fri Jul 04 17:51:16 2008 +0100
    14.3 @@ -25,7 +25,7 @@
    14.4  #define PCI_FUNC(devfn)       ((devfn) & 0x07)
    14.5  
    14.6  struct pci_dev {
    14.7 -    struct list_head list;
    14.8 +    struct list_head domain_list;
    14.9      struct list_head msi_dev_list;
   14.10      u8 bus;
   14.11      u8 devfn;
   14.12 @@ -50,4 +50,9 @@ void pci_conf_write32(
   14.13  int pci_find_cap_offset(u8 bus, u8 dev, u8 func, u8 cap);
   14.14  int pci_find_next_cap(u8 bus, unsigned int devfn, u8 pos, int cap);
   14.15  
   14.16 +
   14.17 +#define for_each_pdev(domain, pdev) \
   14.18 +    list_for_each_entry(pdev, &(domain->arch.pdev_list), domain_list)
   14.19 +
   14.20 +
   14.21  #endif /* __XEN_PCI_H__ */