ia64/xen-unstable

changeset 19187:1eb6afcad849

vtd: add support for multiple queued invalidation pages

Signed-off-by: Allen Kay <allen.m.kay@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Feb 09 14:23:51 2009 +0000 (2009-02-09)
parents 1d4ce9e31fa0
children 09ea7eea8122
files xen/drivers/passthrough/vtd/ia64/vtd.c xen/drivers/passthrough/vtd/intremap.c xen/drivers/passthrough/vtd/iommu.c xen/drivers/passthrough/vtd/iommu.h xen/drivers/passthrough/vtd/qinval.c xen/drivers/passthrough/vtd/vtd.h xen/drivers/passthrough/vtd/x86/vtd.c
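
For context before the diff: the queued-invalidation ring was previously a
single 4KB page; this change lets it span several contiguous pages. Below is
a minimal standalone sketch of the sizing arithmetic. NUM_QINVAL_PAGES,
MAX_QINVAL_PAGES, and the QINVAL_ENTRY_NR formula mirror the iommu.h hunk in
this changeset; the 16-byte entry size (sizeof(struct qinval_entry)) and the
queue base address are illustrative assumptions, not values taken from the
patch.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_4K      4096
    #define MAX_QINVAL_PAGES  8
    #define NUM_QINVAL_PAGES  1   /* default stays at one page */
    #define QINVAL_ENTRY_SIZE 16  /* assumed sizeof(struct qinval_entry) */
    #define QINVAL_ENTRY_NR   (PAGE_SIZE_4K * NUM_QINVAL_PAGES / QINVAL_ENTRY_SIZE)

    int main(void)
    {
        /* Hypothetical queue base; the patch obtains this from
         * alloc_pgtable_maddr(NULL, NUM_QINVAL_PAGES). */
        uint64_t qinval_maddr = 0x12345000ULL;

        /* As in the qinval_setup() hunk below: fold the page count
         * into the low bits of the value written to DMAR_IQA_REG. */
        if ( NUM_QINVAL_PAGES <= MAX_QINVAL_PAGES )
            qinval_maddr |= NUM_QINVAL_PAGES - 1;

        printf("entries: %lu  IQA value: %#llx\n",
               (unsigned long)QINVAL_ENTRY_NR,
               (unsigned long long)qinval_maddr);
        return 0;
    }

With the default NUM_QINVAL_PAGES of 1, behaviour is unchanged: 256 entries
and no size bits set in the IQA value.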
line diff
     1.1 --- a/xen/drivers/passthrough/vtd/ia64/vtd.c	Mon Feb 09 14:22:07 2009 +0000
     1.2 +++ b/xen/drivers/passthrough/vtd/ia64/vtd.c	Mon Feb 09 14:23:51 2009 +0000
     1.3 @@ -45,16 +45,17 @@ void unmap_vtd_domain_page(void *va)
     1.4  }
     1.5  
     1.6  /* Allocate page table, return its machine address */
     1.7 -u64 alloc_pgtable_maddr(struct domain *d)
     1.8 +u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages)
     1.9  {
    1.10      struct page_info *pg;
    1.11      u64 *vaddr;
    1.12  
    1.13 -    pg = alloc_domheap_page(NULL, d ? MEMF_node(domain_to_node(d)) : 0);
    1.14 +    pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
    1.15 +                             d ? MEMF_node(domain_to_node(d)) : 0);
    1.16      vaddr = map_domain_page(page_to_mfn(pg));
    1.17      if ( !vaddr )
    1.18          return 0;
    1.19 -    memset(vaddr, 0, PAGE_SIZE);
    1.20 +    memset(vaddr, 0, PAGE_SIZE * npages);
    1.21  
    1.22      iommu_flush_cache_page(vaddr);
    1.23      unmap_domain_page(vaddr);
     2.1 --- a/xen/drivers/passthrough/vtd/intremap.c	Mon Feb 09 14:22:07 2009 +0000
     2.2 +++ b/xen/drivers/passthrough/vtd/intremap.c	Mon Feb 09 14:23:51 2009 +0000
     2.3 @@ -502,7 +502,7 @@ int intremap_setup(struct iommu *iommu)
     2.4      ir_ctrl = iommu_ir_ctrl(iommu);
     2.5      if ( ir_ctrl->iremap_maddr == 0 )
     2.6      {
     2.7 -        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(NULL);
     2.8 +        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(NULL, 1);
     2.9          if ( ir_ctrl->iremap_maddr == 0 )
    2.10          {
    2.11              dprintk(XENLOG_WARNING VTDPREFIX,
     3.1 --- a/xen/drivers/passthrough/vtd/iommu.c	Mon Feb 09 14:22:07 2009 +0000
     3.2 +++ b/xen/drivers/passthrough/vtd/iommu.c	Mon Feb 09 14:23:51 2009 +0000
     3.3 @@ -129,9 +129,9 @@ void iommu_flush_cache_entry(void *addr)
     3.4      __iommu_flush_cache(addr, 8);
     3.5  }
     3.6  
     3.7 -void iommu_flush_cache_page(void *addr)
     3.8 +void iommu_flush_cache_page(void *addr, unsigned long npages)
     3.9  {
    3.10 -    __iommu_flush_cache(addr, PAGE_SIZE_4K);
    3.11 +    __iommu_flush_cache(addr, PAGE_SIZE_4K * npages);
    3.12  }
    3.13  
    3.14  int nr_iommus;
    3.15 @@ -146,7 +146,7 @@ static u64 bus_to_context_maddr(struct i
    3.16      root = &root_entries[bus];
    3.17      if ( !root_present(*root) )
    3.18      {
    3.19 -        maddr = alloc_pgtable_maddr(NULL);
    3.20 +        maddr = alloc_pgtable_maddr(NULL, 1);
    3.21          if ( maddr == 0 )
    3.22          {
    3.23              unmap_vtd_domain_page(root_entries);
    3.24 @@ -174,7 +174,7 @@ static u64 addr_to_dma_page_maddr(struct
    3.25      addr &= (((u64)1) << addr_width) - 1;
    3.26      ASSERT(spin_is_locked(&hd->mapping_lock));
    3.27      if ( hd->pgd_maddr == 0 )
    3.28 -        if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(domain)) == 0) )
    3.29 +        if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(domain, 1)) == 0) )
    3.30              goto out;
    3.31  
    3.32      parent = (struct dma_pte *)map_vtd_domain_page(hd->pgd_maddr);
    3.33 @@ -187,7 +187,7 @@ static u64 addr_to_dma_page_maddr(struct
    3.34          {
    3.35              if ( !alloc )
    3.36                  break;
    3.37 -            maddr = alloc_pgtable_maddr(domain);
    3.38 +            maddr = alloc_pgtable_maddr(domain, 1);
    3.39              if ( !maddr )
    3.40                  break;
    3.41              dma_set_pte_addr(*pte, maddr);
    3.42 @@ -577,7 +577,7 @@ static int iommu_set_root_entry(struct i
    3.43      spin_lock(&iommu->lock);
    3.44  
    3.45      if ( iommu->root_maddr == 0 )
    3.46 -        iommu->root_maddr = alloc_pgtable_maddr(NULL);
    3.47 +        iommu->root_maddr = alloc_pgtable_maddr(NULL, 1);
    3.48      if ( iommu->root_maddr == 0 )
    3.49      {
    3.50          spin_unlock(&iommu->lock);
     4.1 --- a/xen/drivers/passthrough/vtd/iommu.h	Mon Feb 09 14:22:07 2009 +0000
     4.2 +++ b/xen/drivers/passthrough/vtd/iommu.h	Mon Feb 09 14:23:51 2009 +0000
     4.3 @@ -397,7 +397,9 @@ struct poll_info {
     4.4      u32 udata;
     4.5  };
     4.6  
     4.7 -#define QINVAL_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct qinval_entry))
     4.8 +#define MAX_QINVAL_PAGES 8
     4.9 +#define NUM_QINVAL_PAGES 1
    4.10 +#define QINVAL_ENTRY_NR (PAGE_SIZE_4K*NUM_QINVAL_PAGES/sizeof(struct qinval_entry))
    4.11  #define qinval_present(v) ((v).lo & 1)
    4.12  #define qinval_fault_disable(v) (((v).lo >> 1) & 1)
    4.13  
     5.1 --- a/xen/drivers/passthrough/vtd/qinval.c	Mon Feb 09 14:22:07 2009 +0000
     5.2 +++ b/xen/drivers/passthrough/vtd/qinval.c	Mon Feb 09 14:23:51 2009 +0000
     5.3 @@ -427,7 +427,7 @@ int qinval_setup(struct iommu *iommu)
     5.4  
     5.5      if ( qi_ctrl->qinval_maddr == 0 )
     5.6      {
     5.7 -        qi_ctrl->qinval_maddr = alloc_pgtable_maddr(NULL);
     5.8 +        qi_ctrl->qinval_maddr = alloc_pgtable_maddr(NULL, NUM_QINVAL_PAGES);
     5.9          if ( qi_ctrl->qinval_maddr == 0 )
    5.10          {
    5.11              dprintk(XENLOG_WARNING VTDPREFIX,
    5.12 @@ -445,6 +445,8 @@ int qinval_setup(struct iommu *iommu)
    5.13       * registers are automatically reset to 0 with write
    5.14       * to IQA register.
    5.15       */
    5.16 +    if ( NUM_QINVAL_PAGES <= MAX_QINVAL_PAGES )
    5.17 +        qi_ctrl->qinval_maddr |= NUM_QINVAL_PAGES - 1;
    5.18      dmar_writeq(iommu->reg, DMAR_IQA_REG, qi_ctrl->qinval_maddr);
    5.19  
    5.20      /* enable queued invalidation hardware */
     6.1 --- a/xen/drivers/passthrough/vtd/vtd.h	Mon Feb 09 14:22:07 2009 +0000
     6.2 +++ b/xen/drivers/passthrough/vtd/vtd.h	Mon Feb 09 14:23:51 2009 +0000
     6.3 @@ -101,12 +101,12 @@ unsigned int get_cache_line_size(void);
     6.4  void cacheline_flush(char *);
     6.5  void flush_all_cache(void);
     6.6  void *map_to_nocache_virt(int nr_iommus, u64 maddr);
     6.7 -u64 alloc_pgtable_maddr(struct domain *d);
     6.8 +u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages);
     6.9  void free_pgtable_maddr(u64 maddr);
    6.10  void *map_vtd_domain_page(u64 maddr);
    6.11  void unmap_vtd_domain_page(void *va);
    6.12  
    6.13  void iommu_flush_cache_entry(void *addr);
    6.14 -void iommu_flush_cache_page(void *addr);
    6.15 +void iommu_flush_cache_page(void *addr, unsigned long npages);
    6.16  
    6.17  #endif // _VTD_H_
     7.1 --- a/xen/drivers/passthrough/vtd/x86/vtd.c	Mon Feb 09 14:22:07 2009 +0000
     7.2 +++ b/xen/drivers/passthrough/vtd/x86/vtd.c	Mon Feb 09 14:23:51 2009 +0000
     7.3 @@ -38,20 +38,21 @@ void unmap_vtd_domain_page(void *va)
     7.4  }
     7.5  
     7.6  /* Allocate page table, return its machine address */
     7.7 -u64 alloc_pgtable_maddr(struct domain *d)
     7.8 +u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages)
     7.9  {
    7.10      struct page_info *pg;
    7.11      u64 *vaddr;
    7.12      unsigned long mfn;
    7.13  
    7.14 -    pg = alloc_domheap_page(NULL, d ? MEMF_node(domain_to_node(d)) : 0);
    7.15 +    pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
    7.16 +                             d ? MEMF_node(domain_to_node(d)) : 0);
    7.17      if ( !pg )
    7.18          return 0;
    7.19      mfn = page_to_mfn(pg);
    7.20      vaddr = map_domain_page(mfn);
    7.21 -    memset(vaddr, 0, PAGE_SIZE);
    7.22 +    memset(vaddr, 0, PAGE_SIZE * npages);
    7.23  
    7.24 -    iommu_flush_cache_page(vaddr);
    7.25 +    iommu_flush_cache_page(vaddr, npages);
    7.26      unmap_domain_page(vaddr);
    7.27  
    7.28      return (u64)mfn << PAGE_SHIFT_4K;
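
A note on the allocation path above: alloc_domheap_pages() takes an order (a
power-of-two page count), so get_order_from_pages() rounds npages up. A
sketch of that rounding, reimplemented here for illustration (the real
helper lives in Xen's memory headers):

    /* Smallest order such that (1UL << order) >= nr pages. */
    static unsigned int order_from_pages(unsigned long nr)
    {
        unsigned int order = 0;
        while ( (1UL << order) < nr )
            order++;
        return order;
    }

One consequence of this rounding: a non-power-of-two request such as 3 pages
allocates 4 (order 2), while the memset() in alloc_pgtable_maddr() clears
only PAGE_SIZE * npages bytes; with NUM_QINVAL_PAGES set to 1 the two always
agree.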