ia64/xen-unstable

changeset 18663:2a25fd94c6f2

VT-d: correct allocation failure checks

Checking the return value of map_domain_page() (and hence
map_vtd_domain_page()) against NULL is pointless; checking the return
value of alloc_domheap_page() (and thus alloc_pgtable_maddr()),
however, is mandatory.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Oct 20 15:19:39 2008 +0100 (2008-10-20)
parents bf84c03c38ee
children c4be040bef6f
files xen/drivers/passthrough/vtd/intremap.c xen/drivers/passthrough/vtd/iommu.c xen/drivers/passthrough/vtd/qinval.c xen/drivers/passthrough/vtd/x86/vtd.c
line diff
     1.1 --- a/xen/drivers/passthrough/vtd/intremap.c	Mon Oct 20 15:18:09 2008 +0100
     1.2 +++ b/xen/drivers/passthrough/vtd/intremap.c	Mon Oct 20 15:19:39 2008 +0100
     1.3 @@ -472,7 +472,7 @@ int intremap_setup(struct iommu *iommu)
     1.4          {
     1.5              dprintk(XENLOG_WARNING VTDPREFIX,
     1.6                      "Cannot allocate memory for ir_ctrl->iremap_maddr\n");
     1.7 -            return -ENODEV;
     1.8 +            return -ENOMEM;
     1.9          }
    1.10          ir_ctrl->iremap_index = -1;
    1.11      }
     2.1 --- a/xen/drivers/passthrough/vtd/iommu.c	Mon Oct 20 15:18:09 2008 +0100
     2.2 +++ b/xen/drivers/passthrough/vtd/iommu.c	Mon Oct 20 15:19:39 2008 +0100
     2.3 @@ -219,10 +219,10 @@ static u64 addr_to_dma_page_maddr(struct
     2.4              if ( !alloc )
     2.5                  break;
     2.6              maddr = alloc_pgtable_maddr();
     2.7 +            if ( !maddr )
     2.8 +                break;
     2.9              dma_set_pte_addr(*pte, maddr);
    2.10              vaddr = map_vtd_domain_page(maddr);
    2.11 -            if ( !vaddr )
    2.12 -                break;
    2.13  
    2.14              /*
    2.15               * high level table always sets r/w, last level
    2.16 @@ -235,8 +235,6 @@ static u64 addr_to_dma_page_maddr(struct
    2.17          else
    2.18          {
    2.19              vaddr = map_vtd_domain_page(pte->val);
    2.20 -            if ( !vaddr )
    2.21 -                break;
    2.22          }
    2.23  
    2.24          if ( level == 2 )
     3.1 --- a/xen/drivers/passthrough/vtd/qinval.c	Mon Oct 20 15:18:09 2008 +0100
     3.2 +++ b/xen/drivers/passthrough/vtd/qinval.c	Mon Oct 20 15:19:39 2008 +0100
     3.3 @@ -428,7 +428,11 @@ int qinval_setup(struct iommu *iommu)
     3.4      {
     3.5          qi_ctrl->qinval_maddr = alloc_pgtable_maddr();
     3.6          if ( qi_ctrl->qinval_maddr == 0 )
     3.7 -            panic("Cannot allocate memory for qi_ctrl->qinval_maddr\n");
     3.8 +        {
     3.9 +            dprintk(XENLOG_WARNING VTDPREFIX,
    3.10 +                    "Cannot allocate memory for qi_ctrl->qinval_maddr\n");
    3.11 +            return -ENOMEM;
    3.12 +        }
    3.13          flush->context = flush_context_qi;
    3.14          flush->iotlb = flush_iotlb_qi;
    3.15      }
     4.1 --- a/xen/drivers/passthrough/vtd/x86/vtd.c	Mon Oct 20 15:18:09 2008 +0100
     4.2 +++ b/xen/drivers/passthrough/vtd/x86/vtd.c	Mon Oct 20 15:19:39 2008 +0100
     4.3 @@ -41,17 +41,19 @@ u64 alloc_pgtable_maddr(void)
     4.4  {
     4.5      struct page_info *pg;
     4.6      u64 *vaddr;
     4.7 +    unsigned long mfn;
     4.8  
     4.9      pg = alloc_domheap_page(NULL, 0);
    4.10 -    vaddr = map_domain_page(page_to_mfn(pg));
    4.11 -    if ( !vaddr )
    4.12 +    if ( !pg )
    4.13          return 0;
    4.14 +    mfn = page_to_mfn(pg);
    4.15 +    vaddr = map_domain_page(mfn);
    4.16      memset(vaddr, 0, PAGE_SIZE);
    4.17  
    4.18      iommu_flush_cache_page(vaddr);
    4.19      unmap_domain_page(vaddr);
    4.20  
    4.21 -    return page_to_maddr(pg);
    4.22 +    return (u64)mfn << PAGE_SHIFT_4K;
    4.23  }
    4.24  
    4.25  void free_pgtable_maddr(u64 maddr)