ia64/xen-unstable

changeset 15949:7bd5b1f55308

vtd: cleanups to iommu code.
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Thu Sep 20 15:41:22 2007 +0100 (2007-09-20)
parents 2477e94450aa
children 35893e27bdeb
files xen/arch/x86/hvm/vmx/vtd/intel-iommu.c
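Note: this changeset is almost entirely a coding-style conversion. Linux-style formatting inherited with the VT-d code is rewritten in Xen style: spaces inside the parentheses of if/for/while/switch conditions, braces on their own lines, "for ( ; ; )" in place of "while (1)", four-space indentation instead of tabs, and continuation arguments aligned under the opening parenthesis. A minimal before/after sketch of the convention (illustrative only, not part of the patch):

    /* Linux style, as the code was originally written: */
    while (1) {
        sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_TES))
            break;
        cpu_relax();
    }

    /* Xen style, as this changeset reformats it: */
    for ( ; ; )
    {
        sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
        if ( !(sts & DMA_GSTS_TES) )
            break;
        cpu_relax();
    }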
line diff
     1.1 --- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c	Thu Sep 20 14:15:45 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c	Thu Sep 20 15:41:22 2007 +0100
     1.3 @@ -38,7 +38,7 @@
     1.4  #define VTDPREFIX
     1.5  extern void print_iommu_regs(struct acpi_drhd_unit *drhd);
     1.6  extern void print_vtd_entries(struct domain *d, int bus, int devfn,
     1.7 -                       unsigned long gmfn);
     1.8 +                              unsigned long gmfn);
     1.9  
    1.10  #define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1m */
    1.11  
    1.12 @@ -51,13 +51,13 @@ unsigned int x86_clflush_size;
    1.13  void clflush_cache_range(void *adr, int size)
    1.14  {
    1.15      int i;
    1.16 -    for (i = 0; i < size; i += x86_clflush_size)
    1.17 +    for ( i = 0; i < size; i += x86_clflush_size )
    1.18          clflush(adr + i);
    1.19  }
    1.20  
    1.21  static void __iommu_flush_cache(struct iommu *iommu, void *addr, int size)
    1.22  {
    1.23 -    if (!ecap_coherent(iommu->ecap))
    1.24 +    if ( !ecap_coherent(iommu->ecap) )
    1.25          clflush_cache_range(addr, size);
    1.26  }
    1.27  
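Annotation: __iommu_flush_cache() only issues cache-line flushes when the IOMMU cannot snoop the CPU caches (ecap_coherent() clear); clflush_cache_range() then steps through the buffer one x86_clflush_size-d line at a time. The cost is easy to estimate (simple arithmetic, not from the patch):

    /* With x86_clflush_size = 64, flushing one 4K page-table page costs
     * 4096 / 64 = 64 clflush instructions, one per cache line. */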
    1.28 @@ -69,7 +69,7 @@ static void __iommu_flush_cache(struct i
    1.29  int nr_iommus;
    1.30  /* context entry handling */
    1.31  static struct context_entry * device_to_context_entry(struct iommu *iommu,
    1.32 -        u8 bus, u8 devfn)
    1.33 +                                                      u8 bus, u8 devfn)
    1.34  {
    1.35      struct root_entry *root;
    1.36      struct context_entry *context;
    1.37 @@ -78,9 +78,11 @@ static struct context_entry * device_to_
    1.38  
    1.39      spin_lock_irqsave(&iommu->lock, flags);
    1.40      root = &iommu->root_entry[bus];
    1.41 -    if (!root_present(*root)) {
    1.42 +    if ( !root_present(*root) )
    1.43 +    {
    1.44          phy_addr = (unsigned long) alloc_xenheap_page();
    1.45 -        if (!phy_addr) {
    1.46 +        if ( !phy_addr )
    1.47 +        {
    1.48              spin_unlock_irqrestore(&iommu->lock, flags);
    1.49              return NULL;
    1.50          }
    1.51 @@ -107,14 +109,15 @@ static int device_context_mapped(struct 
    1.52  
    1.53      spin_lock_irqsave(&iommu->lock, flags);
    1.54      root = &iommu->root_entry[bus];
    1.55 -    if (!root_present(*root)) {
    1.56 +    if ( !root_present(*root) )
    1.57 +    {
    1.58          ret = 0;
    1.59          goto out;
    1.60      }
    1.61      phy_addr = get_context_addr(*root);
    1.62      context = (struct context_entry *)maddr_to_virt(phy_addr);
    1.63      ret = context_present(context[devfn]);
    1.64 -out:
    1.65 + out:
    1.66      spin_unlock_irqrestore(&iommu->lock, flags);
    1.67      return ret;
    1.68  }
    1.69 @@ -131,7 +134,7 @@ out:
    1.70  #define level_mask(l) (((u64)(-1)) << level_to_offset_bits(l))
    1.71  #define level_size(l) (1 << level_to_offset_bits(l))
    1.72  #define align_to_level(addr, l) ((addr + level_size(l) - 1) & level_mask(l))
    1.73 -static struct dma_pte * addr_to_dma_pte(struct domain *domain, u64 addr)
    1.74 +static struct dma_pte *addr_to_dma_pte(struct domain *domain, u64 addr)
    1.75  {
    1.76      struct hvm_iommu *hd = domain_hvm_iommu(domain);
    1.77      struct acpi_drhd_unit *drhd;
    1.78 @@ -147,41 +150,43 @@ static struct dma_pte * addr_to_dma_pte(
    1.79  
    1.80      addr &= (((u64)1) << addr_width) - 1;
    1.81      spin_lock_irqsave(&hd->mapping_lock, flags);
    1.82 -    if (!hd->pgd) {
    1.83 +    if ( !hd->pgd )
    1.84 +    {
    1.85          pgd = (struct dma_pte *)alloc_xenheap_page();
    1.86 -        if (!pgd && !hd->pgd) {
    1.87 +        if ( !pgd && !hd->pgd )
    1.88 +        {
    1.89              spin_unlock_irqrestore(&hd->mapping_lock, flags);
    1.90              return NULL;
    1.91          }
    1.92          memset((u8*)pgd, 0, PAGE_SIZE);
    1.93 -        if (!hd->pgd)
    1.94 +        if ( !hd->pgd )
    1.95              hd->pgd = pgd;
    1.96          else /* somebody is fast */
    1.97              free_xenheap_page((void *) pgd);
    1.98      }
    1.99      parent = hd->pgd;
   1.100 -    while (level > 0) {
   1.101 +    while ( level > 0 )
   1.102 +    {
   1.103          u8 *tmp;
   1.104          offset = address_level_offset(addr, level);
   1.105          pte = &parent[offset];
   1.106 -        if (level == 1)
   1.107 +        if ( level == 1 )
   1.108              break;
   1.109 -        if (dma_pte_addr(*pte) == 0) {
   1.110 +        if ( dma_pte_addr(*pte) == 0 )
   1.111 +        {
   1.112              tmp = alloc_xenheap_page();
   1.113 -            if (tmp == NULL)
   1.114 -                gdprintk(XENLOG_ERR VTDPREFIX,
   1.115 -                    "addr_to_dma_pte: tmp == NULL\n");
   1.116 - 
   1.117              memset(tmp, 0, PAGE_SIZE);
   1.118              iommu_flush_cache_page(iommu, tmp);
   1.119  
   1.120 -            if (!tmp && dma_pte_addr(*pte) == 0) {
   1.121 +            if ( !tmp && dma_pte_addr(*pte) == 0 )
   1.122 +            {
   1.123                  spin_unlock_irqrestore(&hd->mapping_lock, flags);
   1.124                  return NULL;
   1.125              }
   1.126 -            if (dma_pte_addr(*pte) == 0) {
   1.127 +            if ( dma_pte_addr(*pte) == 0 )
   1.128 +            {
   1.129                  dma_set_pte_addr(*pte,
   1.130 -                    virt_to_maddr(tmp));
   1.131 +                                 virt_to_maddr(tmp));
   1.132                  /*
   1.133                   * high level table always sets r/w, last level
   1.134                   * page table control read/write
   1.135 @@ -201,7 +206,7 @@ static struct dma_pte * addr_to_dma_pte(
   1.136  
   1.137  /* return address's pte at specific level */
   1.138  static struct dma_pte *dma_addr_level_pte(struct domain *domain, u64 addr,
   1.139 -        int level)
   1.140 +                                          int level)
   1.141  {
   1.142      struct hvm_iommu *hd = domain_hvm_iommu(domain);
   1.143      struct dma_pte *parent, *pte = NULL;
   1.144 @@ -209,13 +214,14 @@ static struct dma_pte *dma_addr_level_pt
   1.145      int offset;
   1.146  
   1.147      parent = hd->pgd;
   1.148 -    while (level <= total) {
   1.149 +    while ( level <= total )
   1.150 +    {
   1.151          offset = address_level_offset(addr, total);
   1.152          pte = &parent[offset];
   1.153 -        if (level == total)
   1.154 +        if ( level == total )
   1.155              return pte;
   1.156  
   1.157 -        if (dma_pte_addr(*pte) == 0)
   1.158 +        if ( dma_pte_addr(*pte) == 0 )
   1.159              break;
   1.160          parent = maddr_to_virt(dma_pte_addr(*pte));
   1.161          total--;
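Annotation: addr_to_dma_pte() walks the VT-d I/O page tables top-down, allocating missing levels as it goes, while dma_addr_level_pte() only walks. Assuming the usual definition level_to_offset_bits(l) = 12 + 9*(l-1) used with these macros, each level indexes 9 address bits (512 eight-byte entries per 4K page). A worked index computation (arithmetic only, not from the patch):

    /* For a 3-level table and addr = 0x12345000:
     *   level 3 index = (addr >> 30) & 0x1ff = 0
     *   level 2 index = (addr >> 21) & 0x1ff = 0x91
     *   level 1 index = (addr >> 12) & 0x1ff = 0x145 */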
   1.162 @@ -225,245 +231,257 @@ static struct dma_pte *dma_addr_level_pt
   1.163  
   1.164  static void iommu_flush_write_buffer(struct iommu *iommu)
   1.165  {
   1.166 -	u32 val;
   1.167 -	unsigned long flag;
   1.168 -	unsigned long start_time;
   1.169 +    u32 val;
   1.170 +    unsigned long flag;
   1.171 +    unsigned long start_time;
   1.172  
   1.173 -	if (!cap_rwbf(iommu->cap))
   1.174 -		return;
   1.175 -	val = iommu->gcmd | DMA_GCMD_WBF;
   1.176 +    if ( !cap_rwbf(iommu->cap) )
   1.177 +        return;
   1.178 +    val = iommu->gcmd | DMA_GCMD_WBF;
   1.179  
   1.180 -	spin_lock_irqsave(&iommu->register_lock, flag);
   1.181 -	dmar_writel(iommu->reg, DMAR_GCMD_REG, val);
   1.182 +    spin_lock_irqsave(&iommu->register_lock, flag);
   1.183 +    dmar_writel(iommu->reg, DMAR_GCMD_REG, val);
   1.184  
    1.185 -	/* Make sure hardware completes it */
   1.186 -	start_time = jiffies;
   1.187 -	while (1) {
   1.188 -		val = dmar_readl(iommu->reg, DMAR_GSTS_REG);
   1.189 -		if (!(val & DMA_GSTS_WBFS))
   1.190 -			break;
   1.191 -		if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT))
    1.192 -			panic("DMAR hardware is malfunctioning, please disable IOMMU\n");
   1.193 -		cpu_relax();
   1.194 -	}
   1.195 -	spin_unlock_irqrestore(&iommu->register_lock, flag);
    1.196 +    /* Make sure hardware completes it */
   1.197 +    start_time = jiffies;
   1.198 +    for ( ; ; )
   1.199 +    {
   1.200 +        val = dmar_readl(iommu->reg, DMAR_GSTS_REG);
   1.201 +        if ( !(val & DMA_GSTS_WBFS) )
   1.202 +            break;
   1.203 +        if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
    1.204 +            panic("DMAR hardware is malfunctioning,"
    1.205 +                  " please disable IOMMU\n");
   1.206 +        cpu_relax();
   1.207 +    }
   1.208 +    spin_unlock_irqrestore(&iommu->register_lock, flag);
   1.209  }
   1.210  
    1.211  /* return value determines whether we need a write buffer flush */
   1.212 -static int __iommu_flush_context(struct iommu *iommu,
   1.213 -	u16 did, u16 source_id, u8 function_mask, u64 type,
   1.214 -	int non_present_entry_flush)
   1.215 +static int __iommu_flush_context(
   1.216 +    struct iommu *iommu,
   1.217 +    u16 did, u16 source_id, u8 function_mask, u64 type,
   1.218 +    int non_present_entry_flush)
   1.219  {
   1.220 -	u64 val = 0;
   1.221 -	unsigned long flag;
   1.222 -	unsigned long start_time;
   1.223 +    u64 val = 0;
   1.224 +    unsigned long flag;
   1.225 +    unsigned long start_time;
   1.226  
   1.227 -	/*
    1.228 -	 * In the non-present entry flush case, if hardware doesn't cache
    1.229 -	 * non-present entries we do nothing; if hardware does cache
    1.230 -	 * non-present entries, we flush entries of domain 0 (the domain
    1.231 -	 * id is used to cache any non-present entries)
   1.232 -	 */
   1.233 -	if (non_present_entry_flush) {
   1.234 -		if (!cap_caching_mode(iommu->cap))
   1.235 -			return 1;
   1.236 -		else
   1.237 -			did = 0;
   1.238 -	}
   1.239 +    /*
    1.240 +     * In the non-present entry flush case, if hardware doesn't cache
    1.241 +     * non-present entries we do nothing; if hardware does cache
    1.242 +     * non-present entries, we flush entries of domain 0 (the domain
    1.243 +     * id is used to cache any non-present entries)
   1.244 +     */
   1.245 +    if ( non_present_entry_flush )
   1.246 +    {
   1.247 +        if ( !cap_caching_mode(iommu->cap) )
   1.248 +            return 1;
   1.249 +        else
   1.250 +            did = 0;
   1.251 +    }
   1.252  
   1.253 -        /* use register invalidation */
   1.254 -        switch (type)
   1.255 -        {
   1.256 -            case DMA_CCMD_GLOBAL_INVL:
   1.257 -                val = DMA_CCMD_GLOBAL_INVL;
   1.258 -                break;
   1.259 -            case DMA_CCMD_DOMAIN_INVL:
   1.260 -                val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
   1.261 -                break;
   1.262 -            case DMA_CCMD_DEVICE_INVL:
   1.263 -                val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
   1.264 -                  |DMA_CCMD_SID(source_id)|DMA_CCMD_FM(function_mask);
   1.265 -                break;
   1.266 -            default:
   1.267 -                BUG();
   1.268 -        }
   1.269 -        val |= DMA_CCMD_ICC;
   1.270 +    /* use register invalidation */
   1.271 +    switch ( type )
   1.272 +    {
   1.273 +    case DMA_CCMD_GLOBAL_INVL:
   1.274 +        val = DMA_CCMD_GLOBAL_INVL;
   1.275 +        break;
   1.276 +    case DMA_CCMD_DOMAIN_INVL:
   1.277 +        val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
   1.278 +        break;
   1.279 +    case DMA_CCMD_DEVICE_INVL:
   1.280 +        val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
   1.281 +            |DMA_CCMD_SID(source_id)|DMA_CCMD_FM(function_mask);
   1.282 +        break;
   1.283 +    default:
   1.284 +        BUG();
   1.285 +    }
   1.286 +    val |= DMA_CCMD_ICC;
   1.287  
   1.288 -        spin_lock_irqsave(&iommu->register_lock, flag);
   1.289 -        dmar_writeq(iommu->reg, DMAR_CCMD_REG, val);
   1.290 +    spin_lock_irqsave(&iommu->register_lock, flag);
   1.291 +    dmar_writeq(iommu->reg, DMAR_CCMD_REG, val);
   1.292  
    1.293 -        /* Make sure hardware completes it */
   1.294 -        start_time = jiffies;
   1.295 -        while (1) {
   1.296 -            val = dmar_readq(iommu->reg, DMAR_CCMD_REG);
   1.297 -            if (!(val & DMA_CCMD_ICC))
   1.298 -                break;
   1.299 -            if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT))
    1.300 -                panic("DMAR hardware is malfunctioning, please disable IOMMU\n");
   1.301 -            cpu_relax();
   1.302 -        }
   1.303 -        spin_unlock_irqrestore(&iommu->register_lock, flag);
    1.304 -	/* flush context entry will implicitly flush write buffer */
   1.305 -	return 0;
    1.306 +    /* Make sure hardware completes it */
   1.307 +    start_time = jiffies;
   1.308 +    for ( ; ; )
   1.309 +    {
   1.310 +        val = dmar_readq(iommu->reg, DMAR_CCMD_REG);
   1.311 +        if ( !(val & DMA_CCMD_ICC) )
   1.312 +            break;
   1.313 +        if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
    1.314 +            panic("DMAR hardware is malfunctioning, please disable IOMMU\n");
   1.315 +        cpu_relax();
   1.316 +    }
   1.317 +    spin_unlock_irqrestore(&iommu->register_lock, flag);
    1.318 +    /* flush context entry will implicitly flush write buffer */
   1.319 +    return 0;
   1.320  }
   1.321  
   1.322 -static int inline iommu_flush_context_global(struct iommu *iommu,
   1.323 -	int non_present_entry_flush)
   1.324 +static int inline iommu_flush_context_global(
   1.325 +    struct iommu *iommu, int non_present_entry_flush)
   1.326  {
   1.327 -	return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
   1.328 -		non_present_entry_flush);
   1.329 +    return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
   1.330 +                                 non_present_entry_flush);
   1.331  }
   1.332  
   1.333 -static int inline iommu_flush_context_domain(struct iommu *iommu, u16 did,
   1.334 -	int non_present_entry_flush)
   1.335 +static int inline iommu_flush_context_domain(
   1.336 +    struct iommu *iommu, u16 did, int non_present_entry_flush)
   1.337  {
   1.338 -	return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
   1.339 -		non_present_entry_flush);
   1.340 +    return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
   1.341 +                                 non_present_entry_flush);
   1.342  }
   1.343  
   1.344 -static int inline iommu_flush_context_device(struct iommu *iommu,
   1.345 -	u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush)
   1.346 +static int inline iommu_flush_context_device(
   1.347 +    struct iommu *iommu, u16 did, u16 source_id,
   1.348 +    u8 function_mask, int non_present_entry_flush)
   1.349  {
   1.350 -	return __iommu_flush_context(iommu, did, source_id, function_mask,
   1.351 -		DMA_CCMD_DEVICE_INVL, non_present_entry_flush);
   1.352 +    return __iommu_flush_context(iommu, did, source_id, function_mask,
   1.353 +                                 DMA_CCMD_DEVICE_INVL,
   1.354 +                                 non_present_entry_flush);
   1.355  }
   1.356  
    1.357  /* return value determines whether we need a write buffer flush */
   1.358  static int __iommu_flush_iotlb(struct iommu *iommu, u16 did,
   1.359 -	u64 addr, unsigned int size_order, u64 type,
   1.360 -	int non_present_entry_flush)
   1.361 +                               u64 addr, unsigned int size_order, u64 type,
   1.362 +                               int non_present_entry_flush)
   1.363  {
   1.364 -	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
   1.365 -	u64 val = 0, val_iva = 0;
   1.366 -	unsigned long flag;
   1.367 -	unsigned long start_time;
   1.368 +    int tlb_offset = ecap_iotlb_offset(iommu->ecap);
   1.369 +    u64 val = 0, val_iva = 0;
   1.370 +    unsigned long flag;
   1.371 +    unsigned long start_time;
   1.372  
   1.373 -	/*
    1.374 -	 * In the non-present entry flush case, if hardware doesn't cache
    1.375 -	 * non-present entries we do nothing; if hardware does cache
    1.376 -	 * non-present entries, we flush entries of domain 0 (the domain
    1.377 -	 * id is used to cache any non-present entries)
   1.378 -	 */
   1.379 -	if (non_present_entry_flush) {
   1.380 -		if (!cap_caching_mode(iommu->cap))
   1.381 -			return 1;
   1.382 -		else
   1.383 -			did = 0;
   1.384 -	}
   1.385 +    /*
    1.386 +     * In the non-present entry flush case, if hardware doesn't cache
    1.387 +     * non-present entries we do nothing; if hardware does cache
    1.388 +     * non-present entries, we flush entries of domain 0 (the domain
    1.389 +     * id is used to cache any non-present entries)
   1.390 +     */
   1.391 +    if ( non_present_entry_flush )
   1.392 +    {
   1.393 +        if ( !cap_caching_mode(iommu->cap) )
   1.394 +            return 1;
   1.395 +        else
   1.396 +            did = 0;
   1.397 +    }
   1.398  
   1.399 -        /* use register invalidation */
   1.400 -        switch (type) {
   1.401 -            case DMA_TLB_GLOBAL_FLUSH:
    1.402 -                /* global flush doesn't need to set IVA_REG */
   1.403 -                val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
   1.404 -                break;
   1.405 -            case DMA_TLB_DSI_FLUSH:
   1.406 -                val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
   1.407 -                break;
   1.408 -            case DMA_TLB_PSI_FLUSH:
   1.409 -                val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
   1.410 -                /* Note: always flush non-leaf currently */
   1.411 -                val_iva = size_order | addr;
   1.412 -                break;
   1.413 -            default:
   1.414 -                BUG();
   1.415 -        }
   1.416 -        /* Note: set drain read/write */
   1.417 +    /* use register invalidation */
   1.418 +    switch ( type )
   1.419 +    {
   1.420 +    case DMA_TLB_GLOBAL_FLUSH:
    1.421 +        /* global flush doesn't need to set IVA_REG */
   1.422 +        val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
   1.423 +        break;
   1.424 +    case DMA_TLB_DSI_FLUSH:
   1.425 +        val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
   1.426 +        break;
   1.427 +    case DMA_TLB_PSI_FLUSH:
   1.428 +        val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
   1.429 +        /* Note: always flush non-leaf currently */
   1.430 +        val_iva = size_order | addr;
   1.431 +        break;
   1.432 +    default:
   1.433 +        BUG();
   1.434 +    }
   1.435 +    /* Note: set drain read/write */
   1.436  #if 0
   1.437 -        /*
    1.438 -         * This is probably meant to be extra safe.  It looks like
    1.439 -         * we can ignore it without any impact.
   1.440 -         */
   1.441 -        if (cap_read_drain(iommu->cap))
   1.442 -            val |= DMA_TLB_READ_DRAIN;
   1.443 +    /*
    1.444 +     * This is probably meant to be extra safe.  It looks like
    1.445 +     * we can ignore it without any impact.
   1.446 +     */
   1.447 +    if ( cap_read_drain(iommu->cap) )
   1.448 +        val |= DMA_TLB_READ_DRAIN;
   1.449  #endif
   1.450 -        if (cap_write_drain(iommu->cap))
   1.451 -            val |= DMA_TLB_WRITE_DRAIN;
   1.452 +    if ( cap_write_drain(iommu->cap) )
   1.453 +        val |= DMA_TLB_WRITE_DRAIN;
   1.454  
   1.455 -        spin_lock_irqsave(&iommu->register_lock, flag);
   1.456 -        /* Note: Only uses first TLB reg currently */
   1.457 -        if (val_iva)
   1.458 -            dmar_writeq(iommu->reg, tlb_offset, val_iva);
   1.459 -        dmar_writeq(iommu->reg, tlb_offset + 8, val);
   1.460 +    spin_lock_irqsave(&iommu->register_lock, flag);
   1.461 +    /* Note: Only uses first TLB reg currently */
   1.462 +    if ( val_iva )
   1.463 +        dmar_writeq(iommu->reg, tlb_offset, val_iva);
   1.464 +    dmar_writeq(iommu->reg, tlb_offset + 8, val);
   1.465  
    1.466 -        /* Make sure hardware completes it */
   1.467 -        start_time = jiffies;
   1.468 -        while (1) {
   1.469 -            val = dmar_readq(iommu->reg, tlb_offset + 8);
   1.470 -            if (!(val & DMA_TLB_IVT))
   1.471 -                break;
   1.472 -            if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT))
    1.473 -                panic("DMAR hardware is malfunctioning, please disable IOMMU\n");
   1.474 -            cpu_relax();
   1.475 -        }
   1.476 -        spin_unlock_irqrestore(&iommu->register_lock, flag);
    1.477 +    /* Make sure hardware completes it */
   1.478 +    start_time = jiffies;
   1.479 +    for ( ; ; )
   1.480 +    {
   1.481 +        val = dmar_readq(iommu->reg, tlb_offset + 8);
   1.482 +        if ( !(val & DMA_TLB_IVT) )
   1.483 +            break;
   1.484 +        if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
    1.485 +            panic("DMAR hardware is malfunctioning, please disable IOMMU\n");
   1.486 +        cpu_relax();
   1.487 +    }
   1.488 +    spin_unlock_irqrestore(&iommu->register_lock, flag);
   1.489  
   1.490 -        /* check IOTLB invalidation granularity */
   1.491 -        if (DMA_TLB_IAIG(val) == 0)
   1.492 -            printk(KERN_ERR VTDPREFIX "IOMMU: flush IOTLB failed\n");
   1.493 -        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
   1.494 -            printk(KERN_ERR VTDPREFIX "IOMMU: tlb flush request %x, actual %x\n",
   1.495 -              (u32)DMA_TLB_IIRG(type), (u32)DMA_TLB_IAIG(val));
    1.496 -	/* flush context entry will implicitly flush write buffer */
   1.497 -	return 0;
   1.498 +    /* check IOTLB invalidation granularity */
   1.499 +    if ( DMA_TLB_IAIG(val) == 0 )
   1.500 +        printk(KERN_ERR VTDPREFIX "IOMMU: flush IOTLB failed\n");
   1.501 +    if ( DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type) )
   1.502 +        printk(KERN_ERR VTDPREFIX "IOMMU: tlb flush request %x, actual %x\n",
   1.503 +               (u32)DMA_TLB_IIRG(type), (u32)DMA_TLB_IAIG(val));
    1.504 +    /* flush context entry will implicitly flush write buffer */
   1.505 +    return 0;
   1.506  }
   1.507  
   1.508  static int inline iommu_flush_iotlb_global(struct iommu *iommu,
   1.509 -	int non_present_entry_flush)
   1.510 +                                           int non_present_entry_flush)
   1.511  {
   1.512 -	return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
   1.513 -		non_present_entry_flush);
   1.514 +    return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
   1.515 +                               non_present_entry_flush);
   1.516  }
   1.517  
   1.518  static int inline iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
   1.519 -	int non_present_entry_flush)
   1.520 +                                        int non_present_entry_flush)
   1.521  {
   1.522 -	return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
   1.523 -		non_present_entry_flush);
   1.524 +    return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
   1.525 +                               non_present_entry_flush);
   1.526  }
   1.527  
   1.528  static int inline get_alignment(u64 base, unsigned int size)
   1.529  {
   1.530 -	int t = 0;
   1.531 -	u64 end;
   1.532 +    int t = 0;
   1.533 +    u64 end;
   1.534  
   1.535 -	end = base + size - 1;
   1.536 -	while (base != end) {
   1.537 -		t++;
   1.538 -		base >>= 1;
   1.539 -		end >>= 1;
   1.540 -	}
   1.541 -	return t;
   1.542 +    end = base + size - 1;
   1.543 +    while ( base != end )
   1.544 +    {
   1.545 +        t++;
   1.546 +        base >>= 1;
   1.547 +        end >>= 1;
   1.548 +    }
   1.549 +    return t;
   1.550  }
   1.551  
   1.552 -static int inline iommu_flush_iotlb_psi(struct iommu *iommu, u16 did,
   1.553 -	u64 addr, unsigned int pages, int non_present_entry_flush)
   1.554 +static int inline iommu_flush_iotlb_psi(
   1.555 +    struct iommu *iommu, u16 did,
   1.556 +    u64 addr, unsigned int pages, int non_present_entry_flush)
   1.557  {
   1.558 -	unsigned int align;
   1.559 +    unsigned int align;
   1.560  
   1.561 -	BUG_ON(addr & (~PAGE_MASK_4K));
   1.562 -	BUG_ON(pages == 0);
   1.563 +    BUG_ON(addr & (~PAGE_MASK_4K));
   1.564 +    BUG_ON(pages == 0);
   1.565  
   1.566 -	/* Fallback to domain selective flush if no PSI support */
   1.567 -	if (!cap_pgsel_inv(iommu->cap))
   1.568 -		return iommu_flush_iotlb_dsi(iommu, did,
   1.569 -			non_present_entry_flush);
   1.570 +    /* Fallback to domain selective flush if no PSI support */
   1.571 +    if ( !cap_pgsel_inv(iommu->cap) )
   1.572 +        return iommu_flush_iotlb_dsi(iommu, did,
   1.573 +                                     non_present_entry_flush);
   1.574  
   1.575 -	/*
    1.576 -	 * PSI requires the flush size to be 2 ^ x pages, with the base
    1.577 -	 * address naturally aligned to that size
   1.578 -	 */
   1.579 -	align = get_alignment(addr >> PAGE_SHIFT_4K, pages);
   1.580 -	/* Fallback to domain selective flush if size is too big */
   1.581 -	if (align > cap_max_amask_val(iommu->cap))
   1.582 -		return iommu_flush_iotlb_dsi(iommu, did,
   1.583 -			non_present_entry_flush);
   1.584 +    /*
    1.585 +     * PSI requires the flush size to be 2 ^ x pages, with the base
    1.586 +     * address naturally aligned to that size
   1.587 +     */
   1.588 +    align = get_alignment(addr >> PAGE_SHIFT_4K, pages);
   1.589 +    /* Fallback to domain selective flush if size is too big */
   1.590 +    if ( align > cap_max_amask_val(iommu->cap) )
   1.591 +        return iommu_flush_iotlb_dsi(iommu, did,
   1.592 +                                     non_present_entry_flush);
   1.593  
   1.594 -	addr >>= PAGE_SHIFT_4K + align;
   1.595 -	addr <<= PAGE_SHIFT_4K + align;
   1.596 +    addr >>= PAGE_SHIFT_4K + align;
   1.597 +    addr <<= PAGE_SHIFT_4K + align;
   1.598  
   1.599 -	return __iommu_flush_iotlb(iommu, did, addr, align,
   1.600 -		DMA_TLB_PSI_FLUSH, non_present_entry_flush);
   1.601 +    return __iommu_flush_iotlb(iommu, did, addr, align,
   1.602 +                               DMA_TLB_PSI_FLUSH, non_present_entry_flush);
   1.603  }
   1.604  
   1.605  void flush_all(void)
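Annotation: the page-selective invalidation (PSI) path above may only request naturally aligned power-of-two regions, so get_alignment() computes the order of the smallest such region covering the page range, and iommu_flush_iotlb_psi() then rounds the address down to that boundary (the shift-right/shift-left pair). A self-contained sketch of the same computation (plain C, illustrative only):

    /* Order of the smallest naturally aligned power-of-two block
     * covering pages [base, base + size); mirrors get_alignment(). */
    static int get_alignment_sketch(unsigned long long base, unsigned int size)
    {
        unsigned long long end = base + size - 1;
        int t = 0;

        while ( base != end )   /* shift until both pages share one block */
        {
            t++;
            base >>= 1;
            end >>= 1;
        }
        return t;
    }

    /* Example: base = 0x12340, size = 3 covers pages 0x12340..0x12342;
     * base and end coincide after 2 shifts, so the flush uses a
     * 2^2 = 4-page aligned region starting at page 0x12340 (already
     * aligned to a 4-page boundary). */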
   1.606 @@ -473,7 +491,8 @@ void flush_all(void)
   1.607      int i = 0;
   1.608  
   1.609      wbinvd();
   1.610 -    for_each_drhd_unit(drhd) {
   1.611 +    for_each_drhd_unit ( drhd )
   1.612 +    {
   1.613          iommu = drhd->iommu;
   1.614          iommu_flush_context_global(iommu, 0);
   1.615          iommu_flush_iotlb_global(iommu, 0);
   1.616 @@ -493,16 +512,16 @@ static void dma_pte_clear_one(struct dom
   1.617      /* get last level pte */
   1.618      pte = dma_addr_level_pte(domain, addr, 1);
   1.619  
   1.620 -    if (pte) {
   1.621 +    if ( pte )
   1.622 +    {
   1.623          dma_clear_pte(*pte);
   1.624          iommu_flush_cache_entry(drhd->iommu, pte);
   1.625  
   1.626 -        for_each_drhd_unit(drhd) {
   1.627 +        for_each_drhd_unit ( drhd )
   1.628 +        {
   1.629              iommu = drhd->iommu;
   1.630 -            if (cap_caching_mode(iommu->cap))
   1.631 -            {
   1.632 +            if ( cap_caching_mode(iommu->cap) )
   1.633                  iommu_flush_iotlb_psi(iommu, domain->domain_id, addr, 1, 0);
   1.634 -            }
   1.635              else if (cap_rwbf(iommu->cap))
   1.636                  iommu_flush_write_buffer(iommu);
   1.637          }
   1.638 @@ -522,14 +541,14 @@ static void dma_pte_clear_range(struct d
   1.639      end &= PAGE_MASK_4K;
   1.640  
    1.641      /* we don't need a lock here; nobody else touches this iova range */
   1.642 -    while (start < end) {
   1.643 +    while ( start < end )
   1.644 +    {
   1.645          dma_pte_clear_one(domain, start);
   1.646          start += PAGE_SIZE_4K;
   1.647      }
   1.648  }
   1.649  
   1.650  /* free page table pages. last level pte should already be cleared */
   1.651 -// static void dma_pte_free_pagetable(struct domain *domain, u64 start, u64 end)
   1.652  void dma_pte_free_pagetable(struct domain *domain, u64 start, u64 end)
   1.653  {
   1.654      struct acpi_drhd_unit *drhd;
   1.655 @@ -549,14 +568,17 @@ void dma_pte_free_pagetable(struct domai
   1.656  
    1.657      /* we don't need a lock here; nobody else touches this iova range */
   1.658      level = 2;
   1.659 -    while (level <= total) {
   1.660 +    while ( level <= total )
   1.661 +    {
   1.662          tmp = align_to_level(start, level);
   1.663 -        if (tmp >= end || (tmp + level_size(level) > end))
   1.664 +        if ( (tmp >= end) || ((tmp + level_size(level)) > end) )
   1.665              return;
   1.666  
   1.667 -        while (tmp < end) {
   1.668 +        while ( tmp < end )
   1.669 +        {
   1.670              pte = dma_addr_level_pte(domain, tmp, level);
   1.671 -            if (pte) {
   1.672 +            if ( pte )
   1.673 +            {
   1.674                  free_xenheap_page((void *) maddr_to_virt(dma_pte_addr(*pte)));
   1.675                  dma_clear_pte(*pte);
   1.676                  iommu_flush_cache_entry(iommu, pte);
   1.677 @@ -565,8 +587,10 @@ void dma_pte_free_pagetable(struct domai
   1.678          }
   1.679          level++;
   1.680      }
   1.681 +
   1.682      /* free pgd */
   1.683 -    if (start == 0 && end == ((((u64)1) << addr_width) - 1)) {
   1.684 +    if ( start == 0 && end == ((((u64)1) << addr_width) - 1) )
   1.685 +    {
   1.686          free_xenheap_page((void *)hd->pgd);
   1.687          hd->pgd = NULL;
   1.688      }
   1.689 @@ -580,43 +604,41 @@ static int iommu_set_root_entry(struct i
   1.690      struct root_entry *root;
   1.691      unsigned long flags;
   1.692  
   1.693 -    if (iommu == NULL)
   1.694 +    if ( iommu == NULL )
   1.695          gdprintk(XENLOG_ERR VTDPREFIX,
   1.696 -            "iommu_set_root_entry: iommu == NULL\n");
   1.697 +                 "iommu_set_root_entry: iommu == NULL\n");
   1.698  
   1.699 -    spin_lock_irqsave(&iommu->lock, flags);
   1.700 -    if (!iommu->root_entry) {
   1.701 -        spin_unlock_irqrestore(&iommu->lock, flags);
   1.702 +    if ( unlikely(!iommu->root_entry) )
   1.703 +    {
   1.704          root = (struct root_entry *)alloc_xenheap_page();
   1.705 +        if ( root == NULL )
   1.706 +            return -ENOMEM;
   1.707 +
   1.708          memset((u8*)root, 0, PAGE_SIZE);
   1.709          iommu_flush_cache_page(iommu, root);
   1.710 -        spin_lock_irqsave(&iommu->lock, flags);
   1.711  
   1.712 -        if (!root && !iommu->root_entry) {
   1.713 -            spin_unlock_irqrestore(&iommu->lock, flags);
   1.714 -            return -ENOMEM;
   1.715 -        }
   1.716 -
   1.717 -        if (!iommu->root_entry)
   1.718 -            iommu->root_entry = root;
   1.719 -        else /* somebody is fast */
   1.720 +        if ( cmpxchg((unsigned long *)&iommu->root_entry,
   1.721 +                     0, (unsigned long)root) != 0 )
   1.722              free_xenheap_page((void *)root);
   1.723      }
   1.724 -    spin_unlock_irqrestore(&iommu->lock, flags);
   1.725  
   1.726      addr = iommu->root_entry;
   1.727 +
   1.728      spin_lock_irqsave(&iommu->register_lock, flags);
   1.729 +
   1.730      dmar_writeq(iommu->reg, DMAR_RTADDR_REG, virt_to_maddr(addr));
   1.731      cmd = iommu->gcmd | DMA_GCMD_SRTP;
   1.732      dmar_writel(iommu->reg, DMAR_GCMD_REG, cmd);
   1.733  
    1.734      /* Make sure hardware completes it */
   1.735 -    while (1) {
   1.736 +    for ( ; ; )
   1.737 +    {
   1.738          sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
   1.739 -        if (sts & DMA_GSTS_RTPS)
   1.740 +        if ( sts & DMA_GSTS_RTPS )
   1.741              break;
   1.742          cpu_relax();
   1.743      }
   1.744 +
   1.745      spin_unlock_irqrestore(&iommu->register_lock, flags);
   1.746  
   1.747      return 0;
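Annotation: the iommu_set_root_entry() hunk above is the one change in this patch that goes beyond reformatting: the lock/unlock/re-lock dance around the root-table allocation is replaced by a single cmpxchg() on iommu->root_entry, so the page is allocated and zeroed outside any lock and a racing loser simply frees its copy. A generic sketch of the idiom using C11 atomics rather than Xen's cmpxchg() macro (malloc-based allocation is an assumption for illustration):

    #include <stdatomic.h>
    #include <stdlib.h>

    static _Atomic(void *) root_entry;   /* stand-in for iommu->root_entry */

    static void *get_root_entry(size_t page_size)
    {
        void *page, *expected = NULL;

        page = atomic_load(&root_entry);
        if ( page != NULL )
            return page;                 /* already installed */

        page = calloc(1, page_size);     /* allocate + zero, lock-free */
        if ( page == NULL )
            return NULL;

        if ( !atomic_compare_exchange_strong(&root_entry, &expected, page) )
        {
            free(page);                  /* somebody else was faster */
            page = expected;             /* use the winner's table */
        }
        return page;
    }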
   1.748 @@ -628,16 +650,16 @@ static int iommu_enable_translation(stru
   1.749      unsigned long flags;
   1.750  
   1.751      dprintk(XENLOG_INFO VTDPREFIX,
   1.752 -        "iommu_enable_translation: enabling vt-d translation\n");
   1.753 +            "iommu_enable_translation: enabling vt-d translation\n");
   1.754      spin_lock_irqsave(&iommu->register_lock, flags);
   1.755      iommu->gcmd |= DMA_GCMD_TE;
   1.756      dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
    1.757      /* Make sure hardware completes it */
   1.758 -    while (1) {
   1.759 +    for ( ; ; )
   1.760 +    {
   1.761          sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
   1.762 -        if (sts & DMA_GSTS_TES) {
   1.763 +        if ( sts & DMA_GSTS_TES )
   1.764              break;
   1.765 -        }
   1.766          cpu_relax();
   1.767      }
   1.768      spin_unlock_irqrestore(&iommu->register_lock, flags);
   1.769 @@ -654,10 +676,11 @@ int iommu_disable_translation(struct iom
   1.770      dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
   1.771  
    1.772      /* Make sure hardware completes it */
   1.773 -    while(1) {
   1.774 +    for ( ; ; )
   1.775 +    {
   1.776          sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
   1.777 -        if (!(sts & DMA_GSTS_TES))
   1.778 -                break;
   1.779 +        if ( !(sts & DMA_GSTS_TES) )
   1.780 +            break;
   1.781          cpu_relax();
   1.782      }
   1.783      spin_unlock_irqrestore(&iommu->register_lock, flags);
   1.784 @@ -666,13 +689,13 @@ int iommu_disable_translation(struct iom
   1.785  
   1.786  static struct iommu *vector_to_iommu[NR_VECTORS];
   1.787  static int iommu_page_fault_do_one(struct iommu *iommu, int type,
   1.788 -        u8 fault_reason, u16 source_id, u32 addr)
   1.789 +                                   u8 fault_reason, u16 source_id, u32 addr)
   1.790  {
   1.791      dprintk(XENLOG_WARNING VTDPREFIX,
   1.792 -        "iommu_page_fault:%s: DEVICE %x:%x.%x addr %x REASON %x\n",
   1.793 -        (type ? "DMA Read" : "DMA Write"),
   1.794 -        (source_id >> 8), PCI_SLOT(source_id & 0xFF),
   1.795 -        PCI_FUNC(source_id & 0xFF), addr, fault_reason);
   1.796 +            "iommu_page_fault:%s: DEVICE %x:%x.%x addr %x REASON %x\n",
   1.797 +            (type ? "DMA Read" : "DMA Write"),
   1.798 +            (source_id >> 8), PCI_SLOT(source_id & 0xFF),
   1.799 +            PCI_FUNC(source_id & 0xFF), addr, fault_reason);
   1.800  
   1.801      print_vtd_entries(current->domain, (source_id >> 8),(source_id & 0xff),
   1.802                        (addr >> PAGE_SHIFT)); 
   1.803 @@ -681,7 +704,7 @@ static int iommu_page_fault_do_one(struc
   1.804  
   1.805  #define PRIMARY_FAULT_REG_LEN (16)
   1.806  static void iommu_page_fault(int vector, void *dev_id,
   1.807 -        struct cpu_user_regs *regs)
   1.808 +                             struct cpu_user_regs *regs)
   1.809  {
   1.810      struct iommu *iommu = dev_id;
   1.811      int reg, fault_index;
   1.812 @@ -689,29 +712,30 @@ static void iommu_page_fault(int vector,
   1.813      unsigned long flags;
   1.814  
   1.815      dprintk(XENLOG_WARNING VTDPREFIX,
   1.816 -        "iommu_page_fault: iommu->reg = %p\n", iommu->reg);
   1.817 +            "iommu_page_fault: iommu->reg = %p\n", iommu->reg);
   1.818  
   1.819      spin_lock_irqsave(&iommu->register_lock, flags);
   1.820      fault_status = dmar_readl(iommu->reg, DMAR_FSTS_REG);
   1.821      spin_unlock_irqrestore(&iommu->register_lock, flags);
   1.822  
   1.823      /* FIXME: ignore advanced fault log */
   1.824 -    if (!(fault_status & DMA_FSTS_PPF))
   1.825 +    if ( !(fault_status & DMA_FSTS_PPF) )
   1.826          return;
   1.827      fault_index = dma_fsts_fault_record_index(fault_status);
   1.828      reg = cap_fault_reg_offset(iommu->cap);
   1.829 -    while (1) {
   1.830 +    for ( ; ; )
   1.831 +    {
   1.832          u8 fault_reason;
   1.833          u16 source_id;
   1.834 -        u32 guest_addr;
   1.835 +        u32 guest_addr, data;
   1.836          int type;
   1.837 -        u32 data;
   1.838  
   1.839          /* highest 32 bits */
   1.840          spin_lock_irqsave(&iommu->register_lock, flags);
   1.841          data = dmar_readl(iommu->reg, reg +
   1.842 -                fault_index * PRIMARY_FAULT_REG_LEN + 12);
   1.843 -        if (!(data & DMA_FRCD_F)) {
   1.844 +                          fault_index * PRIMARY_FAULT_REG_LEN + 12);
   1.845 +        if ( !(data & DMA_FRCD_F) )
   1.846 +        {
   1.847              spin_unlock_irqrestore(&iommu->register_lock, flags);
   1.848              break;
   1.849          }
   1.850 @@ -720,31 +744,32 @@ static void iommu_page_fault(int vector,
   1.851          type = dma_frcd_type(data);
   1.852  
   1.853          data = dmar_readl(iommu->reg, reg +
   1.854 -                fault_index * PRIMARY_FAULT_REG_LEN + 8);
   1.855 +                          fault_index * PRIMARY_FAULT_REG_LEN + 8);
   1.856          source_id = dma_frcd_source_id(data);
   1.857  
   1.858          guest_addr = dmar_readq(iommu->reg, reg +
   1.859 -                fault_index * PRIMARY_FAULT_REG_LEN);
   1.860 +                                fault_index * PRIMARY_FAULT_REG_LEN);
   1.861          guest_addr = dma_frcd_page_addr(guest_addr);
   1.862          /* clear the fault */
   1.863          dmar_writel(iommu->reg, reg +
   1.864 -            fault_index * PRIMARY_FAULT_REG_LEN + 12, DMA_FRCD_F);
   1.865 +                    fault_index * PRIMARY_FAULT_REG_LEN + 12, DMA_FRCD_F);
   1.866          spin_unlock_irqrestore(&iommu->register_lock, flags);
   1.867  
   1.868          iommu_page_fault_do_one(iommu, type, fault_reason,
   1.869 -                source_id, guest_addr);
   1.870 +                                source_id, guest_addr);
   1.871  
   1.872          fault_index++;
   1.873 -        if (fault_index > cap_num_fault_regs(iommu->cap))
   1.874 +        if ( fault_index > cap_num_fault_regs(iommu->cap) )
   1.875              fault_index = 0;
   1.876      }
   1.877 +
   1.878      /* clear primary fault overflow */
   1.879 -    if (fault_status & DMA_FSTS_PFO) {
   1.880 +    if ( fault_status & DMA_FSTS_PFO )
   1.881 +    {
   1.882          spin_lock_irqsave(&iommu->register_lock, flags);
   1.883          dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_PFO);
   1.884          spin_unlock_irqrestore(&iommu->register_lock, flags);
   1.885      }
   1.886 -    return;
   1.887  }
   1.888  
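Annotation: iommu_page_fault() drains the primary fault recording registers as a ring: dma_fsts_fault_record_index() names the first pending slot, each record is PRIMARY_FAULT_REG_LEN (16) bytes wide, the DMA_FRCD_F bit in the record's top dword marks it valid, and writing that bit back clears it. A self-contained model of the walk, with a plain array standing in for the hardware registers (names and the ring size here are hypothetical, not the real register layout):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_FRCD 8                    /* hypothetical ring size */
    #define FRCD_F  (1u << 31)           /* "record valid" bit */

    static uint32_t frcd_hi[NR_FRCD];    /* top dword of each fault record */

    static void drain_faults(unsigned int index)
    {
        for ( ; ; )
        {
            if ( !(frcd_hi[index] & FRCD_F) )
                break;                         /* empty slot: all drained */
            printf("fault record %u pending\n", index);
            frcd_hi[index] &= ~FRCD_F;         /* hw: write 1 to clear */
            if ( ++index >= NR_FRCD )
                index = 0;                     /* the ring wraps around */
        }
    }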
   1.889  static void dma_msi_unmask(unsigned int vector)
   1.890 @@ -840,14 +865,15 @@ int iommu_set_interrupt(struct iommu *io
   1.891      irq_vector[vector] = vector;
   1.892      vector_irq[vector] = vector;
   1.893  
   1.894 -    if (!vector) {
   1.895 +    if ( !vector )
   1.896 +    {
   1.897          gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: no vectors\n");
   1.898          return -EINVAL;
   1.899      }
   1.900  
   1.901      irq_desc[vector].handler = &dma_msi_type;
   1.902      ret = request_irq(vector, iommu_page_fault, 0, "dmar", iommu);
   1.903 -    if (ret)
   1.904 +    if ( ret )
   1.905          gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: can't request irq\n");
   1.906      return vector;
   1.907  }
   1.908 @@ -857,25 +883,27 @@ struct iommu *iommu_alloc(void *hw_data)
   1.909      struct acpi_drhd_unit *drhd = (struct acpi_drhd_unit *) hw_data;
   1.910      struct iommu *iommu;
   1.911      
   1.912 -    if (nr_iommus > MAX_IOMMUS) {
   1.913 +    if ( nr_iommus > MAX_IOMMUS )
   1.914 +    {
   1.915          gdprintk(XENLOG_ERR VTDPREFIX,
   1.916 -            "IOMMU: nr_iommus %d > MAX_IOMMUS\n", nr_iommus);
   1.917 +                 "IOMMU: nr_iommus %d > MAX_IOMMUS\n", nr_iommus);
   1.918          return NULL;
   1.919      }
   1.920 -        
   1.921 +
   1.922      iommu = xmalloc(struct iommu);
   1.923 -    if (!iommu)
   1.924 +    if ( !iommu )
   1.925          return NULL;
   1.926      memset(iommu, 0, sizeof(struct iommu));
   1.927  
   1.928      set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, drhd->address);
   1.929      iommu->reg = (void *) fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
   1.930      dprintk(XENLOG_INFO VTDPREFIX,
   1.931 -        "iommu_alloc: iommu->reg = %p drhd->address = %lx\n",
   1.932 -        iommu->reg, drhd->address);
   1.933 +            "iommu_alloc: iommu->reg = %p drhd->address = %lx\n",
   1.934 +            iommu->reg, drhd->address);
   1.935      nr_iommus++;
   1.936  
   1.937 -    if (!iommu->reg) {
   1.938 +    if ( !iommu->reg )
   1.939 +    {
    1.940          printk(KERN_ERR VTDPREFIX "IOMMU: can't map the region\n");
   1.941          goto error;
   1.942      }
   1.943 @@ -888,33 +916,30 @@ struct iommu *iommu_alloc(void *hw_data)
   1.944  
   1.945      drhd->iommu = iommu;
   1.946      return iommu;
   1.947 -error:
   1.948 + error:
   1.949      xfree(iommu);
   1.950      return NULL;
   1.951  }
   1.952  
   1.953  static void free_iommu(struct iommu *iommu)
   1.954  {
   1.955 -    if (!iommu)
   1.956 +    if ( !iommu )
   1.957          return;
   1.958 -    if (iommu->root_entry)
   1.959 +    if ( iommu->root_entry )
   1.960          free_xenheap_page((void *)iommu->root_entry);
   1.961 -    if (iommu->reg)
   1.962 +    if ( iommu->reg )
   1.963          iounmap(iommu->reg);
   1.964      free_irq(iommu->vector);
   1.965      xfree(iommu);
   1.966  }
   1.967  
   1.968 -#define guestwidth_to_adjustwidth(gaw) ({ \
   1.969 -    int agaw; \
   1.970 -    int r = (gaw - 12) % 9; \
   1.971 -    if (r == 0) \
   1.972 -        agaw = gaw; \
   1.973 -    else \
   1.974 -        agaw = gaw + 9 - r; \
   1.975 -    if (agaw > 64) \
   1.976 -        agaw = 64; \
   1.977 +#define guestwidth_to_adjustwidth(gaw) ({       \
   1.978 +    int agaw, r = (gaw - 12) % 9;               \
   1.979 +    agaw = (r == 0) ? gaw : (gaw + 9 - r);      \
   1.980 +    if ( agaw > 64 )                            \
   1.981 +        agaw = 64;                              \
   1.982      agaw; })
   1.983 +
   1.984  int iommu_domain_init(struct domain *domain)
   1.985  {
   1.986      struct hvm_iommu *hd = domain_hvm_iommu(domain);
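Annotation: guestwidth_to_adjustwidth(), rewritten just above, rounds a guest address width up to the next VT-d "adjusted guest address width", i.e. the next value of the form 12 + 9*n (each page-table level resolves 9 bits above the 4K page offset), clamped to 64. Worked through by hand (arithmetic only, not from the patch):

    /* r = (gaw - 12) % 9; agaw = (r == 0) ? gaw : gaw + 9 - r; cap at 64
     *   gaw = 48:  r = 36 % 9 = 0  ->  agaw = 48              (4 levels)
     *   gaw = 36:  r = 24 % 9 = 6  ->  agaw = 36 + 9 - 6 = 39 (3 levels)
     *   gaw = 66:  r = 54 % 9 = 0  ->  agaw = 66, clamped to 64 */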
   1.987 @@ -945,7 +970,7 @@ int iommu_domain_init(struct domain *dom
   1.988      if ( !test_bit(agaw, &sagaw) )
   1.989      {
   1.990          gdprintk(XENLOG_ERR VTDPREFIX,
   1.991 -            "IOMMU: hardware doesn't support the agaw\n");
   1.992 +                 "IOMMU: hardware doesn't support the agaw\n");
   1.993          agaw = find_next_bit(&sagaw, 5, agaw);
   1.994          if (agaw >= 5)
   1.995              return -ENODEV;
   1.996 @@ -965,14 +990,17 @@ static int domain_context_mapping_one(
   1.997      int ret = 0;
   1.998  
   1.999      context = device_to_context_entry(iommu, bus, devfn);
  1.1000 -    if (!context) {
  1.1001 +    if ( !context )
  1.1002 +    {
  1.1003          gdprintk(XENLOG_INFO VTDPREFIX,
  1.1004 -            "domain_context_mapping_one:context == NULL:bdf = %x:%x:%x \n",
  1.1005 -            bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  1.1006 +                 "domain_context_mapping_one:context == NULL:"
  1.1007 +                 "bdf = %x:%x:%x\n",
  1.1008 +                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  1.1009          return -ENOMEM;
  1.1010      }
  1.1011      spin_lock_irqsave(&iommu->lock, flags);
  1.1012 -    if (context_present(*context)) {
  1.1013 +    if ( context_present(*context) )
  1.1014 +    {
  1.1015          spin_unlock_irqrestore(&iommu->lock, flags);
  1.1016          gdprintk(XENLOG_INFO VTDPREFIX,
  1.1017                   "domain_context_mapping_one:context present:bdf=%x:%x:%x\n",
  1.1018 @@ -982,8 +1010,8 @@ static int domain_context_mapping_one(
  1.1019  
  1.1020  #ifdef VTD_DEBUG
  1.1021      dprintk(XENLOG_INFO VTDPREFIX,
  1.1022 -        "context_mapping_one_1-%x:%x:%x-*context = %lx %lx\n",
  1.1023 -        bus, PCI_SLOT(devfn), PCI_FUNC(devfn), context->hi, context->lo);
  1.1024 +            "context_mapping_one_1-%x:%x:%x-*context = %lx %lx\n",
  1.1025 +            bus, PCI_SLOT(devfn), PCI_FUNC(devfn), context->hi, context->lo);
  1.1026  #endif
  1.1027  
  1.1028      /*
  1.1029 @@ -993,9 +1021,12 @@ static int domain_context_mapping_one(
  1.1030      context_set_domain_id(*context, domain->domain_id);
  1.1031      context_set_address_width(*context, hd->agaw);
  1.1032  
  1.1033 -    if (ecap_pass_thru(iommu->ecap))
  1.1034 +    if ( ecap_pass_thru(iommu->ecap) )
  1.1035 +    {
  1.1036          context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
  1.1037 -    else {
  1.1038 +    }
  1.1039 +    else
  1.1040 +    {
  1.1041          context_set_address_root(*context, virt_to_maddr(hd->pgd));
  1.1042          context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
  1.1043      }
  1.1044 @@ -1006,13 +1037,14 @@ static int domain_context_mapping_one(
  1.1045  
  1.1046  #ifdef VTD_DEBUG
  1.1047      dprintk(XENLOG_INFO VTDPREFIX,
  1.1048 -        "context_mapping_one_2-%x:%x:%x-*context=%lx %lx hd->pgd = %p\n",
  1.1049 -        bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
  1.1050 -        context->hi, context->lo, hd->pgd);
  1.1051 +            "context_mapping_one_2-%x:%x:%x-*context=%lx %lx hd->pgd = %p\n",
  1.1052 +            bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
  1.1053 +            context->hi, context->lo, hd->pgd);
  1.1054  #endif
  1.1055  
  1.1056 -    if (iommu_flush_context_device(iommu, domain->domain_id,
  1.1057 -                    (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1))
  1.1058 +    if ( iommu_flush_context_device(iommu, domain->domain_id,
  1.1059 +                                   (((u16)bus) << 8) | devfn,
  1.1060 +                                    DMA_CCMD_MASK_NOBIT, 1) )
  1.1061          iommu_flush_write_buffer(iommu);
  1.1062      else
  1.1063          iommu_flush_iotlb_dsi(iommu, domain->domain_id, 0);
  1.1064 @@ -1025,18 +1057,21 @@ static int __pci_find_next_cap(u8 bus, u
  1.1065      u8 id;
  1.1066      int ttl = 48;
  1.1067  
  1.1068 -    while (ttl--) {
  1.1069 +    while ( ttl-- )
  1.1070 +    {
  1.1071          pos = read_pci_config_byte(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos);
  1.1072 -        if (pos < 0x40)
  1.1073 +        if ( pos < 0x40 )
  1.1074              break;
  1.1075 +
  1.1076          pos &= ~3;
  1.1077          id = read_pci_config_byte(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
  1.1078 -                 pos + PCI_CAP_LIST_ID);
  1.1079 +                                  pos + PCI_CAP_LIST_ID);
  1.1080  
  1.1081 -        if (id == 0xff)
  1.1082 +        if ( id == 0xff )
  1.1083              break;
  1.1084 -        if (id == cap)
  1.1085 +        if ( id == cap )
  1.1086              return pos;
  1.1087 +
  1.1088          pos += PCI_CAP_LIST_NEXT;
  1.1089      }
  1.1090      return 0;
  1.1091 @@ -1055,17 +1090,18 @@ int pdev_type(struct pci_dev *dev)
  1.1092      u16 status;
  1.1093  
  1.1094      class_device = read_pci_config_16(dev->bus, PCI_SLOT(dev->devfn),
  1.1095 -                 PCI_FUNC(dev->devfn), PCI_CLASS_DEVICE);
  1.1096 -    if (class_device == PCI_CLASS_BRIDGE_PCI)
  1.1097 +                                      PCI_FUNC(dev->devfn), PCI_CLASS_DEVICE);
  1.1098 +    if ( class_device == PCI_CLASS_BRIDGE_PCI )
  1.1099          return DEV_TYPE_PCI_BRIDGE;
  1.1100  
  1.1101      status = read_pci_config_16(dev->bus, PCI_SLOT(dev->devfn),
  1.1102 -                 PCI_FUNC(dev->devfn), PCI_STATUS);
  1.1103 +                                PCI_FUNC(dev->devfn), PCI_STATUS);
  1.1104  
  1.1105 -    if (!(status & PCI_STATUS_CAP_LIST))
  1.1106 +    if ( !(status & PCI_STATUS_CAP_LIST) )
  1.1107          return DEV_TYPE_PCI;
  1.1108  
  1.1109 -    if (__pci_find_next_cap(dev->bus, dev->devfn, PCI_CAPABILITY_LIST, PCI_CAP_ID_EXP))
  1.1110 +    if ( __pci_find_next_cap(dev->bus, dev->devfn,
  1.1111 +                            PCI_CAPABILITY_LIST, PCI_CAP_ID_EXP) )
  1.1112          return DEV_TYPE_PCIe_ENDPOINT;
  1.1113  
  1.1114      return DEV_TYPE_PCI;
  1.1115 @@ -1084,65 +1120,81 @@ static int domain_context_mapping(
  1.1116      u32 type;
  1.1117  
  1.1118      type = pdev_type(pdev);
  1.1119 -    if (type == DEV_TYPE_PCI_BRIDGE) {
  1.1120 -        sec_bus = read_pci_config_byte(pdev->bus, PCI_SLOT(pdev->devfn),
  1.1121 -                      PCI_FUNC(pdev->devfn), PCI_SECONDARY_BUS);
  1.1122 +    if ( type == DEV_TYPE_PCI_BRIDGE )
  1.1123 +    {
  1.1124 +        sec_bus = read_pci_config_byte(
  1.1125 +            pdev->bus, PCI_SLOT(pdev->devfn),
  1.1126 +            PCI_FUNC(pdev->devfn), PCI_SECONDARY_BUS);
  1.1127  
  1.1128 -        if (bus2bridge[sec_bus].bus == 0) {
  1.1129 +        if ( bus2bridge[sec_bus].bus == 0 )
  1.1130 +        {
  1.1131              bus2bridge[sec_bus].bus   =  pdev->bus;
  1.1132              bus2bridge[sec_bus].devfn =  pdev->devfn;
  1.1133          }
  1.1134  
  1.1135 -        sub_bus = read_pci_config_byte(pdev->bus, PCI_SLOT(pdev->devfn),
  1.1136 -                      PCI_FUNC(pdev->devfn), PCI_SUBORDINATE_BUS);
  1.1137 +        sub_bus = read_pci_config_byte(
  1.1138 +            pdev->bus, PCI_SLOT(pdev->devfn),
  1.1139 +            PCI_FUNC(pdev->devfn), PCI_SUBORDINATE_BUS);
  1.1140  
  1.1141 -        if (sec_bus != sub_bus) {
  1.1142 +        if ( sec_bus != sub_bus )
  1.1143 +        {
  1.1144              dprintk(XENLOG_INFO VTDPREFIX,
  1.1145 -                "context_mapping: nested PCI bridge not supported\n");
  1.1146 +                    "context_mapping: nested PCI bridge not supported\n");
  1.1147              dprintk(XENLOG_INFO VTDPREFIX,
  1.1148 -                "    bdf = %x:%x:%x sec_bus = %x sub_bus = %x\n",
  1.1149 -                pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
  1.1150 -                sec_bus, sub_bus);
  1.1151 +                    "    bdf = %x:%x:%x sec_bus = %x sub_bus = %x\n",
  1.1152 +                    pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
  1.1153 +                    sec_bus, sub_bus);
  1.1154          }
  1.1155      }
  1.1156  
  1.1157 -    if (type == DEV_TYPE_PCIe_ENDPOINT) {
  1.1158 +    if ( type == DEV_TYPE_PCIe_ENDPOINT )
  1.1159 +    {
  1.1160          gdprintk(XENLOG_INFO VTDPREFIX,
  1.1161 -            "domain_context_mapping:PCIe : bdf = %x:%x:%x\n",
  1.1162 -            pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  1.1163 +                 "domain_context_mapping:PCIe : bdf = %x:%x:%x\n",
  1.1164 +                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  1.1165          ret = domain_context_mapping_one(domain, iommu,
  1.1166 -                  (u8)(pdev->bus), (u8) (pdev->devfn));
  1.1167 +                                         (u8)(pdev->bus), (u8) (pdev->devfn));
  1.1168      }
  1.1169  
  1.1170      /* PCI devices */
  1.1171 -    if (type == DEV_TYPE_PCI) {
  1.1172 +    if ( type == DEV_TYPE_PCI )
  1.1173 +    {
  1.1174          gdprintk(XENLOG_INFO VTDPREFIX,
  1.1175 -            "domain_context_mapping:PCI: bdf = %x:%x:%x\n",
  1.1176 -            pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  1.1177 +                 "domain_context_mapping:PCI: bdf = %x:%x:%x\n",
  1.1178 +                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  1.1179  
  1.1180 -        if (pdev->bus == 0)
  1.1181 -            ret = domain_context_mapping_one(domain, iommu,
  1.1182 -                      (u8)(pdev->bus), (u8) (pdev->devfn));
  1.1183 -        else {
  1.1184 -            if (bus2bridge[pdev->bus].bus != 0)
  1.1185 +        if ( pdev->bus == 0 )
  1.1186 +        {
  1.1187 +            ret = domain_context_mapping_one(
  1.1188 +                domain, iommu, (u8)(pdev->bus), (u8) (pdev->devfn));
  1.1189 +        }
  1.1190 +        else
  1.1191 +        {
  1.1192 +            if ( bus2bridge[pdev->bus].bus != 0 )
  1.1193                  gdprintk(XENLOG_ERR VTDPREFIX,
  1.1194 -                    "domain_context_mapping:bus2bridge[pdev->bus].bus==0\n");
  1.1195 +                         "domain_context_mapping:bus2bridge"
  1.1196 +                         "[pdev->bus].bus==0\n");
  1.1197  
  1.1198 -            ret = domain_context_mapping_one(domain, iommu,
  1.1199 -                      (u8)(bus2bridge[pdev->bus].bus),
  1.1200 -                      (u8)(bus2bridge[pdev->bus].devfn));
  1.1201 +            ret = domain_context_mapping_one(
  1.1202 +                domain, iommu,
  1.1203 +                (u8)(bus2bridge[pdev->bus].bus),
  1.1204 +                (u8)(bus2bridge[pdev->bus].devfn));
  1.1205  
  1.1206              /* now map everything behind the PCI bridge */
  1.1207 -            for (dev = 0; dev < 32; dev++) {
  1.1208 -                for (func = 0; func < 8; func++) {
  1.1209 -                    ret = domain_context_mapping_one(domain, iommu,
  1.1210 -                              pdev->bus, (u8)PCI_DEVFN(dev, func));
  1.1211 -                    if (ret)
  1.1212 +            for ( dev = 0; dev < 32; dev++ )
  1.1213 +            {
  1.1214 +                for ( func = 0; func < 8; func++ )
  1.1215 +                {
  1.1216 +                    ret = domain_context_mapping_one(
  1.1217 +                        domain, iommu,
  1.1218 +                        pdev->bus, (u8)PCI_DEVFN(dev, func));
  1.1219 +                    if ( ret )
  1.1220                          return ret;
  1.1221                  }
  1.1222              }
  1.1223          }
  1.1224      }
  1.1225 +
  1.1226      return ret;
  1.1227  }
  1.1228  
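Annotation: domain_context_mapping() keys context entries by (bus, devfn). Devices behind a conventional PCI bridge appear to the IOMMU under the bridge's requester id, so the code maps the bridge recorded in bus2bridge[] and then every possible devfn on the secondary bus, 32 slots times 8 functions. For reference, devfn uses the standard PCI packing (standard definitions, not specific to this file):

    #define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))
    #define PCI_SLOT(devfn)        (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn)        ((devfn) & 0x07)
    /* so the dev < 32 / func < 8 double loop above covers all 256 devfns */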
  1.1229 @@ -1155,23 +1207,28 @@ static int domain_context_unmap_one(
  1.1230      unsigned long flags;
  1.1231  
  1.1232      context = device_to_context_entry(iommu, bus, devfn);
  1.1233 -    if (!context) {
  1.1234 +    if ( !context )
  1.1235 +    {
  1.1236          gdprintk(XENLOG_INFO VTDPREFIX,
  1.1237 -            "domain_context_unmap_one-%x:%x:%x- context == NULL:return\n",
  1.1238 -            bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  1.1239 +                 "domain_context_unmap_one-%x:%x:%x- context == NULL:return\n",
  1.1240 +                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  1.1241          return -ENOMEM;
  1.1242      }
  1.1243 +
  1.1244      spin_lock_irqsave(&iommu->lock, flags);
  1.1245 -    if (!context_present(*context)) {
  1.1246 +    if ( !context_present(*context) )
  1.1247 +    {
  1.1248          spin_unlock_irqrestore(&iommu->lock, flags);
  1.1249          gdprintk(XENLOG_INFO VTDPREFIX,
  1.1250 -            "domain_context_unmap_one-%x:%x:%x- context NOT present:return\n",
  1.1251 -            bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  1.1252 +                 "domain_context_unmap_one-%x:%x:%x- "
  1.1253 +                 "context NOT present:return\n",
  1.1254 +                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  1.1255          return 0;
  1.1256      }
  1.1257 +
  1.1258      gdprintk(XENLOG_INFO VTDPREFIX,
  1.1259 -        "domain_context_unmap_one_1:bdf = %x:%x:%x\n",
  1.1260 -        bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  1.1261 +             "domain_context_unmap_one_1:bdf = %x:%x:%x\n",
  1.1262 +             bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  1.1263  
  1.1264      context_clear_present(*context);
  1.1265      context_clear_entry(*context);
  1.1266 @@ -1181,8 +1238,8 @@ static int domain_context_unmap_one(
  1.1267      spin_unlock_irqrestore(&iommu->lock, flags);
  1.1268  
  1.1269      gdprintk(XENLOG_INFO VTDPREFIX,
  1.1270 -        "domain_context_unmap_one_2:bdf = %x:%x:%x\n",
  1.1271 -        bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  1.1272 +             "domain_context_unmap_one_2:bdf = %x:%x:%x\n",
  1.1273 +             bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  1.1274  
  1.1275      return 0;
  1.1276  }
  1.1277 @@ -1197,54 +1254,69 @@ static int domain_context_unmap(
  1.1278      u32 type;
  1.1279  
  1.1280      type = pdev_type(pdev);
  1.1281 -    if (type == DEV_TYPE_PCI_BRIDGE) {
  1.1282 -        sec_bus = read_pci_config_byte(pdev->bus, PCI_SLOT(pdev->devfn),
  1.1283 -                      PCI_FUNC(pdev->devfn), PCI_SECONDARY_BUS);
  1.1284 -        sub_bus = read_pci_config_byte(pdev->bus, PCI_SLOT(pdev->devfn),
  1.1285 -                      PCI_FUNC(pdev->devfn), PCI_SUBORDINATE_BUS);
  1.1286 +    if ( type == DEV_TYPE_PCI_BRIDGE )
  1.1287 +    {
  1.1288 +        sec_bus = read_pci_config_byte(
  1.1289 +            pdev->bus, PCI_SLOT(pdev->devfn),
  1.1290 +            PCI_FUNC(pdev->devfn), PCI_SECONDARY_BUS);
  1.1291 +        sub_bus = read_pci_config_byte(
  1.1292 +            pdev->bus, PCI_SLOT(pdev->devfn),
  1.1293 +            PCI_FUNC(pdev->devfn), PCI_SUBORDINATE_BUS);
  1.1294  
  1.1295          gdprintk(XENLOG_INFO VTDPREFIX,
  1.1296 -            "domain_context_unmap:BRIDGE:%x:%x:%x sec_bus=%x sub_bus=%x\n",
  1.1297 -            pdev->bus, PCI_SLOT(pdev->devfn),
  1.1298 -            PCI_FUNC(pdev->devfn), sec_bus, sub_bus);
  1.1299 +                 "domain_context_unmap:BRIDGE:%x:%x:%x "
  1.1300 +                 "sec_bus=%x sub_bus=%x\n",
  1.1301 +                 pdev->bus, PCI_SLOT(pdev->devfn),
  1.1302 +                 PCI_FUNC(pdev->devfn), sec_bus, sub_bus);
  1.1303      }
  1.1304  
  1.1305 -    if (type == DEV_TYPE_PCIe_ENDPOINT) {
  1.1306 +    if ( type == DEV_TYPE_PCIe_ENDPOINT )
  1.1307 +    {
  1.1308          gdprintk(XENLOG_INFO VTDPREFIX,
  1.1309                   "domain_context_unmap:PCIe : bdf = %x:%x:%x\n",
  1.1310                   pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  1.1311          ret = domain_context_unmap_one(domain, iommu,
  1.1312 -                  (u8)(pdev->bus), (u8) (pdev->devfn));
  1.1313 +                                       (u8)(pdev->bus), (u8) (pdev->devfn));
  1.1314      }
  1.1315  
  1.1316      /* PCI devices */
  1.1317 -    if (type == DEV_TYPE_PCI) {
  1.1318 +    if ( type == DEV_TYPE_PCI )
  1.1319 +    {
  1.1320          gdprintk(XENLOG_INFO VTDPREFIX,
  1.1321                   "domain_context_unmap:PCI: bdf = %x:%x:%x\n",
  1.1322                   pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  1.1323 -        if (pdev->bus == 0)
  1.1324 -            ret = domain_context_unmap_one(domain, iommu,
  1.1325 -                      (u8)(pdev->bus), (u8) (pdev->devfn));
  1.1326 -        else {
  1.1327 -            if (bus2bridge[pdev->bus].bus != 0)
  1.1328 +        if ( pdev->bus == 0 )
  1.1329 +        {
  1.1330 +            ret = domain_context_unmap_one(
  1.1331 +                domain, iommu,
  1.1332 +                (u8)(pdev->bus), (u8) (pdev->devfn));
  1.1333 +        }
  1.1334 +        else
  1.1335 +        {
  1.1336 +            if ( bus2bridge[pdev->bus].bus != 0 )
  1.1337                  gdprintk(XENLOG_INFO VTDPREFIX,
  1.1338 -                         "domain_context_mapping:bus2bridge[pdev->bus].bus==0\n");
  1.1339 +                         "domain_context_mapping:"
  1.1340 +                         "bus2bridge[pdev->bus].bus==0\n");
  1.1341  
  1.1342              ret = domain_context_unmap_one(domain, iommu,
  1.1343 -                      (u8)(bus2bridge[pdev->bus].bus),
  1.1344 -                      (u8)(bus2bridge[pdev->bus].devfn));
  1.1345 +                                           (u8)(bus2bridge[pdev->bus].bus),
  1.1346 +                                           (u8)(bus2bridge[pdev->bus].devfn));
  1.1347  
  1.1348              /* now map everything behind the PCI bridge */
  1.1349 -            for (dev = 0; dev < 32; dev++) {
  1.1350 -                for (func = 0; func < 8; func++) {
  1.1351 -                    ret = domain_context_unmap_one(domain, iommu,
  1.1352 -                              pdev->bus, (u8)PCI_DEVFN(dev, func));
  1.1353 -                    if (ret)
  1.1354 +            for ( dev = 0; dev < 32; dev++ )
  1.1355 +            {
  1.1356 +                for ( func = 0; func < 8; func++ )
  1.1357 +                {
  1.1358 +                    ret = domain_context_unmap_one(
  1.1359 +                        domain, iommu,
  1.1360 +                        pdev->bus, (u8)PCI_DEVFN(dev, func));
  1.1361 +                    if ( ret )
  1.1362                          return ret;
  1.1363                  }
  1.1364              }
  1.1365          }
  1.1366      }
  1.1367 +
  1.1368      return ret;
  1.1369  }
  1.1370  
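Two oddities in the PCI branch above survive the reindentation: the warning fires when bus2bridge[pdev->bus].bus != 0 yet its text says "==0", and it names domain_context_mapping although this is the unmap path (likewise the "now map everything behind the PCI bridge" comment). Both read like copy-overs from the mapping function. The redirection itself is simple: a device on a subordinate bus is unmapped via its parent bridge's BDF. A hedged sketch, where bridge_of[] stands in for the real bus2bridge[] table built during bridge scanning:

    #include <stdio.h>

    struct bridge { unsigned char bus, devfn; };

    static struct bridge bridge_of[256];    /* indexed by secondary bus */

    static struct bridge lookup_bridge(unsigned char bus)
    {
        if ( (bus != 0) && (bridge_of[bus].bus == 0) &&
             (bridge_of[bus].devfn == 0) )
            fprintf(stderr, "no bridge recorded for bus %02x\n", bus);
        return bridge_of[bus];
    }

    int main(void)
    {
        struct bridge b;

        bridge_of[5] = (struct bridge){ .bus = 0, .devfn = (3 << 3) | 0 };
        b = lookup_bridge(5);           /* device 05:xx.x -> bridge 00:03.0 */
        printf("redirect to %02x:%02x.%x\n", b.bus, b.devfn >> 3, b.devfn & 7);
        return 0;
    }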
  1.1371 @@ -1262,11 +1334,12 @@ void reassign_device_ownership(
  1.1372      unsigned long flags;
  1.1373  
  1.1374      gdprintk(XENLOG_ERR VTDPREFIX,
  1.1375 -        "reassign_device-%x:%x:%x- source = %d target = %d\n",
  1.1376 -        bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
  1.1377 -        source->domain_id, target->domain_id);
  1.1378 +             "reassign_device-%x:%x:%x- source = %d target = %d\n",
  1.1379 +             bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
  1.1380 +             source->domain_id, target->domain_id);
  1.1381  
  1.1382 -    for_each_pdev(source, pdev) {
  1.1383 +    for_each_pdev( source, pdev )
  1.1384 +    {
  1.1385          if ( (pdev->bus != bus) || (pdev->devfn != devfn) )
  1.1386              continue;
  1.1387  
  1.1388 @@ -1276,9 +1349,7 @@ void reassign_device_ownership(
  1.1389          iommu = drhd->iommu;
  1.1390          domain_context_unmap(source, iommu, pdev);
  1.1391  
  1.1392 -        /*
  1.1393 -         * move pci device from the source domain to target domain.
  1.1394 -         */
  1.1395 +        /* Move pci device from the source domain to target domain. */
  1.1396          spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
  1.1397          spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
  1.1398          list_move(&pdev->list, &target_hd->pdev_list);
  1.1399 @@ -1286,12 +1357,9 @@ void reassign_device_ownership(
  1.1400          spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
  1.1401  
  1.1402          status = domain_context_mapping(target, iommu, pdev);
  1.1403 -        if (status != 0)
  1.1404 +        if ( status != 0 )
  1.1405              gdprintk(XENLOG_ERR VTDPREFIX, "domain_context_mapping failed\n");
  1.1406  
  1.1407 -        /*
  1.1408 -         * We are done.
  1.1409 -         */
  1.1410          break;
  1.1411      }
  1.1412  }
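One thing the cleanup leaves alone: both spin_lock_irqsave() calls in the move reuse the same flags variable, so the IRQ state saved by the first lock is overwritten by the second; since interrupts are already off at that point, the final restore can leave them off. The usual pattern keeps one saved word per nesting level; a stub-typed sketch (lock_irqsave()/unlock_irqrestore() are stand-ins):

    typedef unsigned long irqflags_t;

    struct lock { int taken; };

    static irqflags_t lock_irqsave(struct lock *l)
    { l->taken = 1; return 1; /* pretend IRQs were on */ }

    static void unlock_irqrestore(struct lock *l, irqflags_t f)
    { (void)f; l->taken = 0; }

    static void move_locked(struct lock *src, struct lock *dst)
    {
        irqflags_t f_src, f_dst;            /* one saved state per lock */

        f_src = lock_irqsave(src);
        f_dst = lock_irqsave(dst);
        /* ... list_move(&pdev->list, &target_hd->pdev_list) ... */
        unlock_irqrestore(dst, f_dst);
        unlock_irqrestore(src, f_src);      /* restores the true outer state */
    }

    int main(void)
    {
        struct lock a = { 0 }, b = { 0 };
        move_locked(&a, &b);
        return a.taken | b.taken;
    }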
  1.1413 @@ -1301,37 +1369,37 @@ void return_devices_to_dom0(struct domai
  1.1414      struct hvm_iommu *hd  = domain_hvm_iommu(d);
  1.1415      struct pci_dev *pdev;
  1.1416  
  1.1417 -    while (!list_empty(&hd->pdev_list)) {
  1.1418 +    while ( !list_empty(&hd->pdev_list) )
  1.1419 +    {
  1.1420          pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
  1.1421          dprintk(XENLOG_INFO VTDPREFIX,
  1.1422 -            "return_devices_to_dom0: bdf = %x:%x:%x\n",
  1.1423 -            pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  1.1424 +                "return_devices_to_dom0: bdf = %x:%x:%x\n",
  1.1425 +                pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  1.1426          reassign_device_ownership(d, dom0, pdev->bus, pdev->devfn);
  1.1427      }
  1.1428  
  1.1429  #ifdef VTD_DEBUG
  1.1430 -    for_each_pdev(dom0, pdev) {
  1.1431 +    for_each_pdev ( dom0, pdev )
  1.1432          dprintk(XENLOG_INFO VTDPREFIX,
  1.1433 -            "return_devices_to_dom0:%x: bdf = %x:%x:%x\n",
  1.1434 -            dom0->domain_id, pdev->bus,
  1.1435 -            PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  1.1436 -    }
  1.1437 +                "return_devices_to_dom0:%x: bdf = %x:%x:%x\n",
  1.1438 +                dom0->domain_id, pdev->bus,
  1.1439 +                PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  1.1440  #endif
  1.1441  }
  1.1442  
  1.1443  void iommu_domain_teardown(struct domain *d)
  1.1444  {
  1.1445 -  if (list_empty(&acpi_drhd_units))
  1.1446 -      return;
  1.1447 +    if ( list_empty(&acpi_drhd_units) )
  1.1448 +        return;
  1.1449  
  1.1450  #if CONFIG_PAGING_LEVELS == 3
  1.1451 -  {
  1.1452 -    struct hvm_iommu *hd  = domain_hvm_iommu(d);
  1.1453 -    int level = agaw_to_level(hd->agaw);
  1.1454 -    struct dma_pte *pgd = NULL;
  1.1455 +    {
  1.1456 +        struct hvm_iommu *hd  = domain_hvm_iommu(d);
  1.1457 +        int level = agaw_to_level(hd->agaw);
  1.1458 +        struct dma_pte *pgd = NULL;
  1.1459  
  1.1460 -    switch (level)
  1.1461 -    {
  1.1462 +        switch ( level )
  1.1463 +        {
  1.1464          case VTD_PAGE_TABLE_LEVEL_3:
  1.1465              if ( hd->pgd )
  1.1466                  free_xenheap_page((void *)hd->pgd);
  1.1467 @@ -1347,10 +1415,10 @@ void iommu_domain_teardown(struct domain
  1.1468              break;
  1.1469          default:
  1.1470              gdprintk(XENLOG_ERR VTDPREFIX,
  1.1471 -                "Unsupported p2m table sharing level!\n");
  1.1472 +                     "Unsupported p2m table sharing level!\n");
  1.1473              break;
  1.1474 +        }
  1.1475      }
  1.1476 -  }
  1.1477  #endif
  1.1478      return_devices_to_dom0(d);
  1.1479  }
  1.1480 @@ -1361,12 +1429,14 @@ static int domain_context_mapped(struct 
  1.1481      struct iommu *iommu;
  1.1482      int ret;
  1.1483  
  1.1484 -    for_each_drhd_unit(drhd) {
  1.1485 +    for_each_drhd_unit ( drhd )
  1.1486 +    {
  1.1487          iommu = drhd->iommu;
  1.1488          ret = device_context_mapped(iommu, pdev->bus, pdev->devfn);
  1.1489 -        if (ret)
  1.1490 +        if ( ret )
  1.1491              return ret;
  1.1492      }
  1.1493 +
  1.1494      return 0;
  1.1495  }
  1.1496  
  1.1497 @@ -1380,24 +1450,26 @@ int iommu_map_page(struct domain *d, pad
  1.1498      iommu = drhd->iommu;
  1.1499  
  1.1500      /* do nothing if dom0 and iommu supports pass thru */
  1.1501 -    if (ecap_pass_thru(iommu->ecap) && (d->domain_id == 0))
  1.1502 +    if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
  1.1503          return 0;
  1.1504  
  1.1505      pte = addr_to_dma_pte(d, gfn << PAGE_SHIFT_4K);
  1.1506 -    if (!pte)
  1.1507 +    if ( !pte )
  1.1508          return -ENOMEM;
  1.1509      dma_set_pte_addr(*pte, mfn << PAGE_SHIFT_4K);
  1.1510      dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
  1.1511      iommu_flush_cache_entry(iommu, pte);
  1.1512  
  1.1513 -    for_each_drhd_unit(drhd) {
  1.1514 +    for_each_drhd_unit ( drhd )
  1.1515 +    {
  1.1516          iommu = drhd->iommu;
  1.1517 -        if (cap_caching_mode(iommu->cap))
  1.1518 +        if ( cap_caching_mode(iommu->cap) )
  1.1519              iommu_flush_iotlb_psi(iommu, d->domain_id,
  1.1520                                    gfn << PAGE_SHIFT_4K, 1, 0);
  1.1521 -        else if (cap_rwbf(iommu->cap))
  1.1522 +        else if ( cap_rwbf(iommu->cap) )
  1.1523              iommu_flush_write_buffer(iommu);
  1.1524      }
  1.1525 +
  1.1526      return 0;
  1.1527  }
  1.1528  
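The post-update loop applies the standard VT-d flush policy: units that report caching mode (they may cache not-present entries) need a page-selective IOTLB invalidation even for a 0 -> valid transition, while units that only buffer writes (RWBF) just need the write buffer drained. A standalone sketch of that decision; the CM/RWBF bit positions below follow the VT-d capability register layout, and the flush functions are stubs:

    #include <stdint.h>
    #include <stdio.h>

    #define cap_caching_mode(c) (((c) >> 7) & 1)    /* CM,   cap bit 7 */
    #define cap_rwbf(c)         (((c) >> 4) & 1)    /* RWBF, cap bit 4 */

    static void flush_iotlb_psi(uint64_t addr)
    { printf("IOTLB PSI @ 0x%llx\n", (unsigned long long)addr); }

    static void flush_write_buffer(void)
    { printf("write-buffer flush\n"); }

    static void flush_after_map(uint64_t cap, uint64_t gfn)
    {
        if ( cap_caching_mode(cap) )    /* HW may cache non-present PTEs */
            flush_iotlb_psi(gfn << 12);
        else if ( cap_rwbf(cap) )       /* only internal write buffers   */
            flush_write_buffer();
    }

    int main(void)
    {
        flush_after_map(1ULL << 7, 0x1234);     /* caching-mode unit */
        flush_after_map(1ULL << 4, 0x1234);     /* RWBF-only unit    */
        return 0;
    }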
  1.1529 @@ -1411,7 +1483,7 @@ int iommu_unmap_page(struct domain *d, d
  1.1530      iommu = drhd->iommu;
  1.1531  
  1.1532      /* do nothing if dom0 and iommu supports pass thru */
  1.1533 -    if (ecap_pass_thru(iommu->ecap) && (d->domain_id == 0))
  1.1534 +    if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
  1.1535          return 0;
  1.1536  
  1.1537      /* get last level pte */
  1.1538 @@ -1422,7 +1494,7 @@ int iommu_unmap_page(struct domain *d, d
  1.1539  }
  1.1540  
  1.1541  int iommu_page_mapping(struct domain *domain, dma_addr_t iova,
  1.1542 -            void *hpa, size_t size, int prot)
  1.1543 +                       void *hpa, size_t size, int prot)
  1.1544  {
  1.1545      struct acpi_drhd_unit *drhd;
  1.1546      struct iommu *iommu;
  1.1547 @@ -1432,16 +1504,17 @@ int iommu_page_mapping(struct domain *do
  1.1548  
  1.1549      drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
  1.1550      iommu = drhd->iommu;
  1.1551 -    if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
  1.1552 +    if ( (prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0 )
  1.1553          return -EINVAL;
  1.1554      iova = (iova >> PAGE_SHIFT_4K) << PAGE_SHIFT_4K;
  1.1555      start_pfn = (unsigned long)(((unsigned long) hpa) >> PAGE_SHIFT_4K);
  1.1556      end_pfn = (unsigned long)
  1.1557 -              ((PAGE_ALIGN_4K(((unsigned long)hpa) + size)) >> PAGE_SHIFT_4K);
  1.1558 +        ((PAGE_ALIGN_4K(((unsigned long)hpa) + size)) >> PAGE_SHIFT_4K);
  1.1559      index = 0;
  1.1560 -    while (start_pfn < end_pfn) {
  1.1561 +    while ( start_pfn < end_pfn )
  1.1562 +    {
  1.1563          pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index);
  1.1564 -        if (!pte)
  1.1565 +        if ( !pte )
  1.1566              return -ENOMEM;
  1.1567          dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
  1.1568          dma_set_pte_prot(*pte, prot);
  1.1569 @@ -1450,13 +1523,15 @@ int iommu_page_mapping(struct domain *do
  1.1570          index++;
  1.1571      }
  1.1572  
  1.1573 -    for_each_drhd_unit(drhd) {
  1.1574 +    for_each_drhd_unit ( drhd )
  1.1575 +    {
  1.1576          iommu = drhd->iommu;
  1.1577 -        if (cap_caching_mode(iommu->cap))
  1.1578 +        if ( cap_caching_mode(iommu->cap) )
  1.1579              iommu_flush_iotlb_psi(iommu, domain->domain_id, iova, size, 0);
  1.1580 -        else if (cap_rwbf(iommu->cap))
  1.1581 +        else if ( cap_rwbf(iommu->cap) )
  1.1582              iommu_flush_write_buffer(iommu);
  1.1583      }
  1.1584 +
  1.1585      return 0;
  1.1586  }
  1.1587  
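The start_pfn/end_pfn computation above covers every 4K page touched by [hpa, hpa + size), assuming PAGE_ALIGN_4K() rounds up to the next 4K boundary as its usual definition does. A standalone check of that arithmetic:

    #include <stdio.h>

    #define PAGE_SHIFT_4K    12
    #define PAGE_SIZE_4K     (1UL << PAGE_SHIFT_4K)
    #define PAGE_MASK_4K     (~(PAGE_SIZE_4K - 1))
    #define PAGE_ALIGN_4K(a) (((a) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)

    static unsigned long pages_spanned(unsigned long hpa, unsigned long size)
    {
        unsigned long start_pfn = hpa >> PAGE_SHIFT_4K;
        unsigned long end_pfn   = PAGE_ALIGN_4K(hpa + size) >> PAGE_SHIFT_4K;

        return end_pfn - start_pfn;
    }

    int main(void)
    {
        /* 0x1800 bytes starting 0x200 into a page span two pages... */
        printf("%lu\n", pages_spanned(0x1200, 0x1800));     /* -> 2 */
        /* ...the same length starting 0xe00 into a page spans three */
        printf("%lu\n", pages_spanned(0x1e00, 0x1800));     /* -> 3 */
        return 0;
    }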
  1.1588 @@ -1477,19 +1552,20 @@ void iommu_flush(struct domain *d, dma_a
  1.1589      struct iommu *iommu = NULL;
  1.1590      struct dma_pte *pte = (struct dma_pte *) p2m_entry;
  1.1591  
  1.1592 -    for_each_drhd_unit(drhd) {
  1.1593 +    for_each_drhd_unit ( drhd )
  1.1594 +    {
  1.1595          iommu = drhd->iommu;
  1.1596 -        if (cap_caching_mode(iommu->cap))
  1.1597 +        if ( cap_caching_mode(iommu->cap) )
  1.1598              iommu_flush_iotlb_psi(iommu, d->domain_id,
  1.1599 -                gfn << PAGE_SHIFT_4K, 1, 0);
  1.1600 -        else if (cap_rwbf(iommu->cap))
  1.1601 +                                  gfn << PAGE_SHIFT_4K, 1, 0);
  1.1602 +        else if ( cap_rwbf(iommu->cap) )
  1.1603              iommu_flush_write_buffer(iommu);
  1.1604      }
  1.1605 +
  1.1606      iommu_flush_cache_entry(iommu, pte);
  1.1607  }
  1.1608  
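Worth noting in iommu_flush(): the local iommu starts out NULL and the flush after the loop uses whichever unit the loop visited last, so with an empty acpi_drhd_units list the final iommu_flush_cache_entry() would dereference NULL. In practice the callers only run with at least one DRHD present, but a guard costs little; a stub-typed sketch:

    #include <stddef.h>

    struct iommu { int ecap; };
    struct dma_pte { unsigned long long val; };

    /* clflush unless ecap_coherent() in the real code */
    static void iommu_flush_cache_entry(struct iommu *i, struct dma_pte *p)
    { (void)i; (void)p; }

    static void flush_tail(struct iommu *last, struct dma_pte *pte)
    {
        if ( last != NULL )     /* guard: the DRHD loop may never have run */
            iommu_flush_cache_entry(last, pte);
    }

    int main(void)
    {
        struct dma_pte pte = { 0 };

        flush_tail(NULL, &pte); /* harmless even with no DRHD units */
        return 0;
    }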
  1.1609 -int
  1.1610 -prepare_device(struct domain *domain, struct pci_dev dev)
  1.1611 +int prepare_device(struct domain *domain, struct pci_dev dev)
  1.1612  {
  1.1613      return 0;
  1.1614  }
  1.1615 @@ -1506,17 +1582,19 @@ static int iommu_prepare_rmrr_dev(
  1.1616      /* page table init */
  1.1617      size = rmrr->end_address - rmrr->base_address + 1;
  1.1618      ret = iommu_page_mapping(d, rmrr->base_address,
  1.1619 -        (void *)rmrr->base_address, size,
  1.1620 -        DMA_PTE_READ|DMA_PTE_WRITE);
  1.1621 -    if (ret)
  1.1622 +                             (void *)rmrr->base_address, size,
  1.1623 +                             DMA_PTE_READ|DMA_PTE_WRITE);
  1.1624 +    if ( ret )
  1.1625          return ret;
  1.1626  
  1.1627 -    if (domain_context_mapped(d, pdev) == 0) {
  1.1628 +    if ( domain_context_mapped(d, pdev) == 0 )
  1.1629 +    {
  1.1630          drhd = acpi_find_matched_drhd_unit(pdev);
  1.1631          ret = domain_context_mapping(d, drhd->iommu, pdev);
  1.1632 -        if (!ret)
  1.1633 +        if ( !ret )
  1.1634              return 0;
  1.1635      }
  1.1636 +
  1.1637      return ret;
  1.1638  }
  1.1639  
  1.1640 @@ -1525,15 +1603,16 @@ void __init setup_dom0_devices(void)
  1.1641      struct hvm_iommu *hd  = domain_hvm_iommu(dom0);
  1.1642      struct acpi_drhd_unit *drhd;
  1.1643      struct pci_dev *pdev;
  1.1644 -    int bus, dev, func;
  1.1645 +    int bus, dev, func, ret;
  1.1646      u32 l;
  1.1647 -    u8 hdr_type;
  1.1648 -    int ret;
  1.1649  
  1.1650  #ifdef DEBUG_VTD_CONTEXT_ENTRY
  1.1651 -    for (bus = 0; bus < 256; bus++) {
  1.1652 -        for (dev = 0; dev < 32; dev++) { 
  1.1653 -            for (func = 0; func < 8; func++) { 
  1.1654 +    for ( bus = 0; bus < 256; bus++ )
  1.1655 +    {
  1.1656 +        for ( dev = 0; dev < 32; dev++ )
  1.1657 +        { 
  1.1658 +            for ( func = 0; func < 8; func++ )
  1.1659 +            {
  1.1660                  struct context_entry *context;
  1.1661                  struct pci_dev device;
  1.1662  
  1.1663 @@ -1541,23 +1620,26 @@ void __init setup_dom0_devices(void)
  1.1664                  device.devfn = PCI_DEVFN(dev, func); 
  1.1665                  drhd = acpi_find_matched_drhd_unit(&device);
  1.1666                  context = device_to_context_entry(drhd->iommu,
  1.1667 -                    bus, PCI_DEVFN(dev, func));
  1.1668 -                if ((context->lo != 0) || (context->hi != 0))
  1.1669 +                                                  bus, PCI_DEVFN(dev, func));
  1.1670 +                if ( (context->lo != 0) || (context->hi != 0) )
  1.1671                      dprintk(XENLOG_INFO VTDPREFIX,
  1.1672 -                        "setup_dom0_devices-%x:%x:%x- context not 0\n",
  1.1673 -                        bus, dev, func);
  1.1674 +                            "setup_dom0_devices-%x:%x:%x- context not 0\n",
  1.1675 +                            bus, dev, func);
  1.1676              }
  1.1677          }    
  1.1678      }        
  1.1679  #endif
  1.1680  
  1.1681 -    for (bus = 0; bus < 256; bus++) {
  1.1682 -        for (dev = 0; dev < 32; dev++) { 
  1.1683 -            for (func = 0; func < 8; func++) { 
  1.1684 +    for ( bus = 0; bus < 256; bus++ )
  1.1685 +    {
  1.1686 +        for ( dev = 0; dev < 32; dev++ )
  1.1687 +        {
  1.1688 +            for ( func = 0; func < 8; func++ )
  1.1689 +            {
  1.1690                  l = read_pci_config(bus, dev, func, PCI_VENDOR_ID);
  1.1691                  /* some broken boards return 0 or ~0 if a slot is empty: */
  1.1692 -                if (l == 0xffffffff || l == 0x00000000 ||
  1.1693 -                    l == 0x0000ffff || l == 0xffff0000)
  1.1694 +                if ( (l == 0xffffffff) || (l == 0x00000000) ||
  1.1695 +                     (l == 0x0000ffff) || (l == 0xffff0000) )
  1.1696                      continue;
  1.1697                  pdev = xmalloc(struct pci_dev);
  1.1698                  pdev->bus = bus;
  1.1699 @@ -1566,21 +1648,17 @@ void __init setup_dom0_devices(void)
  1.1700  
  1.1701                  drhd = acpi_find_matched_drhd_unit(pdev);
  1.1702                  ret = domain_context_mapping(dom0, drhd->iommu, pdev);
  1.1703 -                if (ret != 0)
  1.1704 +                if ( ret != 0 )
  1.1705                      gdprintk(XENLOG_ERR VTDPREFIX,
  1.1706 -                        "domain_context_mapping failed\n");
  1.1707 -
  1.1708 -                hdr_type = read_pci_config(bus, dev, func, PCI_HEADER_TYPE);
  1.1709 -                // if ((hdr_type & 0x8) == 0)
  1.1710 -                //      break;
  1.1711 +                             "domain_context_mapping failed\n");
  1.1712              }
  1.1713          }
  1.1714      }
  1.1715 -    for_each_pdev(dom0, pdev) {
  1.1716 +
  1.1717 +    for_each_pdev ( dom0, pdev )
  1.1718          dprintk(XENLOG_INFO VTDPREFIX,
  1.1719 -            "setup_dom0_devices: bdf = %x:%x:%x\n",
  1.1720 -            pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  1.1721 -    }
  1.1722 +                "setup_dom0_devices: bdf = %x:%x:%x\n",
  1.1723 +                pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  1.1724  }
  1.1725  
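The probe loop in setup_dom0_devices() reads each candidate's vendor/device dword and skips the four values broken boards return for vacant slots (note the xmalloc() result is still used unchecked, as before the cleanup). A standalone sketch of the filter; probe_read() is a stand-in for read_pci_config():

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t probe_read(int bus, int dev, int func)
    {
        /* pretend only 00:00.0 is populated */
        return (bus | dev | func) == 0 ? 0x29c08086 : 0xffffffff;
    }

    static int slot_present(uint32_t l)
    {
        return !((l == 0xffffffff) || (l == 0x00000000) ||
                 (l == 0x0000ffff) || (l == 0xffff0000));
    }

    int main(void)
    {
        int bus = 0, dev, func;

        for ( dev = 0; dev < 32; dev++ )
            for ( func = 0; func < 8; func++ )
                if ( slot_present(probe_read(bus, dev, func)) )
                    printf("%02x:%02x.%x present\n", bus, dev, func);
        return 0;
    }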
  1.1726  void clear_fault_bit(struct iommu *iommu)
  1.1727 @@ -1588,47 +1666,32 @@ void clear_fault_bit(struct iommu *iommu
  1.1728      u64 val;
  1.1729  
  1.1730      val = dmar_readq(
  1.1731 -            iommu->reg,
  1.1732 -            cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+0x8);
  1.1733 +        iommu->reg,
  1.1734 +        cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+0x8);
  1.1735      dmar_writeq(
  1.1736 -            iommu->reg,
  1.1737 -            cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+8,
  1.1738 -            val);
  1.1739 +        iommu->reg,
  1.1740 +        cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+8,
  1.1741 +        val);
  1.1742      dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_PFO);
  1.1743  }
  1.1744  
  1.1745 -/*
  1.1746 - * Called from ACPI discovery code, once all DMAR's and RMRR's are done
  1.1747 - * scanning, we need to run through and initialize as much of it as necessary
  1.1748 - */
  1.1749 -int vtd_enable = 1;
  1.1750 -static void setup_vtd_enable(char *s)
  1.1751 -{
  1.1752 -    if ( !strcmp(s, "0") )
  1.1753 -        vtd_enable = 0;
  1.1754 -    else if ( !strcmp(s, "1") )
  1.1755 -        vtd_enable = 1;
  1.1756 -    else
  1.1757 -        dprintk(XENLOG_INFO VTDPREFIX,
  1.1758 -            "Unknown vtd_enable value specified: '%s'\n", s);
  1.1759 -    dprintk(XENLOG_INFO VTDPREFIX, "vtd_enable = %x\n", vtd_enable);
  1.1760 -}
  1.1761 -custom_param("vtd", setup_vtd_enable);
  1.1762 -
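This hunk removes the "vtd=" boot parameter and its vtd_enable flag outright: enable_vtd_translation() below no longer tests it before iommu_enable_translation(), and iommu_suspend()/iommu_resume() lose their early returns, leaving the single vtd_enabled check in iommu_setup() as the gate. For reference, the removed handler reduced to its effect, with hypothetical names:

    #include <stdio.h>
    #include <string.h>

    static int vtd_enabled = 1;     /* the one remaining gate */

    static void parse_vtd_param(const char *s)
    {
        if ( !strcmp(s, "0") )
            vtd_enabled = 0;
        else if ( !strcmp(s, "1") )
            vtd_enabled = 1;
        else
            fprintf(stderr, "Unknown vtd value: '%s'\n", s);
    }

    int main(void)
    {
        parse_vtd_param("0");
        printf("vtd_enabled = %d\n", vtd_enabled);
        return 0;
    }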
  1.1763  static int init_vtd_hw(void)
  1.1764  {
  1.1765      struct acpi_drhd_unit *drhd;
  1.1766      struct iommu *iommu;
  1.1767      int ret;
  1.1768  
  1.1769 -    for_each_drhd_unit(drhd) {
  1.1770 +    for_each_drhd_unit ( drhd )
  1.1771 +    {
  1.1772          iommu = drhd->iommu;
  1.1773          ret = iommu_set_root_entry(iommu);
  1.1774 -        if (ret) {
  1.1775 +        if ( ret )
  1.1776 +        {
  1.1777              gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: set root entry failed\n");
  1.1778              return -EIO;
  1.1779          }
  1.1780      }
  1.1781 +
  1.1782      return 0;
  1.1783  }
  1.1784  
  1.1785 @@ -1638,16 +1701,18 @@ static int enable_vtd_translation(void)
  1.1786      struct iommu *iommu;
  1.1787      int vector = 0;
  1.1788  
  1.1789 -    for_each_drhd_unit(drhd) {
  1.1790 +    for_each_drhd_unit ( drhd )
  1.1791 +    {
  1.1792          iommu = drhd->iommu;
  1.1793          vector = iommu_set_interrupt(iommu);
  1.1794          dma_msi_data_init(iommu, vector);
  1.1795          dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
  1.1796          iommu->vector = vector;
  1.1797          clear_fault_bit(iommu);
  1.1798 -        if (vtd_enable && iommu_enable_translation(iommu))
  1.1799 +        if ( iommu_enable_translation(iommu) )
  1.1800              return -EIO;
  1.1801      }
  1.1802 +
  1.1803      return 0;
  1.1804  }
  1.1805  
  1.1806 @@ -1657,13 +1722,15 @@ static void setup_dom0_rmrr(void)
  1.1807      struct pci_dev *pdev;
  1.1808      int ret;
  1.1809  
  1.1810 -    for_each_rmrr_device(rmrr, pdev)
  1.1811 +    for_each_rmrr_device ( rmrr, pdev )
  1.1812          ret = iommu_prepare_rmrr_dev(dom0, rmrr, pdev);
  1.1813 -        if (ret)
  1.1814 -            gdprintk(XENLOG_ERR VTDPREFIX,
  1.1815 -                "IOMMU: mapping reserved region failed\n");
  1.1816 -    end_for_each_rmrr_device(rmrr, pdev)
  1.1817 -}
  1.1818 +
  1.1819 +    if ( ret )
  1.1820 +        gdprintk(XENLOG_ERR VTDPREFIX,
  1.1821 +                 "IOMMU: mapping reserved region failed\n");
  1.1822 +
  1.1823 +    end_for_each_rmrr_device ( rmrr, pdev )
  1.1824 +        }
  1.1825  
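The layout of setup_dom0_rmrr() is misleading after the reindentation: for_each_rmrr_device()/end_for_each_rmrr_device() apparently open and close the loop braces themselves, so the dedented if ( ret ) still runs once per device inside the loop, and the stray indented brace on the last line looks like the function's closing one. Roughly what the expansion boils down to (prepare_dev() is a stand-in):

    #include <stdio.h>

    static int prepare_dev(int i) { return (i == 2) ? -1 : 0; }

    int main(void)
    {
        int i, ret, ndev = 4;

        for ( i = 0; i < ndev; i++ )    /* for_each_rmrr_device */
        {
            ret = prepare_dev(i);

            if ( ret )                  /* per device, despite the dedent */
                printf("IOMMU: mapping reserved region failed\n");
        }                               /* end_for_each_rmrr_device */

        return 0;
    }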
  1.1826  int iommu_setup(void)
  1.1827  {
  1.1828 @@ -1672,7 +1739,7 @@ int iommu_setup(void)
  1.1829      struct iommu *iommu;
  1.1830      unsigned long i;
  1.1831  
  1.1832 -    if (!vtd_enabled)
  1.1833 +    if ( !vtd_enabled )
  1.1834          return 0;
  1.1835  
  1.1836      INIT_LIST_HEAD(&hd->pdev_list);
  1.1837 @@ -1690,21 +1757,22 @@ int iommu_setup(void)
  1.1838      iommu = drhd->iommu;
  1.1839  
  1.1840      /* setup 1:1 page table for dom0 */
  1.1841 -    for (i = 0; i < max_page; i++)
  1.1842 +    for ( i = 0; i < max_page; i++ )
  1.1843          iommu_map_page(dom0, i, i);
  1.1844  
  1.1845 -    if (init_vtd_hw())
  1.1846 +    if ( init_vtd_hw() )
  1.1847          goto error;
  1.1848      setup_dom0_devices();
  1.1849      setup_dom0_rmrr();
  1.1850 -    if (enable_vtd_translation())
  1.1851 +    if ( enable_vtd_translation() )
  1.1852          goto error;
  1.1853  
  1.1854      return 0;
  1.1855  
  1.1856 -error:
  1.1857 + error:
  1.1858      printk("iommu_setup() failed\n");
  1.1859 -    for_each_drhd_unit(drhd) {
  1.1860 +    for_each_drhd_unit ( drhd )
  1.1861 +    {
  1.1862          iommu = drhd->iommu;
  1.1863          free_iommu(iommu);
  1.1864      }
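iommu_setup() builds dom0's 1:1 table (gfn == mfn over all of max_page), initialises the hardware, and unwinds through a single error label that frees every unit; the label also moves to Xen's one-space-indented " error:" style. The shape of that path, with stand-in names throughout:

    #include <stdio.h>

    static int map_page(unsigned long gfn, unsigned long mfn)
    { return (gfn == mfn) ? 0 : -1; }       /* stand-in for iommu_map_page() */

    static int hw_init(void) { return 0; }  /* stand-in for init_vtd_hw()    */

    static int setup_sketch(unsigned long max_page)
    {
        unsigned long i;

        for ( i = 0; i < max_page; i++ )    /* dom0 1:1 table: gfn == mfn */
            if ( map_page(i, i) )
                goto error;

        if ( hw_init() )
            goto error;

        return 0;

     error:                                 /* free per-unit state here */
        printf("iommu_setup() failed\n");
        return -1;
    }

    int main(void) { return setup_sketch(16); }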
  1.1865 @@ -1718,24 +1786,24 @@ int assign_device(struct domain *d, u8 b
  1.1866      struct pci_dev *pdev;
  1.1867      int ret = 0;
  1.1868  
  1.1869 -    if (list_empty(&acpi_drhd_units))
  1.1870 +    if ( list_empty(&acpi_drhd_units) )
  1.1871          return ret;
  1.1872  
  1.1873      dprintk(XENLOG_INFO VTDPREFIX,
  1.1874 -        "assign_device: bus = %x dev = %x func = %x\n",
  1.1875 -        bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  1.1876 +            "assign_device: bus = %x dev = %x func = %x\n",
  1.1877 +            bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  1.1878  
  1.1879      reassign_device_ownership(dom0, d, bus, devfn);
  1.1880  
  1.1881      /* setup rmrr identify mapping just once per domain */
  1.1882 -    if (list_empty(&hd->pdev_list))
  1.1883 +    if ( list_empty(&hd->pdev_list) )
  1.1884          for_each_rmrr_device(rmrr, pdev)
  1.1885              ret = iommu_prepare_rmrr_dev(d, rmrr, pdev);
  1.1886 -            if (ret)
  1.1887 -                gdprintk(XENLOG_ERR VTDPREFIX,
  1.1888 -                    "IOMMU: mapping reserved region failed\n");
  1.1889 -        end_for_each_rmrr_device(rmrr, pdev)
  1.1890 -    return ret;
  1.1891 +    if ( ret )
  1.1892 +        gdprintk(XENLOG_ERR VTDPREFIX,
  1.1893 +                 "IOMMU: mapping reserved region failed\n");
  1.1894 +    end_for_each_rmrr_device(rmrr, pdev)
  1.1895 +        return ret;
  1.1896  }
  1.1897  
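assign_device() ends with the same macro layout as setup_dom0_rmrr() above: the dedented if ( ret ) runs per RMRR device inside the for_each_rmrr_device() block (guarded as a whole by the list_empty() test), and the indented return ret; after end_for_each_rmrr_device() is simply the function's last statement. The expansion sketched after setup_dom0_rmrr() applies here unchanged; ret is initialised to 0, so the empty-list case returns success.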
  1.1898  void iommu_set_pgd(struct domain *d)
  1.1899 @@ -1743,9 +1811,10 @@ void iommu_set_pgd(struct domain *d)
  1.1900      struct hvm_iommu *hd  = domain_hvm_iommu(d);
  1.1901      unsigned long p2m_table;
  1.1902  
  1.1903 -    if (hd->pgd) {
  1.1904 +    if ( hd->pgd )
  1.1905 +    {
  1.1906          gdprintk(XENLOG_INFO VTDPREFIX,
  1.1907 -            "iommu_set_pgd_1: hd->pgd = %p\n", hd->pgd);
  1.1908 +                 "iommu_set_pgd_1: hd->pgd = %p\n", hd->pgd);
  1.1909          hd->pgd = NULL;
  1.1910      }
  1.1911      p2m_table = mfn_x(pagetable_get_mfn(d->arch.phys_table));
  1.1912 @@ -1762,46 +1831,47 @@ void iommu_set_pgd(struct domain *d)
  1.1913          int i;
  1.1914  
  1.1915          spin_lock_irqsave(&hd->mapping_lock, flags);
  1.1916 -        if (!hd->pgd) {
  1.1917 +        if ( !hd->pgd )
  1.1918 +        {
  1.1919              pgd = (struct dma_pte *)alloc_xenheap_page();
  1.1920              memset((u8*)pgd, 0, PAGE_SIZE);
  1.1921 -            if (!hd->pgd)
  1.1922 +            if ( !hd->pgd )
  1.1923                  hd->pgd = pgd;
  1.1924              else /* somebody is fast */
  1.1925                  free_xenheap_page((void *) pgd);
  1.1926          }
  1.1927  
  1.1928          l3e = map_domain_page(p2m_table);
  1.1929 -        switch(level)
  1.1930 +        switch ( level )
  1.1931          {
  1.1932 -            case VTD_PAGE_TABLE_LEVEL_3:        /* Weybridge */
  1.1933 -                /* We only support 8 entries for the PAE L3 p2m table */
  1.1934 -                for ( i = 0; i < 8 ; i++ )
  1.1935 -                {
  1.1936 -                    /* Don't create new L2 entry, use ones from p2m table */
  1.1937 -                    pgd[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW;
  1.1938 -                }
  1.1939 -                break;
  1.1940 +        case VTD_PAGE_TABLE_LEVEL_3:        /* Weybridge */
  1.1941 +            /* We only support 8 entries for the PAE L3 p2m table */
  1.1942 +            for ( i = 0; i < 8 ; i++ )
  1.1943 +            {
  1.1944 +                /* Don't create new L2 entry, use ones from p2m table */
  1.1945 +                pgd[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW;
  1.1946 +            }
  1.1947 +            break;
  1.1948  
  1.1949 -            case VTD_PAGE_TABLE_LEVEL_4:        /* Stoakley */
  1.1950 -                /* We allocate one more page for the top vtd page table. */
  1.1951 -                pmd = (struct dma_pte *)alloc_xenheap_page();
  1.1952 -                memset((u8*)pmd, 0, PAGE_SIZE);
  1.1953 -                pte = &pgd[0];
  1.1954 -                dma_set_pte_addr(*pte, virt_to_maddr(pmd));
  1.1955 -                dma_set_pte_readable(*pte);
  1.1956 -                dma_set_pte_writable(*pte);
  1.1957 +        case VTD_PAGE_TABLE_LEVEL_4:        /* Stoakley */
  1.1958 +            /* We allocate one more page for the top vtd page table. */
  1.1959 +            pmd = (struct dma_pte *)alloc_xenheap_page();
  1.1960 +            memset((u8*)pmd, 0, PAGE_SIZE);
  1.1961 +            pte = &pgd[0];
  1.1962 +            dma_set_pte_addr(*pte, virt_to_maddr(pmd));
  1.1963 +            dma_set_pte_readable(*pte);
  1.1964 +            dma_set_pte_writable(*pte);
  1.1965  
  1.1966 -                for ( i = 0; i < 8; i++ )
  1.1967 -                {
  1.1968 -                    /* Don't create new L2 entry, use ones from p2m table */
  1.1969 -                    pmd[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW;
  1.1970 -                }
  1.1971 -                break;
  1.1972 -            default:
  1.1973 -                gdprintk(XENLOG_ERR VTDPREFIX,
  1.1974 -                    "iommu_set_pgd:Unsupported p2m table sharing level!\n");
  1.1975 -                break;
  1.1976 +            for ( i = 0; i < 8; i++ )
  1.1977 +            {
  1.1978 +                /* Don't create new L2 entry, use ones from p2m table */
  1.1979 +                pmd[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW;
  1.1980 +            }
  1.1981 +            break;
  1.1982 +        default:
  1.1983 +            gdprintk(XENLOG_ERR VTDPREFIX,
  1.1984 +                     "iommu_set_pgd:Unsupported p2m table sharing level!\n");
  1.1985 +            break;
  1.1986          }
  1.1987          unmap_domain_page(l3e);
  1.1988          spin_unlock_irqrestore(&hd->mapping_lock, flags);
  1.1989 @@ -1813,37 +1883,37 @@ void iommu_set_pgd(struct domain *d)
  1.1990          l3_pgentry_t *l3e;
  1.1991          mfn_t pgd_mfn;
  1.1992  
  1.1993 -        switch (level)
  1.1994 +        switch ( level )
  1.1995          {
  1.1996 -            case VTD_PAGE_TABLE_LEVEL_3:
  1.1997 -                l3e = map_domain_page(p2m_table);
  1.1998 -                if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
  1.1999 -                {
  1.2000 -                    gdprintk(XENLOG_ERR VTDPREFIX,
  1.2001 -                        "iommu_set_pgd: second level wasn't there\n");
  1.2002 -                    unmap_domain_page(l3e);
  1.2003 -                    return;
  1.2004 -                }
  1.2005 -                pgd_mfn = _mfn(l3e_get_pfn(*l3e));
  1.2006 +        case VTD_PAGE_TABLE_LEVEL_3:
  1.2007 +            l3e = map_domain_page(p2m_table);
  1.2008 +            if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
  1.2009 +            {
  1.2010 +                gdprintk(XENLOG_ERR VTDPREFIX,
  1.2011 +                         "iommu_set_pgd: second level wasn't there\n");
  1.2012                  unmap_domain_page(l3e);
  1.2013 -                hd->pgd = maddr_to_virt(pagetable_get_paddr(
  1.2014 -                      pagetable_from_mfn(pgd_mfn)));
  1.2015 -                break;
  1.2016 +                return;
  1.2017 +            }
  1.2018 +            pgd_mfn = _mfn(l3e_get_pfn(*l3e));
  1.2019 +            unmap_domain_page(l3e);
  1.2020 +            hd->pgd = maddr_to_virt(pagetable_get_paddr(
  1.2021 +                pagetable_from_mfn(pgd_mfn)));
  1.2022 +            break;
  1.2023  
  1.2024 -            case VTD_PAGE_TABLE_LEVEL_4:
  1.2025 -                pgd_mfn = _mfn(p2m_table);
  1.2026 -                hd->pgd = maddr_to_virt(pagetable_get_paddr(
  1.2027 -                      pagetable_from_mfn(pgd_mfn)));
  1.2028 -                break;
  1.2029 -            default:
  1.2030 -                gdprintk(XENLOG_ERR VTDPREFIX,
  1.2031 -                    "iommu_set_pgd:Unsupported p2m table sharing level!\n");
  1.2032 -                break;
  1.2033 +        case VTD_PAGE_TABLE_LEVEL_4:
  1.2034 +            pgd_mfn = _mfn(p2m_table);
  1.2035 +            hd->pgd = maddr_to_virt(pagetable_get_paddr(
  1.2036 +                pagetable_from_mfn(pgd_mfn)));
  1.2037 +            break;
  1.2038 +        default:
  1.2039 +            gdprintk(XENLOG_ERR VTDPREFIX,
  1.2040 +                     "iommu_set_pgd:Unsupported p2m table sharing level!\n");
  1.2041 +            break;
  1.2042          }
  1.2043      }
  1.2044  #endif
  1.2045      gdprintk(XENLOG_INFO VTDPREFIX,
  1.2046 -        "iommu_set_pgd: hd->pgd = %p\n", hd->pgd);
  1.2047 +             "iommu_set_pgd: hd->pgd = %p\n", hd->pgd);
  1.2048  }
  1.2049  
  1.2050  
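The 3-level (PAE) branch of iommu_set_pgd() shares the p2m with the IOMMU by copying the eight top-level p2m entries into the VT-d pgd with present/write forced on, so no separate L2 tables are allocated; the 4-level branch interposes one extra page instead. A sketch of the PAE copy, with the usual x86 _PAGE_PRESENT/_PAGE_RW values and stand-in types:

    #include <stdint.h>

    #define _PAGE_PRESENT 0x1ULL
    #define _PAGE_RW      0x2ULL

    struct dma_pte { uint64_t val; };

    static void share_pae_l3(struct dma_pte pgd[8], const uint64_t l3e[8])
    {
        int i;

        for ( i = 0; i < 8; i++ )   /* the PAE p2m has exactly 8 L3 entries */
            pgd[i].val = l3e[i] | _PAGE_PRESENT | _PAGE_RW;
    }

    int main(void)
    {
        uint64_t l3e[8] = { 0x1000, 0x2000 };
        struct dma_pte pgd[8];

        share_pae_l3(pgd, l3e);
        return !(pgd[0].val == 0x1003);     /* exits 0 on the expected bits */
    }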
  1.2051 @@ -1854,11 +1924,10 @@ int iommu_suspend(void)
  1.2052      struct iommu *iommu;
  1.2053      int i = 0;
  1.2054  
  1.2055 -    if (!vtd_enable)
  1.2056 -        return 0;
  1.2057 +    flush_all();
  1.2058  
  1.2059 -    flush_all();
  1.2060 -    for_each_drhd_unit(drhd) {
  1.2061 +    for_each_drhd_unit ( drhd )
  1.2062 +    {
  1.2063          iommu = drhd->iommu;
  1.2064          iommu_state[DMAR_RTADDR_REG * i] =
  1.2065              (u64) dmar_readq(iommu->reg, DMAR_RTADDR_REG);
  1.2066 @@ -1890,36 +1959,44 @@ int iommu_resume(void)
  1.2067      struct iommu *iommu;
  1.2068      int i = 0;
  1.2069  
  1.2070 -    if (!vtd_enable)
  1.2071 -        return 0;
  1.2072 -
  1.2073      flush_all();
  1.2074  
  1.2075      init_vtd_hw();
  1.2076 -    for_each_drhd_unit(drhd) {
  1.2077 +    for_each_drhd_unit ( drhd )
  1.2078 +    {
  1.2079          iommu = drhd->iommu;
  1.2080          dmar_writeq( iommu->reg, DMAR_RTADDR_REG,
  1.2081 -            (u64) iommu_state[DMAR_RTADDR_REG * i]);
  1.2082 +                     (u64) iommu_state[DMAR_RTADDR_REG * i]);
  1.2083          dmar_writel(iommu->reg, DMAR_FECTL_REG,
  1.2084 -            (u32) iommu_state[DMAR_FECTL_REG * i]);
  1.2085 +                    (u32) iommu_state[DMAR_FECTL_REG * i]);
  1.2086          dmar_writel(iommu->reg, DMAR_FEDATA_REG,
  1.2087 -            (u32) iommu_state[DMAR_FEDATA_REG * i]);
  1.2088 +                    (u32) iommu_state[DMAR_FEDATA_REG * i]);
  1.2089          dmar_writel(iommu->reg, DMAR_FEADDR_REG,
  1.2090 -            (u32) iommu_state[DMAR_FEADDR_REG * i]);
  1.2091 +                    (u32) iommu_state[DMAR_FEADDR_REG * i]);
  1.2092          dmar_writel(iommu->reg, DMAR_FEUADDR_REG,
  1.2093 -            (u32) iommu_state[DMAR_FEUADDR_REG * i]);
  1.2094 +                    (u32) iommu_state[DMAR_FEUADDR_REG * i]);
  1.2095          dmar_writel(iommu->reg, DMAR_PLMBASE_REG,
  1.2096 -            (u32) iommu_state[DMAR_PLMBASE_REG * i]);
  1.2097 +                    (u32) iommu_state[DMAR_PLMBASE_REG * i]);
  1.2098          dmar_writel(iommu->reg, DMAR_PLMLIMIT_REG,
  1.2099 -            (u32) iommu_state[DMAR_PLMLIMIT_REG * i]);
  1.2100 +                    (u32) iommu_state[DMAR_PLMLIMIT_REG * i]);
  1.2101          dmar_writeq(iommu->reg, DMAR_PHMBASE_REG,
  1.2102 -            (u64) iommu_state[DMAR_PHMBASE_REG * i]);
  1.2103 +                    (u64) iommu_state[DMAR_PHMBASE_REG * i]);
  1.2104          dmar_writeq(iommu->reg, DMAR_PHMLIMIT_REG,
  1.2105 -            (u64) iommu_state[DMAR_PHMLIMIT_REG * i]);
  1.2106 +                    (u64) iommu_state[DMAR_PHMLIMIT_REG * i]);
  1.2107  
  1.2108 -        if (iommu_enable_translation(iommu))
  1.2109 +        if ( iommu_enable_translation(iommu) )
  1.2110              return -EIO;
  1.2111          i++;
  1.2112      }
  1.2113      return 0;
  1.2114  }
  1.2115 +
  1.2116 +/*
  1.2117 + * Local variables:
  1.2118 + * mode: C
  1.2119 + * c-set-style: "BSD"
  1.2120 + * c-basic-offset: 4
  1.2121 + * tab-width: 4
  1.2122 + * indent-tabs-mode: nil
  1.2123 + * End:
  1.2124 + */
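One pre-existing oddity the suspend/resume hunks keep: the save slots are computed as iommu_state[DMAR_xxx_REG * i], so for the first unit (i == 0) every register lands in slot 0, and the stride makes cross-unit collisions possible for later units. A sketch of an unambiguous layout, with hypothetical names:

    #include <stdint.h>

    #define MAX_IOMMUS 8

    enum saved_reg { SR_RTADDR, SR_FECTL, SR_FEDATA, SR_FEADDR, SR_NR };

    static uint64_t iommu_state[MAX_IOMMUS][SR_NR];     /* one row per unit */

    static void save_unit(int i, uint64_t rtaddr, uint64_t fectl)
    {
        iommu_state[i][SR_RTADDR] = rtaddr;
        iommu_state[i][SR_FECTL]  = fectl;  /* no aliasing between registers */
    }

    int main(void)
    {
        save_unit(0, 0x1000, 0x2);
        save_unit(1, 0x3000, 0x4);
        return !(iommu_state[0][SR_RTADDR] == 0x1000);
    }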