ia64/xen-unstable

changeset 17273:c927f758fcba

AMD IOMMU: Fix up coding style issues in AMD IOMMU files
Signed-off-by: Wei Wang <wei.wang2@amd.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Mar 19 16:16:24 2008 +0000 (2008-03-19)
parents 7d8892a90c90
children 153b541c204c
files xen/drivers/passthrough/amd/iommu_acpi.c
      xen/drivers/passthrough/amd/iommu_detect.c
      xen/drivers/passthrough/amd/iommu_init.c
      xen/drivers/passthrough/amd/iommu_map.c
      xen/drivers/passthrough/amd/pci_amd_iommu.c
      xen/drivers/passthrough/iommu.c
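
The changes are almost entirely mechanical, applying the Xen hypervisor coding style throughout the AMD IOMMU sources. A minimal sketch of the conventions the patch enforces, distilled from the hunks below (illustrative only: struct dev_entry, lookup() and MAX_ENTRIES are hypothetical names, not code taken from the patch):

    /* Illustrative sketch of the Xen coding style applied by this
     * changeset; all names here are hypothetical. */
    #include <stddef.h>

    #define MAX_ENTRIES 16

    struct dev_entry {
        int bdf;
        int cap_offset;
    };

    static struct dev_entry entries[MAX_ENTRIES];

    /* Overlong parameter lists break after the opening paren and continue
     * indented four spaces, instead of hanging under the first argument. */
    static struct dev_entry *lookup(
        int bdf, int cap_offset)
    {
        int i;

        /* Single spaces inside control-keyword parentheses; compound
         * comparisons individually parenthesized; post-increment rather
         * than pre-increment in loop expressions; four-space indent. */
        for ( i = 0; i < MAX_ENTRIES; i++ )
            if ( (entries[i].bdf == bdf) &&
                 (entries[i].cap_offset == cap_offset) )
                return &entries[i];

        /* Pointers are compared against NULL, never 0. */
        return NULL;
    }

Beyond whitespace, the only substantive tweaks are of this kind: pointer checks rewritten from == 0 to == NULL (iommu_map.c), and the block-length check in parse_ivrs_table (iommu_acpi.c) inverted into an early return so the success path stays unindented.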
     1.1 --- a/xen/drivers/passthrough/amd/iommu_acpi.c	Wed Mar 19 14:13:17 2008 +0000
     1.2 +++ b/xen/drivers/passthrough/amd/iommu_acpi.c	Wed Mar 19 16:16:24 2008 +0000
     1.3 @@ -29,12 +29,12 @@ extern unsigned short ivrs_bdf_entries;
     1.4  extern struct ivrs_mappings *ivrs_mappings;
     1.5  
     1.6  static struct amd_iommu * __init find_iommu_from_bdf_cap(
     1.7 -           u16 bdf, u8 cap_offset)
     1.8 +    u16 bdf, u8 cap_offset)
     1.9  {
    1.10      struct amd_iommu *iommu;
    1.11  
    1.12 -    for_each_amd_iommu( iommu )
    1.13 -        if ( iommu->bdf == bdf && iommu->cap_offset == cap_offset )
    1.14 +    for_each_amd_iommu ( iommu )
    1.15 +        if ( (iommu->bdf == bdf) && (iommu->cap_offset == cap_offset) )
    1.16              return iommu;
    1.17  
    1.18      return NULL;
    1.19 @@ -57,15 +57,17 @@ static void __init reserve_iommu_exclusi
    1.20      iommu->exclusion_limit = limit;
    1.21  }
    1.22  
    1.23 -static void __init reserve_iommu_exclusion_range_all(struct amd_iommu *iommu,
    1.24 -           unsigned long base, unsigned long limit)
    1.25 +static void __init reserve_iommu_exclusion_range_all(
    1.26 +    struct amd_iommu *iommu,
    1.27 +    unsigned long base, unsigned long limit)
    1.28  {
    1.29      reserve_iommu_exclusion_range(iommu, base, limit);
    1.30      iommu->exclusion_allow_all = IOMMU_CONTROL_ENABLED;
    1.31  }
    1.32  
    1.33 -static void __init reserve_unity_map_for_device(u16 bdf, unsigned long base,
    1.34 -           unsigned long length, u8 iw, u8 ir)
    1.35 +static void __init reserve_unity_map_for_device(
    1.36 +    u16 bdf, unsigned long base,
    1.37 +    unsigned long length, u8 iw, u8 ir)
    1.38  {
    1.39      unsigned long old_top, new_top;
    1.40  
    1.41 @@ -80,7 +82,7 @@ static void __init reserve_unity_map_for
    1.42          if ( ivrs_mappings[bdf].addr_range_start < base )
    1.43              base = ivrs_mappings[bdf].addr_range_start;
    1.44          length = new_top - base;
    1.45 -   }
    1.46 +    }
    1.47  
    1.48      /* extend r/w permissioms and keep aggregate */
    1.49      if ( iw )
    1.50 @@ -93,7 +95,7 @@ static void __init reserve_unity_map_for
    1.51  }
    1.52  
    1.53  static int __init register_exclusion_range_for_all_devices(
    1.54 -           unsigned long base, unsigned long limit, u8 iw, u8 ir)
    1.55 +    unsigned long base, unsigned long limit, u8 iw, u8 ir)
    1.56  {
    1.57      unsigned long range_top, iommu_top, length;
    1.58      struct amd_iommu *iommu;
    1.59 @@ -105,7 +107,7 @@ static int __init register_exclusion_ran
    1.60      iommu_top = max_page * PAGE_SIZE;
    1.61      if ( base < iommu_top )
    1.62      {
    1.63 -        if (range_top > iommu_top)
    1.64 +        if ( range_top > iommu_top )
    1.65              range_top = iommu_top;
    1.66          length = range_top - base;
    1.67          /* reserve r/w unity-mapped page entries for devices */
    1.68 @@ -116,7 +118,7 @@ static int __init register_exclusion_ran
    1.69          base = iommu_top;
    1.70      }
    1.71      /* register IOMMU exclusion range settings */
    1.72 -    if (limit >= iommu_top)
    1.73 +    if ( limit >= iommu_top )
    1.74      {
    1.75          for_each_amd_iommu( iommu )
    1.76              reserve_iommu_exclusion_range_all(iommu, base, limit);
    1.77 @@ -125,8 +127,8 @@ static int __init register_exclusion_ran
    1.78      return 0;
    1.79  }
    1.80  
    1.81 -static int __init register_exclusion_range_for_device(u16 bdf,
    1.82 -           unsigned long base, unsigned long limit, u8 iw, u8 ir)
    1.83 +static int __init register_exclusion_range_for_device(
    1.84 +    u16 bdf, unsigned long base, unsigned long limit, u8 iw, u8 ir)
    1.85  {
    1.86      unsigned long range_top, iommu_top, length;
    1.87      struct amd_iommu *iommu;
    1.88 @@ -147,7 +149,7 @@ static int __init register_exclusion_ran
    1.89      iommu_top = max_page * PAGE_SIZE;
    1.90      if ( base < iommu_top )
    1.91      {
    1.92 -        if (range_top > iommu_top)
    1.93 +        if ( range_top > iommu_top )
    1.94              range_top = iommu_top;
    1.95          length = range_top - base;
    1.96          /* reserve unity-mapped page entries for device */
    1.97 @@ -159,8 +161,8 @@ static int __init register_exclusion_ran
    1.98          base = iommu_top;
    1.99      }
   1.100  
   1.101 -   /* register IOMMU exclusion range settings for device */
   1.102 -   if ( limit >= iommu_top  )
   1.103 +    /* register IOMMU exclusion range settings for device */
   1.104 +    if ( limit >= iommu_top  )
   1.105      {
   1.106          reserve_iommu_exclusion_range(iommu, base, limit);
   1.107          ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
   1.108 @@ -171,8 +173,8 @@ static int __init register_exclusion_ran
   1.109  }
   1.110  
   1.111  static int __init register_exclusion_range_for_iommu_devices(
   1.112 -           struct amd_iommu *iommu,
   1.113 -           unsigned long base, unsigned long limit, u8 iw, u8 ir)
   1.114 +    struct amd_iommu *iommu,
   1.115 +    unsigned long base, unsigned long limit, u8 iw, u8 ir)
   1.116  {
   1.117      unsigned long range_top, iommu_top, length;
   1.118      u16 bus, devfn, bdf, req;
   1.119 @@ -183,7 +185,7 @@ static int __init register_exclusion_ran
   1.120      iommu_top = max_page * PAGE_SIZE;
   1.121      if ( base < iommu_top )
   1.122      {
   1.123 -        if (range_top > iommu_top)
   1.124 +        if ( range_top > iommu_top )
   1.125              range_top = iommu_top;
   1.126          length = range_top - base;
   1.127          /* reserve r/w unity-mapped page entries for devices */
   1.128 @@ -205,19 +207,19 @@ static int __init register_exclusion_ran
   1.129      }
   1.130  
   1.131      /* register IOMMU exclusion range settings */
   1.132 -    if (limit >= iommu_top)
   1.133 +    if ( limit >= iommu_top )
   1.134          reserve_iommu_exclusion_range_all(iommu, base, limit);
   1.135      return 0;
   1.136  }
   1.137  
   1.138  static int __init parse_ivmd_device_select(
   1.139 -           struct acpi_ivmd_block_header *ivmd_block,
   1.140 -           unsigned long base, unsigned long limit, u8 iw, u8 ir)
   1.141 +    struct acpi_ivmd_block_header *ivmd_block,
   1.142 +    unsigned long base, unsigned long limit, u8 iw, u8 ir)
   1.143  {
   1.144      u16 bdf;
   1.145  
   1.146      bdf = ivmd_block->header.dev_id;
   1.147 -    if (bdf >= ivrs_bdf_entries)
   1.148 +    if ( bdf >= ivrs_bdf_entries )
   1.149      {
   1.150          dprintk(XENLOG_ERR, "IVMD Error: Invalid Dev_Id 0x%x\n", bdf);
   1.151          return -ENODEV;
   1.152 @@ -227,44 +229,41 @@ static int __init parse_ivmd_device_sele
   1.153  }
   1.154  
   1.155  static int __init parse_ivmd_device_range(
   1.156 -           struct acpi_ivmd_block_header *ivmd_block,
   1.157 -           unsigned long base, unsigned long limit, u8 iw, u8 ir)
   1.158 +    struct acpi_ivmd_block_header *ivmd_block,
   1.159 +    unsigned long base, unsigned long limit, u8 iw, u8 ir)
   1.160  {
   1.161      u16 first_bdf, last_bdf, bdf;
   1.162      int error;
   1.163  
   1.164      first_bdf = ivmd_block->header.dev_id;
   1.165 -    if (first_bdf >= ivrs_bdf_entries)
   1.166 -    {
   1.167 -       dprintk(XENLOG_ERR, "IVMD Error: "
   1.168 -                    "Invalid Range_First Dev_Id 0x%x\n", first_bdf);
   1.169 -       return -ENODEV;
   1.170 -    }
   1.171 -
   1.172 -    last_bdf = ivmd_block->last_dev_id;
   1.173 -    if (last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf)
   1.174 +    if ( first_bdf >= ivrs_bdf_entries )
   1.175      {
   1.176          dprintk(XENLOG_ERR, "IVMD Error: "
   1.177 -                    "Invalid Range_Last Dev_Id 0x%x\n", last_bdf);
   1.178 +                "Invalid Range_First Dev_Id 0x%x\n", first_bdf);
   1.179          return -ENODEV;
   1.180      }
   1.181  
   1.182 -      dprintk(XENLOG_ERR, " Dev_Id Range: 0x%x -> 0x%x\n",
   1.183 -                    first_bdf, last_bdf);
   1.184 -
   1.185 -    for ( bdf = first_bdf, error = 0;
   1.186 -       bdf <= last_bdf && !error; ++bdf )
   1.187 +    last_bdf = ivmd_block->last_dev_id;
   1.188 +    if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
   1.189      {
   1.190 -       error = register_exclusion_range_for_device(
   1.191 -                     bdf, base, limit, iw, ir);
   1.192 +        dprintk(XENLOG_ERR, "IVMD Error: "
   1.193 +                "Invalid Range_Last Dev_Id 0x%x\n", last_bdf);
   1.194 +        return -ENODEV;
   1.195      }
   1.196  
   1.197 -   return error;
   1.198 +    dprintk(XENLOG_ERR, " Dev_Id Range: 0x%x -> 0x%x\n",
   1.199 +            first_bdf, last_bdf);
   1.200 +
   1.201 +    for ( bdf = first_bdf, error = 0; (bdf <= last_bdf) && !error; bdf++ )
   1.202 +        error = register_exclusion_range_for_device(
   1.203 +            bdf, base, limit, iw, ir);
   1.204 +
   1.205 +    return error;
   1.206  }
   1.207  
   1.208  static int __init parse_ivmd_device_iommu(
   1.209 -           struct acpi_ivmd_block_header *ivmd_block,
   1.210 -           unsigned long base, unsigned long limit, u8 iw, u8 ir)
   1.211 +    struct acpi_ivmd_block_header *ivmd_block,
   1.212 +    unsigned long base, unsigned long limit, u8 iw, u8 ir)
   1.213  {
   1.214      struct amd_iommu *iommu;
   1.215  
   1.216 @@ -273,14 +272,14 @@ static int __init parse_ivmd_device_iomm
   1.217                                      ivmd_block->cap_offset);
   1.218      if ( !iommu )
   1.219      {
   1.220 -       dprintk(XENLOG_ERR,
   1.221 -           "IVMD Error: No IOMMU for Dev_Id 0x%x  Cap 0x%x\n",
   1.222 -            ivmd_block->header.dev_id, ivmd_block->cap_offset);
   1.223 -       return -ENODEV;
   1.224 +        dprintk(XENLOG_ERR,
   1.225 +                "IVMD Error: No IOMMU for Dev_Id 0x%x  Cap 0x%x\n",
   1.226 +                ivmd_block->header.dev_id, ivmd_block->cap_offset);
   1.227 +        return -ENODEV;
   1.228      }
   1.229  
   1.230      return register_exclusion_range_for_iommu_devices(
   1.231 -                 iommu, base, limit, iw, ir);
   1.232 +        iommu, base, limit, iw, ir);
   1.233  }
   1.234  
   1.235  static int __init parse_ivmd_block(struct acpi_ivmd_block_header *ivmd_block)
   1.236 @@ -288,11 +287,11 @@ static int __init parse_ivmd_block(struc
   1.237      unsigned long start_addr, mem_length, base, limit;
   1.238      u8 iw, ir;
   1.239  
   1.240 -    if (ivmd_block->header.length <
   1.241 -       sizeof(struct acpi_ivmd_block_header))
   1.242 +    if ( ivmd_block->header.length <
   1.243 +         sizeof(struct acpi_ivmd_block_header) )
   1.244      {
   1.245 -       dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Length!\n");
   1.246 -       return -ENODEV;
   1.247 +        dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Length!\n");
   1.248 +        return -ENODEV;
   1.249      }
   1.250  
   1.251      start_addr = (unsigned long)ivmd_block->start_addr;
   1.252 @@ -301,7 +300,7 @@ static int __init parse_ivmd_block(struc
   1.253      limit = (start_addr + mem_length - 1) & PAGE_MASK;
   1.254  
   1.255      dprintk(XENLOG_INFO, "IVMD Block: Type 0x%x\n",
   1.256 -                  ivmd_block->header.type);
   1.257 +            ivmd_block->header.type);
   1.258      dprintk(XENLOG_INFO, " Start_Addr_Phys 0x%lx\n", start_addr);
   1.259      dprintk(XENLOG_INFO, " Mem_Length 0x%lx\n", mem_length);
   1.260  
   1.261 @@ -322,27 +321,27 @@ static int __init parse_ivmd_block(struc
   1.262      }
   1.263      else
   1.264      {
   1.265 -       dprintk(KERN_ERR, "IVMD Error: Invalid Flag Field!\n");
   1.266 -       return -ENODEV;
   1.267 +        dprintk(KERN_ERR, "IVMD Error: Invalid Flag Field!\n");
   1.268 +        return -ENODEV;
   1.269      }
   1.270  
   1.271      switch( ivmd_block->header.type )
   1.272      {
   1.273      case AMD_IOMMU_ACPI_IVMD_ALL_TYPE:
   1.274          return register_exclusion_range_for_all_devices(
   1.275 -           base, limit, iw, ir);
   1.276 +            base, limit, iw, ir);
   1.277  
   1.278      case AMD_IOMMU_ACPI_IVMD_ONE_TYPE:
   1.279          return parse_ivmd_device_select(ivmd_block,
   1.280 -           base, limit, iw, ir);
   1.281 +                                        base, limit, iw, ir);
   1.282  
   1.283      case AMD_IOMMU_ACPI_IVMD_RANGE_TYPE:
   1.284          return parse_ivmd_device_range(ivmd_block,
   1.285 -            base, limit, iw, ir);
   1.286 +                                       base, limit, iw, ir);
   1.287  
   1.288      case AMD_IOMMU_ACPI_IVMD_IOMMU_TYPE:
   1.289          return parse_ivmd_device_iommu(ivmd_block,
   1.290 -           base, limit, iw, ir);
   1.291 +                                       base, limit, iw, ir);
   1.292  
   1.293      default:
   1.294          dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Type!\n");
   1.295 @@ -350,8 +349,8 @@ static int __init parse_ivmd_block(struc
   1.296      }
   1.297  }
   1.298  
   1.299 -static u16 __init parse_ivhd_device_padding(u16 pad_length,
   1.300 -           u16 header_length, u16 block_length)
   1.301 +static u16 __init parse_ivhd_device_padding(
   1.302 +    u16 pad_length, u16 header_length, u16 block_length)
   1.303  {
   1.304      if ( header_length < (block_length + pad_length) )
   1.305      {
   1.306 @@ -363,7 +362,7 @@ static u16 __init parse_ivhd_device_padd
   1.307  }
   1.308  
   1.309  static u16 __init parse_ivhd_device_select(
   1.310 -           union acpi_ivhd_device *ivhd_device)
   1.311 +    union acpi_ivhd_device *ivhd_device)
   1.312  {
   1.313      u16 bdf;
   1.314  
   1.315 @@ -385,8 +384,8 @@ static u16 __init parse_ivhd_device_sele
   1.316  }
   1.317  
   1.318  static u16 __init parse_ivhd_device_range(
   1.319 -           union acpi_ivhd_device *ivhd_device,
   1.320 -           u16 header_length, u16 block_length)
   1.321 +    union acpi_ivhd_device *ivhd_device,
   1.322 +    u16 header_length, u16 block_length)
   1.323  {
   1.324      u16 dev_length, first_bdf, last_bdf, bdf;
   1.325      u8 sys_mgt;
   1.326 @@ -399,7 +398,8 @@ static u16 __init parse_ivhd_device_rang
   1.327      }
   1.328  
   1.329      if ( ivhd_device->range.trailer.type !=
   1.330 -        AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END) {
   1.331 +         AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
   1.332 +    {
   1.333          dprintk(XENLOG_ERR, "IVHD Error: "
   1.334                  "Invalid Range: End_Type 0x%x\n",
   1.335                  ivhd_device->range.trailer.type);
   1.336 @@ -409,35 +409,35 @@ static u16 __init parse_ivhd_device_rang
   1.337      first_bdf = ivhd_device->header.dev_id;
   1.338      if ( first_bdf >= ivrs_bdf_entries )
   1.339      {
   1.340 -       dprintk(XENLOG_ERR, "IVHD Error: "
   1.341 -           "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
   1.342 -       return 0;
   1.343 +        dprintk(XENLOG_ERR, "IVHD Error: "
   1.344 +                "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
   1.345 +        return 0;
   1.346      }
   1.347  
   1.348      last_bdf = ivhd_device->range.trailer.dev_id;
   1.349 -    if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
   1.350 +    if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
   1.351      {
   1.352 -       dprintk(XENLOG_ERR, "IVHD Error: "
   1.353 -           "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
   1.354 -       return 0;
   1.355 +        dprintk(XENLOG_ERR, "IVHD Error: "
   1.356 +                "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
   1.357 +        return 0;
   1.358      }
   1.359  
   1.360      dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
   1.361 -        first_bdf, last_bdf);
   1.362 +            first_bdf, last_bdf);
   1.363  
   1.364      /* override flags for range of devices */
   1.365      sys_mgt = get_field_from_byte(ivhd_device->header.flags,
   1.366 -                                 AMD_IOMMU_ACPI_SYS_MGT_MASK,
   1.367 -                                 AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
   1.368 -    for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
   1.369 +                                  AMD_IOMMU_ACPI_SYS_MGT_MASK,
   1.370 +                                  AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
   1.371 +    for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
   1.372          ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
   1.373  
   1.374      return dev_length;
   1.375  }
   1.376  
   1.377  static u16 __init parse_ivhd_device_alias(
   1.378 -           union acpi_ivhd_device *ivhd_device,
   1.379 -           u16 header_length, u16 block_length)
   1.380 +    union acpi_ivhd_device *ivhd_device,
   1.381 +    u16 header_length, u16 block_length)
   1.382  {
   1.383      u16 dev_length, alias_id, bdf;
   1.384  
   1.385 @@ -445,7 +445,7 @@ static u16 __init parse_ivhd_device_alia
   1.386      if ( header_length < (block_length + dev_length) )
   1.387      {
   1.388          dprintk(XENLOG_ERR, "IVHD Error: "
   1.389 -            "Invalid Device_Entry Length!\n");
   1.390 +                "Invalid Device_Entry Length!\n");
   1.391          return 0;
   1.392      }
   1.393  
   1.394 @@ -460,9 +460,9 @@ static u16 __init parse_ivhd_device_alia
   1.395      alias_id = ivhd_device->alias.dev_id;
   1.396      if ( alias_id >= ivrs_bdf_entries )
   1.397      {
   1.398 -       dprintk(XENLOG_ERR, "IVHD Error: "
   1.399 -               "Invalid Alias Dev_Id 0x%x\n", alias_id);
   1.400 -       return 0;
   1.401 +        dprintk(XENLOG_ERR, "IVHD Error: "
   1.402 +                "Invalid Alias Dev_Id 0x%x\n", alias_id);
   1.403 +        return 0;
   1.404      }
   1.405  
   1.406      dprintk(XENLOG_INFO, " Dev_Id Alias: 0x%x\n", alias_id);
   1.407 @@ -470,18 +470,18 @@ static u16 __init parse_ivhd_device_alia
   1.408      /* override requestor_id and flags for device */
   1.409      ivrs_mappings[bdf].dte_requestor_id = alias_id;
   1.410      ivrs_mappings[bdf].dte_sys_mgt_enable =
   1.411 -            get_field_from_byte(ivhd_device->header.flags,
   1.412 -                                AMD_IOMMU_ACPI_SYS_MGT_MASK,
   1.413 -                                AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
   1.414 +        get_field_from_byte(ivhd_device->header.flags,
   1.415 +                            AMD_IOMMU_ACPI_SYS_MGT_MASK,
   1.416 +                            AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
   1.417      ivrs_mappings[alias_id].dte_sys_mgt_enable =
   1.418 -            ivrs_mappings[bdf].dte_sys_mgt_enable;
   1.419 +        ivrs_mappings[bdf].dte_sys_mgt_enable;
   1.420  
   1.421      return dev_length;
   1.422  }
   1.423  
   1.424  static u16 __init parse_ivhd_device_alias_range(
   1.425 -           union acpi_ivhd_device *ivhd_device,
   1.426 -           u16 header_length, u16 block_length)
   1.427 +    union acpi_ivhd_device *ivhd_device,
   1.428 +    u16 header_length, u16 block_length)
   1.429  {
   1.430  
   1.431      u16 dev_length, first_bdf, last_bdf, alias_id, bdf;
   1.432 @@ -496,7 +496,7 @@ static u16 __init parse_ivhd_device_alia
   1.433      }
   1.434  
   1.435      if ( ivhd_device->alias_range.trailer.type !=
   1.436 -       AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
   1.437 +         AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
   1.438      {
   1.439          dprintk(XENLOG_ERR, "IVHD Error: "
   1.440                  "Invalid Range: End_Type 0x%x\n",
   1.441 @@ -536,7 +536,7 @@ static u16 __init parse_ivhd_device_alia
   1.442      sys_mgt = get_field_from_byte(ivhd_device->header.flags,
   1.443                                    AMD_IOMMU_ACPI_SYS_MGT_MASK,
   1.444                                    AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
   1.445 -    for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
   1.446 +    for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
   1.447      {
   1.448          ivrs_mappings[bdf].dte_requestor_id = alias_id;
   1.449          ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
   1.450 @@ -547,8 +547,8 @@ static u16 __init parse_ivhd_device_alia
   1.451  }
   1.452  
   1.453  static u16 __init parse_ivhd_device_extended(
   1.454 -           union acpi_ivhd_device *ivhd_device,
   1.455 -           u16 header_length, u16 block_length)
   1.456 +    union acpi_ivhd_device *ivhd_device,
   1.457 +    u16 header_length, u16 block_length)
   1.458  {
   1.459      u16 dev_length, bdf;
   1.460  
   1.461 @@ -578,8 +578,8 @@ static u16 __init parse_ivhd_device_exte
   1.462  }
   1.463  
   1.464  static u16 __init parse_ivhd_device_extended_range(
   1.465 -           union acpi_ivhd_device *ivhd_device,
   1.466 -           u16 header_length, u16 block_length)
   1.467 +    union acpi_ivhd_device *ivhd_device,
   1.468 +    u16 header_length, u16 block_length)
   1.469  {
   1.470      u16 dev_length, first_bdf, last_bdf, bdf;
   1.471      u8 sys_mgt;
   1.472 @@ -593,7 +593,7 @@ static u16 __init parse_ivhd_device_exte
   1.473      }
   1.474  
   1.475      if ( ivhd_device->extended_range.trailer.type !=
   1.476 -        AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
   1.477 +         AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
   1.478      {
   1.479          dprintk(XENLOG_ERR, "IVHD Error: "
   1.480                  "Invalid Range: End_Type 0x%x\n",
   1.481 @@ -604,13 +604,13 @@ static u16 __init parse_ivhd_device_exte
   1.482      first_bdf = ivhd_device->header.dev_id;
   1.483      if ( first_bdf >= ivrs_bdf_entries )
   1.484      {
   1.485 -       dprintk(XENLOG_ERR, "IVHD Error: "
   1.486 -           "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
   1.487 -       return 0;
   1.488 +        dprintk(XENLOG_ERR, "IVHD Error: "
   1.489 +                "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
   1.490 +        return 0;
   1.491      }
   1.492  
   1.493      last_bdf = ivhd_device->extended_range.trailer.dev_id;
   1.494 -    if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
   1.495 +    if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
   1.496      {
   1.497          dprintk(XENLOG_ERR, "IVHD Error: "
   1.498                  "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
   1.499 @@ -624,7 +624,7 @@ static u16 __init parse_ivhd_device_exte
   1.500      sys_mgt = get_field_from_byte(ivhd_device->header.flags,
   1.501                                    AMD_IOMMU_ACPI_SYS_MGT_MASK,
   1.502                                    AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
   1.503 -    for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
   1.504 +    for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
   1.505          ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
   1.506  
   1.507      return dev_length;
   1.508 @@ -637,20 +637,20 @@ static int __init parse_ivhd_block(struc
   1.509      struct amd_iommu *iommu;
   1.510  
   1.511      if ( ivhd_block->header.length <
   1.512 -        sizeof(struct acpi_ivhd_block_header) )
   1.513 +         sizeof(struct acpi_ivhd_block_header) )
   1.514      {
   1.515          dprintk(XENLOG_ERR, "IVHD Error: Invalid Block Length!\n");
   1.516          return -ENODEV;
   1.517      }
   1.518  
   1.519      iommu = find_iommu_from_bdf_cap(ivhd_block->header.dev_id,
   1.520 -            ivhd_block->cap_offset);
   1.521 +                                    ivhd_block->cap_offset);
   1.522      if ( !iommu )
   1.523      {
   1.524          dprintk(XENLOG_ERR,
   1.525                  "IVHD Error: No IOMMU for Dev_Id 0x%x  Cap 0x%x\n",
   1.526                  ivhd_block->header.dev_id, ivhd_block->cap_offset);
   1.527 -       return -ENODEV;
   1.528 +        return -ENODEV;
   1.529      }
   1.530  
   1.531      dprintk(XENLOG_INFO, "IVHD Block:\n");
   1.532 @@ -668,29 +668,29 @@ static int __init parse_ivhd_block(struc
   1.533                                            AMD_IOMMU_ACPI_COHERENT_MASK,
   1.534                                            AMD_IOMMU_ACPI_COHERENT_SHIFT);
   1.535      iommu->iotlb_support = get_field_from_byte(ivhd_block->header.flags,
   1.536 -                                          AMD_IOMMU_ACPI_IOTLB_SUP_MASK,
   1.537 -                                          AMD_IOMMU_ACPI_IOTLB_SUP_SHIFT);
   1.538 +                                               AMD_IOMMU_ACPI_IOTLB_SUP_MASK,
   1.539 +                                               AMD_IOMMU_ACPI_IOTLB_SUP_SHIFT);
   1.540      iommu->isochronous = get_field_from_byte(ivhd_block->header.flags,
   1.541 -                                          AMD_IOMMU_ACPI_ISOC_MASK,
   1.542 -                                          AMD_IOMMU_ACPI_ISOC_SHIFT);
   1.543 +                                             AMD_IOMMU_ACPI_ISOC_MASK,
   1.544 +                                             AMD_IOMMU_ACPI_ISOC_SHIFT);
   1.545      iommu->res_pass_pw = get_field_from_byte(ivhd_block->header.flags,
   1.546 -                                          AMD_IOMMU_ACPI_RES_PASS_PW_MASK,
   1.547 -                                          AMD_IOMMU_ACPI_RES_PASS_PW_SHIFT);
   1.548 +                                             AMD_IOMMU_ACPI_RES_PASS_PW_MASK,
   1.549 +                                             AMD_IOMMU_ACPI_RES_PASS_PW_SHIFT);
   1.550      iommu->pass_pw = get_field_from_byte(ivhd_block->header.flags,
   1.551 -                                          AMD_IOMMU_ACPI_PASS_PW_MASK,
   1.552 -                                          AMD_IOMMU_ACPI_PASS_PW_SHIFT);
   1.553 +                                         AMD_IOMMU_ACPI_PASS_PW_MASK,
   1.554 +                                         AMD_IOMMU_ACPI_PASS_PW_SHIFT);
   1.555      iommu->ht_tunnel_enable = get_field_from_byte(
   1.556 -                                          ivhd_block->header.flags,
   1.557 -                                          AMD_IOMMU_ACPI_HT_TUN_ENB_MASK,
   1.558 -                                          AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT);
   1.559 +        ivhd_block->header.flags,
   1.560 +        AMD_IOMMU_ACPI_HT_TUN_ENB_MASK,
   1.561 +        AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT);
   1.562  
   1.563      /* parse Device Entries */
   1.564      block_length = sizeof(struct acpi_ivhd_block_header);
   1.565 -    while( ivhd_block->header.length >=
   1.566 -       (block_length + sizeof(struct acpi_ivhd_device_header)) )
   1.567 +    while ( ivhd_block->header.length >=
   1.568 +            (block_length + sizeof(struct acpi_ivhd_device_header)) )
   1.569      {
   1.570          ivhd_device = (union acpi_ivhd_device *)
   1.571 -                ((u8 *)ivhd_block + block_length);
   1.572 +            ((u8 *)ivhd_block + block_length);
   1.573  
   1.574          dprintk(XENLOG_INFO, "IVHD Device Entry:\n");
   1.575          dprintk(XENLOG_INFO, " Type 0x%x\n",
   1.576 @@ -700,7 +700,7 @@ static int __init parse_ivhd_block(struc
   1.577          dprintk(XENLOG_INFO, " Flags 0x%x\n",
   1.578                  ivhd_device->header.flags);
   1.579  
   1.580 -        switch( ivhd_device->header.type )
   1.581 +        switch ( ivhd_device->header.type )
   1.582          {
   1.583          case AMD_IOMMU_ACPI_IVHD_DEV_U32_PAD:
   1.584              dev_length = parse_ivhd_device_padding(
   1.585 @@ -716,7 +716,8 @@ static int __init parse_ivhd_block(struc
   1.586              dev_length = parse_ivhd_device_select(ivhd_device);
   1.587              break;
   1.588          case AMD_IOMMU_ACPI_IVHD_DEV_RANGE_START:
   1.589 -            dev_length = parse_ivhd_device_range(ivhd_device,
   1.590 +            dev_length = parse_ivhd_device_range(
   1.591 +                ivhd_device,
   1.592                  ivhd_block->header.length, block_length);
   1.593              break;
   1.594          case AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_SELECT:
   1.595 @@ -741,7 +742,7 @@ static int __init parse_ivhd_block(struc
   1.596              break;
   1.597          default:
   1.598              dprintk(XENLOG_ERR, "IVHD Error: "
   1.599 -                "Invalid Device Type!\n");
   1.600 +                    "Invalid Device Type!\n");
   1.601              dev_length = 0;
   1.602              break;
   1.603          }
   1.604 @@ -759,7 +760,7 @@ static int __init parse_ivrs_block(struc
   1.605      struct acpi_ivhd_block_header *ivhd_block;
   1.606      struct acpi_ivmd_block_header *ivmd_block;
   1.607  
   1.608 -    switch(ivrs_block->type)
   1.609 +    switch ( ivrs_block->type )
   1.610      {
   1.611      case AMD_IOMMU_ACPI_IVHD_TYPE:
   1.612          ivhd_block = (struct acpi_ivhd_block_header *)ivrs_block;
   1.613 @@ -786,7 +787,7 @@ void __init dump_acpi_table_header(struc
   1.614  
   1.615      printk(XENLOG_INFO "AMD IOMMU: ACPI Table:\n");
   1.616      printk(XENLOG_INFO " Signature ");
   1.617 -    for ( i = 0; i < ACPI_NAME_SIZE; ++i )
   1.618 +    for ( i = 0; i < ACPI_NAME_SIZE; i++ )
   1.619          printk("%c", table->signature[i]);
   1.620      printk("\n");
   1.621  
   1.622 @@ -795,28 +796,27 @@ void __init dump_acpi_table_header(struc
   1.623      printk(" CheckSum 0x%x\n", table->checksum);
   1.624  
   1.625      printk(" OEM_Id ");
   1.626 -    for ( i = 0; i < ACPI_OEM_ID_SIZE; ++i )
   1.627 +    for ( i = 0; i < ACPI_OEM_ID_SIZE; i++ )
   1.628          printk("%c", table->oem_id[i]);
   1.629      printk("\n");
   1.630  
   1.631      printk(" OEM_Table_Id ");
   1.632 -    for ( i = 0; i < ACPI_OEM_TABLE_ID_SIZE; ++i )
   1.633 +    for ( i = 0; i < ACPI_OEM_TABLE_ID_SIZE; i++ )
   1.634          printk("%c", table->oem_table_id[i]);
   1.635      printk("\n");
   1.636  
   1.637      printk(" OEM_Revision 0x%x\n", table->oem_revision);
   1.638  
   1.639      printk(" Creator_Id ");
   1.640 -    for ( i = 0; i < ACPI_NAME_SIZE; ++i )
   1.641 +    for ( i = 0; i < ACPI_NAME_SIZE; i++ )
   1.642          printk("%c", table->asl_compiler_id[i]);
   1.643      printk("\n");
   1.644  
   1.645      printk(" Creator_Revision 0x%x\n",
   1.646 -       table->asl_compiler_revision);
   1.647 +           table->asl_compiler_revision);
   1.648  }
   1.649  
   1.650 -int __init parse_ivrs_table(unsigned long phys_addr,
   1.651 -                                  unsigned long size)
   1.652 +int __init parse_ivrs_table(unsigned long phys_addr, unsigned long size)
   1.653  {
   1.654      struct acpi_ivrs_block_header *ivrs_block;
   1.655      unsigned long length, i;
   1.656 @@ -834,7 +834,7 @@ int __init parse_ivrs_table(unsigned lon
   1.657      /* validate checksum: sum of entire table == 0 */
   1.658      checksum = 0;
   1.659      raw_table = (u8 *)table;
   1.660 -    for ( i = 0; i < table->length; ++i )
   1.661 +    for ( i = 0; i < table->length; i++ )
   1.662          checksum += raw_table[i];
   1.663      if ( checksum )
   1.664      {
   1.665 @@ -845,11 +845,10 @@ int __init parse_ivrs_table(unsigned lon
   1.666  
   1.667      /* parse IVRS blocks */
   1.668      length = sizeof(struct acpi_ivrs_table_header);
   1.669 -    while( error == 0 && table->length >
   1.670 -       (length + sizeof(struct acpi_ivrs_block_header)) )
   1.671 +    while ( (error == 0) && (table->length > (length + sizeof(*ivrs_block))) )
   1.672      {
   1.673          ivrs_block = (struct acpi_ivrs_block_header *)
   1.674 -                ((u8 *)table + length);
   1.675 +            ((u8 *)table + length);
   1.676  
   1.677          dprintk(XENLOG_INFO, "IVRS Block:\n");
   1.678          dprintk(XENLOG_INFO, " Type 0x%x\n", ivrs_block->type);
   1.679 @@ -857,16 +856,16 @@ int __init parse_ivrs_table(unsigned lon
   1.680          dprintk(XENLOG_INFO, " Length 0x%x\n", ivrs_block->length);
   1.681          dprintk(XENLOG_INFO, " Dev_Id 0x%x\n", ivrs_block->dev_id);
   1.682  
   1.683 -        if (table->length >= (length + ivrs_block->length))
   1.684 -           error = parse_ivrs_block(ivrs_block);
   1.685 -        else
   1.686 +        if ( table->length < (length + ivrs_block->length) )
   1.687          {
   1.688 -           dprintk(XENLOG_ERR, "IVRS Error: "
   1.689 -               "Table Length Exceeded: 0x%x -> 0x%lx\n",
   1.690 -               table->length,
   1.691 -               (length + ivrs_block->length));
   1.692 -           return -ENODEV;
   1.693 +            dprintk(XENLOG_ERR, "IVRS Error: "
   1.694 +                    "Table Length Exceeded: 0x%x -> 0x%lx\n",
   1.695 +                    table->length,
   1.696 +                    (length + ivrs_block->length));
   1.697 +            return -ENODEV;
   1.698          }
   1.699 +
   1.700 +        error = parse_ivrs_block(ivrs_block);
   1.701          length += ivrs_block->length;
   1.702      }
   1.703  
     2.1 --- a/xen/drivers/passthrough/amd/iommu_detect.c	Wed Mar 19 14:13:17 2008 +0000
     2.2 +++ b/xen/drivers/passthrough/amd/iommu_detect.c	Wed Mar 19 16:16:24 2008 +0000
     2.3 @@ -26,8 +26,8 @@
     2.4  #include "../pci-direct.h"
     2.5  #include "../pci_regs.h"
     2.6  
     2.7 -static int __init valid_bridge_bus_config(int bus, int dev, int func,
     2.8 -            int *sec_bus, int *sub_bus)
     2.9 +static int __init valid_bridge_bus_config(
    2.10 +    int bus, int dev, int func, int *sec_bus, int *sub_bus)
    2.11  {
    2.12      int pri_bus;
    2.13  
    2.14 @@ -35,7 +35,7 @@ static int __init valid_bridge_bus_confi
    2.15      *sec_bus = read_pci_config_byte(bus, dev, func, PCI_SECONDARY_BUS);
    2.16      *sub_bus = read_pci_config_byte(bus, dev, func, PCI_SUBORDINATE_BUS);
    2.17  
    2.18 -    return ( pri_bus == bus && *sec_bus > bus && *sub_bus >= *sec_bus );
    2.19 +    return ((pri_bus == bus) && (*sec_bus > bus) && (*sub_bus >= *sec_bus));
    2.20  }
    2.21  
    2.22  int __init get_iommu_last_downstream_bus(struct amd_iommu *iommu)
    2.23 @@ -49,9 +49,11 @@ int __init get_iommu_last_downstream_bus
    2.24      iommu->downstream_bus_present[bus] = 1;
    2.25      dev = PCI_SLOT(iommu->first_devfn);
    2.26      multi_func = PCI_FUNC(iommu->first_devfn) > 0;
    2.27 -    for ( devfn = iommu->first_devfn; devfn <= iommu->last_devfn; ++devfn ) {
    2.28 +    for ( devfn = iommu->first_devfn; devfn <= iommu->last_devfn; devfn++ )
    2.29 +    {
    2.30          /* skipping to next device#? */
    2.31 -        if ( dev != PCI_SLOT(devfn) ) {
    2.32 +        if ( dev != PCI_SLOT(devfn) )
    2.33 +        {
    2.34              dev = PCI_SLOT(devfn);
    2.35              multi_func = 0;
    2.36          }
    2.37 @@ -62,14 +64,15 @@ int __init get_iommu_last_downstream_bus
    2.38              continue;
    2.39  
    2.40          hdr_type = read_pci_config_byte(bus, dev, func,
    2.41 -                PCI_HEADER_TYPE);
    2.42 +                                        PCI_HEADER_TYPE);
    2.43          if ( func == 0 )
    2.44              multi_func = IS_PCI_MULTI_FUNCTION(hdr_type);
    2.45  
    2.46          if ( (func == 0 || multi_func) &&
    2.47 -            IS_PCI_TYPE1_HEADER(hdr_type) ) {
    2.48 -            if (!valid_bridge_bus_config(bus, dev, func,
    2.49 -                &sec_bus, &sub_bus))
    2.50 +             IS_PCI_TYPE1_HEADER(hdr_type) )
    2.51 +        {
    2.52 +            if ( !valid_bridge_bus_config(bus, dev, func,
    2.53 +                                          &sec_bus, &sub_bus) )
    2.54                  return -ENODEV;
    2.55  
    2.56              if ( sub_bus > iommu->last_downstream_bus )
    2.57 @@ -84,18 +87,18 @@ int __init get_iommu_last_downstream_bus
    2.58  }
    2.59  
    2.60  int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
    2.61 -            struct amd_iommu *iommu)
    2.62 +                                  struct amd_iommu *iommu)
    2.63  {
    2.64      u32 cap_header, cap_range, misc_info;
    2.65      u64 mmio_bar;
    2.66  
    2.67 -    mmio_bar = (u64)read_pci_config(bus, dev, func,
    2.68 -            cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
    2.69 +    mmio_bar = (u64)read_pci_config(
    2.70 +        bus, dev, func, cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
    2.71      mmio_bar |= read_pci_config(bus, dev, func,
    2.72 -            cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET); 
    2.73 +                                cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET);
    2.74      iommu->mmio_base_phys = mmio_bar & (u64)~0x3FFF;
    2.75  
    2.76 -    if ( (mmio_bar & 0x1) == 0 || iommu->mmio_base_phys == 0 )
    2.77 +    if ( ((mmio_bar & 0x1) == 0) || (iommu->mmio_base_phys == 0) )
    2.78      {
    2.79          dprintk(XENLOG_ERR ,
    2.80                  "AMD IOMMU: Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
    2.81 @@ -106,42 +109,37 @@ int __init get_iommu_capabilities(u8 bus
    2.82      iommu->cap_offset = cap_ptr;
    2.83  
    2.84      cap_header = read_pci_config(bus, dev, func, cap_ptr);
    2.85 -    iommu->revision = get_field_from_reg_u32(cap_header,
    2.86 -                  PCI_CAP_REV_MASK, PCI_CAP_REV_SHIFT);
    2.87 -    iommu->iotlb_support = get_field_from_reg_u32(cap_header,
    2.88 -                PCI_CAP_IOTLB_MASK, PCI_CAP_IOTLB_SHIFT);
    2.89 -    iommu->ht_tunnel_support = get_field_from_reg_u32(cap_header,
    2.90 -                    PCI_CAP_HT_TUNNEL_MASK,
    2.91 -                    PCI_CAP_HT_TUNNEL_SHIFT);
    2.92 -    iommu->pte_not_present_cached = get_field_from_reg_u32(cap_header,
    2.93 -                    PCI_CAP_NP_CACHE_MASK,
    2.94 -                    PCI_CAP_NP_CACHE_SHIFT);
    2.95 +    iommu->revision = get_field_from_reg_u32(
    2.96 +        cap_header, PCI_CAP_REV_MASK, PCI_CAP_REV_SHIFT);
    2.97 +    iommu->iotlb_support = get_field_from_reg_u32(
    2.98 +        cap_header, PCI_CAP_IOTLB_MASK, PCI_CAP_IOTLB_SHIFT);
    2.99 +    iommu->ht_tunnel_support = get_field_from_reg_u32(
   2.100 +        cap_header, PCI_CAP_HT_TUNNEL_MASK, PCI_CAP_HT_TUNNEL_SHIFT);
   2.101 +    iommu->pte_not_present_cached = get_field_from_reg_u32(
   2.102 +        cap_header, PCI_CAP_NP_CACHE_MASK, PCI_CAP_NP_CACHE_SHIFT);
   2.103  
   2.104      cap_range = read_pci_config(bus, dev, func,
   2.105 -            cap_ptr + PCI_CAP_RANGE_OFFSET);
   2.106 -    iommu->unit_id = get_field_from_reg_u32(cap_range,
   2.107 -                PCI_CAP_UNIT_ID_MASK,
   2.108 -                PCI_CAP_UNIT_ID_SHIFT);
   2.109 -    iommu->root_bus = get_field_from_reg_u32(cap_range,
   2.110 -                PCI_CAP_BUS_NUMBER_MASK,
   2.111 -                PCI_CAP_BUS_NUMBER_SHIFT);
   2.112 -    iommu->first_devfn = get_field_from_reg_u32(cap_range,
   2.113 -                PCI_CAP_FIRST_DEVICE_MASK,
   2.114 -                PCI_CAP_FIRST_DEVICE_SHIFT);
   2.115 -    iommu->last_devfn = get_field_from_reg_u32(cap_range,
   2.116 -                PCI_CAP_LAST_DEVICE_MASK,
   2.117 -                PCI_CAP_LAST_DEVICE_SHIFT);
   2.118 +                                cap_ptr + PCI_CAP_RANGE_OFFSET);
   2.119 +    iommu->unit_id = get_field_from_reg_u32(
   2.120 +        cap_range, PCI_CAP_UNIT_ID_MASK, PCI_CAP_UNIT_ID_SHIFT);
   2.121 +    iommu->root_bus = get_field_from_reg_u32(
   2.122 +        cap_range, PCI_CAP_BUS_NUMBER_MASK, PCI_CAP_BUS_NUMBER_SHIFT);
   2.123 +    iommu->first_devfn = get_field_from_reg_u32(
   2.124 +        cap_range, PCI_CAP_FIRST_DEVICE_MASK, PCI_CAP_FIRST_DEVICE_SHIFT);
   2.125 +    iommu->last_devfn = get_field_from_reg_u32(
   2.126 +        cap_range, PCI_CAP_LAST_DEVICE_MASK, PCI_CAP_LAST_DEVICE_SHIFT);
   2.127  
   2.128      misc_info = read_pci_config(bus, dev, func,
   2.129 -            cap_ptr + PCI_MISC_INFO_OFFSET);
   2.130 -    iommu->msi_number = get_field_from_reg_u32(misc_info,
   2.131 -                PCI_CAP_MSI_NUMBER_MASK,
   2.132 -                PCI_CAP_MSI_NUMBER_SHIFT);
   2.133 +                                cap_ptr + PCI_MISC_INFO_OFFSET);
   2.134 +    iommu->msi_number = get_field_from_reg_u32(
   2.135 +        misc_info, PCI_CAP_MSI_NUMBER_MASK, PCI_CAP_MSI_NUMBER_SHIFT);
   2.136 +
   2.137      return 0;
   2.138  }
   2.139  
   2.140 -static int __init scan_caps_for_iommu(int bus, int dev, int func,
   2.141 -            iommu_detect_callback_ptr_t iommu_detect_callback)
   2.142 +static int __init scan_caps_for_iommu(
   2.143 +    int bus, int dev, int func,
   2.144 +    iommu_detect_callback_ptr_t iommu_detect_callback)
   2.145  {
   2.146      int cap_ptr, cap_id, cap_type;
   2.147      u32 cap_header;
   2.148 @@ -149,32 +147,35 @@ static int __init scan_caps_for_iommu(in
   2.149  
   2.150      count = 0;
   2.151      cap_ptr = read_pci_config_byte(bus, dev, func,
   2.152 -            PCI_CAPABILITY_LIST);
   2.153 -    while ( cap_ptr >= PCI_MIN_CAP_OFFSET &&
   2.154 -        count < PCI_MAX_CAP_BLOCKS && !error ) {
   2.155 +                                   PCI_CAPABILITY_LIST);
   2.156 +    while ( (cap_ptr >= PCI_MIN_CAP_OFFSET) &&
   2.157 +            (count < PCI_MAX_CAP_BLOCKS) &&
   2.158 +            !error )
   2.159 +    {
   2.160          cap_ptr &= PCI_CAP_PTR_MASK;
   2.161          cap_header = read_pci_config(bus, dev, func, cap_ptr);
   2.162 -        cap_id = get_field_from_reg_u32(cap_header,
   2.163 -                PCI_CAP_ID_MASK, PCI_CAP_ID_SHIFT);
   2.164 +        cap_id = get_field_from_reg_u32(
   2.165 +            cap_header, PCI_CAP_ID_MASK, PCI_CAP_ID_SHIFT);
   2.166  
   2.167 -        if ( cap_id == PCI_CAP_ID_SECURE_DEVICE ) {
   2.168 -            cap_type = get_field_from_reg_u32(cap_header,
   2.169 -                    PCI_CAP_TYPE_MASK, PCI_CAP_TYPE_SHIFT);
   2.170 -            if ( cap_type == PCI_CAP_TYPE_IOMMU ) {
   2.171 +        if ( cap_id == PCI_CAP_ID_SECURE_DEVICE )
   2.172 +        {
   2.173 +            cap_type = get_field_from_reg_u32(
   2.174 +                cap_header, PCI_CAP_TYPE_MASK, PCI_CAP_TYPE_SHIFT);
   2.175 +            if ( cap_type == PCI_CAP_TYPE_IOMMU )
   2.176                  error = iommu_detect_callback(
   2.177 -                        bus, dev, func, cap_ptr);
   2.178 -            }
   2.179 +                    bus, dev, func, cap_ptr);
   2.180          }
   2.181  
   2.182 -        cap_ptr = get_field_from_reg_u32(cap_header,
   2.183 -                PCI_CAP_NEXT_PTR_MASK, PCI_CAP_NEXT_PTR_SHIFT);
   2.184 -        ++count;    }
   2.185 +        cap_ptr = get_field_from_reg_u32(
   2.186 +            cap_header, PCI_CAP_NEXT_PTR_MASK, PCI_CAP_NEXT_PTR_SHIFT);
   2.187 +        count++;
   2.188 +    }
   2.189  
   2.190      return error;
   2.191  }
   2.192  
   2.193 -static int __init scan_functions_for_iommu(int bus, int dev,
   2.194 -            iommu_detect_callback_ptr_t iommu_detect_callback)
   2.195 +static int __init scan_functions_for_iommu(
   2.196 +    int bus, int dev, iommu_detect_callback_ptr_t iommu_detect_callback)
   2.197  {
   2.198      int func, hdr_type;
   2.199      int count, error = 0;
   2.200 @@ -182,19 +183,20 @@ static int __init scan_functions_for_iom
   2.201      func = 0;
   2.202      count = 1;
   2.203      while ( VALID_PCI_VENDOR_ID(read_pci_config_16(bus, dev, func,
   2.204 -            PCI_VENDOR_ID)) && !error && func < count ) {
   2.205 +                                                   PCI_VENDOR_ID)) &&
   2.206 +            !error && (func < count) )
   2.207 +    {
   2.208          hdr_type = read_pci_config_byte(bus, dev, func,
   2.209 -                PCI_HEADER_TYPE);
   2.210 +                                        PCI_HEADER_TYPE);
   2.211  
   2.212          if ( func == 0 && IS_PCI_MULTI_FUNCTION(hdr_type) )
   2.213              count = PCI_MAX_FUNC_COUNT;
   2.214  
   2.215          if ( IS_PCI_TYPE0_HEADER(hdr_type) ||
   2.216 -            IS_PCI_TYPE1_HEADER(hdr_type) ) {
   2.217 -            error =  scan_caps_for_iommu(bus, dev, func,
   2.218 -                    iommu_detect_callback);
   2.219 -        }
   2.220 -        ++func;
   2.221 +             IS_PCI_TYPE1_HEADER(hdr_type) )
   2.222 +            error = scan_caps_for_iommu(bus, dev, func,
   2.223 +                                        iommu_detect_callback);
   2.224 +        func++;
   2.225      }
   2.226  
   2.227      return error;
   2.228 @@ -205,12 +207,10 @@ int __init scan_for_iommu(iommu_detect_c
   2.229  {
   2.230      int bus, dev, error = 0;
   2.231  
   2.232 -    for ( bus = 0; bus < PCI_MAX_BUS_COUNT && !error; ++bus ) {
   2.233 -        for ( dev = 0; dev < PCI_MAX_DEV_COUNT && !error; ++dev ) {
   2.234 -            error =  scan_functions_for_iommu(bus, dev,
   2.235 -                  iommu_detect_callback);
   2.236 -        }
   2.237 -    }
   2.238 +    for ( bus = 0; bus < PCI_MAX_BUS_COUNT && !error; ++bus )
   2.239 +        for ( dev = 0; dev < PCI_MAX_DEV_COUNT && !error; ++dev )
   2.240 +            error = scan_functions_for_iommu(bus, dev,
   2.241 +                                             iommu_detect_callback);
   2.242  
   2.243      return error;
   2.244  }
     3.1 --- a/xen/drivers/passthrough/amd/iommu_init.c	Wed Mar 19 14:13:17 2008 +0000
     3.2 +++ b/xen/drivers/passthrough/amd/iommu_init.c	Wed Mar 19 16:16:24 2008 +0000
     3.3 @@ -32,26 +32,28 @@ int __init map_iommu_mmio_region(struct 
     3.4  {
     3.5      unsigned long mfn;
     3.6  
     3.7 -    if ( nr_amd_iommus > MAX_AMD_IOMMUS ) {
     3.8 +    if ( nr_amd_iommus > MAX_AMD_IOMMUS )
     3.9 +    {
    3.10          gdprintk(XENLOG_ERR,
    3.11 -            "IOMMU: nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
    3.12 +                 "IOMMU: nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
    3.13          return -ENOMEM;
    3.14      }
    3.15  
    3.16 -    iommu->mmio_base = (void *) fix_to_virt(FIX_IOMMU_MMIO_BASE_0 +
    3.17 -                       nr_amd_iommus * MMIO_PAGES_PER_IOMMU);
    3.18 -    mfn = (unsigned long)iommu->mmio_base_phys >> PAGE_SHIFT;
    3.19 +    iommu->mmio_base = (void *)fix_to_virt(
    3.20 +        FIX_IOMMU_MMIO_BASE_0 + nr_amd_iommus * MMIO_PAGES_PER_IOMMU);
    3.21 +    mfn = (unsigned long)(iommu->mmio_base_phys >> PAGE_SHIFT);
    3.22      map_pages_to_xen((unsigned long)iommu->mmio_base, mfn,
    3.23 -                    MMIO_PAGES_PER_IOMMU, PAGE_HYPERVISOR_NOCACHE);
    3.24 +                     MMIO_PAGES_PER_IOMMU, PAGE_HYPERVISOR_NOCACHE);
    3.25  
    3.26 -    memset((u8*)iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH);
    3.27 +    memset(iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH);
    3.28  
    3.29      return 0;
    3.30  }
    3.31  
    3.32  void __init unmap_iommu_mmio_region(struct amd_iommu *iommu)
    3.33  {
    3.34 -    if ( iommu->mmio_base ) {
    3.35 +    if ( iommu->mmio_base )
    3.36 +    {
    3.37          iounmap(iommu->mmio_base);
    3.38          iommu->mmio_base = NULL;
    3.39      }
    3.40 @@ -67,16 +69,16 @@ void __init register_iommu_dev_table_in_
    3.41      addr_hi = addr_64 >> 32;
    3.42  
    3.43      set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
    3.44 -        IOMMU_DEV_TABLE_BASE_LOW_MASK,
    3.45 -        IOMMU_DEV_TABLE_BASE_LOW_SHIFT, &entry);
    3.46 +                         IOMMU_DEV_TABLE_BASE_LOW_MASK,
    3.47 +                         IOMMU_DEV_TABLE_BASE_LOW_SHIFT, &entry);
    3.48      set_field_in_reg_u32((iommu->dev_table.alloc_size / PAGE_SIZE) - 1,
    3.49 -        entry, IOMMU_DEV_TABLE_SIZE_MASK,
    3.50 -        IOMMU_DEV_TABLE_SIZE_SHIFT, &entry);
    3.51 +                         entry, IOMMU_DEV_TABLE_SIZE_MASK,
    3.52 +                         IOMMU_DEV_TABLE_SIZE_SHIFT, &entry);
    3.53      writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_LOW_OFFSET);
    3.54  
    3.55      set_field_in_reg_u32((u32)addr_hi, 0,
    3.56 -        IOMMU_DEV_TABLE_BASE_HIGH_MASK,
    3.57 -        IOMMU_DEV_TABLE_BASE_HIGH_SHIFT, &entry);
    3.58 +                         IOMMU_DEV_TABLE_BASE_HIGH_MASK,
    3.59 +                         IOMMU_DEV_TABLE_BASE_HIGH_SHIFT, &entry);
    3.60      writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_HIGH_OFFSET);
    3.61  }
    3.62  
    3.63 @@ -91,49 +93,49 @@ void __init register_iommu_cmd_buffer_in
    3.64      addr_hi = addr_64 >> 32;
    3.65  
    3.66      set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
    3.67 -        IOMMU_CMD_BUFFER_BASE_LOW_MASK,
    3.68 -        IOMMU_CMD_BUFFER_BASE_LOW_SHIFT, &entry);
    3.69 +                         IOMMU_CMD_BUFFER_BASE_LOW_MASK,
    3.70 +                         IOMMU_CMD_BUFFER_BASE_LOW_SHIFT, &entry);
    3.71      writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);
    3.72  
    3.73      power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
    3.74          IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;
    3.75  
    3.76      set_field_in_reg_u32((u32)addr_hi, 0,
    3.77 -        IOMMU_CMD_BUFFER_BASE_HIGH_MASK,
    3.78 -        IOMMU_CMD_BUFFER_BASE_HIGH_SHIFT, &entry);
    3.79 +                         IOMMU_CMD_BUFFER_BASE_HIGH_MASK,
    3.80 +                         IOMMU_CMD_BUFFER_BASE_HIGH_SHIFT, &entry);
    3.81      set_field_in_reg_u32(power_of2_entries, entry,
    3.82 -        IOMMU_CMD_BUFFER_LENGTH_MASK,
    3.83 -        IOMMU_CMD_BUFFER_LENGTH_SHIFT, &entry);
    3.84 +                         IOMMU_CMD_BUFFER_LENGTH_MASK,
    3.85 +                         IOMMU_CMD_BUFFER_LENGTH_SHIFT, &entry);
    3.86      writel(entry, iommu->mmio_base+IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET);
    3.87  }
    3.88  
    3.89  static void __init set_iommu_translation_control(struct amd_iommu *iommu,
    3.90 -            int enable)
    3.91 +                                                 int enable)
    3.92  {
    3.93      u32 entry;
    3.94  
    3.95      entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
    3.96      set_field_in_reg_u32(iommu->ht_tunnel_support ? IOMMU_CONTROL_ENABLED :
    3.97 -        IOMMU_CONTROL_ENABLED, entry,
    3.98 -        IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
    3.99 -        IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
   3.100 +                         IOMMU_CONTROL_ENABLED, entry,
   3.101 +                         IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
   3.102 +                         IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
   3.103      set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
   3.104 -        IOMMU_CONTROL_ENABLED, entry,
   3.105 -        IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
   3.106 -        IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
   3.107 +                         IOMMU_CONTROL_ENABLED, entry,
   3.108 +                         IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
   3.109 +                         IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
   3.110      writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   3.111  }
   3.112  
   3.113  static void __init set_iommu_command_buffer_control(struct amd_iommu *iommu,
   3.114 -            int enable)
   3.115 +                                                    int enable)
   3.116  {
   3.117      u32 entry;
   3.118  
   3.119      entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   3.120      set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
   3.121 -        IOMMU_CONTROL_ENABLED, entry,
   3.122 -        IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
   3.123 -        IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);
   3.124 +                         IOMMU_CONTROL_ENABLED, entry,
   3.125 +                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
   3.126 +                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);
   3.127      writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   3.128  }
   3.129  
   3.130 @@ -146,34 +148,34 @@ static void __init register_iommu_exclus
   3.131      addr_hi = iommu->exclusion_limit >> 32;
   3.132  
   3.133      set_field_in_reg_u32((u32)addr_hi, 0,
   3.134 -        IOMMU_EXCLUSION_LIMIT_HIGH_MASK,
   3.135 -        IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT, &entry);
   3.136 +                         IOMMU_EXCLUSION_LIMIT_HIGH_MASK,
   3.137 +                         IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT, &entry);
   3.138      writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET);
   3.139  
   3.140      set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
   3.141 -        IOMMU_EXCLUSION_LIMIT_LOW_MASK,
   3.142 -        IOMMU_EXCLUSION_LIMIT_LOW_SHIFT, &entry);
   3.143 +                         IOMMU_EXCLUSION_LIMIT_LOW_MASK,
   3.144 +                         IOMMU_EXCLUSION_LIMIT_LOW_SHIFT, &entry);
   3.145      writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_LOW_OFFSET);
   3.146  
   3.147      addr_lo = iommu->exclusion_base & DMA_32BIT_MASK;
   3.148      addr_hi = iommu->exclusion_base >> 32;
   3.149  
   3.150      set_field_in_reg_u32((u32)addr_hi, 0,
   3.151 -        IOMMU_EXCLUSION_BASE_HIGH_MASK,
   3.152 -        IOMMU_EXCLUSION_BASE_HIGH_SHIFT, &entry);
   3.153 +                         IOMMU_EXCLUSION_BASE_HIGH_MASK,
   3.154 +                         IOMMU_EXCLUSION_BASE_HIGH_SHIFT, &entry);
   3.155      writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_HIGH_OFFSET);
   3.156  
   3.157      set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
   3.158 -        IOMMU_EXCLUSION_BASE_LOW_MASK,
   3.159 -        IOMMU_EXCLUSION_BASE_LOW_SHIFT, &entry);
   3.160 +                         IOMMU_EXCLUSION_BASE_LOW_MASK,
   3.161 +                         IOMMU_EXCLUSION_BASE_LOW_SHIFT, &entry);
   3.162  
   3.163      set_field_in_reg_u32(iommu->exclusion_allow_all, entry,
   3.164 -        IOMMU_EXCLUSION_ALLOW_ALL_MASK,
   3.165 -        IOMMU_EXCLUSION_ALLOW_ALL_SHIFT, &entry);
   3.166 +                         IOMMU_EXCLUSION_ALLOW_ALL_MASK,
   3.167 +                         IOMMU_EXCLUSION_ALLOW_ALL_SHIFT, &entry);
   3.168  
   3.169      set_field_in_reg_u32(iommu->exclusion_enable, entry,
   3.170 -        IOMMU_EXCLUSION_RANGE_ENABLE_MASK,
   3.171 -        IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT, &entry);
   3.172 +                         IOMMU_EXCLUSION_RANGE_ENABLE_MASK,
   3.173 +                         IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT, &entry);
   3.174      writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_LOW_OFFSET);
   3.175  }
   3.176  
   3.177 @@ -184,5 +186,3 @@ void __init enable_iommu(struct amd_iomm
   3.178      set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
   3.179      printk("AMD IOMMU %d: Enabled\n", nr_amd_iommus);
   3.180  }
   3.181 -
   3.182 -
     4.1 --- a/xen/drivers/passthrough/amd/iommu_map.c	Wed Mar 19 14:13:17 2008 +0000
     4.2 +++ b/xen/drivers/passthrough/amd/iommu_map.c	Wed Mar 19 16:16:24 2008 +0000
     4.3 @@ -132,7 +132,8 @@ void flush_command_buffer(struct amd_iom
     4.4      send_iommu_command(iommu, cmd);
     4.5  
     4.6      /* wait for 'ComWaitInt' to signal comp#endifletion? */
     4.7 -    if ( amd_iommu_poll_comp_wait ) {
     4.8 +    if ( amd_iommu_poll_comp_wait )
     4.9 +    {
    4.10          loop_count = amd_iommu_poll_comp_wait;
    4.11          do {
    4.12              status = readl(iommu->mmio_base +
    4.13 @@ -152,8 +153,10 @@ void flush_command_buffer(struct amd_iom
    4.14                     IOMMU_STATUS_MMIO_OFFSET);
    4.15          }
    4.16          else
    4.17 +        {
    4.18              dprintk(XENLOG_WARNING, "AMD IOMMU: Warning:"
    4.19                      " ComWaitInt bit did not assert!\n");
    4.20 +        }
    4.21      }
    4.22  }
    4.23  
    4.24 @@ -234,7 +237,7 @@ static void amd_iommu_set_page_directory
    4.25  }
    4.26  
    4.27  void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u16 domain_id,
    4.28 -           u8 sys_mgt, u8 dev_ex, u8 paging_mode)
    4.29 +                                   u8 sys_mgt, u8 dev_ex, u8 paging_mode)
    4.30  {
    4.31      u64 addr_hi, addr_lo;
    4.32      u32 entry;
    4.33 @@ -397,7 +400,7 @@ int amd_iommu_map_page(struct domain *d,
    4.34      spin_lock_irqsave(&hd->mapping_lock, flags);
    4.35  
    4.36      pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
    4.37 -    if ( pte == 0 )
    4.38 +    if ( pte == NULL )
    4.39      {
    4.40          dprintk(XENLOG_ERR,
    4.41                  "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
    4.42 @@ -428,7 +431,7 @@ int amd_iommu_unmap_page(struct domain *
    4.43      spin_lock_irqsave(&hd->mapping_lock, flags);
    4.44  
    4.45      pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
    4.46 -    if ( pte == 0 )
    4.47 +    if ( pte == NULL )
    4.48      {
    4.49          dprintk(XENLOG_ERR,
    4.50                  "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
    4.51 @@ -441,7 +444,7 @@ int amd_iommu_unmap_page(struct domain *
    4.52      spin_unlock_irqrestore(&hd->mapping_lock, flags);
    4.53  
    4.54      /* send INVALIDATE_IOMMU_PAGES command */
    4.55 -    for_each_amd_iommu(iommu)
    4.56 +    for_each_amd_iommu ( iommu )
    4.57      {
    4.58          spin_lock_irqsave(&iommu->lock, flags);
    4.59          invalidate_iommu_page(iommu, io_addr, requestor_id);
    4.60 @@ -453,9 +456,9 @@ int amd_iommu_unmap_page(struct domain *
    4.61  }
    4.62  
    4.63  int amd_iommu_reserve_domain_unity_map(
    4.64 -           struct domain *domain,
    4.65 -           unsigned long phys_addr,
    4.66 -           unsigned long size, int iw, int ir)
    4.67 +    struct domain *domain,
    4.68 +    unsigned long phys_addr,
    4.69 +    unsigned long size, int iw, int ir)
    4.70  {
    4.71      unsigned long flags, npages, i;
    4.72      void *pte;
    4.73 @@ -466,17 +469,18 @@ int amd_iommu_reserve_domain_unity_map(
    4.74      spin_lock_irqsave(&hd->mapping_lock, flags);
    4.75      for ( i = 0; i < npages; ++i )
    4.76      {
    4.77 -        pte = get_pte_from_page_tables(hd->root_table,
    4.78 -           hd->paging_mode, phys_addr>>PAGE_SHIFT);
    4.79 -        if ( pte == 0 )
    4.80 +        pte = get_pte_from_page_tables(
    4.81 +            hd->root_table, hd->paging_mode, phys_addr >> PAGE_SHIFT);
    4.82 +        if ( pte == NULL )
    4.83          {
    4.84              dprintk(XENLOG_ERR,
    4.85 -                    "AMD IOMMU: Invalid IO pagetable entry phys_addr = %lx\n", phys_addr);
    4.86 +                    "AMD IOMMU: Invalid IO pagetable entry "
    4.87 +                    "phys_addr = %lx\n", phys_addr);
    4.88              spin_unlock_irqrestore(&hd->mapping_lock, flags);
    4.89              return -EFAULT;
    4.90          }
    4.91          set_page_table_entry_present((u32 *)pte,
    4.92 -           phys_addr, iw, ir);
    4.93 +                                     phys_addr, iw, ir);
    4.94          phys_addr += PAGE_SIZE;
    4.95      }
    4.96      spin_unlock_irqrestore(&hd->mapping_lock, flags);
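
The amd_iommu_reserve_domain_unity_map() hunks above reflow a straightforward per-page walk: for every 4KB page in the requested range, look up the IO PTE (bailing out with -EFAULT if the table walk fails) and mark it present with the requested read/write permissions. A toy self-contained version of that loop, with hypothetical stand-ins for get_pte_from_page_tables() and set_page_table_entry_present(), and without the hd->mapping_lock the real code holds across the walk:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Hypothetical stand-in for get_pte_from_page_tables(): returns a
     * pointer to the PTE slot for `gfn`, or NULL if the walk fails.
     * One static slot is reused so the demo stays self-contained. */
    static uint32_t *lookup_pte(unsigned long gfn)
    {
        static uint32_t fake_pte;
        (void)gfn;
        return &fake_pte;
    }

    /* Per-page walk in the shape of amd_iommu_reserve_domain_unity_map(). */
    static int reserve_unity_map(unsigned long phys_addr,
                                 unsigned long size, int iw, int ir)
    {
        unsigned long npages = size >> PAGE_SHIFT, i;

        for ( i = 0; i < npages; i++ )
        {
            uint32_t *pte = lookup_pte(phys_addr >> PAGE_SHIFT);
            if ( pte == NULL )
                return -1;                    /* -EFAULT in the real code */
            /* toy "present + write + read" bits, not the real PTE format */
            *pte = 1 | ((uint32_t)iw << 1) | ((uint32_t)ir << 2);
            phys_addr += PAGE_SIZE;
        }
        return 0;
    }

    int main(void)
    {
        /* Reserve a 64KB read/write unity-mapped range at 1MB. */
        printf("%d\n", reserve_unity_map(0x100000UL, 0x10000UL, 1, 1));
        return 0;
    }
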
     5.1 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Wed Mar 19 14:13:17 2008 +0000
     5.2 +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Wed Mar 19 16:16:24 2008 +0000
     5.3 @@ -168,7 +168,7 @@ int iommu_detect_callback(u8 bus, u8 dev
     5.4      list_add_tail(&iommu->list, &amd_iommu_head);
     5.5  
     5.6      /* allocate resources for this IOMMU */
     5.7 -    if (allocate_iommu_resources(iommu) != 0)
     5.8 +    if ( allocate_iommu_resources(iommu) != 0 )
     5.9          goto error_out;
    5.10  
    5.11      return 0;
    5.12 @@ -208,7 +208,7 @@ static int __init amd_iommu_init(void)
    5.13      }
    5.14  
    5.15      /* assign default values for device entries */
    5.16 -    for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
    5.17 +    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
    5.18      {
    5.19          ivrs_mappings[bdf].dte_requestor_id = bdf;
    5.20          ivrs_mappings[bdf].dte_sys_mgt_enable =
    5.21 @@ -288,7 +288,8 @@ void amd_iommu_setup_domain_device(
    5.22          sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
    5.23          dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
    5.24          amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr,
    5.25 -            req_id, sys_mgt, dev_ex, hd->paging_mode);
    5.26 +                                      req_id, sys_mgt, dev_ex,
    5.27 +                                      hd->paging_mode);
    5.28  
    5.29          invalidate_dev_table_entry(iommu, req_id);
    5.30          flush_command_buffer(iommu);
    5.31 @@ -317,8 +318,8 @@ void __init amd_iommu_setup_dom0_devices
    5.32              {
    5.33                  l = read_pci_config(bus, dev, func, PCI_VENDOR_ID);
    5.34                  /* some broken boards return 0 or ~0 if a slot is empty: */
    5.35 -                if ( l == 0xffffffff || l == 0x00000000 ||
    5.36 -                     l == 0x0000ffff || l == 0xffff0000 )
    5.37 +                if ( (l == 0xffffffff) || (l == 0x00000000) ||
    5.38 +                     (l == 0x0000ffff) || (l == 0xffff0000) )
    5.39                      continue;
    5.40  
    5.41                  pdev = xmalloc(struct pci_dev);
    5.42 @@ -368,22 +369,22 @@ int amd_iommu_detect(void)
    5.43          /* allocate 'ivrs mappings' table */
     5.44          /* note: the table has entries to accommodate all IOMMUs */
    5.45          last_bus = 0;
    5.46 -        for_each_amd_iommu (iommu)
    5.47 -           if (iommu->last_downstream_bus > last_bus)
    5.48 -               last_bus = iommu->last_downstream_bus;
    5.49 +        for_each_amd_iommu ( iommu )
    5.50 +            if ( iommu->last_downstream_bus > last_bus )
    5.51 +                last_bus = iommu->last_downstream_bus;
    5.52  
    5.53          ivrs_bdf_entries = (last_bus + 1) *
    5.54 -                IOMMU_DEV_TABLE_ENTRIES_PER_BUS;
    5.55 +            IOMMU_DEV_TABLE_ENTRIES_PER_BUS;
     5.56          ivrs_mappings = xmalloc_array(struct ivrs_mappings, ivrs_bdf_entries);
    5.57  
    5.58          if ( !ivrs_mappings )
    5.59          {
    5.60              dprintk(XENLOG_ERR, "AMD IOMMU:"
    5.61 -                        " Error allocating IVRS DevMappings table\n");
    5.62 +                    " Error allocating IVRS DevMappings table\n");
    5.63              goto error_out;
    5.64          }
    5.65          memset(ivrs_mappings, 0,
    5.66 -            ivrs_bdf_entries * sizeof(struct ivrs_mappings));
    5.67 +               ivrs_bdf_entries * sizeof(struct ivrs_mappings));
    5.68      }
    5.69  
    5.70      if ( amd_iommu_init() != 0 )
    5.71 @@ -424,6 +425,7 @@ static int allocate_domain_resources(str
    5.72      spin_unlock_irqrestore(&hd->mapping_lock, flags);
    5.73  
    5.74      return 0;
    5.75 +
    5.76   error_out:
    5.77      spin_unlock_irqrestore(&hd->mapping_lock, flags);
    5.78      return -ENOMEM;
    5.79 @@ -433,7 +435,7 @@ static int get_paging_mode(unsigned long
    5.80  {
    5.81      int level = 1;
    5.82  
    5.83 -    BUG_ON ( !max_page );
    5.84 +    BUG_ON(!max_page);
    5.85  
    5.86      if ( entries > max_page )
    5.87          entries = max_page;
    5.88 @@ -441,8 +443,7 @@ static int get_paging_mode(unsigned long
    5.89      while ( entries > PTE_PER_TABLE_SIZE )
    5.90      {
    5.91          entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT;
    5.92 -        ++level;
    5.93 -        if ( level > 6 )
    5.94 +        if ( ++level > 6 )
    5.95              return -ENOMEM;
    5.96      }
    5.97  
    5.98 @@ -509,7 +510,7 @@ static int reassign_device( struct domai
    5.99      int bdf;
   5.100      unsigned long flags;
   5.101  
   5.102 -    for_each_pdev( source, pdev )
   5.103 +    for_each_pdev ( source, pdev )
   5.104      {
   5.105          if ( (pdev->bus != bus) || (pdev->devfn != devfn) )
   5.106              continue;
   5.107 @@ -522,23 +523,7 @@ static int reassign_device( struct domai
   5.108          iommu = (bdf < ivrs_bdf_entries) ?
   5.109              find_iommu_for_device(bus, pdev->devfn) : NULL;
   5.110  
   5.111 -        if ( iommu )
   5.112 -        {
   5.113 -            amd_iommu_disable_domain_device(source, iommu, bdf);
   5.114 -            /* Move pci device from the source domain to target domain. */
   5.115 -            spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
   5.116 -            spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
   5.117 -            list_move(&pdev->list, &target_hd->pdev_list);
   5.118 -            spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
   5.119 -            spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
   5.120 -
   5.121 -            amd_iommu_setup_domain_device(target, iommu, bdf);
   5.122 -            gdprintk(XENLOG_INFO ,
   5.123 -                     "AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
   5.124 -                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
   5.125 -                     source->domain_id, target->domain_id);
   5.126 -        }
   5.127 -        else
   5.128 +        if ( !iommu )
   5.129          {
    5.130              gdprintk(XENLOG_ERR, "AMD IOMMU: failed to find iommu."
   5.131                       " %x:%x.%x cannot be assigned to domain %d\n", 
   5.132 @@ -546,6 +531,20 @@ static int reassign_device( struct domai
   5.133              return -ENODEV;
   5.134          }
   5.135  
   5.136 +        amd_iommu_disable_domain_device(source, iommu, bdf);
   5.137 +        /* Move pci device from the source domain to target domain. */
   5.138 +        spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
   5.139 +        spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
   5.140 +        list_move(&pdev->list, &target_hd->pdev_list);
   5.141 +        spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
   5.142 +        spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
   5.143 +
   5.144 +        amd_iommu_setup_domain_device(target, iommu, bdf);
    5.145 +        gdprintk(XENLOG_INFO,
   5.146 +                 "AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
   5.147 +                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
   5.148 +                 source->domain_id, target->domain_id);
   5.149 +
   5.150          break;
   5.151      }
   5.152      return 0;
   5.153 @@ -557,9 +556,10 @@ int amd_iommu_assign_device(struct domai
   5.154      int req_id;
   5.155      req_id = ivrs_mappings[bdf].dte_requestor_id;
   5.156  
   5.157 -    if (ivrs_mappings[req_id].unity_map_enable)
   5.158 +    if ( ivrs_mappings[req_id].unity_map_enable )
   5.159      {
   5.160 -        amd_iommu_reserve_domain_unity_map(d,
   5.161 +        amd_iommu_reserve_domain_unity_map(
   5.162 +            d,
   5.163              ivrs_mappings[req_id].addr_range_start,
   5.164              ivrs_mappings[req_id].addr_range_length,
   5.165              ivrs_mappings[req_id].write_permission,
   5.166 @@ -606,7 +606,7 @@ static void deallocate_next_page_table(v
   5.167              {
   5.168                  deallocate_next_page_table(next_table,
   5.169                                             next_index, next_level);
   5.170 -                ++next_index;
   5.171 +                next_index++;
   5.172              } while (next_index < PTE_PER_TABLE_SIZE);
   5.173          }
   5.174  
   5.175 @@ -622,11 +622,12 @@ static void deallocate_iommu_page_tables
    5.176      if ( hd->root_table )
   5.177      {
   5.178          index = 0;
   5.179 +
   5.180          do
   5.181          {
   5.182              deallocate_next_page_table(hd->root_table,
   5.183                                         index, hd->paging_mode);
   5.184 -            ++index;
   5.185 +            index++;
   5.186          } while ( index < PTE_PER_TABLE_SIZE );
   5.187  
    5.188          free_xenheap_page(hd->root_table);
   5.189 @@ -644,7 +645,8 @@ void amd_iommu_domain_destroy(struct dom
   5.190      release_domain_devices(d);
   5.191  }
   5.192  
   5.193 -void amd_iommu_return_device(struct domain *s, struct domain *t, u8 bus, u8 devfn)
   5.194 +void amd_iommu_return_device(
   5.195 +    struct domain *s, struct domain *t, u8 bus, u8 devfn)
   5.196  {
   5.197      pdev_flr(bus, devfn);
   5.198      reassign_device(s, t, bus, devfn);
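
One behavioural nicety hidden in the style churn above: get_paging_mode() folds its level increment into the bounds check (`if ( ++level > 6 )`). The function picks how many IO page-table levels are needed to cover a given number of guest frames. A self-contained sketch, assuming the usual AMD IOMMU table geometry of 512 eight-byte PTEs per 4KB table (PTE_PER_TABLE_SHIFT of 9; check the AMD IOMMU headers for the authoritative values):

    #include <stdio.h>

    /* Assumed table geometry: 512 PTEs per 4KB table (shift of 9). */
    #define PTE_PER_TABLE_SHIFT     9
    #define PTE_PER_TABLE_SIZE      (1UL << PTE_PER_TABLE_SHIFT)
    #define PTE_PER_TABLE_ALIGN(e) \
        (((e) + PTE_PER_TABLE_SIZE - 1) & ~(PTE_PER_TABLE_SIZE - 1))

    /* Mirror of the loop in the hunk: each extra level multiplies the
     * coverage by 512; more than six levels is an error. */
    static int get_paging_mode(unsigned long entries)
    {
        int level = 1;

        while ( entries > PTE_PER_TABLE_SIZE )
        {
            entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT;
            if ( ++level > 6 )
                return -1;                    /* -ENOMEM in the real code */
        }
        return level;
    }

    int main(void)
    {
        /* A 4GB guest has 4GB >> 12 = 1048576 frames: expect 3 levels. */
        printf("paging mode: %d\n", get_paging_mode(1048576UL));
        return 0;
    }
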
     6.1 --- a/xen/drivers/passthrough/iommu.c	Wed Mar 19 14:13:17 2008 +0000
     6.2 +++ b/xen/drivers/passthrough/iommu.c	Wed Mar 19 16:16:24 2008 +0000
     6.3 @@ -50,7 +50,7 @@ int assign_device(struct domain *d, u8 b
     6.4  {
     6.5      struct hvm_iommu *hd = domain_hvm_iommu(d);
     6.6  
     6.7 -    if ( !iommu_enabled || !hd->platform_ops)
     6.8 +    if ( !iommu_enabled || !hd->platform_ops )
     6.9          return 0;
    6.10  
    6.11      return hd->platform_ops->assign_device(d, bus, devfn);
    6.12 @@ -65,7 +65,7 @@ void iommu_domain_destroy(struct domain 
    6.13      struct g2m_ioport *ioport;
    6.14      struct dev_intx_gsi_link *digl;
    6.15  
    6.16 -    if ( !iommu_enabled || !hd->platform_ops)
    6.17 +    if ( !iommu_enabled || !hd->platform_ops )
    6.18          return;
    6.19  
    6.20      if ( hvm_irq_dpci != NULL )
    6.21 @@ -109,7 +109,7 @@ int iommu_map_page(struct domain *d, uns
    6.22  {
    6.23      struct hvm_iommu *hd = domain_hvm_iommu(d);
    6.24  
    6.25 -    if ( !iommu_enabled || !hd->platform_ops)
    6.26 +    if ( !iommu_enabled || !hd->platform_ops )
    6.27          return 0;
    6.28  
    6.29      return hd->platform_ops->map_page(d, gfn, mfn);
    6.30 @@ -119,7 +119,7 @@ int iommu_unmap_page(struct domain *d, u
    6.31  {
    6.32      struct hvm_iommu *hd = domain_hvm_iommu(d);
    6.33  
    6.34 -    if ( !iommu_enabled || !hd->platform_ops)
    6.35 +    if ( !iommu_enabled || !hd->platform_ops )
    6.36          return 0;
    6.37  
    6.38      return hd->platform_ops->unmap_page(d, gfn);
    6.39 @@ -129,7 +129,7 @@ void deassign_device(struct domain *d, u
    6.40  {
    6.41      struct hvm_iommu *hd = domain_hvm_iommu(d);
    6.42  
    6.43 -    if ( !iommu_enabled || !hd->platform_ops)
    6.44 +    if ( !iommu_enabled || !hd->platform_ops )
    6.45          return;
    6.46  
    6.47      return hd->platform_ops->reassign_device(d, dom0, bus, devfn);
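
Finally, the iommu.c hunks are all the same one-character fix to a guard that every generic wrapper shares: do nothing unless the IOMMU is enabled and a vendor ops table is attached, otherwise dispatch through hd->platform_ops. A minimal sketch of that guard-then-dispatch shape, with hypothetical names standing in for Xen's struct iommu_ops plumbing:

    #include <stdio.h>

    /* Hypothetical miniature of Xen's vendor dispatch table. */
    struct iommu_ops {
        int (*map_page)(unsigned long gfn, unsigned long mfn);
    };

    static int amd_map_page(unsigned long gfn, unsigned long mfn)
    {
        printf("map gfn 0x%lx -> mfn 0x%lx\n", gfn, mfn);
        return 0;
    }

    static struct iommu_ops amd_ops = { .map_page = amd_map_page };

    static int iommu_enabled = 1;
    static struct iommu_ops *platform_ops = &amd_ops;

    /* Same guard as the wrappers in the hunks above. */
    int iommu_map_page(unsigned long gfn, unsigned long mfn)
    {
        if ( !iommu_enabled || !platform_ops )
            return 0;
        return platform_ops->map_page(gfn, mfn);
    }

    int main(void)
    {
        return iommu_map_page(0x1000UL, 0x2000UL);
    }
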