ia64/xen-unstable

changeset 17153:0e22182446fa

Add ACPI tables support for AMD IOMMU

Configuration information for the AMD IOMMU control fields is described
by the I/O Virtualization Reporting Structure (IVRS) table. This patch
parses the IVRS table and updates the IOMMU control flags accordingly.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 13:21:49 2008 +0000 (2008-02-28)
parents 36529ef3ef23
children 15c3c136206d
files xen/drivers/acpi/tables.c xen/drivers/passthrough/amd/Makefile xen/drivers/passthrough/amd/iommu_acpi.c xen/drivers/passthrough/amd/iommu_detect.c xen/drivers/passthrough/amd/iommu_init.c xen/drivers/passthrough/amd/iommu_map.c xen/drivers/passthrough/amd/pci_amd_iommu.c xen/include/asm-x86/amd-iommu.h xen/include/asm-x86/hvm/svm/amd-iommu-acpi.h xen/include/asm-x86/hvm/svm/amd-iommu-defs.h xen/include/asm-x86/hvm/svm/amd-iommu-proto.h xen/include/xen/acpi.h
line diff
     1.1 --- a/xen/drivers/acpi/tables.c	Thu Feb 28 13:19:38 2008 +0000
     1.2 +++ b/xen/drivers/acpi/tables.c	Thu Feb 28 13:21:49 2008 +0000
     1.3 @@ -60,6 +60,7 @@ static char *acpi_table_signatures[ACPI_
     1.4  	[ACPI_HPET] = "HPET",
     1.5  	[ACPI_MCFG] = "MCFG",
     1.6  	[ACPI_DMAR] = "DMAR",
     1.7 +	[ACPI_IVRS] = "IVRS",
     1.8  };
     1.9  
    1.10  static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
     2.1 --- a/xen/drivers/passthrough/amd/Makefile	Thu Feb 28 13:19:38 2008 +0000
     2.2 +++ b/xen/drivers/passthrough/amd/Makefile	Thu Feb 28 13:21:49 2008 +0000
     2.3 @@ -2,3 +2,4 @@ obj-y += iommu_detect.o
     2.4  obj-y += iommu_init.o
     2.5  obj-y += iommu_map.o
     2.6  obj-y += pci_amd_iommu.o
     2.7 +obj-y += iommu_acpi.o
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/drivers/passthrough/amd/iommu_acpi.c	Thu Feb 28 13:21:49 2008 +0000
     3.3 @@ -0,0 +1,874 @@
     3.4 +/*
     3.5 + * Copyright (C) 2007 Advanced Micro Devices, Inc.
     3.6 + * Author: Leo Duran <leo.duran@amd.com>
     3.7 + * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
     3.8 + *
     3.9 + * This program is free software; you can redistribute it and/or modify
    3.10 + * it under the terms of the GNU General Public License as published by
    3.11 + * the Free Software Foundation; either version 2 of the License, or
    3.12 + * (at your option) any later version.
    3.13 + *
    3.14 + * This program is distributed in the hope that it will be useful,
    3.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    3.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    3.17 + * GNU General Public License for more details.
    3.18 + *
    3.19 + * You should have received a copy of the GNU General Public License
    3.20 + * along with this program; if not, write to the Free Software
    3.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
    3.22 + */
    3.23 +
    3.24 +#include <xen/config.h>
    3.25 +#include <xen/errno.h>
    3.26 +#include <asm/amd-iommu.h>
    3.27 +#include <asm/hvm/svm/amd-iommu-proto.h>
    3.28 +#include <asm/hvm/svm/amd-iommu-acpi.h>
    3.29 +
    3.30 +extern unsigned long amd_iommu_page_entries;
    3.31 +extern unsigned short ivrs_bdf_entries;
    3.32 +extern struct ivrs_mappings *ivrs_mappings;
    3.33 +
    3.34 +static struct amd_iommu * __init find_iommu_from_bdf_cap(
    3.35 +           u16 bdf, u8 cap_offset)
    3.36 +{
    3.37 +    struct amd_iommu *iommu;
    3.38 +
    3.39 +    for_each_amd_iommu( iommu )
    3.40 +        if ( iommu->bdf == bdf && iommu->cap_offset == cap_offset )
    3.41 +            return iommu;
    3.42 +
    3.43 +    return NULL;
    3.44 +}
    3.45 +
    3.46 +static void __init reserve_iommu_exclusion_range(struct amd_iommu *iommu,
    3.47 +           unsigned long base, unsigned long limit)
    3.48 +{
    3.49 +    /* need to extend exclusion range? */
    3.50 +    if ( iommu->exclusion_enable )
    3.51 +    {
    3.52 +        if ( iommu->exclusion_base < base )
    3.53 +            base = iommu->exclusion_base;
    3.54 +        if ( iommu->exclusion_limit > limit )
    3.55 +            limit = iommu->exclusion_limit;
    3.56 +    }
    3.57 +
    3.58 +    iommu->exclusion_enable = IOMMU_CONTROL_ENABLED;
    3.59 +    iommu->exclusion_base = base;
    3.60 +    iommu->exclusion_limit = limit;
    3.61 +}
    3.62 +
    3.63 +static void __init reserve_iommu_exclusion_range_all(struct amd_iommu *iommu,
    3.64 +           unsigned long base, unsigned long limit)
    3.65 +{
    3.66 +    reserve_iommu_exclusion_range(iommu, base, limit);
    3.67 +    iommu->exclusion_allow_all = IOMMU_CONTROL_ENABLED;
    3.68 +}
    3.69 +
    3.70 +static void __init reserve_unity_map_for_device(u16 bdf, unsigned long base,
    3.71 +           unsigned long length, u8 iw, u8 ir)
    3.72 +{
    3.73 +    unsigned long old_top, new_top;
    3.74 +
    3.75 +    /* need to extend unity-mapped range? */
    3.76 +    if ( ivrs_mappings[bdf].unity_map_enable )
    3.77 +    {
    3.78 +        old_top = ivrs_mappings[bdf].addr_range_start +
    3.79 +            ivrs_mappings[bdf].addr_range_length;
    3.80 +        new_top = base + length;
    3.81 +        if ( old_top > new_top )
    3.82 +            new_top = old_top;
    3.83 +        if ( ivrs_mappings[bdf].addr_range_start < base )
    3.84 +            base = ivrs_mappings[bdf].addr_range_start;
    3.85 +        length = new_top - base;
    3.86 +    }
    3.87 +
    3.88 +    /* extend r/w permissions and keep aggregate */
    3.89 +    if ( iw )
    3.90 +        ivrs_mappings[bdf].write_permission = IOMMU_CONTROL_ENABLED;
    3.91 +    if ( ir )
    3.92 +        ivrs_mappings[bdf].read_permission = IOMMU_CONTROL_ENABLED;
    3.93 +    ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_ENABLED;
    3.94 +    ivrs_mappings[bdf].addr_range_start = base;
    3.95 +    ivrs_mappings[bdf].addr_range_length = length;
    3.96 +}
    3.97 +
    3.98 +static int __init register_exclusion_range_for_all_devices(
    3.99 +           unsigned long base, unsigned long limit, u8 iw, u8 ir)
   3.100 +{
   3.101 +    unsigned long range_top, iommu_top, length;
   3.102 +    struct amd_iommu *iommu;
   3.103 +    u16 bdf;
   3.104 +
   3.105 +    /* is part of exclusion range inside of IOMMU virtual address space? */
   3.106 +    /* note: 'limit' parameter is assumed to be page-aligned */
   3.107 +    range_top = limit + PAGE_SIZE;
   3.108 +    iommu_top = max_page * PAGE_SIZE;
   3.109 +    if ( base < iommu_top )
   3.110 +    {
   3.111 +        if (range_top > iommu_top)
   3.112 +            range_top = iommu_top;
   3.113 +        length = range_top - base;
   3.114 +        /* reserve r/w unity-mapped page entries for devices */
   3.115 +        /* note: these entries are part of the exclusion range */
   3.116 +        for (bdf = 0; bdf < ivrs_bdf_entries; ++bdf)
   3.117 +            reserve_unity_map_for_device(bdf, base, length, iw, ir);
   3.118 +        /* push 'base' just outside of virtual address space */
   3.119 +        base = iommu_top;
   3.120 +    }
   3.121 +    /* register IOMMU exclusion range settings */
   3.122 +    if (limit >= iommu_top)
   3.123 +    {
   3.124 +        for_each_amd_iommu( iommu )
   3.125 +            reserve_iommu_exclusion_range_all(iommu, base, limit);
   3.126 +    }
   3.127 +
   3.128 +    return 0;
   3.129 +}
   3.130 +
   3.131 +static int __init register_exclusion_range_for_device(u16 bdf,
   3.132 +           unsigned long base, unsigned long limit, u8 iw, u8 ir)
   3.133 +{
   3.134 +    unsigned long range_top, iommu_top, length;
   3.135 +    struct amd_iommu *iommu;
   3.136 +    u16 bus, devfn, req;
   3.137 +
   3.138 +    bus = bdf >> 8;
   3.139 +    devfn = bdf & 0xFF;
   3.140 +    iommu = find_iommu_for_device(bus, devfn);
   3.141 +    if ( !iommu )
   3.142 +    {
   3.143 +        dprintk(XENLOG_ERR, "IVMD Error: No IOMMU for Dev_Id 0x%x!\n", bdf);
   3.144 +        return -ENODEV;
   3.145 +    }
   3.146 +    req = ivrs_mappings[bdf].dte_requestor_id;
   3.147 +
   3.148 +    /* note: 'limit' parameter is assumed to be page-aligned */
   3.149 +    range_top = limit + PAGE_SIZE;
   3.150 +    iommu_top = max_page * PAGE_SIZE;
   3.151 +    if ( base < iommu_top )
   3.152 +    {
   3.153 +        if (range_top > iommu_top)
   3.154 +            range_top = iommu_top;
   3.155 +        length = range_top - base;
   3.156 +        /* reserve unity-mapped page entries for device */
   3.157 +        /* note: these entries are part of the exclusion range */
   3.158 +        reserve_unity_map_for_device(bdf, base, length, iw, ir);
   3.159 +        reserve_unity_map_for_device(req, base, length, iw, ir);
   3.160 +
   3.161 +        /* push 'base' just outside of virtual address space */
   3.162 +        base = iommu_top;
   3.163 +    }
   3.164 +
   3.165 +    /* register IOMMU exclusion range settings for device */
   3.166 +    if ( limit >= iommu_top )
   3.167 +    {
   3.168 +        reserve_iommu_exclusion_range(iommu, base, limit);
   3.169 +        ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
   3.170 +        ivrs_mappings[req].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
   3.171 +    }
   3.172 +
   3.173 +    return 0;
   3.174 +}
   3.175 +
   3.176 +static int __init register_exclusion_range_for_iommu_devices(
   3.177 +           struct amd_iommu *iommu,
   3.178 +           unsigned long base, unsigned long limit, u8 iw, u8 ir)
   3.179 +{
   3.180 +    unsigned long range_top, iommu_top, length;
   3.181 +    u16 bus, devfn, bdf, req;
   3.182 +
   3.183 +    /* is part of exclusion range inside of IOMMU virtual address space? */
   3.184 +    /* note: 'limit' parameter is assumed to be page-aligned */
   3.185 +    range_top = limit + PAGE_SIZE;
   3.186 +    iommu_top = max_page * PAGE_SIZE;
   3.187 +    if ( base < iommu_top )
   3.188 +    {
   3.189 +        if (range_top > iommu_top)
   3.190 +            range_top = iommu_top;
   3.191 +        length = range_top - base;
   3.192 +        /* reserve r/w unity-mapped page entries for devices */
   3.193 +        /* note: these entries are part of the exclusion range */
   3.194 +        for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
   3.195 +        {
   3.196 +            bus = bdf >> 8;
   3.197 +            devfn = bdf & 0xFF;
   3.198 +            if ( iommu == find_iommu_for_device(bus, devfn) )
   3.199 +            {
   3.200 +                reserve_unity_map_for_device(bdf, base, length, iw, ir);
   3.201 +                req = ivrs_mappings[bdf].dte_requestor_id;
   3.202 +                reserve_unity_map_for_device(req, base, length, iw, ir);
   3.203 +            }
   3.204 +        }
   3.205 +
   3.206 +        /* push 'base' just outside of virtual address space */
   3.207 +        base = iommu_top;
   3.208 +    }
   3.209 +
   3.210 +    /* register IOMMU exclusion range settings */
   3.211 +    if (limit >= iommu_top)
   3.212 +        reserve_iommu_exclusion_range_all(iommu, base, limit);
   3.213 +    return 0;
   3.214 +}
   3.215 +
   3.216 +static int __init parse_ivmd_device_select(
   3.217 +           struct acpi_ivmd_block_header *ivmd_block,
   3.218 +           unsigned long base, unsigned long limit, u8 iw, u8 ir)
   3.219 +{
   3.220 +    u16 bdf;
   3.221 +
   3.222 +    bdf = ivmd_block->header.dev_id;
   3.223 +    if (bdf >= ivrs_bdf_entries)
   3.224 +    {
   3.225 +        dprintk(XENLOG_ERR, "IVMD Error: Invalid Dev_Id 0x%x\n", bdf);
   3.226 +        return -ENODEV;
   3.227 +    }
   3.228 +
   3.229 +    return register_exclusion_range_for_device(bdf, base, limit, iw, ir);
   3.230 +}
   3.231 +
   3.232 +static int __init parse_ivmd_device_range(
   3.233 +           struct acpi_ivmd_block_header *ivmd_block,
   3.234 +           unsigned long base, unsigned long limit, u8 iw, u8 ir)
   3.235 +{
   3.236 +    u16 first_bdf, last_bdf, bdf;
   3.237 +    int error;
   3.238 +
   3.239 +    first_bdf = ivmd_block->header.dev_id;
   3.240 +    if (first_bdf >= ivrs_bdf_entries)
   3.241 +    {
   3.242 +       dprintk(XENLOG_ERR, "IVMD Error: "
   3.243 +                    "Invalid Range_First Dev_Id 0x%x\n", first_bdf);
   3.244 +       return -ENODEV;
   3.245 +    }
   3.246 +
   3.247 +    last_bdf = ivmd_block->last_dev_id;
   3.248 +    if (last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf)
   3.249 +    {
   3.250 +        dprintk(XENLOG_ERR, "IVMD Error: "
   3.251 +                    "Invalid Range_Last Dev_Id 0x%x\n", last_bdf);
   3.252 +        return -ENODEV;
   3.253 +    }
   3.254 +
   3.255 +    dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
   3.256 +            first_bdf, last_bdf);
   3.257 +
   3.258 +    for ( bdf = first_bdf, error = 0;
   3.259 +       bdf <= last_bdf && !error; ++bdf )
   3.260 +    {
   3.261 +       error = register_exclusion_range_for_device(
   3.262 +                     bdf, base, limit, iw, ir);
   3.263 +    }
   3.264 +
   3.265 +    return error;
   3.266 +}
   3.267 +
   3.268 +static int __init parse_ivmd_device_iommu(
   3.269 +           struct acpi_ivmd_block_header *ivmd_block,
   3.270 +           unsigned long base, unsigned long limit, u8 iw, u8 ir)
   3.271 +{
   3.272 +    struct amd_iommu *iommu;
   3.273 +
   3.274 +    /* find target IOMMU */
   3.275 +    iommu = find_iommu_from_bdf_cap(ivmd_block->header.dev_id,
   3.276 +                                    ivmd_block->cap_offset);
   3.277 +    if ( !iommu )
   3.278 +    {
   3.279 +       dprintk(XENLOG_ERR,
   3.280 +           "IVMD Error: No IOMMU for Dev_Id 0x%x  Cap 0x%x\n",
   3.281 +            ivmd_block->header.dev_id, ivmd_block->cap_offset);
   3.282 +       return -ENODEV;
   3.283 +    }
   3.284 +
   3.285 +    return register_exclusion_range_for_iommu_devices(
   3.286 +                 iommu, base, limit, iw, ir);
   3.287 +}
   3.288 +
   3.289 +static int __init parse_ivmd_block(struct acpi_ivmd_block_header *ivmd_block)
   3.290 +{
   3.291 +    unsigned long start_addr, mem_length, base, limit;
   3.292 +    u8 iw, ir;
   3.293 +
   3.294 +    if (ivmd_block->header.length <
   3.295 +       sizeof(struct acpi_ivmd_block_header))
   3.296 +    {
   3.297 +       dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Length!\n");
   3.298 +       return -ENODEV;
   3.299 +    }
   3.300 +
   3.301 +    start_addr = (unsigned long)ivmd_block->start_addr;
   3.302 +    mem_length = (unsigned long)ivmd_block->mem_length;
   3.303 +    base = start_addr & PAGE_MASK;
   3.304 +    limit = (start_addr + mem_length - 1) & PAGE_MASK;
   3.305 +
   3.306 +    dprintk(XENLOG_INFO, "IVMD Block: Type 0x%x\n",
   3.307 +                  ivmd_block->header.type);
   3.308 +    dprintk(XENLOG_INFO, " Start_Addr_Phys 0x%lx\n", start_addr);
   3.309 +    dprintk(XENLOG_INFO, " Mem_Length 0x%lx\n", mem_length);
   3.310 +
   3.311 +    if ( get_field_from_byte(ivmd_block->header.flags,
   3.312 +                             AMD_IOMMU_ACPI_EXCLUSION_RANGE_MASK,
   3.313 +                             AMD_IOMMU_ACPI_EXCLUSION_RANGE_SHIFT) )
   3.314 +        iw = ir = IOMMU_CONTROL_ENABLED;
   3.315 +    else if ( get_field_from_byte(ivmd_block->header.flags,
   3.316 +                                  AMD_IOMMU_ACPI_UNITY_MAPPING_MASK,
   3.317 +                                  AMD_IOMMU_ACPI_UNITY_MAPPING_SHIFT) )
   3.318 +    {
   3.319 +        iw = get_field_from_byte(ivmd_block->header.flags,
   3.320 +                                 AMD_IOMMU_ACPI_IW_PERMISSION_MASK,
   3.321 +                                 AMD_IOMMU_ACPI_IW_PERMISSION_SHIFT);
   3.322 +        ir = get_field_from_byte(ivmd_block->header.flags,
   3.323 +                                 AMD_IOMMU_ACPI_IR_PERMISSION_MASK,
   3.324 +                                 AMD_IOMMU_ACPI_IR_PERMISSION_SHIFT);
   3.325 +    }
   3.326 +    else
   3.327 +    {
   3.328 +       dprintk(XENLOG_ERR, "IVMD Error: Invalid Flag Field!\n");
   3.329 +       return -ENODEV;
   3.330 +    }
   3.331 +
   3.332 +    switch( ivmd_block->header.type )
   3.333 +    {
   3.334 +    case AMD_IOMMU_ACPI_IVMD_ALL_TYPE:
   3.335 +        return register_exclusion_range_for_all_devices(
   3.336 +           base, limit, iw, ir);
   3.337 +
   3.338 +    case AMD_IOMMU_ACPI_IVMD_ONE_TYPE:
   3.339 +        return parse_ivmd_device_select(ivmd_block,
   3.340 +           base, limit, iw, ir);
   3.341 +
   3.342 +    case AMD_IOMMU_ACPI_IVMD_RANGE_TYPE:
   3.343 +        return parse_ivmd_device_range(ivmd_block,
   3.344 +            base, limit, iw, ir);
   3.345 +
   3.346 +    case AMD_IOMMU_ACPI_IVMD_IOMMU_TYPE:
   3.347 +        return parse_ivmd_device_iommu(ivmd_block,
   3.348 +           base, limit, iw, ir);
   3.349 +
   3.350 +    default:
   3.351 +        dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Type!\n");
   3.352 +        return -ENODEV;
   3.353 +    }
   3.354 +}
   3.355 +
   3.356 +static u16 __init parse_ivhd_device_padding(u16 pad_length,
   3.357 +           u16 header_length, u16 block_length)
   3.358 +{
   3.359 +    if ( header_length < (block_length + pad_length) )
   3.360 +    {
   3.361 +        dprintk(XENLOG_ERR, "IVHD Error: Invalid Device_Entry Length!\n");
   3.362 +        return 0;
   3.363 +    }
   3.364 +
   3.365 +    return pad_length;
   3.366 +}
   3.367 +
   3.368 +static u16 __init parse_ivhd_device_select(
   3.369 +           union acpi_ivhd_device *ivhd_device)
   3.370 +{
   3.371 +    u16 bdf;
   3.372 +
   3.373 +    bdf = ivhd_device->header.dev_id;
   3.374 +    if ( bdf >= ivrs_bdf_entries )
   3.375 +    {
   3.376 +        dprintk(XENLOG_ERR, "IVHD Error: "
   3.377 +                "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
   3.378 +        return 0;
   3.379 +    }
   3.380 +
   3.381 +    /* override flags for device */
   3.382 +    ivrs_mappings[bdf].dte_sys_mgt_enable =
   3.383 +        get_field_from_byte(ivhd_device->header.flags,
   3.384 +                            AMD_IOMMU_ACPI_SYS_MGT_MASK,
   3.385 +                            AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
   3.386 +
   3.387 +    return sizeof(struct acpi_ivhd_device_header);
   3.388 +}
   3.389 +
   3.390 +static u16 __init parse_ivhd_device_range(
   3.391 +           union acpi_ivhd_device *ivhd_device,
   3.392 +           u16 header_length, u16 block_length)
   3.393 +{
   3.394 +    u16 dev_length, first_bdf, last_bdf, bdf;
   3.395 +    u8 sys_mgt;
   3.396 +
   3.397 +    dev_length = sizeof(struct acpi_ivhd_device_range);
   3.398 +    if ( header_length < (block_length + dev_length) )
   3.399 +    {
   3.400 +        dprintk(XENLOG_ERR, "IVHD Error: Invalid Device_Entry Length!\n");
   3.401 +        return 0;
   3.402 +    }
   3.403 +
   3.404 +    if ( ivhd_device->range.trailer.type !=
   3.405 +        AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END) {
   3.406 +        dprintk(XENLOG_ERR, "IVHD Error: "
   3.407 +                "Invalid Range: End_Type 0x%x\n",
   3.408 +                ivhd_device->range.trailer.type);
   3.409 +        return 0;
   3.410 +    }
   3.411 +
   3.412 +    first_bdf = ivhd_device->header.dev_id;
   3.413 +    if ( first_bdf >= ivrs_bdf_entries )
   3.414 +    {
   3.415 +       dprintk(XENLOG_ERR, "IVHD Error: "
   3.416 +           "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
   3.417 +       return 0;
   3.418 +    }
   3.419 +
   3.420 +    last_bdf = ivhd_device->range.trailer.dev_id;
   3.421 +    if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
   3.422 +    {
   3.423 +       dprintk(XENLOG_ERR, "IVHD Error: "
   3.424 +           "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
   3.425 +       return 0;
   3.426 +    }
   3.427 +
   3.428 +    dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
   3.429 +        first_bdf, last_bdf);
   3.430 +
   3.431 +    /* override flags for range of devices */
   3.432 +    sys_mgt = get_field_from_byte(ivhd_device->header.flags,
   3.433 +                                 AMD_IOMMU_ACPI_SYS_MGT_MASK,
   3.434 +                                 AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
   3.435 +    for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
   3.436 +        ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
   3.437 +
   3.438 +    return dev_length;
   3.439 +}
   3.440 +
   3.441 +static u16 __init parse_ivhd_device_alias(
   3.442 +           union acpi_ivhd_device *ivhd_device,
   3.443 +           u16 header_length, u16 block_length)
   3.444 +{
   3.445 +    u16 dev_length, alias_id, bdf;
   3.446 +
   3.447 +    dev_length = sizeof(struct acpi_ivhd_device_alias);
   3.448 +    if ( header_length < (block_length + dev_length) )
   3.449 +    {
   3.450 +        dprintk(XENLOG_ERR, "IVHD Error: "
   3.451 +            "Invalid Device_Entry Length!\n");
   3.452 +        return 0;
   3.453 +    }
   3.454 +
   3.455 +    bdf = ivhd_device->header.dev_id;
   3.456 +    if ( bdf >= ivrs_bdf_entries )
   3.457 +    {
   3.458 +        dprintk(XENLOG_ERR, "IVHD Error: "
   3.459 +                "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
   3.460 +        return 0;
   3.461 +    }
   3.462 +
   3.463 +    alias_id = ivhd_device->alias.dev_id;
   3.464 +    if ( alias_id >= ivrs_bdf_entries )
   3.465 +    {
   3.466 +       dprintk(XENLOG_ERR, "IVHD Error: "
   3.467 +               "Invalid Alias Dev_Id 0x%x\n", alias_id);
   3.468 +       return 0;
   3.469 +    }
   3.470 +
   3.471 +    dprintk(XENLOG_INFO, " Dev_Id Alias: 0x%x\n", alias_id);
   3.472 +
   3.473 +    /* override requestor_id and flags for device */
   3.474 +    ivrs_mappings[bdf].dte_requestor_id = alias_id;
   3.475 +    ivrs_mappings[bdf].dte_sys_mgt_enable =
   3.476 +            get_field_from_byte(ivhd_device->header.flags,
   3.477 +                                AMD_IOMMU_ACPI_SYS_MGT_MASK,
   3.478 +                                AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
   3.479 +    ivrs_mappings[alias_id].dte_sys_mgt_enable =
   3.480 +            ivrs_mappings[bdf].dte_sys_mgt_enable;
   3.481 +
   3.482 +    return dev_length;
   3.483 +}
   3.484 +
   3.485 +static u16 __init parse_ivhd_device_alias_range(
   3.486 +           union acpi_ivhd_device *ivhd_device,
   3.487 +           u16 header_length, u16 block_length)
   3.488 +{
   3.489 +
   3.490 +    u16 dev_length, first_bdf, last_bdf, alias_id, bdf;
   3.491 +    u8 sys_mgt;
   3.492 +
   3.493 +    dev_length = sizeof(struct acpi_ivhd_device_alias_range);
   3.494 +    if ( header_length < (block_length + dev_length) )
   3.495 +    {
   3.496 +        dprintk(XENLOG_ERR, "IVHD Error: "
   3.497 +                "Invalid Device_Entry Length!\n");
   3.498 +        return 0;
   3.499 +    }
   3.500 +
   3.501 +    if ( ivhd_device->alias_range.trailer.type !=
   3.502 +       AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
   3.503 +    {
   3.504 +        dprintk(XENLOG_ERR, "IVHD Error: "
   3.505 +                "Invalid Range: End_Type 0x%x\n",
   3.506 +                ivhd_device->alias_range.trailer.type);
   3.507 +        return 0;
   3.508 +    }
   3.509 +
   3.510 +    first_bdf = ivhd_device->header.dev_id;
   3.511 +    if ( first_bdf >= ivrs_bdf_entries )
   3.512 +    {
   3.513 +        dprintk(XENLOG_ERR,"IVHD Error: "
   3.514 +                "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
   3.515 +        return 0;
   3.516 +    }
   3.517 +
   3.518 +    last_bdf = ivhd_device->alias_range.trailer.dev_id;
   3.519 +    if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
   3.520 +    {
   3.521 +        dprintk(XENLOG_ERR, "IVHD Error: "
   3.522 +                "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
   3.523 +        return 0;
   3.524 +    }
   3.525 +
   3.526 +    alias_id = ivhd_device->alias_range.alias.dev_id;
   3.527 +    if ( alias_id >= ivrs_bdf_entries )
   3.528 +    {
   3.529 +        dprintk(XENLOG_ERR, "IVHD Error: "
   3.530 +                "Invalid Alias Dev_Id 0x%x\n", alias_id);
   3.531 +        return 0;
   3.532 +    }
   3.533 +
   3.534 +    dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
   3.535 +            first_bdf, last_bdf);
   3.536 +    dprintk(XENLOG_INFO, " Dev_Id Alias: 0x%x\n", alias_id);
   3.537 +
   3.538 +    /* override requestor_id and flags for range of devices */
   3.539 +    sys_mgt = get_field_from_byte(ivhd_device->header.flags,
   3.540 +                                  AMD_IOMMU_ACPI_SYS_MGT_MASK,
   3.541 +                                  AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
   3.542 +    for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
   3.543 +    {
   3.544 +        ivrs_mappings[bdf].dte_requestor_id = alias_id;
   3.545 +        ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
   3.546 +    }
   3.547 +    ivrs_mappings[alias_id].dte_sys_mgt_enable = sys_mgt;
   3.548 +
   3.549 +    return dev_length;
   3.550 +}
   3.551 +
   3.552 +static u16 __init parse_ivhd_device_extended(
   3.553 +           union acpi_ivhd_device *ivhd_device,
   3.554 +           u16 header_length, u16 block_length)
   3.555 +{
   3.556 +    u16 dev_length, bdf;
   3.557 +
   3.558 +    dev_length = sizeof(struct acpi_ivhd_device_extended);
   3.559 +    if ( header_length < (block_length + dev_length) )
   3.560 +    {
   3.561 +        dprintk(XENLOG_ERR, "IVHD Error: "
   3.562 +                "Invalid Device_Entry Length!\n");
   3.563 +        return 0;
   3.564 +    }
   3.565 +
   3.566 +    bdf = ivhd_device->header.dev_id;
   3.567 +    if ( bdf >= ivrs_bdf_entries )
   3.568 +    {
   3.569 +        dprintk(XENLOG_ERR, "IVHD Error: "
   3.570 +                "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
   3.571 +        return 0;
   3.572 +    }
   3.573 +
   3.574 +    /* override flags for device */
   3.575 +    ivrs_mappings[bdf].dte_sys_mgt_enable =
   3.576 +        get_field_from_byte(ivhd_device->header.flags,
   3.577 +                            AMD_IOMMU_ACPI_SYS_MGT_MASK,
   3.578 +                            AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
   3.579 +
   3.580 +    return dev_length;
   3.581 +}
   3.582 +
   3.583 +static u16 __init parse_ivhd_device_extended_range(
   3.584 +           union acpi_ivhd_device *ivhd_device,
   3.585 +           u16 header_length, u16 block_length)
   3.586 +{
   3.587 +    u16 dev_length, first_bdf, last_bdf, bdf;
   3.588 +    u8 sys_mgt;
   3.589 +
   3.590 +    dev_length = sizeof(struct acpi_ivhd_device_extended_range);
   3.591 +    if ( header_length < (block_length + dev_length) )
   3.592 +    {
   3.593 +        dprintk(XENLOG_ERR, "IVHD Error: "
   3.594 +                "Invalid Device_Entry Length!\n");
   3.595 +        return 0;
   3.596 +    }
   3.597 +
   3.598 +    if ( ivhd_device->extended_range.trailer.type !=
   3.599 +        AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
   3.600 +    {
   3.601 +        dprintk(XENLOG_ERR, "IVHD Error: "
   3.602 +                "Invalid Range: End_Type 0x%x\n",
   3.603 +                ivhd_device->extended_range.trailer.type);
   3.604 +        return 0;
   3.605 +    }
   3.606 +
   3.607 +    first_bdf = ivhd_device->header.dev_id;
   3.608 +    if ( first_bdf >= ivrs_bdf_entries )
   3.609 +    {
   3.610 +       dprintk(XENLOG_ERR, "IVHD Error: "
   3.611 +           "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
   3.612 +       return 0;
   3.613 +    }
   3.614 +
   3.615 +    last_bdf = ivhd_device->extended_range.trailer.dev_id;
   3.616 +    if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
   3.617 +    {
   3.618 +        dprintk(XENLOG_ERR, "IVHD Error: "
   3.619 +                "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
   3.620 +        return 0;
   3.621 +    }
   3.622 +
   3.623 +    dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
   3.624 +            first_bdf, last_bdf);
   3.625 +
   3.626 +    /* override flags for range of devices */
   3.627 +    sys_mgt = get_field_from_byte(ivhd_device->header.flags,
   3.628 +                                  AMD_IOMMU_ACPI_SYS_MGT_MASK,
   3.629 +                                  AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
   3.630 +    for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
   3.631 +        ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
   3.632 +
   3.633 +    return dev_length;
   3.634 +}
   3.635 +
   3.636 +static int __init parse_ivhd_block(struct acpi_ivhd_block_header *ivhd_block)
   3.637 +{
   3.638 +    union acpi_ivhd_device *ivhd_device;
   3.639 +    u16 block_length, dev_length;
   3.640 +    struct amd_iommu *iommu;
   3.641 +
   3.642 +    if ( ivhd_block->header.length <
   3.643 +        sizeof(struct acpi_ivhd_block_header) )
   3.644 +    {
   3.645 +        dprintk(XENLOG_ERR, "IVHD Error: Invalid Block Length!\n");
   3.646 +        return -ENODEV;
   3.647 +    }
   3.648 +
   3.649 +    iommu = find_iommu_from_bdf_cap(ivhd_block->header.dev_id,
   3.650 +            ivhd_block->cap_offset);
   3.651 +    if ( !iommu )
   3.652 +    {
   3.653 +        dprintk(XENLOG_ERR,
   3.654 +                "IVHD Error: No IOMMU for Dev_Id 0x%x  Cap 0x%x\n",
   3.655 +                ivhd_block->header.dev_id, ivhd_block->cap_offset);
   3.656 +       return -ENODEV;
   3.657 +    }
   3.658 +
   3.659 +    dprintk(XENLOG_INFO, "IVHD Block:\n");
   3.660 +    dprintk(XENLOG_INFO, " Cap_Offset 0x%x\n",
   3.661 +            ivhd_block->cap_offset);
   3.662 +    dprintk(XENLOG_INFO, " MMIO_BAR_Phys 0x%lx\n",
   3.663 +            (unsigned long)ivhd_block->mmio_base);
   3.664 +    dprintk(XENLOG_INFO, " PCI_Segment 0x%x\n",
   3.665 +            ivhd_block->pci_segment);
   3.666 +    dprintk(XENLOG_INFO, " IOMMU_Info 0x%x\n",
   3.667 +            ivhd_block->iommu_info);
   3.668 +
   3.669 +    /* override IOMMU support flags */
   3.670 +    iommu->coherent = get_field_from_byte(ivhd_block->header.flags,
   3.671 +                                          AMD_IOMMU_ACPI_COHERENT_MASK,
   3.672 +                                          AMD_IOMMU_ACPI_COHERENT_SHIFT);
   3.673 +    iommu->iotlb_support = get_field_from_byte(ivhd_block->header.flags,
   3.674 +                                          AMD_IOMMU_ACPI_IOTLB_SUP_MASK,
   3.675 +                                          AMD_IOMMU_ACPI_IOTLB_SUP_SHIFT);
   3.676 +    iommu->isochronous = get_field_from_byte(ivhd_block->header.flags,
   3.677 +                                          AMD_IOMMU_ACPI_ISOC_MASK,
   3.678 +                                          AMD_IOMMU_ACPI_ISOC_SHIFT);
   3.679 +    iommu->res_pass_pw = get_field_from_byte(ivhd_block->header.flags,
   3.680 +                                          AMD_IOMMU_ACPI_RES_PASS_PW_MASK,
   3.681 +                                          AMD_IOMMU_ACPI_RES_PASS_PW_SHIFT);
   3.682 +    iommu->pass_pw = get_field_from_byte(ivhd_block->header.flags,
   3.683 +                                          AMD_IOMMU_ACPI_PASS_PW_MASK,
   3.684 +                                          AMD_IOMMU_ACPI_PASS_PW_SHIFT);
   3.685 +    iommu->ht_tunnel_enable = get_field_from_byte(
   3.686 +                                          ivhd_block->header.flags,
   3.687 +                                          AMD_IOMMU_ACPI_HT_TUN_ENB_MASK,
   3.688 +                                          AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT);
   3.689 +
   3.690 +    /* parse Device Entries */
   3.691 +    block_length = sizeof(struct acpi_ivhd_block_header);
   3.692 +    while( ivhd_block->header.length >=
   3.693 +       (block_length + sizeof(struct acpi_ivhd_device_header)) )
   3.694 +    {
   3.695 +        ivhd_device = (union acpi_ivhd_device *)
   3.696 +                ((u8 *)ivhd_block + block_length);
   3.697 +
   3.698 +        dprintk(XENLOG_INFO, "IVHD Device Entry:\n");
   3.699 +        dprintk(XENLOG_INFO, " Type 0x%x\n",
   3.700 +                ivhd_device->header.type);
   3.701 +        dprintk(XENLOG_INFO, " Dev_Id 0x%x\n",
   3.702 +                ivhd_device->header.dev_id);
   3.703 +        dprintk(XENLOG_INFO, " Flags 0x%x\n",
   3.704 +                ivhd_device->header.flags);
   3.705 +
   3.706 +        switch( ivhd_device->header.type )
   3.707 +        {
   3.708 +        case AMD_IOMMU_ACPI_IVHD_DEV_U32_PAD:
   3.709 +            dev_length = parse_ivhd_device_padding(
   3.710 +                sizeof(u32),
   3.711 +                ivhd_block->header.length, block_length);
   3.712 +            break;
   3.713 +        case AMD_IOMMU_ACPI_IVHD_DEV_U64_PAD:
   3.714 +            dev_length = parse_ivhd_device_padding(
   3.715 +                sizeof(u64),
   3.716 +                ivhd_block->header.length, block_length);
   3.717 +            break;
   3.718 +        case AMD_IOMMU_ACPI_IVHD_DEV_SELECT:
   3.719 +            dev_length = parse_ivhd_device_select(ivhd_device);
   3.720 +            break;
   3.721 +        case AMD_IOMMU_ACPI_IVHD_DEV_RANGE_START:
   3.722 +            dev_length = parse_ivhd_device_range(ivhd_device,
   3.723 +                ivhd_block->header.length, block_length);
   3.724 +            break;
   3.725 +        case AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_SELECT:
   3.726 +            dev_length = parse_ivhd_device_alias(
   3.727 +                ivhd_device,
   3.728 +                ivhd_block->header.length, block_length);
   3.729 +            break;
   3.730 +        case AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_RANGE:
   3.731 +            dev_length = parse_ivhd_device_alias_range(
   3.732 +                ivhd_device,
   3.733 +                ivhd_block->header.length, block_length);
   3.734 +            break;
   3.735 +        case AMD_IOMMU_ACPI_IVHD_DEV_EXT_SELECT:
   3.736 +            dev_length = parse_ivhd_device_extended(
   3.737 +                ivhd_device,
   3.738 +                ivhd_block->header.length, block_length);
   3.739 +            break;
   3.740 +        case AMD_IOMMU_ACPI_IVHD_DEV_EXT_RANGE:
   3.741 +            dev_length = parse_ivhd_device_extended_range(
   3.742 +                ivhd_device,
   3.743 +                ivhd_block->header.length, block_length);
   3.744 +            break;
   3.745 +        default:
   3.746 +            dprintk(XENLOG_ERR, "IVHD Error: "
   3.747 +                "Invalid Device Type!\n");
   3.748 +            dev_length = 0;
   3.749 +            break;
   3.750 +        }
   3.751 +
   3.752 +        block_length += dev_length;
   3.753 +        if ( !dev_length )
   3.754 +            return -ENODEV;
   3.755 +    }
   3.756 +
   3.757 +    return 0;
   3.758 +}
   3.759 +
   3.760 +static int __init parse_ivrs_block(struct acpi_ivrs_block_header *ivrs_block)
   3.761 +{
   3.762 +    struct acpi_ivhd_block_header *ivhd_block;
   3.763 +    struct acpi_ivmd_block_header *ivmd_block;
   3.764 +
   3.765 +    switch(ivrs_block->type)
   3.766 +    {
   3.767 +    case AMD_IOMMU_ACPI_IVHD_TYPE:
   3.768 +        ivhd_block = (struct acpi_ivhd_block_header *)ivrs_block;
   3.769 +        return parse_ivhd_block(ivhd_block);
   3.770 +
   3.771 +    case AMD_IOMMU_ACPI_IVMD_ALL_TYPE:
   3.772 +    case AMD_IOMMU_ACPI_IVMD_ONE_TYPE:
   3.773 +    case AMD_IOMMU_ACPI_IVMD_RANGE_TYPE:
   3.774 +    case AMD_IOMMU_ACPI_IVMD_IOMMU_TYPE:
   3.775 +        ivmd_block = (struct acpi_ivmd_block_header *)ivrs_block;
   3.776 +        return parse_ivmd_block(ivmd_block);
   3.777 +
   3.778 +    default:
   3.779 +        dprintk(XENLOG_ERR, "IVRS Error: Invalid Block Type!\n");
   3.780 +        return -ENODEV;
   3.781 +    }
   3.782 +
   3.783 +    return 0;
   3.784 +}
   3.785 +
   3.786 +void __init dump_acpi_table_header(struct acpi_table_header *table)
   3.787 +{
   3.788 +    int i;
   3.789 +
   3.790 +    printk(XENLOG_INFO "AMD IOMMU: ACPI Table:\n");
   3.791 +    printk(XENLOG_INFO " Signature ");
   3.792 +    for ( i = 0; i < ACPI_NAME_SIZE; ++i )
   3.793 +        printk("%c", table->signature[i]);
   3.794 +    printk("\n");
   3.795 +
   3.796 +    printk(" Length 0x%x\n", table->length);
   3.797 +    printk(" Revision 0x%x\n", table->revision);
   3.798 +    printk(" CheckSum 0x%x\n", table->checksum);
   3.799 +
   3.800 +    printk(" OEM_Id ");
   3.801 +    for ( i = 0; i < ACPI_OEM_ID_SIZE; ++i )
   3.802 +        printk("%c", table->oem_id[i]);
   3.803 +    printk("\n");
   3.804 +
   3.805 +    printk(" OEM_Table_Id ");
   3.806 +    for ( i = 0; i < ACPI_OEM_TABLE_ID_SIZE; ++i )
   3.807 +        printk("%c", table->oem_table_id[i]);
   3.808 +    printk("\n");
   3.809 +
   3.810 +    printk(" OEM_Revision 0x%x\n", table->oem_revision);
   3.811 +
   3.812 +    printk(" Creator_Id ");
   3.813 +    for ( i = 0; i < ACPI_NAME_SIZE; ++i )
   3.814 +        printk("%c", table->asl_compiler_id[i]);
   3.815 +    printk("\n");
   3.816 +
   3.817 +    printk(" Creator_Revision 0x%x\n",
   3.818 +       table->asl_compiler_revision);
   3.819 +}
   3.820 +
   3.821 +int __init parse_ivrs_table(unsigned long phys_addr,
   3.822 +                                  unsigned long size)
   3.823 +{
   3.824 +    struct acpi_ivrs_block_header *ivrs_block;
   3.825 +    unsigned long length, i;
   3.826 +    u8 checksum, *raw_table;
   3.827 +    int error = 0;
   3.828 +    struct acpi_table_header  *table =
   3.829 +        (struct acpi_table_header *) __acpi_map_table(phys_addr, size);
   3.830 +
   3.831 +    BUG_ON(!table);
   3.832 +
   3.833 +#if 0
   3.834 +    dump_acpi_table_header(table);
   3.835 +#endif
   3.836 +
   3.837 +    /* validate checksum: sum of entire table == 0 */
   3.838 +    checksum = 0;
   3.839 +    raw_table = (u8 *)table;
   3.840 +    for ( i = 0; i < table->length; ++i )
   3.841 +        checksum += raw_table[i];
   3.842 +    if ( checksum )
   3.843 +    {
   3.844 +        dprintk(XENLOG_ERR, "IVRS Error: "
   3.845 +                "Invalid Checksum 0x%x\n", checksum);
   3.846 +        return -ENODEV;
   3.847 +    }
   3.848 +
   3.849 +    /* parse IVRS blocks */
   3.850 +    length = sizeof(struct acpi_ivrs_table_header);
   3.851 +    while( error == 0 && table->length >
   3.852 +       (length + sizeof(struct acpi_ivrs_block_header)) )
   3.853 +    {
   3.854 +        ivrs_block = (struct acpi_ivrs_block_header *)
   3.855 +                ((u8 *)table + length);
   3.856 +
   3.857 +        dprintk(XENLOG_INFO, "IVRS Block:\n");
   3.858 +        dprintk(XENLOG_INFO, " Type 0x%x\n", ivrs_block->type);
   3.859 +        dprintk(XENLOG_INFO, " Flags 0x%x\n", ivrs_block->flags);
   3.860 +        dprintk(XENLOG_INFO, " Length 0x%x\n", ivrs_block->length);
   3.861 +        dprintk(XENLOG_INFO, " Dev_Id 0x%x\n", ivrs_block->dev_id);
   3.862 +
   3.863 +        if (table->length >= (length + ivrs_block->length))
   3.864 +           error = parse_ivrs_block(ivrs_block);
   3.865 +        else
   3.866 +        {
   3.867 +           dprintk(XENLOG_ERR, "IVRS Error: "
   3.868 +               "Table Length Exceeded: 0x%x -> 0x%lx\n",
   3.869 +               table->length,
   3.870 +               (length + ivrs_block->length));
   3.871 +           return -ENODEV;
   3.872 +        }
   3.873 +        length += ivrs_block->length;
   3.874 +    }
   3.875 +
   3.876 +    return error;
   3.877 +}
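
The parser above leans on two helpers defined elsewhere in the tree. Below is
a minimal, self-contained sketch of their assumed behaviour: get_field_from_byte()
as a plain mask-then-shift extractor, and the ACPI checksum rule enforced by
parse_ivrs_table(), where every byte of the table must sum to zero modulo 256.
The mask/shift values are illustrative, not the real AMD_IOMMU_ACPI_* constants.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* assumed behaviour of get_field_from_byte(): isolate a bit-field
   and right-justify it */
static uint8_t get_field_from_byte(uint8_t value, uint8_t mask, uint8_t shift)
{
    return (value & mask) >> shift;
}

/* checksum rule from parse_ivrs_table(): the u8 sum of the entire
   table wraps modulo 256 and must come out as zero */
static int checksum_ok(const uint8_t *table, size_t length)
{
    uint8_t sum = 0;
    size_t i;

    for ( i = 0; i < length; ++i )
        sum += table[i];
    return sum == 0;
}

int main(void)
{
    uint8_t flags = 0x08;                          /* hypothetical IVMD flags byte */
    uint8_t table[4] = { 0x49, 0x56, 0x52, 0x0F }; /* bytes sum to 0x100 -> 0 */

    /* illustrative mask/shift: bit 3 read as a one-bit field */
    printf("field = %u\n", get_field_from_byte(flags, 0x08, 3));
    printf("checksum ok = %d\n", checksum_ok(table, sizeof(table)));
    return 0;
}
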
     4.1 --- a/xen/drivers/passthrough/amd/iommu_detect.c	Thu Feb 28 13:19:38 2008 +0000
     4.2 +++ b/xen/drivers/passthrough/amd/iommu_detect.c	Thu Feb 28 13:21:49 2008 +0000
     4.3 @@ -86,31 +86,25 @@ int __init get_iommu_last_downstream_bus
     4.4  int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
     4.5              struct amd_iommu *iommu)
     4.6  {
     4.7 -    u32 cap_header, cap_range;
     4.8 +    u32 cap_header, cap_range, misc_info;
     4.9      u64 mmio_bar;
    4.10  
    4.11 -#if HACK_BIOS_SETTINGS
    4.12 -    /* remove it when BIOS available */
    4.13 -    write_pci_config(bus, dev, func,
    4.14 -        cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, 0x00000000);
    4.15 -    write_pci_config(bus, dev, func,
    4.16 -        cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, 0x40000001);
    4.17 -    /* remove it when BIOS available */
    4.18 -#endif
    4.19 +    mmio_bar = (u64)read_pci_config(bus, dev, func,
    4.20 +            cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
    4.21 +    mmio_bar |= read_pci_config(bus, dev, func,
    4.22 +            cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET); 
    4.23 +    iommu->mmio_base_phys = mmio_bar & (u64)~0x3FFF;
    4.24  
    4.25 -    mmio_bar = (u64)read_pci_config(bus, dev, func,
    4.26 -             cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
    4.27 -    mmio_bar |= read_pci_config(bus, dev, func,
    4.28 -            cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET) &
    4.29 -            PCI_CAP_MMIO_BAR_LOW_MASK;
    4.30 -    iommu->mmio_base_phys = (unsigned long)mmio_bar;
    4.31 -
    4.32 -    if ( (mmio_bar == 0) || ( (mmio_bar & 0x3FFF) != 0 ) ) {
    4.33 +    if ( (mmio_bar & 0x1) == 0 || iommu->mmio_base_phys == 0 )
    4.34 +    {
    4.35          dprintk(XENLOG_ERR ,
    4.36                  "AMD IOMMU: Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
    4.37          return -ENODEV;
    4.38      }
    4.39  
    4.40 +    iommu->bdf = (bus << 8) | PCI_DEVFN(dev, func);
    4.41 +    iommu->cap_offset = cap_ptr;
    4.42 +
    4.43      cap_header = read_pci_config(bus, dev, func, cap_ptr);
    4.44      iommu->revision = get_field_from_reg_u32(cap_header,
    4.45                    PCI_CAP_REV_MASK, PCI_CAP_REV_SHIFT);
    4.46 @@ -119,12 +113,15 @@ int __init get_iommu_capabilities(u8 bus
    4.47      iommu->ht_tunnel_support = get_field_from_reg_u32(cap_header,
    4.48                      PCI_CAP_HT_TUNNEL_MASK,
    4.49                      PCI_CAP_HT_TUNNEL_SHIFT);
    4.50 -    iommu->not_present_cached = get_field_from_reg_u32(cap_header,
    4.51 +    iommu->pte_not_present_cached = get_field_from_reg_u32(cap_header,
    4.52                      PCI_CAP_NP_CACHE_MASK,
    4.53                      PCI_CAP_NP_CACHE_SHIFT);
    4.54  
    4.55      cap_range = read_pci_config(bus, dev, func,
    4.56              cap_ptr + PCI_CAP_RANGE_OFFSET);
    4.57 +    iommu->unit_id = get_field_from_reg_u32(cap_range,
    4.58 +                PCI_CAP_UNIT_ID_MASK,
    4.59 +                PCI_CAP_UNIT_ID_SHIFT);
    4.60      iommu->root_bus = get_field_from_reg_u32(cap_range,
    4.61                  PCI_CAP_BUS_NUMBER_MASK,
    4.62                  PCI_CAP_BUS_NUMBER_SHIFT);
    4.63 @@ -135,6 +132,11 @@ int __init get_iommu_capabilities(u8 bus
    4.64                  PCI_CAP_LAST_DEVICE_MASK,
    4.65                  PCI_CAP_LAST_DEVICE_SHIFT);
    4.66  
    4.67 +    misc_info = read_pci_config(bus, dev, func,
    4.68 +            cap_ptr + PCI_MISC_INFO_OFFSET);
    4.69 +    iommu->msi_number = get_field_from_reg_u32(misc_info,
    4.70 +                PCI_CAP_MSI_NUMBER_MASK,
    4.71 +                PCI_CAP_MSI_NUMBER_SHIFT);
    4.72      return 0;
    4.73  }
    4.74  
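
The rewritten BAR probe above assembles a 64-bit MMIO base from two 32-bit
config-space reads, treats bit 0 of the low dword as the capability's enable
bit, and masks off the low 14 bits so the base is 16KB-aligned. A standalone
sketch under those assumptions, with read_pci_config() replaced by a stub
returning canned values:

#include <stdio.h>
#include <stdint.h>

/* stub standing in for Xen's read_pci_config(); returns canned values */
static uint32_t read_bar_dword(int high)
{
    return high ? 0x00000000u : 0x40000001u;   /* enable bit set, base 1GB */
}

int main(void)
{
    uint64_t mmio_bar;

    mmio_bar  = (uint64_t)read_bar_dword(1) << 32;
    mmio_bar |= read_bar_dword(0);

    /* bit 0 = enable; bits 13:0 are not address bits, hence ~0x3FFF */
    int enabled = mmio_bar & 0x1;
    uint64_t base = mmio_bar & ~(uint64_t)0x3FFF;

    printf("enabled = %d, base = %#llx\n",
           enabled, (unsigned long long)base);
    return 0;
}
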
     5.1 --- a/xen/drivers/passthrough/amd/iommu_init.c	Thu Feb 28 13:19:38 2008 +0000
     5.2 +++ b/xen/drivers/passthrough/amd/iommu_init.c	Thu Feb 28 13:21:49 2008 +0000
     5.3 @@ -137,8 +137,49 @@ static void __init set_iommu_command_buf
     5.4      writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
     5.5  }
     5.6  
     5.7 +static void __init register_iommu_exclusion_range(struct amd_iommu *iommu)
     5.8 +{
     5.9 +    u64 addr_lo, addr_hi;
    5.10 +    u32 entry;
    5.11 +
    5.12 +    addr_lo = iommu->exclusion_limit & DMA_32BIT_MASK;
    5.13 +    addr_hi = iommu->exclusion_limit >> 32;
    5.14 +
    5.15 +    set_field_in_reg_u32((u32)addr_hi, 0,
    5.16 +        IOMMU_EXCLUSION_LIMIT_HIGH_MASK,
    5.17 +        IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT, &entry);
    5.18 +    writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET);
    5.19 +
    5.20 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
    5.21 +        IOMMU_EXCLUSION_LIMIT_LOW_MASK,
    5.22 +        IOMMU_EXCLUSION_LIMIT_LOW_SHIFT, &entry);
    5.23 +    writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_LOW_OFFSET);
    5.24 +
    5.25 +    addr_lo = iommu->exclusion_base & DMA_32BIT_MASK;
    5.26 +    addr_hi = iommu->exclusion_base >> 32;
    5.27 +
    5.28 +    set_field_in_reg_u32((u32)addr_hi, 0,
    5.29 +        IOMMU_EXCLUSION_BASE_HIGH_MASK,
    5.30 +        IOMMU_EXCLUSION_BASE_HIGH_SHIFT, &entry);
    5.31 +    writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_HIGH_OFFSET);
    5.32 +
    5.33 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
    5.34 +        IOMMU_EXCLUSION_BASE_LOW_MASK,
    5.35 +        IOMMU_EXCLUSION_BASE_LOW_SHIFT, &entry);
    5.36 +
    5.37 +    set_field_in_reg_u32(iommu->exclusion_allow_all, entry,
    5.38 +        IOMMU_EXCLUSION_ALLOW_ALL_MASK,
    5.39 +        IOMMU_EXCLUSION_ALLOW_ALL_SHIFT, &entry);
    5.40 +
    5.41 +    set_field_in_reg_u32(iommu->exclusion_enable, entry,
    5.42 +        IOMMU_EXCLUSION_RANGE_ENABLE_MASK,
    5.43 +        IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT, &entry);
    5.44 +    writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_LOW_OFFSET);
    5.45 +}
    5.46 +
    5.47  void __init enable_iommu(struct amd_iommu *iommu)
    5.48  {
    5.49 +    register_iommu_exclusion_range(iommu);
    5.50      set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
    5.51      set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
    5.52      printk("AMD IOMMU %d: Enabled\n", nr_amd_iommus);
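
register_iommu_exclusion_range() above splits each 64-bit address across a
high/low register pair; the low register holds the page frame number in its
upper bits alongside control bits such as the enable flags, which is why the
low half is shifted right by PAGE_SHIFT before the field insertion. A sketch
of the assumed set_field_in_reg_u32() semantics, using an illustrative
register layout (PFN in bits 31:12, enable in bit 0):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* assumed semantics of set_field_in_reg_u32(): merge a field value into
   an existing register image under a mask/shift pair */
static void set_field_in_reg_u32(uint32_t field, uint32_t reg,
                                 uint32_t mask, int shift, uint32_t *out)
{
    *out = (reg & ~mask) | ((field << shift) & mask);
}

int main(void)
{
    uint64_t base = 0x12345000ULL;       /* page-aligned exclusion base */
    uint32_t addr_lo = (uint32_t)base;   /* low 32 bits of the address */
    uint32_t entry;

    /* place the PFN into bits 31:12 of a fresh register image */
    set_field_in_reg_u32(addr_lo >> PAGE_SHIFT, 0,
                         0xFFFFF000u, PAGE_SHIFT, &entry);
    /* then merge the enable flag into bit 0, keeping the PFN intact */
    set_field_in_reg_u32(1, entry, 0x1u, 0, &entry);

    printf("low register = %#x\n", entry);   /* prints 0x12345001 */
    return 0;
}
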
     6.1 --- a/xen/drivers/passthrough/amd/iommu_map.c	Thu Feb 28 13:19:38 2008 +0000
     6.2 +++ b/xen/drivers/passthrough/amd/iommu_map.c	Thu Feb 28 13:21:49 2008 +0000
     6.3 @@ -234,16 +234,19 @@ static void amd_iommu_set_page_directory
     6.4  }
     6.5  
     6.6  void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u16 domain_id,
     6.7 -                                   u8 paging_mode)
     6.8 +           u8 sys_mgt, u8 dev_ex, u8 paging_mode)
     6.9  {
    6.10      u64 addr_hi, addr_lo;
    6.11      u32 entry;
    6.12  
    6.13 -    dte[6] = dte[5] = dte[4] = 0;
    6.14 +    dte[7] = dte[6] = dte[5] = dte[4] = 0;
    6.15  
    6.16 -    set_field_in_reg_u32(IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED, 0,
    6.17 +    set_field_in_reg_u32(sys_mgt, 0,
    6.18                           IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
    6.19                           IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT, &entry);
    6.20 +    set_field_in_reg_u32(dev_ex, entry,
    6.21 +                         IOMMU_DEV_TABLE_ALLOW_EXCLUSION_MASK,
    6.22 +                         IOMMU_DEV_TABLE_ALLOW_EXCLUSION_SHIFT, &entry);
    6.23      dte[3] = entry;
    6.24  
    6.25      set_field_in_reg_u32(domain_id, 0,
    6.26 @@ -448,3 +451,34 @@ int amd_iommu_unmap_page(struct domain *
    6.27  
    6.28      return 0;
    6.29  }
    6.30 +
    6.31 +int amd_iommu_reserve_domain_unity_map(
    6.32 +           struct domain *domain,
    6.33 +           unsigned long phys_addr,
    6.34 +           unsigned long size, int iw, int ir)
    6.35 +{
    6.36 +    unsigned long flags, npages, i;
    6.37 +    void *pte;
    6.38 +    struct hvm_iommu *hd = domain_hvm_iommu(domain);
    6.39 +
    6.40 +    npages = region_to_pages(phys_addr, size);
    6.41 +
    6.42 +    spin_lock_irqsave(&hd->mapping_lock, flags);
    6.43 +    for ( i = 0; i < npages; ++i )
    6.44 +    {
    6.45 +        pte = get_pte_from_page_tables(hd->root_table,
    6.46 +           hd->paging_mode, phys_addr>>PAGE_SHIFT);
    6.47 +        if ( pte == 0 )
    6.48 +        {
    6.49 +            dprintk(XENLOG_ERR,
    6.50 +                    "AMD IOMMU: Invalid IO pagetable entry phys_addr = %lx\n", phys_addr);
    6.51 +            spin_unlock_irqrestore(&hd->mapping_lock, flags);
    6.52 +            return -EFAULT;
    6.53 +        }
    6.54 +        set_page_table_entry_present((u32 *)pte,
    6.55 +           phys_addr, iw, ir);
    6.56 +        phys_addr += PAGE_SIZE;
    6.57 +    }
    6.58 +    spin_unlock_irqrestore(&hd->mapping_lock, flags);
    6.59 +    return 0;
    6.60 +}
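
amd_iommu_reserve_domain_unity_map() above installs one I/O page-table entry
per page of the unity-mapped region. region_to_pages() is assumed to count
every page touched by the byte range [phys_addr, phys_addr + size), including
partial first and last pages; a minimal sketch of that assumption:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* assumed behaviour of region_to_pages(): pages touched by the byte
   range [addr, addr + size), partial first/last pages included */
static unsigned long region_to_pages(unsigned long addr, unsigned long size)
{
    unsigned long end = (addr + size + PAGE_SIZE - 1) & PAGE_MASK;

    return (end - (addr & PAGE_MASK)) >> PAGE_SHIFT;
}

int main(void)
{
    /* 8KB starting 1KB into a page touches three 4KB pages */
    printf("%lu\n", region_to_pages(0x1400, 0x2000));
    return 0;
}
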
     7.1 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Thu Feb 28 13:19:38 2008 +0000
     7.2 +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Thu Feb 28 13:21:49 2008 +0000
     7.3 @@ -20,6 +20,7 @@
     7.4  
     7.5  #include <asm/amd-iommu.h>
     7.6  #include <asm/hvm/svm/amd-iommu-proto.h>
     7.7 +#include <asm/hvm/svm/amd-iommu-acpi.h>
     7.8  #include <xen/sched.h>
     7.9  #include <asm/mm.h>
    7.10  #include "../pci-direct.h"
    7.11 @@ -30,6 +31,9 @@ long amd_iommu_poll_comp_wait = COMPLETI
    7.12  static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
    7.13  int nr_amd_iommus = 0;
    7.14  
    7.15 +unsigned short ivrs_bdf_entries = 0;
    7.16 +struct ivrs_mappings *ivrs_mappings = NULL;
    7.17 +
    7.18  /* will set if amd-iommu HW is found */
    7.19  int amd_iommu_enabled = 0;
    7.20  
    7.21 @@ -82,13 +86,12 @@ static void __init detect_cleanup(void)
    7.22          deallocate_iommu_resources(iommu);
    7.23          xfree(iommu);
    7.24      }
    7.25 -}
    7.26  
    7.27 -static int requestor_id_from_bdf(int bdf)
    7.28 -{
    7.29 -    /* HACK - HACK */
    7.30 -    /* account for possible 'aliasing' by parent device */
    7.31 -    return bdf;
    7.32 +    if ( ivrs_mappings )
    7.33 +    {
    7.34 +        xfree(ivrs_mappings);
    7.35 +        ivrs_mappings = NULL;
    7.36 +    }
    7.37  }
    7.38  
    7.39  static int __init allocate_iommu_table_struct(struct table_struct *table,
    7.40 @@ -179,21 +182,52 @@ static int __init amd_iommu_init(void)
    7.41  {
    7.42      struct amd_iommu *iommu;
    7.43      unsigned long flags;
    7.44 +    u16 bdf;
    7.45  
    7.46      for_each_amd_iommu ( iommu )
    7.47      {
    7.48          spin_lock_irqsave(&iommu->lock, flags);
    7.49  
    7.50 +        /* assign default IOMMU values */
    7.51 +        iommu->coherent = IOMMU_CONTROL_ENABLED;
    7.52 +        iommu->isochronous = IOMMU_CONTROL_ENABLED;
    7.53 +        iommu->res_pass_pw = IOMMU_CONTROL_ENABLED;
    7.54 +        iommu->pass_pw = IOMMU_CONTROL_ENABLED;
    7.55 +        iommu->ht_tunnel_enable = iommu->ht_tunnel_support ?
    7.56 +            IOMMU_CONTROL_ENABLED : IOMMU_CONTROL_DISABLED;
    7.57 +        iommu->exclusion_enable = IOMMU_CONTROL_DISABLED;
    7.58 +        iommu->exclusion_allow_all = IOMMU_CONTROL_DISABLED;
    7.59 +
    7.60          /* register IOMMU data structures in MMIO space */
    7.61          if ( map_iommu_mmio_region(iommu) != 0 )
    7.62              goto error_out;
    7.63          register_iommu_dev_table_in_mmio_space(iommu);
    7.64          register_iommu_cmd_buffer_in_mmio_space(iommu);
    7.65  
    7.66 +        spin_unlock_irqrestore(&iommu->lock, flags);
    7.67 +    }
    7.68 +
    7.69 +    /* assign default values for device entries */
    7.70 +    for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
    7.71 +    {
    7.72 +        ivrs_mappings[bdf].dte_requestor_id = bdf;
    7.73 +        ivrs_mappings[bdf].dte_sys_mgt_enable =
    7.74 +            IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED;
    7.75 +        ivrs_mappings[bdf].dte_allow_exclusion =
    7.76 +            IOMMU_CONTROL_DISABLED;
    7.77 +        ivrs_mappings[bdf].unity_map_enable =
    7.78 +            IOMMU_CONTROL_DISABLED;
    7.79 +    }
    7.80 +
    7.81 +    if ( acpi_table_parse(ACPI_IVRS, parse_ivrs_table) != 0 )
    7.82 +        dprintk(XENLOG_INFO, "AMD IOMMU: Did not find IVRS table!\n");
    7.83 +
    7.84 +    for_each_amd_iommu ( iommu )
    7.85 +    {
    7.86 +        spin_lock_irqsave(&iommu->lock, flags);
    7.87          /* enable IOMMU translation services */
    7.88          enable_iommu(iommu);
    7.89          nr_amd_iommus++;
    7.90 -
    7.91          spin_unlock_irqrestore(&iommu->lock, flags);
    7.92      }
    7.93  
    7.94 @@ -229,31 +263,38 @@ struct amd_iommu *find_iommu_for_device(
    7.95  }
    7.96  
    7.97  void amd_iommu_setup_domain_device(
    7.98 -    struct domain *domain, struct amd_iommu *iommu, int requestor_id)
    7.99 +    struct domain *domain, struct amd_iommu *iommu, int bdf)
   7.100  {
   7.101      void *dte;
   7.102      u64 root_ptr;
   7.103      unsigned long flags;
   7.104 +    int req_id;
   7.105 +    u8 sys_mgt, dev_ex;
   7.106      struct hvm_iommu *hd = domain_hvm_iommu(domain);
   7.107  
   7.108 -    BUG_ON( !hd->root_table||!hd->paging_mode );
   7.109 +    BUG_ON( !hd->root_table || !hd->paging_mode );
   7.110  
   7.111      root_ptr = (u64)virt_to_maddr(hd->root_table);
   7.112 +    /* get device-table entry */
   7.113 +    req_id = ivrs_mappings[bdf].dte_requestor_id;
   7.114      dte = iommu->dev_table.buffer +
   7.115 -        (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
   7.116 +        (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
   7.117  
   7.118      if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
   7.119      {
   7.120          spin_lock_irqsave(&iommu->lock, flags); 
   7.121  
   7.122 -        amd_iommu_set_dev_table_entry(
   7.123 -            (u32 *)dte,
   7.124 -            root_ptr, hd->domain_id, hd->paging_mode);
   7.125 -        invalidate_dev_table_entry(iommu, requestor_id);
   7.126 +        /* bind DTE to domain page-tables */
   7.127 +        sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
   7.128 +        dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
   7.129 +        amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr,
   7.130 +            req_id, sys_mgt, dev_ex, hd->paging_mode);
   7.131 +
   7.132 +        invalidate_dev_table_entry(iommu, req_id);
   7.133          flush_command_buffer(iommu);
   7.134          dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
   7.135                  "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
   7.136 -                requestor_id, root_ptr, hd->domain_id, hd->paging_mode);
   7.137 +                req_id, root_ptr, hd->domain_id, hd->paging_mode);
   7.138  
   7.139          spin_unlock_irqrestore(&iommu->lock, flags);
   7.140      }
   7.141 @@ -266,7 +307,7 @@ void __init amd_iommu_setup_dom0_devices
   7.142      struct pci_dev *pdev;
   7.143      int bus, dev, func;
   7.144      u32 l;
   7.145 -    int req_id, bdf;
   7.146 +    int bdf;
   7.147  
   7.148      for ( bus = 0; bus < 256; bus++ )
   7.149      {
   7.150 @@ -286,11 +327,12 @@ void __init amd_iommu_setup_dom0_devices
   7.151                  list_add_tail(&pdev->list, &hd->pdev_list);
   7.152  
   7.153                  bdf = (bus << 8) | pdev->devfn;
   7.154 -                req_id = requestor_id_from_bdf(bdf);
   7.155 -                iommu = find_iommu_for_device(bus, pdev->devfn);
   7.156 +                /* supported device? */
   7.157 +                iommu = (bdf < ivrs_bdf_entries) ?
   7.158 +                    find_iommu_for_device(bus, pdev->devfn) : NULL;
   7.159  
   7.160                  if ( iommu )
   7.161 -                    amd_iommu_setup_domain_device(dom0, iommu, req_id);
   7.162 +                    amd_iommu_setup_domain_device(dom0, iommu, bdf);
   7.163              }
   7.164          }
   7.165      }
   7.166 @@ -299,6 +341,8 @@ void __init amd_iommu_setup_dom0_devices
   7.167  int amd_iommu_detect(void)
   7.168  {
   7.169      unsigned long i;
   7.170 +    int last_bus;
   7.171 +    struct amd_iommu *iommu;
   7.172  
   7.173      if ( !enable_amd_iommu )
   7.174      {
   7.175 @@ -319,6 +363,28 @@ int amd_iommu_detect(void)
   7.176          printk("AMD IOMMU: Not found!\n");
   7.177          return 0;
   7.178      }
   7.179 +    else
   7.180 +    {
   7.181 +        /* allocate 'ivrs mappings' table */
   7.182 +        /* note: the table has entries to accommodate all IOMMUs */
   7.183 +        last_bus = 0;
   7.184 +        for_each_amd_iommu (iommu)
   7.185 +           if (iommu->last_downstream_bus > last_bus)
   7.186 +               last_bus = iommu->last_downstream_bus;
   7.187 +
   7.188 +        ivrs_bdf_entries = (last_bus + 1) *
   7.189 +                IOMMU_DEV_TABLE_ENTRIES_PER_BUS;
   7.190 +        ivrs_mappings = xmalloc_array( struct ivrs_mappings, ivrs_bdf_entries);
   7.191 +
   7.192 +        if ( !ivrs_mappings )
   7.193 +        {
   7.194 +            dprintk(XENLOG_ERR, "AMD IOMMU:"
   7.195 +                        " Error allocating IVRS DevMappings table\n");
   7.196 +            goto error_out;
   7.197 +        }
   7.198 +        memset(ivrs_mappings, 0,
   7.199 +            ivrs_bdf_entries * sizeof(struct ivrs_mappings));
   7.200 +    }
   7.201  
   7.202      if ( amd_iommu_init() != 0 )
   7.203      {
   7.204 @@ -407,23 +473,25 @@ int amd_iommu_domain_init(struct domain 
   7.205  }
   7.206  
   7.207  static void amd_iommu_disable_domain_device(
   7.208 -    struct domain *domain, struct amd_iommu *iommu, u16 requestor_id)
   7.209 +    struct domain *domain, struct amd_iommu *iommu, int bdf)
   7.210  {
   7.211      void *dte;
   7.212      unsigned long flags;
   7.213 +    int req_id;
   7.214  
   7.215 +    req_id = ivrs_mappings[bdf].dte_requestor_id;
   7.216      dte = iommu->dev_table.buffer +
   7.217 -        (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
   7.218 +        (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
   7.219  
   7.220      if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
   7.221      {
   7.222          spin_lock_irqsave(&iommu->lock, flags); 
   7.223          memset (dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
   7.224 -        invalidate_dev_table_entry(iommu, requestor_id);
   7.225 +        invalidate_dev_table_entry(iommu, req_id);
   7.226          flush_command_buffer(iommu);
   7.227          dprintk(XENLOG_INFO , "AMD IOMMU: disable DTE 0x%x,"
   7.228                  " domain_id:%d, paging_mode:%d\n",
   7.229 -                requestor_id,  domain_hvm_iommu(domain)->domain_id,
    7.230 +                req_id, domain_hvm_iommu(domain)->domain_id,
   7.231                  domain_hvm_iommu(domain)->paging_mode);
   7.232          spin_unlock_irqrestore(&iommu->lock, flags);
   7.233      }
   7.234 @@ -438,7 +506,7 @@ static int reassign_device( struct domai
   7.235      struct hvm_iommu *target_hd = domain_hvm_iommu(target);
   7.236      struct pci_dev *pdev;
   7.237      struct amd_iommu *iommu;
   7.238 -    int req_id, bdf;
   7.239 +    int bdf;
   7.240      unsigned long flags;
   7.241  
   7.242      for_each_pdev( source, pdev )
   7.243 @@ -450,12 +518,13 @@ static int reassign_device( struct domai
   7.244          pdev->devfn = devfn;
   7.245  
   7.246          bdf = (bus << 8) | devfn;
   7.247 -        req_id = requestor_id_from_bdf(bdf);
   7.248 -        iommu = find_iommu_for_device(bus, devfn);
   7.249 +        /* supported device? */
   7.250 +        iommu = (bdf < ivrs_bdf_entries) ?
   7.251 +            find_iommu_for_device(bus, pdev->devfn) : NULL;
   7.252  
   7.253          if ( iommu )
   7.254          {
   7.255 -            amd_iommu_disable_domain_device(source, iommu, req_id);
   7.256 +            amd_iommu_disable_domain_device(source, iommu, bdf);
   7.257              /* Move pci device from the source domain to target domain. */
   7.258              spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
   7.259              spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
   7.260 @@ -463,7 +532,7 @@ static int reassign_device( struct domai
   7.261              spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
   7.262              spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
   7.263  
   7.264 -            amd_iommu_setup_domain_device(target, iommu, req_id);
   7.265 +            amd_iommu_setup_domain_device(target, iommu, bdf);
   7.266              gdprintk(XENLOG_INFO ,
   7.267                       "AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
   7.268                       bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
   7.269 @@ -484,6 +553,19 @@ static int reassign_device( struct domai
   7.270  
   7.271  int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
   7.272  {
   7.273 +    int bdf = (bus << 8) | devfn;
   7.274 +    int req_id;
   7.275 +    req_id = ivrs_mappings[bdf].dte_requestor_id;
   7.276 +
    7.277 +    if ( ivrs_mappings[req_id].unity_map_enable )
   7.278 +    {
   7.279 +        amd_iommu_reserve_domain_unity_map(d,
   7.280 +            ivrs_mappings[req_id].addr_range_start,
   7.281 +            ivrs_mappings[req_id].addr_range_length,
   7.282 +            ivrs_mappings[req_id].write_permission,
   7.283 +            ivrs_mappings[req_id].read_permission);
   7.284 +    }
   7.285 +
   7.286      pdev_flr(bus, devfn);
   7.287      return reassign_device(dom0, d, bus, devfn);
   7.288  }
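
Note on the requestor-ID rework in pci_amd_iommu.c above: the per-device
functions now take the raw BDF (bus in bits 15:8, devfn in bits 7:0) and
resolve the device table entry's requestor ID through ivrs_mappings[], so
that aliased devices can share a single DTE. A minimal sketch of the lookup,
using only identifiers from this patch (the helper name bdf_to_req_id is
hypothetical):

    /* Resolve a device's DTE requestor ID via the IVRS mappings table;
     * returns -1 for devices not covered by the IVRS table. */
    static int bdf_to_req_id(int bus, int devfn)
    {
        int bdf = (bus << 8) | devfn;

        if ( bdf >= ivrs_bdf_entries )
            return -1;
        return ivrs_mappings[bdf].dte_requestor_id;
    }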
     8.1 --- a/xen/include/asm-x86/amd-iommu.h	Thu Feb 28 13:19:38 2008 +0000
     8.2 +++ b/xen/include/asm-x86/amd-iommu.h	Thu Feb 28 13:21:49 2008 +0000
     8.3 @@ -43,15 +43,26 @@ struct amd_iommu {
     8.4      struct list_head list;
     8.5      spinlock_t lock; /* protect iommu */
     8.6  
     8.7 -    int iotlb_support;
     8.8 -    int ht_tunnel_support;
     8.9 -    int not_present_cached;
    8.10 +    u16 bdf;
    8.11 +    u8  cap_offset;
    8.12      u8  revision;
    8.13 +    u8  unit_id;
    8.14 +    u8  msi_number;
    8.15  
    8.16      u8  root_bus;
    8.17      u8  first_devfn;
    8.18      u8  last_devfn;
    8.19  
    8.20 +    u8 pte_not_present_cached;
    8.21 +    u8 ht_tunnel_support;
    8.22 +    u8 iotlb_support;
    8.23 +
    8.24 +    u8 isochronous;
    8.25 +    u8 coherent;
    8.26 +    u8 res_pass_pw;
    8.27 +    u8 pass_pw;
    8.28 +    u8 ht_tunnel_enable;
    8.29 +
    8.30      int last_downstream_bus;
    8.31      int downstream_bus_present[PCI_MAX_BUS_COUNT];
    8.32  
    8.33 @@ -61,10 +72,23 @@ struct amd_iommu {
    8.34      struct table_struct dev_table;
    8.35      struct table_struct cmd_buffer;
    8.36      u32 cmd_buffer_tail;
    8.37 +    struct table_struct event_log;
    8.38 +    u32 event_log_head;
    8.39  
    8.40 -    int exclusion_enabled;
    8.41 +    int exclusion_enable;
    8.42 +    int exclusion_allow_all;
    8.43      unsigned long exclusion_base;
    8.44      unsigned long exclusion_limit;
    8.45  };
    8.46  
    8.47 +struct ivrs_mappings {
    8.48 +    u16 dte_requestor_id;
    8.49 +    u8 dte_sys_mgt_enable;
    8.50 +    u8 dte_allow_exclusion;
    8.51 +    u8 unity_map_enable;
    8.52 +    u8 write_permission;
    8.53 +    u8 read_permission;
    8.54 +    unsigned long addr_range_start;
    8.55 +    unsigned long addr_range_length;
    8.56 +};
    8.57  #endif /* _ASM_X86_64_AMD_IOMMU_H */
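
The new struct ivrs_mappings is indexed by requestor ID/BDF and records both
per-device DTE control bits and any IVMD unity-map range found by the ACPI
parser. A hedged sketch of consuming the unity-map fields, mirroring the
amd_iommu_assign_device() change above (apply_unity_map is a hypothetical
wrapper):

    /* Reserve a device's unity-mapped range in a domain's I/O page
     * tables, if an IVMD block reported one for its requestor ID. */
    static void apply_unity_map(struct domain *d, int req_id)
    {
        struct ivrs_mappings *m = &ivrs_mappings[req_id];

        if ( m->unity_map_enable )
            amd_iommu_reserve_domain_unity_map(d,
                m->addr_range_start, m->addr_range_length,
                m->write_permission, m->read_permission);
    }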
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-acpi.h	Thu Feb 28 13:21:49 2008 +0000
     9.3 @@ -0,0 +1,176 @@
     9.4 +/*
     9.5 + * Copyright (C) 2007 Advanced Micro Devices, Inc.
     9.6 + * Author: Leo Duran <leo.duran@amd.com>
     9.7 + * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
     9.8 + *
     9.9 + * This program is free software; you can redistribute it and/or modify
    9.10 + * it under the terms of the GNU General Public License as published by
    9.11 + * the Free Software Foundation; either version 2 of the License, or
    9.12 + * (at your option) any later version.
    9.13 + *
    9.14 + * This program is distributed in the hope that it will be useful,
    9.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    9.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    9.17 + * GNU General Public License for more details.
    9.18 + *
    9.19 + * You should have received a copy of the GNU General Public License
    9.20 + * along with this program; if not, write to the Free Software
    9.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
    9.22 + */
    9.23 +
    9.24 +#ifndef _ASM_X86_64_AMD_IOMMU_ACPI_H
    9.25 +#define _ASM_X86_64_AMD_IOMMU_ACPI_H
    9.26 +
    9.27 +#include <xen/acpi.h>
    9.28 +
    9.29 +/* I/O Virtualization Reporting Structure */
    9.30 +#define AMD_IOMMU_ACPI_IVRS_SIG            "IVRS"
    9.31 +#define AMD_IOMMU_ACPI_IVHD_TYPE       0x10
    9.32 +#define AMD_IOMMU_ACPI_IVMD_ALL_TYPE       0x20
    9.33 +#define AMD_IOMMU_ACPI_IVMD_ONE_TYPE       0x21
    9.34 +#define AMD_IOMMU_ACPI_IVMD_RANGE_TYPE     0x22
    9.35 +#define AMD_IOMMU_ACPI_IVMD_IOMMU_TYPE     0x23
    9.36 +
    9.37 +/* 4-byte Device Entries */
    9.38 +#define AMD_IOMMU_ACPI_IVHD_DEV_U32_PAD        0
    9.39 +#define AMD_IOMMU_ACPI_IVHD_DEV_SELECT     2
    9.40 +#define AMD_IOMMU_ACPI_IVHD_DEV_RANGE_START    3
    9.41 +#define AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END  4
    9.42 +
    9.43 +/* 8-byte Device Entries */
    9.44 +#define AMD_IOMMU_ACPI_IVHD_DEV_U64_PAD        64
    9.45 +#define AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_SELECT   66
    9.46 +#define AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_RANGE    67
    9.47 +#define AMD_IOMMU_ACPI_IVHD_DEV_EXT_SELECT 70
    9.48 +#define AMD_IOMMU_ACPI_IVHD_DEV_EXT_RANGE  71
    9.49 +
    9.50 +/* IVHD IOMMU Flags */
    9.51 +#define AMD_IOMMU_ACPI_COHERENT_MASK       0x20
    9.52 +#define AMD_IOMMU_ACPI_COHERENT_SHIFT      5
    9.53 +#define AMD_IOMMU_ACPI_IOTLB_SUP_MASK      0x10
    9.54 +#define AMD_IOMMU_ACPI_IOTLB_SUP_SHIFT     4
    9.55 +#define AMD_IOMMU_ACPI_ISOC_MASK       0x08
    9.56 +#define AMD_IOMMU_ACPI_ISOC_SHIFT      3
    9.57 +#define AMD_IOMMU_ACPI_RES_PASS_PW_MASK        0x04
    9.58 +#define AMD_IOMMU_ACPI_RES_PASS_PW_SHIFT   2
    9.59 +#define AMD_IOMMU_ACPI_PASS_PW_MASK        0x02
    9.60 +#define AMD_IOMMU_ACPI_PASS_PW_SHIFT       1
    9.61 +#define AMD_IOMMU_ACPI_HT_TUN_ENB_MASK     0x01
    9.62 +#define AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT        0
    9.63 +
    9.64 +/* IVHD Device Flags */
    9.65 +#define AMD_IOMMU_ACPI_LINT1_PASS_MASK     0x80
    9.66 +#define AMD_IOMMU_ACPI_LINT1_PASS_SHIFT        7
    9.67 +#define AMD_IOMMU_ACPI_LINT0_PASS_MASK     0x40
    9.68 +#define AMD_IOMMU_ACPI_LINT0_PASS_SHIFT        6
    9.69 +#define AMD_IOMMU_ACPI_SYS_MGT_MASK        0x30
    9.70 +#define AMD_IOMMU_ACPI_SYS_MGT_SHIFT       4
    9.71 +#define AMD_IOMMU_ACPI_NMI_PASS_MASK       0x04
    9.72 +#define AMD_IOMMU_ACPI_NMI_PASS_SHIFT      2
    9.73 +#define AMD_IOMMU_ACPI_EINT_PASS_MASK      0x02
    9.74 +#define AMD_IOMMU_ACPI_EINT_PASS_SHIFT     1
    9.75 +#define AMD_IOMMU_ACPI_INIT_PASS_MASK      0x01
    9.76 +#define AMD_IOMMU_ACPI_INIT_PASS_SHIFT     0
    9.77 +
    9.78 +/* IVHD Device Extended Flags */
    9.79 +#define AMD_IOMMU_ACPI_ATS_DISABLED_MASK   0x80000000
    9.80 +#define AMD_IOMMU_ACPI_ATS_DISABLED_SHIFT  31
    9.81 +
    9.82 +/* IVMD Device Flags */
    9.83 +#define AMD_IOMMU_ACPI_EXCLUSION_RANGE_MASK    0x08
    9.84 +#define AMD_IOMMU_ACPI_EXCLUSION_RANGE_SHIFT   3
    9.85 +#define AMD_IOMMU_ACPI_IW_PERMISSION_MASK  0x04
    9.86 +#define AMD_IOMMU_ACPI_IW_PERMISSION_SHIFT 2
    9.87 +#define AMD_IOMMU_ACPI_IR_PERMISSION_MASK  0x02
    9.88 +#define AMD_IOMMU_ACPI_IR_PERMISSION_SHIFT 1
    9.89 +#define AMD_IOMMU_ACPI_UNITY_MAPPING_MASK  0x01
    9.90 +#define AMD_IOMMU_ACPI_UNITY_MAPPING_SHIFT 0
    9.91 +
    9.92 +#define ACPI_OEM_ID_SIZE                6
    9.93 +#define ACPI_OEM_TABLE_ID_SIZE          8
    9.94 +
    9.95 +#pragma pack(1)
    9.96 +struct acpi_ivrs_table_header {
    9.97 +   struct acpi_table_header acpi_header;
    9.98 +   u32 io_info;
    9.99 +   u8  reserved[8];
   9.100 +};
   9.101 +
   9.102 +struct acpi_ivrs_block_header {
   9.103 +   u8  type;
   9.104 +   u8  flags;
   9.105 +   u16 length;
   9.106 +   u16 dev_id;
   9.107 +};
   9.108 +
   9.109 +struct acpi_ivhd_block_header {
   9.110 +   struct acpi_ivrs_block_header header;
   9.111 +   u16 cap_offset;
   9.112 +   u64 mmio_base;
   9.113 +   u16 pci_segment;
   9.114 +   u16 iommu_info;
   9.115 +   u8 reserved[4];
   9.116 +};
   9.117 +
   9.118 +struct acpi_ivhd_device_header {
   9.119 +   u8  type;
   9.120 +   u16 dev_id;
   9.121 +   u8  flags;
   9.122 +};
   9.123 +
   9.124 +struct acpi_ivhd_device_trailer {
   9.125 +   u8  type;
   9.126 +   u16 dev_id;
   9.127 +   u8  reserved;
   9.128 +};
   9.129 +
   9.130 +struct acpi_ivhd_device_range {
   9.131 +   struct acpi_ivhd_device_header header;
   9.132 +   struct acpi_ivhd_device_trailer trailer;
   9.133 +};
   9.134 +
   9.135 +struct acpi_ivhd_device_alias {
   9.136 +   struct acpi_ivhd_device_header header;
   9.137 +   u8  reserved1;
   9.138 +   u16 dev_id;
   9.139 +   u8  reserved2;
   9.140 +};
   9.141 +
   9.142 +struct acpi_ivhd_device_alias_range {
   9.143 +   struct acpi_ivhd_device_alias alias;
   9.144 +   struct acpi_ivhd_device_trailer trailer;
   9.145 +};
   9.146 +
   9.147 +struct acpi_ivhd_device_extended {
   9.148 +   struct acpi_ivhd_device_header header;
   9.149 +   u32 ext_flags;
   9.150 +};
   9.151 +
   9.152 +struct acpi_ivhd_device_extended_range {
   9.153 +   struct acpi_ivhd_device_extended extended;
   9.154 +   struct acpi_ivhd_device_trailer trailer;
   9.155 +};
   9.156 +
   9.157 +union acpi_ivhd_device {
   9.158 +   struct acpi_ivhd_device_header header;
   9.159 +   struct acpi_ivhd_device_range range;
   9.160 +   struct acpi_ivhd_device_alias alias;
   9.161 +   struct acpi_ivhd_device_alias_range alias_range;
   9.162 +   struct acpi_ivhd_device_extended extended;
   9.163 +   struct acpi_ivhd_device_extended_range extended_range;
   9.164 +};
   9.165 +
   9.166 +struct acpi_ivmd_block_header {
   9.167 +   struct acpi_ivrs_block_header header;
   9.168 +   union {
   9.169 +       u16 last_dev_id;
   9.170 +       u16 cap_offset;
   9.171 +       u16 reserved1;
   9.172 +   };
   9.173 +   u64 reserved2;
   9.174 +   u64 start_addr;
   9.175 +   u64 mem_length;
   9.176 +};
   9.177 +#pragma pack()
   9.178 +
   9.179 +#endif /* _ASM_X86_64_AMD_IOMMU_ACPI_H */
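
The structures above describe a table whose body is a sequence of
variable-length blocks: each starts with an acpi_ivrs_block_header, the type
field selects IVHD (0x10) or one of the IVMD variants (0x20-0x23), and the
length field gives the stride to the next block. A minimal walk, as a sketch
(parse_one_block stands in for the per-type handlers in iommu_acpi.c):

    /* Iterate the blocks that follow the IVRS table header. */
    static int walk_ivrs_blocks(struct acpi_ivrs_table_header *table)
    {
        unsigned long base = (unsigned long)table;
        unsigned long offset = sizeof(struct acpi_ivrs_table_header);

        while ( offset < table->acpi_header.length )
        {
            struct acpi_ivrs_block_header *block =
                (struct acpi_ivrs_block_header *)(base + offset);

            if ( block->length == 0 )
                return -ENODEV;    /* malformed table, bail out */
            parse_one_block(block);
            offset += block->length;
        }
        return 0;
    }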
    10.1 --- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h	Thu Feb 28 13:19:38 2008 +0000
    10.2 +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h	Thu Feb 28 13:21:49 2008 +0000
    10.3 @@ -118,6 +118,12 @@
    10.4  #define PCI_CAP_LAST_DEVICE_MASK	0xFF000000
    10.5  #define PCI_CAP_LAST_DEVICE_SHIFT	24
    10.6  
    10.7 +#define PCI_CAP_UNIT_ID_MASK    0x0000001F
    10.8 +#define PCI_CAP_UNIT_ID_SHIFT   0
    10.9 +#define PCI_MISC_INFO_OFFSET    0x10
   10.10 +#define PCI_CAP_MSI_NUMBER_MASK     0x0000001F
   10.11 +#define PCI_CAP_MSI_NUMBER_SHIFT    0
   10.12 +
   10.13  /* Device Table */
   10.14  #define IOMMU_DEV_TABLE_BASE_LOW_OFFSET		0x00
   10.15  #define IOMMU_DEV_TABLE_BASE_HIGH_OFFSET	0x04
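
The PCI_CAP_MSI_NUMBER_* and PCI_CAP_UNIT_ID_* fields added above are
extracted from the IOMMU capability block during detection. A sketch of
reading the MSI message number, assuming the misc info register sits at
cap_offset + PCI_MISC_INFO_OFFSET and that pci_conf_read32() is the
config-space read helper (both assumptions, not taken from this hunk):

    /* MsiNum lives in bits 4:0 of the capability's misc info register. */
    u32 misc_info = pci_conf_read32(bus, dev, func,
                                    cap_offset + PCI_MISC_INFO_OFFSET);
    u8 msi_number = get_field_from_reg_u32(misc_info,
                                           PCI_CAP_MSI_NUMBER_MASK,
                                           PCI_CAP_MSI_NUMBER_SHIFT);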
    11.1 --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Thu Feb 28 13:19:38 2008 +0000
    11.2 +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Thu Feb 28 13:21:49 2008 +0000
    11.3 @@ -21,6 +21,7 @@
    11.4  #ifndef _ASM_X86_64_AMD_IOMMU_PROTO_H
    11.5  #define _ASM_X86_64_AMD_IOMMU_PROTO_H
    11.6  
    11.7 +#include <xen/sched.h>
    11.8  #include <asm/amd-iommu.h>
    11.9  
   11.10  #define for_each_amd_iommu(amd_iommu) \
   11.11 @@ -54,10 +55,12 @@ void __init enable_iommu(struct amd_iomm
   11.12  int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
   11.13  int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
   11.14  void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
   11.15 +int amd_iommu_reserve_domain_unity_map(struct domain *domain,
   11.16 +        unsigned long phys_addr, unsigned long size, int iw, int ir);
   11.17  
   11.18  /* device table functions */
   11.19 -void amd_iommu_set_dev_table_entry(u32 *dte,
   11.20 -        u64 root_ptr, u16 domain_id, u8 paging_mode);
   11.21 +void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr,
   11.22 +        u16 domain_id, u8 sys_mgt, u8 dev_ex, u8 paging_mode);
   11.23  int amd_iommu_is_dte_page_translation_valid(u32 *entry);
   11.24  void invalidate_dev_table_entry(struct amd_iommu *iommu,
    11.25              u16 device_id);
   11.26 @@ -69,11 +72,14 @@ void flush_command_buffer(struct amd_iom
    11.27  /* iommu domain functions */
   11.28  int amd_iommu_domain_init(struct domain *domain);
   11.29  void amd_iommu_setup_domain_device(struct domain *domain,
   11.30 -    struct amd_iommu *iommu, int requestor_id);
   11.31 +    struct amd_iommu *iommu, int bdf);
   11.32  
   11.33  /* find iommu for bdf */
   11.34  struct amd_iommu *find_iommu_for_device(int bus, int devfn);
   11.35  
   11.36 +/* amd-iommu-acpi functions */
   11.37 +int __init parse_ivrs_table(unsigned long phys_addr, unsigned long size);
   11.38 +
   11.39  static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
   11.40  {
   11.41      u32 field;
   11.42 @@ -91,4 +97,16 @@ static inline u32 set_field_in_reg_u32(u
   11.43      return reg_value;
   11.44  }
   11.45  
   11.46 +static inline u8 get_field_from_byte(u8 value, u8 mask, u8 shift)
   11.47 +{
   11.48 +    u8 field;
   11.49 +    field = (value & mask) >> shift;
   11.50 +    return field;
   11.51 +}
   11.52 +
   11.53 +static inline unsigned long region_to_pages(unsigned long addr, unsigned long size)
   11.54 +{
   11.55 +    return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
   11.56 +}
   11.57 +
   11.58  #endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */
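
region_to_pages() above rounds an arbitrary byte range out to whole 4KB
pages, which the unity-map code needs when reserving IVMD ranges. A worked
example:

    /* A 6KB region starting 1KB into a page ends at the 7KB mark, so it
     * touches two pages: PAGE_ALIGN(0x400 + 0x1800) = 0x2000, and
     * (0x2000 - 0) >> PAGE_SHIFT == 2. */
    unsigned long npages = region_to_pages(0x400, 0x1800);    /* == 2 */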
    12.1 --- a/xen/include/xen/acpi.h	Thu Feb 28 13:19:38 2008 +0000
    12.2 +++ b/xen/include/xen/acpi.h	Thu Feb 28 13:21:49 2008 +0000
    12.3 @@ -368,6 +368,7 @@ enum acpi_table_id {
    12.4  	ACPI_HPET,
    12.5  	ACPI_MCFG,
    12.6  	ACPI_DMAR,
    12.7 +	ACPI_IVRS,
    12.8  	ACPI_TABLE_COUNT
    12.9  };
   12.10
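
With ACPI_IVRS added to the table-id enum, parse_ivrs_table() can be driven
through the generic ACPI table dispatch. A sketch of the expected hookup from
the IOMMU init path; the exact call site is not part of this hunk, and the
return-value convention of acpi_table_parse() (count of matching tables
found) is an assumption:

    /* Locate the IVRS table and hand it to the AMD IOMMU parser. */
    if ( acpi_table_parse(ACPI_IVRS, parse_ivrs_table) == 0 )
        dprintk(XENLOG_INFO, "AMD IOMMU: no IVRS table found\n");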