ia64/xen-unstable

changeset 17099:591cfd37bd54

Move vtd and amd iommu code to arch-generic location.
Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 21 15:06:37 2008 +0000 (2008-02-21)
parents f1a107ec62b6
children 09b53f27a18b
files xen/arch/x86/Rules.mk xen/arch/x86/hvm/svm/Makefile xen/arch/x86/hvm/svm/amd_iommu/Makefile xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-init.c xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c xen/arch/x86/hvm/svm/amd_iommu/pci-direct.h xen/arch/x86/hvm/svm/amd_iommu/pci_regs.h xen/arch/x86/hvm/vmx/Makefile xen/arch/x86/hvm/vmx/vtd/Makefile xen/arch/x86/hvm/vmx/vtd/dmar.c xen/arch/x86/hvm/vmx/vtd/dmar.h xen/arch/x86/hvm/vmx/vtd/extern.h xen/arch/x86/hvm/vmx/vtd/intel-iommu.c xen/arch/x86/hvm/vmx/vtd/intremap.c xen/arch/x86/hvm/vmx/vtd/io.c xen/arch/x86/hvm/vmx/vtd/msi.h xen/arch/x86/hvm/vmx/vtd/pci-direct.h xen/arch/x86/hvm/vmx/vtd/pci_regs.h xen/arch/x86/hvm/vmx/vtd/qinval.c xen/arch/x86/hvm/vmx/vtd/utils.c xen/arch/x86/hvm/vmx/vtd/vtd.h xen/drivers/Makefile xen/drivers/passthrough/Makefile xen/drivers/passthrough/amd/Makefile xen/drivers/passthrough/amd/iommu_detect.c xen/drivers/passthrough/amd/iommu_init.c xen/drivers/passthrough/amd/iommu_map.c xen/drivers/passthrough/amd/pci_amd_iommu.c xen/drivers/passthrough/pci-direct.h xen/drivers/passthrough/pci_regs.h xen/drivers/passthrough/vtd/Makefile xen/drivers/passthrough/vtd/dmar.c xen/drivers/passthrough/vtd/dmar.h xen/drivers/passthrough/vtd/extern.h xen/drivers/passthrough/vtd/intremap.c xen/drivers/passthrough/vtd/io.c xen/drivers/passthrough/vtd/iommu.c xen/drivers/passthrough/vtd/msi.h xen/drivers/passthrough/vtd/qinval.c xen/drivers/passthrough/vtd/utils.c xen/drivers/passthrough/vtd/vtd.h
line diff
     1.1 --- a/xen/arch/x86/Rules.mk	Thu Feb 21 14:50:27 2008 +0000
     1.2 +++ b/xen/arch/x86/Rules.mk	Thu Feb 21 15:06:37 2008 +0000
     1.3 @@ -39,7 +39,9 @@ ifeq ($(supervisor_mode_kernel),y)
     1.4  CFLAGS += -DCONFIG_X86_SUPERVISOR_MODE_KERNEL=1
     1.5  endif
     1.6  
     1.7 -ifeq ($(XEN_TARGET_ARCH),x86_32)
     1.8 +x86 := y
     1.9 +
    1.10 +ifeq ($(TARGET_SUBARCH),x86_32)
    1.11  x86_32 := y
    1.12  x86_64 := n
    1.13  endif
     2.1 --- a/xen/arch/x86/hvm/svm/Makefile	Thu Feb 21 14:50:27 2008 +0000
     2.2 +++ b/xen/arch/x86/hvm/svm/Makefile	Thu Feb 21 15:06:37 2008 +0000
     2.3 @@ -1,8 +1,6 @@
     2.4  subdir-$(x86_32) += x86_32
     2.5  subdir-$(x86_64) += x86_64
     2.6  
     2.7 -subdir-y += amd_iommu
     2.8 -
     2.9  obj-y += asid.o
    2.10  obj-y += emulate.o
    2.11  obj-y += intr.o
     3.1 --- a/xen/arch/x86/hvm/svm/amd_iommu/Makefile	Thu Feb 21 14:50:27 2008 +0000
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,4 +0,0 @@
     3.4 -obj-y += amd-iommu-detect.o
     3.5 -obj-y += amd-iommu-init.o
     3.6 -obj-y += amd-iommu-map.o
     3.7 -obj-y += pci-amd-iommu.o
     4.1 --- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c	Thu Feb 21 14:50:27 2008 +0000
     4.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.3 @@ -1,215 +0,0 @@
     4.4 -/*
     4.5 - * Copyright (C) 2007 Advanced Micro Devices, Inc.
     4.6 - * Author: Leo Duran <leo.duran@amd.com>
     4.7 - * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
     4.8 - *
     4.9 - * This program is free software; you can redistribute it and/or modify
    4.10 - * it under the terms of the GNU General Public License as published by
    4.11 - * the Free Software Foundation; either version 2 of the License, or
    4.12 - * (at your option) any later version.
    4.13 - *
    4.14 - * This program is distributed in the hope that it will be useful,
    4.15 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
    4.16 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    4.17 - * GNU General Public License for more details.
    4.18 - *
    4.19 - * You should have received a copy of the GNU General Public License
    4.20 - * along with this program; if not, write to the Free Software
    4.21 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
    4.22 - */
    4.23 -
    4.24 -#include <xen/config.h>
    4.25 -#include <xen/errno.h>
    4.26 -#include <asm/iommu.h>
    4.27 -#include <asm/amd-iommu.h>
    4.28 -#include <asm/hvm/svm/amd-iommu-proto.h>
    4.29 -#include "pci-direct.h"
    4.30 -#include "pci_regs.h"
    4.31 -
    4.32 -static int __init valid_bridge_bus_config(int bus, int dev, int func,
    4.33 -            int *sec_bus, int *sub_bus)
    4.34 -{
    4.35 -    int pri_bus;
    4.36 -
    4.37 -    pri_bus = read_pci_config_byte(bus, dev, func, PCI_PRIMARY_BUS);
    4.38 -    *sec_bus = read_pci_config_byte(bus, dev, func, PCI_SECONDARY_BUS);
    4.39 -    *sub_bus = read_pci_config_byte(bus, dev, func, PCI_SUBORDINATE_BUS);
    4.40 -
    4.41 -    return ( pri_bus == bus && *sec_bus > bus && *sub_bus >= *sec_bus );
    4.42 -}
    4.43 -
    4.44 -int __init get_iommu_last_downstream_bus(struct amd_iommu *iommu)
    4.45 -{
    4.46 -    int bus, dev, func;
    4.47 -    int devfn, hdr_type;
    4.48 -    int sec_bus, sub_bus;
    4.49 -    int multi_func;
    4.50 -
    4.51 -    bus = iommu->last_downstream_bus = iommu->root_bus;
    4.52 -    iommu->downstream_bus_present[bus] = 1;
    4.53 -    dev = PCI_SLOT(iommu->first_devfn);
    4.54 -    multi_func = PCI_FUNC(iommu->first_devfn) > 0;
    4.55 -    for ( devfn = iommu->first_devfn; devfn <= iommu->last_devfn; ++devfn ) {
     4.56 -        /* moved on to the next device number? */
    4.57 -        if ( dev != PCI_SLOT(devfn) ) {
    4.58 -            dev = PCI_SLOT(devfn);
    4.59 -            multi_func = 0;
    4.60 -        }
    4.61 -        func = PCI_FUNC(devfn);
    4.62 - 
    4.63 -        if ( !VALID_PCI_VENDOR_ID(
    4.64 -            read_pci_config_16(bus, dev, func, PCI_VENDOR_ID)) )
    4.65 -            continue;
    4.66 -
    4.67 -        hdr_type = read_pci_config_byte(bus, dev, func,
    4.68 -                PCI_HEADER_TYPE);
    4.69 -        if ( func == 0 )
    4.70 -            multi_func = IS_PCI_MULTI_FUNCTION(hdr_type);
    4.71 -
    4.72 -        if ( (func == 0 || multi_func) &&
    4.73 -            IS_PCI_TYPE1_HEADER(hdr_type) ) {
    4.74 -            if (!valid_bridge_bus_config(bus, dev, func,
    4.75 -                &sec_bus, &sub_bus))
    4.76 -                return -ENODEV;
    4.77 -
    4.78 -            if ( sub_bus > iommu->last_downstream_bus )
    4.79 -                iommu->last_downstream_bus = sub_bus;
    4.80 -            do {
    4.81 -                iommu->downstream_bus_present[sec_bus] = 1;
    4.82 -            } while ( sec_bus++ < sub_bus );
    4.83 -        }
    4.84 -    }
    4.85 -
    4.86 -    return 0;
    4.87 -}
    4.88 -
    4.89 -int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
    4.90 -            struct amd_iommu *iommu)
    4.91 -{
    4.92 -    u32 cap_header, cap_range;
    4.93 -    u64 mmio_bar;
    4.94 -
    4.95 -#if HACK_BIOS_SETTINGS
     4.96 -    /* remove this once BIOS support is available */
    4.97 -    write_pci_config(bus, dev, func,
    4.98 -        cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, 0x00000000);
    4.99 -    write_pci_config(bus, dev, func,
   4.100 -        cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, 0x40000001);
    4.101 -    /* remove this once BIOS support is available */
   4.102 -#endif
   4.103 -
   4.104 -    mmio_bar = (u64)read_pci_config(bus, dev, func,
   4.105 -             cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
   4.106 -    mmio_bar |= read_pci_config(bus, dev, func,
   4.107 -            cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET) &
   4.108 -            PCI_CAP_MMIO_BAR_LOW_MASK;
   4.109 -    iommu->mmio_base_phys = (unsigned long)mmio_bar;
   4.110 -
   4.111 -    if ( (mmio_bar == 0) || ( (mmio_bar & 0x3FFF) != 0 ) ) {
    4.112 -        dprintk(XENLOG_ERR,
   4.113 -                "AMD IOMMU: Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
   4.114 -        return -ENODEV;
   4.115 -    }
   4.116 -
   4.117 -    cap_header = read_pci_config(bus, dev, func, cap_ptr);
   4.118 -    iommu->revision = get_field_from_reg_u32(cap_header,
   4.119 -                  PCI_CAP_REV_MASK, PCI_CAP_REV_SHIFT);
   4.120 -    iommu->iotlb_support = get_field_from_reg_u32(cap_header,
   4.121 -                PCI_CAP_IOTLB_MASK, PCI_CAP_IOTLB_SHIFT);
   4.122 -    iommu->ht_tunnel_support = get_field_from_reg_u32(cap_header,
   4.123 -                    PCI_CAP_HT_TUNNEL_MASK,
   4.124 -                    PCI_CAP_HT_TUNNEL_SHIFT);
   4.125 -    iommu->not_present_cached = get_field_from_reg_u32(cap_header,
   4.126 -                    PCI_CAP_NP_CACHE_MASK,
   4.127 -                    PCI_CAP_NP_CACHE_SHIFT);
   4.128 -
   4.129 -    cap_range = read_pci_config(bus, dev, func,
   4.130 -            cap_ptr + PCI_CAP_RANGE_OFFSET);
   4.131 -    iommu->root_bus = get_field_from_reg_u32(cap_range,
   4.132 -                PCI_CAP_BUS_NUMBER_MASK,
   4.133 -                PCI_CAP_BUS_NUMBER_SHIFT);
   4.134 -    iommu->first_devfn = get_field_from_reg_u32(cap_range,
   4.135 -                PCI_CAP_FIRST_DEVICE_MASK,
   4.136 -                PCI_CAP_FIRST_DEVICE_SHIFT);
   4.137 -    iommu->last_devfn = get_field_from_reg_u32(cap_range,
   4.138 -                PCI_CAP_LAST_DEVICE_MASK,
   4.139 -                PCI_CAP_LAST_DEVICE_SHIFT);
   4.140 -
   4.141 -    return 0;
   4.142 -}
   4.143 -
   4.144 -static int __init scan_caps_for_iommu(int bus, int dev, int func,
   4.145 -            iommu_detect_callback_ptr_t iommu_detect_callback)
   4.146 -{
   4.147 -    int cap_ptr, cap_id, cap_type;
   4.148 -    u32 cap_header;
   4.149 -    int count, error = 0;
   4.150 -
   4.151 -    count = 0;
   4.152 -    cap_ptr = read_pci_config_byte(bus, dev, func,
   4.153 -            PCI_CAPABILITY_LIST);
   4.154 -    while ( cap_ptr >= PCI_MIN_CAP_OFFSET &&
   4.155 -        count < PCI_MAX_CAP_BLOCKS && !error ) {
   4.156 -        cap_ptr &= PCI_CAP_PTR_MASK;
   4.157 -        cap_header = read_pci_config(bus, dev, func, cap_ptr);
   4.158 -        cap_id = get_field_from_reg_u32(cap_header,
   4.159 -                PCI_CAP_ID_MASK, PCI_CAP_ID_SHIFT);
   4.160 -
   4.161 -        if ( cap_id == PCI_CAP_ID_SECURE_DEVICE ) {
   4.162 -            cap_type = get_field_from_reg_u32(cap_header,
   4.163 -                    PCI_CAP_TYPE_MASK, PCI_CAP_TYPE_SHIFT);
   4.164 -            if ( cap_type == PCI_CAP_TYPE_IOMMU ) {
   4.165 -                error = iommu_detect_callback(
   4.166 -                        bus, dev, func, cap_ptr);
   4.167 -            }
   4.168 -        }
   4.169 -
   4.170 -        cap_ptr = get_field_from_reg_u32(cap_header,
   4.171 -                PCI_CAP_NEXT_PTR_MASK, PCI_CAP_NEXT_PTR_SHIFT);
    4.172 -        ++count;
    4.172 -    }
   4.173 -
   4.174 -    return error;
   4.175 -}
   4.176 -
   4.177 -static int __init scan_functions_for_iommu(int bus, int dev,
   4.178 -            iommu_detect_callback_ptr_t iommu_detect_callback)
   4.179 -{
   4.180 -    int func, hdr_type;
   4.181 -    int count, error = 0;
   4.182 -
   4.183 -    func = 0;
   4.184 -    count = 1;
   4.185 -    while ( VALID_PCI_VENDOR_ID(read_pci_config_16(bus, dev, func,
   4.186 -            PCI_VENDOR_ID)) && !error && func < count ) {
   4.187 -        hdr_type = read_pci_config_byte(bus, dev, func,
   4.188 -                PCI_HEADER_TYPE);
   4.189 -
   4.190 -        if ( func == 0 && IS_PCI_MULTI_FUNCTION(hdr_type) )
   4.191 -            count = PCI_MAX_FUNC_COUNT;
   4.192 -
   4.193 -        if ( IS_PCI_TYPE0_HEADER(hdr_type) ||
   4.194 -            IS_PCI_TYPE1_HEADER(hdr_type) ) {
    4.195 -            error = scan_caps_for_iommu(bus, dev, func,
   4.196 -                    iommu_detect_callback);
   4.197 -        }
   4.198 -        ++func;
   4.199 -    }
   4.200 -
   4.201 -    return error;
   4.202 -}
   4.203 -
   4.204 -
   4.205 -int __init scan_for_iommu(iommu_detect_callback_ptr_t iommu_detect_callback)
   4.206 -{
   4.207 -    int bus, dev, error = 0;
   4.208 -
   4.209 -    for ( bus = 0; bus < PCI_MAX_BUS_COUNT && !error; ++bus ) {
   4.210 -        for ( dev = 0; dev < PCI_MAX_DEV_COUNT && !error; ++dev ) {
    4.211 -            error = scan_functions_for_iommu(bus, dev,
   4.212 -                  iommu_detect_callback);
   4.213 -        }
   4.214 -    }
   4.215 -
   4.216 -    return error;
   4.217 -}
   4.218 -
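
The deleted amd-iommu-detect.c above walks each PCI function's capability list and invokes the detect callback when it finds the IOMMU (secure device) capability. As a standalone illustration of that walk, here is a minimal sketch; it reuses read_pci_config_byte() and PCI_CAPABILITY_LIST from the code above, but find_capability() itself and the literal offsets are illustrative assumptions, not part of this changeset:

    /* Hypothetical sketch of the capability-list walk done by
     * scan_caps_for_iommu(): return the config-space offset of the
     * first capability whose ID matches cap_id, or 0 if none. */
    static int find_capability(int bus, int dev, int func, int cap_id)
    {
        /* PCI_CAPABILITY_LIST (0x34) holds the offset of the first entry. */
        int pos = read_pci_config_byte(bus, dev, func, PCI_CAPABILITY_LIST);
        int count = 48;                 /* bound the walk, as the code above does */

        while ( pos >= 0x40 && count-- > 0 )
        {
            pos &= ~3;                  /* capability entries are dword-aligned */
            if ( read_pci_config_byte(bus, dev, func, pos) == cap_id )
                return pos;             /* byte 0 of an entry is the capability ID */
            pos = read_pci_config_byte(bus, dev, func, pos + 1);  /* byte 1: next pointer */
        }
        return 0;                       /* a next pointer of 0 terminates the list */
    }
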
     5.1 --- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-init.c	Thu Feb 21 14:50:27 2008 +0000
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,147 +0,0 @@
     5.4 -/*
     5.5 - * Copyright (C) 2007 Advanced Micro Devices, Inc.
     5.6 - * Author: Leo Duran <leo.duran@amd.com>
     5.7 - * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
     5.8 - *
     5.9 - * This program is free software; you can redistribute it and/or modify
    5.10 - * it under the terms of the GNU General Public License as published by
    5.11 - * the Free Software Foundation; either version 2 of the License, or
    5.12 - * (at your option) any later version.
    5.13 - *
    5.14 - * This program is distributed in the hope that it will be useful,
    5.15 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
    5.16 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    5.17 - * GNU General Public License for more details.
    5.18 - *
    5.19 - * You should have received a copy of the GNU General Public License
    5.20 - * along with this program; if not, write to the Free Software
    5.21 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
    5.22 - */
    5.23 -
    5.24 -#include <xen/config.h>
    5.25 -#include <xen/errno.h>
    5.26 -#include <asm/amd-iommu.h>
    5.27 -#include <asm/hvm/svm/amd-iommu-proto.h>
    5.28 -#include <asm-x86/fixmap.h>
    5.29 -#include "pci-direct.h"
    5.30 -#include "pci_regs.h"
    5.31 -
    5.32 -extern int nr_amd_iommus;
    5.33 -
    5.34 -int __init map_iommu_mmio_region(struct amd_iommu *iommu)
    5.35 -{
    5.36 -    unsigned long mfn;
    5.37 -
    5.38 -    if ( nr_amd_iommus > MAX_AMD_IOMMUS ) {
    5.39 -        gdprintk(XENLOG_ERR,
     5.40 -            "IOMMU: nr_amd_iommus %d > MAX_AMD_IOMMUS\n", nr_amd_iommus);
    5.41 -        return -ENOMEM;
    5.42 -    }
    5.43 -
    5.44 -    iommu->mmio_base = (void *) fix_to_virt(FIX_IOMMU_MMIO_BASE_0 +
    5.45 -                       nr_amd_iommus * MMIO_PAGES_PER_IOMMU);
    5.46 -    mfn = (unsigned long)iommu->mmio_base_phys >> PAGE_SHIFT;
    5.47 -    map_pages_to_xen((unsigned long)iommu->mmio_base, mfn,
    5.48 -                    MMIO_PAGES_PER_IOMMU, PAGE_HYPERVISOR_NOCACHE);
    5.49 -
    5.50 -    memset((u8*)iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH);
    5.51 -
    5.52 -    return 0;
    5.53 -}
    5.54 -
    5.55 -void __init unmap_iommu_mmio_region(struct amd_iommu *iommu)
    5.56 -{
    5.57 -    if ( iommu->mmio_base ) {
    5.58 -        iounmap(iommu->mmio_base);
    5.59 -        iommu->mmio_base = NULL;
    5.60 -    }
    5.61 -}
    5.62 -
    5.63 -void __init register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
    5.64 -{
    5.65 -    u64 addr_64, addr_lo, addr_hi;
    5.66 -    u32 entry;
    5.67 -
    5.68 -    addr_64 = (u64)virt_to_maddr(iommu->dev_table.buffer);
    5.69 -    addr_lo = addr_64 & DMA_32BIT_MASK;
    5.70 -    addr_hi = addr_64 >> 32;
    5.71 -
    5.72 -    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
    5.73 -        IOMMU_DEV_TABLE_BASE_LOW_MASK,
    5.74 -        IOMMU_DEV_TABLE_BASE_LOW_SHIFT, &entry);
    5.75 -    set_field_in_reg_u32((iommu->dev_table.alloc_size / PAGE_SIZE) - 1,
    5.76 -        entry, IOMMU_DEV_TABLE_SIZE_MASK,
    5.77 -        IOMMU_DEV_TABLE_SIZE_SHIFT, &entry);
    5.78 -    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_LOW_OFFSET);
    5.79 -
    5.80 -    set_field_in_reg_u32((u32)addr_hi, 0,
    5.81 -        IOMMU_DEV_TABLE_BASE_HIGH_MASK,
    5.82 -        IOMMU_DEV_TABLE_BASE_HIGH_SHIFT, &entry);
    5.83 -    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_HIGH_OFFSET);
    5.84 -}
    5.85 -
    5.86 -void __init register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
    5.87 -{
    5.88 -    u64 addr_64, addr_lo, addr_hi;
    5.89 -    u32 power_of2_entries;
    5.90 -    u32 entry;
    5.91 -
    5.92 -    addr_64 = (u64)virt_to_maddr(iommu->cmd_buffer.buffer);
    5.93 -    addr_lo = addr_64 & DMA_32BIT_MASK;
    5.94 -    addr_hi = addr_64 >> 32;
    5.95 -
    5.96 -    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
    5.97 -        IOMMU_CMD_BUFFER_BASE_LOW_MASK,
    5.98 -        IOMMU_CMD_BUFFER_BASE_LOW_SHIFT, &entry);
    5.99 -    writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);
   5.100 -
   5.101 -    power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
   5.102 -        IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;
   5.103 -
   5.104 -    set_field_in_reg_u32((u32)addr_hi, 0,
   5.105 -        IOMMU_CMD_BUFFER_BASE_HIGH_MASK,
   5.106 -        IOMMU_CMD_BUFFER_BASE_HIGH_SHIFT, &entry);
   5.107 -    set_field_in_reg_u32(power_of2_entries, entry,
   5.108 -        IOMMU_CMD_BUFFER_LENGTH_MASK,
   5.109 -        IOMMU_CMD_BUFFER_LENGTH_SHIFT, &entry);
   5.110 -    writel(entry, iommu->mmio_base+IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET);
   5.111 -}
   5.112 -
   5.113 -static void __init set_iommu_translation_control(struct amd_iommu *iommu,
   5.114 -            int enable)
   5.115 -{
   5.116 -    u32 entry;
   5.117 -
   5.118 -    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   5.119 -    set_field_in_reg_u32(iommu->ht_tunnel_support ? IOMMU_CONTROL_ENABLED :
    5.120 -        IOMMU_CONTROL_DISABLED, entry,
   5.121 -        IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
   5.122 -        IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
   5.123 -    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
    5.124 -        IOMMU_CONTROL_DISABLED, entry,
   5.125 -        IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
   5.126 -        IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
   5.127 -    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   5.128 -}
   5.129 -
   5.130 -static void __init set_iommu_command_buffer_control(struct amd_iommu *iommu,
   5.131 -            int enable)
   5.132 -{
   5.133 -    u32 entry;
   5.134 -
   5.135 -    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   5.136 -    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
    5.137 -        IOMMU_CONTROL_DISABLED, entry,
   5.138 -        IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
   5.139 -        IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);
   5.140 -    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   5.141 -}
   5.142 -
   5.143 -void __init enable_iommu(struct amd_iommu *iommu)
   5.144 -{
   5.145 -    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
   5.146 -    set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
   5.147 -    printk("AMD IOMMU %d: Enabled\n", nr_amd_iommus);
   5.148 -}
   5.149 -
   5.150 -
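
Every register update in the deleted init code goes through set_field_in_reg_u32(), with get_field_from_reg_u32() as its reader counterpart. Their definitions live in the AMD IOMMU headers rather than in this diff, but the call sites imply plain mask/shift read-modify-write helpers; the following reconstruction (prefixed sketch_ to mark it as an assumption) is consistent with that usage:

    /* Assumed behaviour of the field helpers, inferred from their call
     * sites; the authoritative definitions are in the amd-iommu headers. */
    static void sketch_set_field_in_reg_u32(u32 field, u32 reg,
                                            u32 mask, u32 shift, u32 *value)
    {
        reg &= ~mask;                    /* clear the target field */
        reg |= (field << shift) & mask;  /* insert the new field value */
        *value = reg;
    }

    static u32 sketch_get_field_from_reg_u32(u32 reg, u32 mask, u32 shift)
    {
        return (reg & mask) >> shift;    /* extract and right-align the field */
    }

Read this way, set_iommu_translation_control() above is a classic read-modify-write of the IOMMU control register: read the current value, rewrite the HT-tunnel and translation-enable fields, and write the result back.
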
     6.1 --- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c	Thu Feb 21 14:50:27 2008 +0000
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,450 +0,0 @@
     6.4 -/*
     6.5 - * Copyright (C) 2007 Advanced Micro Devices, Inc.
     6.6 - * Author: Leo Duran <leo.duran@amd.com>
     6.7 - * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
     6.8 - *
     6.9 - * This program is free software; you can redistribute it and/or modify
    6.10 - * it under the terms of the GNU General Public License as published by
    6.11 - * the Free Software Foundation; either version 2 of the License, or
    6.12 - * (at your option) any later version.
    6.13 - *
    6.14 - * This program is distributed in the hope that it will be useful,
    6.15 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
    6.16 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    6.17 - * GNU General Public License for more details.
    6.18 - *
    6.19 - * You should have received a copy of the GNU General Public License
    6.20 - * along with this program; if not, write to the Free Software
    6.21 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
    6.22 - */
    6.23 -
    6.24 -#include <xen/sched.h>
    6.25 -#include <asm/hvm/iommu.h>
    6.26 -#include <asm/amd-iommu.h>
    6.27 -#include <asm/hvm/svm/amd-iommu-proto.h>
    6.28 -
    6.29 -extern long amd_iommu_poll_comp_wait;
    6.30 -
    6.31 -static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
    6.32 -{
    6.33 -    u32 tail, head, *cmd_buffer;
    6.34 -    int i;
    6.35 -
    6.36 -    tail = iommu->cmd_buffer_tail;
    6.37 -    if ( ++tail == iommu->cmd_buffer.entries )
    6.38 -        tail = 0;
    6.39 -    head = get_field_from_reg_u32(
    6.40 -        readl(iommu->mmio_base+IOMMU_CMD_BUFFER_HEAD_OFFSET),
    6.41 -        IOMMU_CMD_BUFFER_HEAD_MASK,
    6.42 -        IOMMU_CMD_BUFFER_HEAD_SHIFT);
    6.43 -    if ( head != tail )
    6.44 -    {
    6.45 -        cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer +
    6.46 -                             (iommu->cmd_buffer_tail *
    6.47 -                              IOMMU_CMD_BUFFER_ENTRY_SIZE));
    6.48 -        for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; i++ )
    6.49 -            cmd_buffer[i] = cmd[i];
    6.50 -
    6.51 -        iommu->cmd_buffer_tail = tail;
    6.52 -        return 1;
    6.53 -    }
    6.54 -
    6.55 -    return 0;
    6.56 -}
    6.57 -
    6.58 -static void commit_iommu_command_buffer(struct amd_iommu *iommu)
    6.59 -{
    6.60 -    u32 tail;
    6.61 -
    6.62 -    set_field_in_reg_u32(iommu->cmd_buffer_tail, 0,
    6.63 -                         IOMMU_CMD_BUFFER_TAIL_MASK,
    6.64 -                         IOMMU_CMD_BUFFER_TAIL_SHIFT, &tail);
    6.65 -    writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET);
    6.66 -}
    6.67 -
    6.68 -int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
    6.69 -{
    6.70 -    if ( queue_iommu_command(iommu, cmd) )
    6.71 -    {
    6.72 -        commit_iommu_command_buffer(iommu);
    6.73 -        return 1;
    6.74 -    }
    6.75 -
    6.76 -    return 0;
    6.77 -}
    6.78 -
    6.79 -static void invalidate_iommu_page(struct amd_iommu *iommu,
    6.80 -                                  u64 io_addr, u16 domain_id)
    6.81 -{
    6.82 -    u64 addr_lo, addr_hi;
    6.83 -    u32 cmd[4], entry;
    6.84 -
    6.85 -    addr_lo = io_addr & DMA_32BIT_MASK;
    6.86 -    addr_hi = io_addr >> 32;
    6.87 -
    6.88 -    set_field_in_reg_u32(domain_id, 0,
    6.89 -                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
    6.90 -                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
    6.91 -    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
    6.92 -                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
    6.93 -                         &entry);
    6.94 -    cmd[1] = entry;
    6.95 -
    6.96 -    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, 0,
    6.97 -                         IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
    6.98 -                         IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
    6.99 -    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
   6.100 -                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
   6.101 -                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
   6.102 -    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
   6.103 -                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
   6.104 -                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
   6.105 -    cmd[2] = entry;
   6.106 -
   6.107 -    set_field_in_reg_u32((u32)addr_hi, 0,
   6.108 -                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
   6.109 -                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
   6.110 -    cmd[3] = entry;
   6.111 -
   6.112 -    cmd[0] = 0;
   6.113 -    send_iommu_command(iommu, cmd);
   6.114 -}
   6.115 -
   6.116 -void flush_command_buffer(struct amd_iommu *iommu)
   6.117 -{
   6.118 -    u32 cmd[4], status;
   6.119 -    int loop_count, comp_wait;
   6.120 -
   6.121 -    /* clear 'ComWaitInt' in status register (WIC) */
   6.122 -    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
   6.123 -                         IOMMU_STATUS_COMP_WAIT_INT_MASK,
   6.124 -                         IOMMU_STATUS_COMP_WAIT_INT_SHIFT, &status);
   6.125 -    writel(status, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
   6.126 -
   6.127 -    /* send an empty COMPLETION_WAIT command to flush command buffer */
   6.128 -    cmd[3] = cmd[2] = 0;
   6.129 -    set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0,
   6.130 -                         IOMMU_CMD_OPCODE_MASK,
   6.131 -                         IOMMU_CMD_OPCODE_SHIFT, &cmd[1]);
   6.132 -    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
   6.133 -                         IOMMU_COMP_WAIT_I_FLAG_MASK,
   6.134 -                         IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]);
   6.135 -    send_iommu_command(iommu, cmd);
   6.136 -
    6.137 -    /* wait for 'ComWaitInt' to signal completion */
   6.138 -    if ( amd_iommu_poll_comp_wait ) {
   6.139 -        loop_count = amd_iommu_poll_comp_wait;
   6.140 -        do {
   6.141 -            status = readl(iommu->mmio_base +
   6.142 -                           IOMMU_STATUS_MMIO_OFFSET);
   6.143 -            comp_wait = get_field_from_reg_u32(
   6.144 -                status,
   6.145 -                IOMMU_STATUS_COMP_WAIT_INT_MASK,
   6.146 -                IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
   6.147 -            --loop_count;
   6.148 -        } while ( loop_count && !comp_wait );
   6.149 -
   6.150 -        if ( comp_wait )
   6.151 -        {
   6.152 -            /* clear 'ComWaitInt' in status register (WIC) */
   6.153 -            status &= IOMMU_STATUS_COMP_WAIT_INT_MASK;
   6.154 -            writel(status, iommu->mmio_base +
   6.155 -                   IOMMU_STATUS_MMIO_OFFSET);
   6.156 -        }
   6.157 -        else
   6.158 -            dprintk(XENLOG_WARNING, "AMD IOMMU: Warning:"
   6.159 -                    " ComWaitInt bit did not assert!\n");
   6.160 -    }
   6.161 -}
   6.162 -
   6.163 -static void clear_page_table_entry_present(u32 *pte)
   6.164 -{
   6.165 -    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, pte[0],
   6.166 -                         IOMMU_PTE_PRESENT_MASK,
   6.167 -                         IOMMU_PTE_PRESENT_SHIFT, &pte[0]);
   6.168 -}
   6.169 -
   6.170 -static void set_page_table_entry_present(u32 *pte, u64 page_addr,
   6.171 -                                         int iw, int ir)
   6.172 -{
   6.173 -    u64 addr_lo, addr_hi;
   6.174 -    u32 entry;
   6.175 -
   6.176 -    addr_lo = page_addr & DMA_32BIT_MASK;
   6.177 -    addr_hi = page_addr >> 32;
   6.178 -
   6.179 -    set_field_in_reg_u32((u32)addr_hi, 0,
   6.180 -                         IOMMU_PTE_ADDR_HIGH_MASK,
   6.181 -                         IOMMU_PTE_ADDR_HIGH_SHIFT, &entry);
   6.182 -    set_field_in_reg_u32(iw ? IOMMU_CONTROL_ENABLED :
   6.183 -                         IOMMU_CONTROL_DISABLED, entry,
   6.184 -                         IOMMU_PTE_IO_WRITE_PERMISSION_MASK,
   6.185 -                         IOMMU_PTE_IO_WRITE_PERMISSION_SHIFT, &entry);
   6.186 -    set_field_in_reg_u32(ir ? IOMMU_CONTROL_ENABLED :
   6.187 -                         IOMMU_CONTROL_DISABLED, entry,
   6.188 -                         IOMMU_PTE_IO_READ_PERMISSION_MASK,
   6.189 -                         IOMMU_PTE_IO_READ_PERMISSION_SHIFT, &entry);
   6.190 -    pte[1] = entry;
   6.191 -
   6.192 -    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
   6.193 -                         IOMMU_PTE_ADDR_LOW_MASK,
   6.194 -                         IOMMU_PTE_ADDR_LOW_SHIFT, &entry);
   6.195 -    set_field_in_reg_u32(IOMMU_PAGING_MODE_LEVEL_0, entry,
   6.196 -                         IOMMU_PTE_NEXT_LEVEL_MASK,
   6.197 -                         IOMMU_PTE_NEXT_LEVEL_SHIFT, &entry);
   6.198 -    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   6.199 -                         IOMMU_PTE_PRESENT_MASK,
   6.200 -                         IOMMU_PTE_PRESENT_SHIFT, &entry);
   6.201 -    pte[0] = entry;
   6.202 -}
   6.203 -
   6.204 -
   6.205 -static void amd_iommu_set_page_directory_entry(u32 *pde, 
   6.206 -                                               u64 next_ptr, u8 next_level)
   6.207 -{
   6.208 -    u64 addr_lo, addr_hi;
   6.209 -    u32 entry;
   6.210 -
   6.211 -    addr_lo = next_ptr & DMA_32BIT_MASK;
   6.212 -    addr_hi = next_ptr >> 32;
   6.213 -
    6.214 -    /* enable read/write permissions, which will be enforced at the PTE */
   6.215 -    set_field_in_reg_u32((u32)addr_hi, 0,
   6.216 -                         IOMMU_PDE_ADDR_HIGH_MASK,
   6.217 -                         IOMMU_PDE_ADDR_HIGH_SHIFT, &entry);
   6.218 -    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   6.219 -                         IOMMU_PDE_IO_WRITE_PERMISSION_MASK,
   6.220 -                         IOMMU_PDE_IO_WRITE_PERMISSION_SHIFT, &entry);
   6.221 -    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   6.222 -                         IOMMU_PDE_IO_READ_PERMISSION_MASK,
   6.223 -                         IOMMU_PDE_IO_READ_PERMISSION_SHIFT, &entry);
   6.224 -    pde[1] = entry;
   6.225 -
   6.226 -    /* mark next level as 'present' */
   6.227 -    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
   6.228 -                         IOMMU_PDE_ADDR_LOW_MASK,
   6.229 -                         IOMMU_PDE_ADDR_LOW_SHIFT, &entry);
   6.230 -    set_field_in_reg_u32(next_level, entry,
   6.231 -                         IOMMU_PDE_NEXT_LEVEL_MASK,
   6.232 -                         IOMMU_PDE_NEXT_LEVEL_SHIFT, &entry);
   6.233 -    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   6.234 -                         IOMMU_PDE_PRESENT_MASK,
   6.235 -                         IOMMU_PDE_PRESENT_SHIFT, &entry);
   6.236 -    pde[0] = entry;
   6.237 -}
   6.238 -
   6.239 -void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u16 domain_id,
   6.240 -                                   u8 paging_mode)
   6.241 -{
   6.242 -    u64 addr_hi, addr_lo;
   6.243 -    u32 entry;
   6.244 -
   6.245 -    dte[6] = dte[5] = dte[4] = 0;
   6.246 -
   6.247 -    set_field_in_reg_u32(IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED, 0,
   6.248 -                         IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
   6.249 -                         IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT, &entry);
   6.250 -    dte[3] = entry;
   6.251 -
   6.252 -    set_field_in_reg_u32(domain_id, 0,
   6.253 -                         IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
   6.254 -                         IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry);
   6.255 -    dte[2] = entry;
   6.256 -
   6.257 -    addr_lo = root_ptr & DMA_32BIT_MASK;
   6.258 -    addr_hi = root_ptr >> 32;
   6.259 -    set_field_in_reg_u32((u32)addr_hi, 0,
   6.260 -                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
   6.261 -                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT, &entry);
   6.262 -    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   6.263 -                         IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK,
   6.264 -                         IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_SHIFT, &entry);
   6.265 -    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   6.266 -                         IOMMU_DEV_TABLE_IO_READ_PERMISSION_MASK,
   6.267 -                         IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT, &entry);
   6.268 -    dte[1] = entry;
   6.269 -
   6.270 -    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
   6.271 -                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
   6.272 -                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT, &entry);
   6.273 -    set_field_in_reg_u32(paging_mode, entry,
   6.274 -                         IOMMU_DEV_TABLE_PAGING_MODE_MASK,
   6.275 -                         IOMMU_DEV_TABLE_PAGING_MODE_SHIFT, &entry);
   6.276 -    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   6.277 -                         IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
   6.278 -                         IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry);
   6.279 -    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   6.280 -                         IOMMU_DEV_TABLE_VALID_MASK,
   6.281 -                         IOMMU_DEV_TABLE_VALID_SHIFT, &entry);
   6.282 -    dte[0] = entry;
   6.283 -}
   6.284 -
   6.285 -void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry)
   6.286 -{
   6.287 -    u64 addr_lo, addr_hi, ptr;
   6.288 -
   6.289 -    addr_lo = get_field_from_reg_u32(
   6.290 -        entry[0],
   6.291 -        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
   6.292 -        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT);
   6.293 -
   6.294 -    addr_hi = get_field_from_reg_u32(
   6.295 -        entry[1],
   6.296 -        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
   6.297 -        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT);
   6.298 -
   6.299 -    ptr = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
   6.300 -    return ptr ? maddr_to_virt((unsigned long)ptr) : NULL;
   6.301 -}
   6.302 -
   6.303 -static int amd_iommu_is_pte_present(u32 *entry)
   6.304 -{
   6.305 -    return (get_field_from_reg_u32(entry[0],
   6.306 -                                   IOMMU_PDE_PRESENT_MASK,
   6.307 -                                   IOMMU_PDE_PRESENT_SHIFT));
   6.308 -}
   6.309 -
   6.310 -void invalidate_dev_table_entry(struct amd_iommu *iommu,
   6.311 -                                u16 device_id)
   6.312 -{
   6.313 -    u32 cmd[4], entry;
   6.314 -
   6.315 -    cmd[3] = cmd[2] = 0;
   6.316 -    set_field_in_reg_u32(device_id, 0,
   6.317 -                         IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK,
   6.318 -                         IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT, &entry);
   6.319 -    cmd[0] = entry;
   6.320 -
   6.321 -    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY, 0,
   6.322 -                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
   6.323 -                         &entry);
   6.324 -    cmd[1] = entry;
   6.325 -
   6.326 -    send_iommu_command(iommu, cmd);
   6.327 -}
   6.328 -
   6.329 -int amd_iommu_is_dte_page_translation_valid(u32 *entry)
   6.330 -{
   6.331 -    return (get_field_from_reg_u32(entry[0],
   6.332 -                                   IOMMU_DEV_TABLE_VALID_MASK,
   6.333 -                                   IOMMU_DEV_TABLE_VALID_SHIFT) &&
   6.334 -            get_field_from_reg_u32(entry[0],
   6.335 -                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
   6.336 -                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT));
   6.337 -}
   6.338 -
   6.339 -static void *get_pte_from_page_tables(void *table, int level,
   6.340 -                                      unsigned long io_pfn)
   6.341 -{
   6.342 -    unsigned long offset;
   6.343 -    void *pde = NULL;
   6.344 -
   6.345 -    BUG_ON(table == NULL);
   6.346 -
   6.347 -    while ( level > 0 )
   6.348 -    {
   6.349 -        offset = io_pfn >> ((PTE_PER_TABLE_SHIFT *
   6.350 -                             (level - IOMMU_PAGING_MODE_LEVEL_1)));
   6.351 -        offset &= ~PTE_PER_TABLE_MASK;
   6.352 -        pde = table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);
   6.353 -
   6.354 -        if ( level == 1 )
   6.355 -            break;
   6.356 -        if ( !pde )
   6.357 -            return NULL;
   6.358 -        if ( !amd_iommu_is_pte_present(pde) )
   6.359 -        {
   6.360 -            void *next_table = alloc_xenheap_page();
   6.361 -            if ( next_table == NULL )
   6.362 -                return NULL;
   6.363 -            memset(next_table, 0, PAGE_SIZE);
   6.364 -            if ( *(u64 *)pde == 0 )
   6.365 -            {
   6.366 -                unsigned long next_ptr = (u64)virt_to_maddr(next_table);
   6.367 -                amd_iommu_set_page_directory_entry(
   6.368 -                    (u32 *)pde, next_ptr, level - 1);
   6.369 -            }
   6.370 -            else
   6.371 -            {
   6.372 -                free_xenheap_page(next_table);
   6.373 -            }
   6.374 -        }
   6.375 -        table = amd_iommu_get_vptr_from_page_table_entry(pde);
   6.376 -        level--;
   6.377 -    }
   6.378 -
   6.379 -    return pde;
   6.380 -}
   6.381 -
   6.382 -int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
   6.383 -{
   6.384 -    void *pte;
   6.385 -    unsigned long flags;
   6.386 -    u64 maddr;
   6.387 -    struct hvm_iommu *hd = domain_hvm_iommu(d);
   6.388 -    int iw, ir;
   6.389 -
   6.390 -    BUG_ON( !hd->root_table );
   6.391 -
   6.392 -    maddr = (u64)mfn << PAGE_SHIFT;
   6.393 -
   6.394 -    iw = IOMMU_IO_WRITE_ENABLED;
   6.395 -    ir = IOMMU_IO_READ_ENABLED;
   6.396 -
   6.397 -    spin_lock_irqsave(&hd->mapping_lock, flags);
   6.398 -
   6.399 -    pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
   6.400 -    if ( pte == 0 )
   6.401 -    {
   6.402 -        dprintk(XENLOG_ERR,
   6.403 -                "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
   6.404 -        spin_unlock_irqrestore(&hd->mapping_lock, flags);
   6.405 -        return -EIO;
   6.406 -    }
   6.407 -
   6.408 -    set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
   6.409 -
   6.410 -    spin_unlock_irqrestore(&hd->mapping_lock, flags);
   6.411 -    return 0;
   6.412 -}
   6.413 -
   6.414 -int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
   6.415 -{
   6.416 -    void *pte;
   6.417 -    unsigned long flags;
   6.418 -    u64 io_addr = gfn;
   6.419 -    int requestor_id;
   6.420 -    struct amd_iommu *iommu;
   6.421 -    struct hvm_iommu *hd = domain_hvm_iommu(d);
   6.422 -
   6.423 -    BUG_ON( !hd->root_table );
   6.424 -
   6.425 -    requestor_id = hd->domain_id;
   6.426 -    io_addr = (u64)gfn << PAGE_SHIFT;
   6.427 -
   6.428 -    spin_lock_irqsave(&hd->mapping_lock, flags);
   6.429 -
   6.430 -    pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
   6.431 -    if ( pte == 0 )
   6.432 -    {
   6.433 -        dprintk(XENLOG_ERR,
   6.434 -                "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
   6.435 -        spin_unlock_irqrestore(&hd->mapping_lock, flags);
   6.436 -        return -EIO;
   6.437 -    }
   6.438 -
   6.439 -    /* mark PTE as 'page not present' */
   6.440 -    clear_page_table_entry_present((u32 *)pte);
   6.441 -    spin_unlock_irqrestore(&hd->mapping_lock, flags);
   6.442 -
   6.443 -    /* send INVALIDATE_IOMMU_PAGES command */
   6.444 -    for_each_amd_iommu(iommu)
   6.445 -    {
   6.446 -        spin_lock_irqsave(&iommu->lock, flags);
   6.447 -        invalidate_iommu_page(iommu, io_addr, requestor_id);
   6.448 -        flush_command_buffer(iommu);
   6.449 -        spin_unlock_irqrestore(&iommu->lock, flags);
   6.450 -    }
   6.451 -
   6.452 -    return 0;
   6.453 -}
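
The walk in get_pte_from_page_tables() above picks one entry per level by shifting and masking the I/O pfn. Assuming PTE_PER_TABLE_SHIFT is 9 and IOMMU_PAGING_MODE_LEVEL_1 is 1 (512 eight-byte entries per 4K table, as in the AMD IOMMU page-table format), the index computation reduces to this sketch; table_index() is a hypothetical helper, not part of the changeset:

    /* Per-level table index for an I/O pfn under the assumptions above:
     * level 1 consumes pfn bits 0-8, level 2 bits 9-17, and so on. */
    static unsigned int table_index(unsigned long io_pfn, int level)
    {
        return (io_pfn >> (9 * (level - 1))) & (512 - 1);
    }

For example, io_pfn 0x12345 selects entry 0x91 at level 2 and entry 0x145 at level 1, which is exactly what the shift/mask pair in the deleted code computes.
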
     7.1 --- a/xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c	Thu Feb 21 14:50:27 2008 +0000
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,578 +0,0 @@
     7.4 -/*
     7.5 - * Copyright (C) 2007 Advanced Micro Devices, Inc.
     7.6 - * Author: Leo Duran <leo.duran@amd.com>
     7.7 - * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
     7.8 - *
     7.9 - * This program is free software; you can redistribute it and/or modify
    7.10 - * it under the terms of the GNU General Public License as published by
    7.11 - * the Free Software Foundation; either version 2 of the License, or
    7.12 - * (at your option) any later version.
    7.13 - *
    7.14 - * This program is distributed in the hope that it will be useful,
    7.15 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
    7.16 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    7.17 - * GNU General Public License for more details.
    7.18 - *
    7.19 - * You should have received a copy of the GNU General Public License
    7.20 - * along with this program; if not, write to the Free Software
    7.21 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
    7.22 - */
    7.23 -
    7.24 -#include <asm/amd-iommu.h>
    7.25 -#include <asm/hvm/svm/amd-iommu-proto.h>
    7.26 -#include <xen/sched.h>
    7.27 -#include <asm/mm.h>
    7.28 -#include "pci-direct.h"
    7.29 -#include "pci_regs.h"
    7.30 -
    7.31 -struct list_head amd_iommu_head;
    7.32 -long amd_iommu_poll_comp_wait = COMPLETION_WAIT_DEFAULT_POLLING_COUNT;
    7.33 -static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
    7.34 -int nr_amd_iommus = 0;
    7.35 -
    7.36 -/* will set if amd-iommu HW is found */
    7.37 -int amd_iommu_enabled = 0;
    7.38 -
    7.39 -static int enable_amd_iommu = 0;
    7.40 -boolean_param("enable_amd_iommu", enable_amd_iommu);
    7.41 -
    7.42 -static void deallocate_domain_page_tables(struct hvm_iommu *hd)
    7.43 -{
    7.44 -    if ( hd->root_table )
    7.45 -        free_xenheap_page(hd->root_table);
    7.46 -}
    7.47 -
    7.48 -static void deallocate_domain_resources(struct hvm_iommu *hd)
    7.49 -{
    7.50 -    deallocate_domain_page_tables(hd);
    7.51 -}
    7.52 -
    7.53 -static void __init init_cleanup(void)
    7.54 -{
    7.55 -    struct amd_iommu *iommu;
    7.56 -
    7.57 -    for_each_amd_iommu ( iommu )
    7.58 -        unmap_iommu_mmio_region(iommu);
    7.59 -}
    7.60 -
    7.61 -static void __init deallocate_iommu_table_struct(
    7.62 -    struct table_struct *table)
    7.63 -{
    7.64 -    if ( table->buffer )
    7.65 -    {
    7.66 -        free_xenheap_pages(table->buffer,
    7.67 -                           get_order_from_bytes(table->alloc_size));
    7.68 -        table->buffer = NULL;
    7.69 -    }
    7.70 -}
    7.71 -
    7.72 -static void __init deallocate_iommu_resources(struct amd_iommu *iommu)
    7.73 -{
    7.74 -    deallocate_iommu_table_struct(&iommu->dev_table);
     7.75 -    deallocate_iommu_table_struct(&iommu->cmd_buffer);
    7.76 -}
    7.77 -
    7.78 -static void __init detect_cleanup(void)
    7.79 -{
    7.80 -    struct amd_iommu *iommu, *next;
    7.81 -
    7.82 -    list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list )
    7.83 -    {
    7.84 -        list_del(&iommu->list);
    7.85 -        deallocate_iommu_resources(iommu);
    7.86 -        xfree(iommu);
    7.87 -    }
    7.88 -}
    7.89 -
    7.90 -static int requestor_id_from_bdf(int bdf)
    7.91 -{
    7.92 -    /* HACK - HACK */
    7.93 -    /* account for possible 'aliasing' by parent device */
    7.94 -    return bdf;
    7.95 -}
    7.96 -
    7.97 -static int __init allocate_iommu_table_struct(struct table_struct *table,
    7.98 -                                              const char *name)
    7.99 -{
   7.100 -    table->buffer = (void *) alloc_xenheap_pages(
   7.101 -        get_order_from_bytes(table->alloc_size));
   7.102 -
   7.103 -    if ( !table->buffer )
   7.104 -    {
   7.105 -        dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating %s\n", name);
   7.106 -        return -ENOMEM;
   7.107 -    }
   7.108 -
   7.109 -    memset(table->buffer, 0, table->alloc_size);
   7.110 -
   7.111 -    return 0;
   7.112 -}
   7.113 -
   7.114 -static int __init allocate_iommu_resources(struct amd_iommu *iommu)
   7.115 -{
   7.116 -    /* allocate 'device table' on a 4K boundary */
   7.117 -    iommu->dev_table.alloc_size =
   7.118 -        PAGE_ALIGN(((iommu->last_downstream_bus + 1) *
   7.119 -                    IOMMU_DEV_TABLE_ENTRIES_PER_BUS) *
   7.120 -                   IOMMU_DEV_TABLE_ENTRY_SIZE);
   7.121 -    iommu->dev_table.entries =
   7.122 -        iommu->dev_table.alloc_size / IOMMU_DEV_TABLE_ENTRY_SIZE;
   7.123 -
   7.124 -    if ( allocate_iommu_table_struct(&iommu->dev_table,
   7.125 -                                     "Device Table") != 0 )
   7.126 -        goto error_out;
   7.127 -
   7.128 -    /* allocate 'command buffer' in power of 2 increments of 4K */
   7.129 -    iommu->cmd_buffer_tail = 0;
   7.130 -    iommu->cmd_buffer.alloc_size =
   7.131 -        PAGE_SIZE << get_order_from_bytes(
   7.132 -            PAGE_ALIGN(amd_iommu_cmd_buffer_entries *
   7.133 -                       IOMMU_CMD_BUFFER_ENTRY_SIZE));
   7.134 -
   7.135 -    iommu->cmd_buffer.entries =
   7.136 -        iommu->cmd_buffer.alloc_size / IOMMU_CMD_BUFFER_ENTRY_SIZE;
   7.137 -
   7.138 -    if ( allocate_iommu_table_struct(&iommu->cmd_buffer,
   7.139 -                                     "Command Buffer") != 0 )
   7.140 -        goto error_out;
   7.141 -
   7.142 -    return 0;
   7.143 -
   7.144 - error_out:
   7.145 -    deallocate_iommu_resources(iommu);
   7.146 -    return -ENOMEM;
   7.147 -}
   7.148 -
   7.149 -int iommu_detect_callback(u8 bus, u8 dev, u8 func, u8 cap_ptr)
   7.150 -{
   7.151 -    struct amd_iommu *iommu;
   7.152 -
   7.153 -    iommu = (struct amd_iommu *) xmalloc(struct amd_iommu);
   7.154 -    if ( !iommu )
   7.155 -    {
   7.156 -        dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating amd_iommu\n");
   7.157 -        return -ENOMEM;
   7.158 -    }
   7.159 -    memset(iommu, 0, sizeof(struct amd_iommu));
   7.160 -    spin_lock_init(&iommu->lock);
   7.161 -
   7.162 -    /* get capability and topology information */
   7.163 -    if ( get_iommu_capabilities(bus, dev, func, cap_ptr, iommu) != 0 )
   7.164 -        goto error_out;
   7.165 -    if ( get_iommu_last_downstream_bus(iommu) != 0 )
   7.166 -        goto error_out;
   7.167 -
   7.168 -    list_add_tail(&iommu->list, &amd_iommu_head);
   7.169 -
   7.170 -    /* allocate resources for this IOMMU */
   7.171 -    if (allocate_iommu_resources(iommu) != 0)
   7.172 -        goto error_out;
   7.173 -
   7.174 -    return 0;
   7.175 -
   7.176 - error_out:
   7.177 -    xfree(iommu);
   7.178 -    return -ENODEV;
   7.179 -}
   7.180 -
   7.181 -static int __init amd_iommu_init(void)
   7.182 -{
   7.183 -    struct amd_iommu *iommu;
   7.184 -    unsigned long flags;
   7.185 -
   7.186 -    for_each_amd_iommu ( iommu )
   7.187 -    {
   7.188 -        spin_lock_irqsave(&iommu->lock, flags);
   7.189 -
    7.190 -        /* register IOMMU data structures in MMIO space */
   7.191 -        if ( map_iommu_mmio_region(iommu) != 0 )
   7.192 -            goto error_out;
   7.193 -        register_iommu_dev_table_in_mmio_space(iommu);
   7.194 -        register_iommu_cmd_buffer_in_mmio_space(iommu);
   7.195 -
   7.196 -        /* enable IOMMU translation services */
   7.197 -        enable_iommu(iommu);
   7.198 -        nr_amd_iommus++;
   7.199 -
   7.200 -        spin_unlock_irqrestore(&iommu->lock, flags);
   7.201 -    }
   7.202 -
   7.203 -    amd_iommu_enabled = 1;
   7.204 -
   7.205 -    return 0;
   7.206 -
   7.207 - error_out:
   7.208 -    init_cleanup();
   7.209 -    return -ENODEV;
   7.210 -}
   7.211 -
   7.212 -struct amd_iommu *find_iommu_for_device(int bus, int devfn)
   7.213 -{
   7.214 -    struct amd_iommu *iommu;
   7.215 -
   7.216 -    for_each_amd_iommu ( iommu )
   7.217 -    {
   7.218 -        if ( bus == iommu->root_bus )
   7.219 -        {
   7.220 -            if ( (devfn >= iommu->first_devfn) &&
   7.221 -                 (devfn <= iommu->last_devfn) )
   7.222 -                return iommu;
   7.223 -        }
   7.224 -        else if ( bus <= iommu->last_downstream_bus )
   7.225 -        {
   7.226 -            if ( iommu->downstream_bus_present[bus] )
   7.227 -                return iommu;
   7.228 -        }
   7.229 -    }
   7.230 -
   7.231 -    return NULL;
   7.232 -}
   7.233 -
   7.234 -void amd_iommu_setup_domain_device(
   7.235 -    struct domain *domain, struct amd_iommu *iommu, int requestor_id)
   7.236 -{
   7.237 -    void *dte;
   7.238 -    u64 root_ptr;
   7.239 -    unsigned long flags;
   7.240 -    struct hvm_iommu *hd = domain_hvm_iommu(domain);
   7.241 -
    7.242 -    BUG_ON( !hd->root_table || !hd->paging_mode );
   7.243 -
   7.244 -    root_ptr = (u64)virt_to_maddr(hd->root_table);
   7.245 -    dte = iommu->dev_table.buffer +
   7.246 -        (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
   7.247 -
   7.248 -    if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
   7.249 -    {
   7.250 -        spin_lock_irqsave(&iommu->lock, flags); 
   7.251 -
   7.252 -        amd_iommu_set_dev_table_entry(
   7.253 -            (u32 *)dte,
   7.254 -            root_ptr, hd->domain_id, hd->paging_mode);
   7.255 -        invalidate_dev_table_entry(iommu, requestor_id);
   7.256 -        flush_command_buffer(iommu);
   7.257 -        dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
   7.258 -                "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
   7.259 -                requestor_id, root_ptr, hd->domain_id, hd->paging_mode);
   7.260 -
   7.261 -        spin_unlock_irqrestore(&iommu->lock, flags);
   7.262 -    }
   7.263 -}
   7.264 -
   7.265 -void __init amd_iommu_setup_dom0_devices(void)
   7.266 -{
   7.267 -    struct hvm_iommu *hd = domain_hvm_iommu(dom0);
   7.268 -    struct amd_iommu *iommu;
   7.269 -    struct pci_dev *pdev;
   7.270 -    int bus, dev, func;
   7.271 -    u32 l;
   7.272 -    int req_id, bdf;
   7.273 -
   7.274 -    for ( bus = 0; bus < 256; bus++ )
   7.275 -    {
   7.276 -        for ( dev = 0; dev < 32; dev++ )
   7.277 -        {
   7.278 -            for ( func = 0; func < 8; func++ )
   7.279 -            {
   7.280 -                l = read_pci_config(bus, dev, func, PCI_VENDOR_ID);
   7.281 -                /* some broken boards return 0 or ~0 if a slot is empty: */
   7.282 -                if ( l == 0xffffffff || l == 0x00000000 ||
   7.283 -                     l == 0x0000ffff || l == 0xffff0000 )
   7.284 -                    continue;
   7.285 -
   7.286 -                pdev = xmalloc(struct pci_dev);
   7.287 -                pdev->bus = bus;
   7.288 -                pdev->devfn = PCI_DEVFN(dev, func);
   7.289 -                list_add_tail(&pdev->list, &hd->pdev_list);
   7.290 -
   7.291 -                bdf = (bus << 8) | pdev->devfn;
   7.292 -                req_id = requestor_id_from_bdf(bdf);
   7.293 -                iommu = find_iommu_for_device(bus, pdev->devfn);
   7.294 -
   7.295 -                if ( iommu )
   7.296 -                    amd_iommu_setup_domain_device(dom0, iommu, req_id);
   7.297 -            }
   7.298 -        }
   7.299 -    }
   7.300 -}
   7.301 -
   7.302 -int amd_iommu_detect(void)
   7.303 -{
   7.304 -    unsigned long i;
   7.305 -
   7.306 -    if ( !enable_amd_iommu )
   7.307 -    {
   7.308 -        printk("AMD IOMMU: Disabled\n");
   7.309 -        return 0;
   7.310 -    }
   7.311 -
   7.312 -    INIT_LIST_HEAD(&amd_iommu_head);
   7.313 -
   7.314 -    if ( scan_for_iommu(iommu_detect_callback) != 0 )
   7.315 -    {
    7.316 -        dprintk(XENLOG_ERR, "AMD IOMMU: Detection failed\n");
   7.317 -        goto error_out;
   7.318 -    }
   7.319 -
   7.320 -    if ( !iommu_found() )
   7.321 -    {
   7.322 -        printk("AMD IOMMU: Not found!\n");
   7.323 -        return 0;
   7.324 -    }
   7.325 -
   7.326 -    if ( amd_iommu_init() != 0 )
   7.327 -    {
    7.328 -        dprintk(XENLOG_ERR, "AMD IOMMU: Initialization failed\n");
   7.329 -        goto error_out;
   7.330 -    }
   7.331 -
   7.332 -    if ( iommu_domain_init(dom0) != 0 )
   7.333 -        goto error_out;
   7.334 -
   7.335 -    /* setup 1:1 page table for dom0 */
   7.336 -    for ( i = 0; i < max_page; i++ )
   7.337 -        amd_iommu_map_page(dom0, i, i);
   7.338 -
   7.339 -    amd_iommu_setup_dom0_devices();
   7.340 -    return 0;
   7.341 -
   7.342 - error_out:
   7.343 -    detect_cleanup();
   7.344 -    return -ENODEV;
   7.345 -
   7.346 -}
   7.347 -
   7.348 -static int allocate_domain_resources(struct hvm_iommu *hd)
   7.349 -{
   7.350 -    /* allocate root table */
   7.351 -    unsigned long flags;
   7.352 -
   7.353 -    spin_lock_irqsave(&hd->mapping_lock, flags);
   7.354 -    if ( !hd->root_table )
   7.355 -    {
   7.356 -        hd->root_table = (void *)alloc_xenheap_page();
   7.357 -        if ( !hd->root_table )
   7.358 -            goto error_out;
   7.359 -        memset((u8*)hd->root_table, 0, PAGE_SIZE);
   7.360 -    }
   7.361 -    spin_unlock_irqrestore(&hd->mapping_lock, flags);
   7.362 -
   7.363 -    return 0;
   7.364 - error_out:
   7.365 -    spin_unlock_irqrestore(&hd->mapping_lock, flags);
   7.366 -    return -ENOMEM;
   7.367 -}
   7.368 -
   7.369 -static int get_paging_mode(unsigned long entries)
   7.370 -{
   7.371 -    int level = 1;
   7.372 -
   7.373 -    BUG_ON ( !max_page );
   7.374 -
   7.375 -    if ( entries > max_page )
   7.376 -        entries = max_page;
   7.377 -
   7.378 -    while ( entries > PTE_PER_TABLE_SIZE )
   7.379 -    {
   7.380 -        entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT;
   7.381 -        ++level;
   7.382 -        if ( level > 6 )
   7.383 -            return -ENOMEM;
   7.384 -    }
   7.385 -
   7.386 -    dprintk(XENLOG_INFO, "AMD IOMMU: paging mode = %d\n", level);
   7.387 -
   7.388 -    return level;
   7.389 -}
   7.390 -
   7.391 -int amd_iommu_domain_init(struct domain *domain)
   7.392 -{
   7.393 -    struct hvm_iommu *hd = domain_hvm_iommu(domain);
   7.394 -
    7.395 -    /* allocate page directory */
   7.396 -    if ( allocate_domain_resources(hd) != 0 )
   7.397 -    {
   7.398 -        deallocate_domain_resources(hd);
   7.399 -        return -ENOMEM;
   7.400 -    }
   7.401 -
   7.402 -    if ( is_hvm_domain(domain) )
   7.403 -        hd->paging_mode = IOMMU_PAGE_TABLE_LEVEL_4;
   7.404 -    else
   7.405 -        hd->paging_mode = get_paging_mode(max_page);
   7.406 -
   7.407 -    hd->domain_id = domain->domain_id;
   7.408 -
   7.409 -    return 0;
   7.410 -}
   7.411 -
   7.412 -static void amd_iommu_disable_domain_device(
   7.413 -    struct domain *domain, struct amd_iommu *iommu, u16 requestor_id)
   7.414 -{
   7.415 -    void *dte;
   7.416 -    unsigned long flags;
   7.417 -
   7.418 -    dte = iommu->dev_table.buffer +
   7.419 -        (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
   7.420 -
   7.421 -    if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
   7.422 -    {
   7.423 -        spin_lock_irqsave(&iommu->lock, flags); 
    7.424 -        memset(dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
   7.425 -        invalidate_dev_table_entry(iommu, requestor_id);
   7.426 -        flush_command_buffer(iommu);
    7.427 -        dprintk(XENLOG_INFO, "AMD IOMMU: disable DTE 0x%x,"
    7.428 -                " domain_id:%d, paging_mode:%d\n",
    7.429 -                requestor_id, domain_hvm_iommu(domain)->domain_id,
    7.430 -                domain_hvm_iommu(domain)->paging_mode);
   7.431 -        spin_unlock_irqrestore(&iommu->lock, flags);
   7.432 -    }
   7.433 -}
   7.434 -
   7.435 -extern void pdev_flr(u8 bus, u8 devfn);
   7.436 -
   7.437 -static int reassign_device( struct domain *source, struct domain *target,
   7.438 -                            u8 bus, u8 devfn)
   7.439 -{
   7.440 -    struct hvm_iommu *source_hd = domain_hvm_iommu(source);
   7.441 -    struct hvm_iommu *target_hd = domain_hvm_iommu(target);
   7.442 -    struct pci_dev *pdev;
   7.443 -    struct amd_iommu *iommu;
   7.444 -    int req_id, bdf;
   7.445 -    unsigned long flags;
   7.446 -
   7.447 -    for_each_pdev( source, pdev )
   7.448 -    {
   7.449 -        if ( (pdev->bus != bus) || (pdev->devfn != devfn) )
   7.450 -            continue;
   7.451 -
   7.452 -        pdev->bus = bus;
   7.453 -        pdev->devfn = devfn;
   7.454 -
   7.455 -        bdf = (bus << 8) | devfn;
   7.456 -        req_id = requestor_id_from_bdf(bdf);
   7.457 -        iommu = find_iommu_for_device(bus, devfn);
   7.458 -
   7.459 -        if ( iommu )
   7.460 -        {
   7.461 -            amd_iommu_disable_domain_device(source, iommu, req_id);
   7.462 -            /* Move pci device from the source domain to target domain. */
   7.463 -            spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
   7.464 -            spin_lock(&target_hd->iommu_list_lock);
   7.465 -            list_move(&pdev->list, &target_hd->pdev_list);
   7.466 -            spin_unlock(&target_hd->iommu_list_lock);
   7.467 -            spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
   7.468 -
   7.469 -            amd_iommu_setup_domain_device(target, iommu, req_id);
   7.470 -            gdprintk(XENLOG_INFO,
   7.471 -                     "AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
   7.472 -                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
   7.473 -                     source->domain_id, target->domain_id);
   7.474 -        }
   7.475 -        else
   7.476 -        {
   7.477 -            gdprintk(XENLOG_ERR, "AMD IOMMU: failed to find an IOMMU."
   7.478 -                     " %x:%x.%x cannot be assigned to domain %d\n",
   7.479 -                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn), target->domain_id);
   7.480 -            return -ENODEV;
   7.481 -        }
   7.482 -
   7.483 -        break;
   7.484 -    }
   7.485 -    return 0;
   7.486 -}
   7.487 -
   7.488 -int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
   7.489 -{
   7.490 -    pdev_flr(bus, devfn);
   7.491 -    return reassign_device(dom0, d, bus, devfn);
   7.492 -}
   7.493 -
   7.494 -static void release_domain_devices(struct domain *d)
   7.495 -{
   7.496 -    struct hvm_iommu *hd  = domain_hvm_iommu(d);
   7.497 -    struct pci_dev *pdev;
   7.498 -
   7.499 -    while ( !list_empty(&hd->pdev_list) )
   7.500 -    {
   7.501 -        pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
   7.502 -        pdev_flr(pdev->bus, pdev->devfn);
   7.503 -        gdprintk(XENLOG_INFO,
   7.504 -                 "AMD IOMMU: release device %x:%x.%x\n",
   7.505 -                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
   7.506 -        reassign_device(d, dom0, pdev->bus, pdev->devfn);
   7.507 -    }
   7.508 -}
   7.509 -
   7.510 -static void deallocate_next_page_table(void *table, unsigned long index,
   7.511 -                                       int level)
   7.512 -{
   7.513 -    unsigned long next_index;
   7.514 -    void *next_table, *pde;
   7.515 -    int next_level;
   7.516 -
   7.517 -    pde = table + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
   7.518 -    next_table = amd_iommu_get_vptr_from_page_table_entry((u32 *)pde);
   7.519 -
   7.520 -    if ( next_table )
   7.521 -    {
   7.522 -        next_level = level - 1;
   7.523 -        if ( next_level > 1 )
   7.524 -        {
   7.525 -            next_index = 0;
   7.526 -            do
   7.527 -            {
   7.528 -                deallocate_next_page_table(next_table,
   7.529 -                                           next_index, next_level);
   7.530 -                ++next_index;
   7.531 -            } while (next_index < PTE_PER_TABLE_SIZE);
   7.532 -        }
   7.533 -
   7.534 -        free_xenheap_page(next_table);
   7.535 -    }
   7.536 -}
   7.537 -
   7.538 -static void deallocate_iommu_page_tables(struct domain *d)
   7.539 -{
   7.540 -    unsigned long index;
   7.541 -    struct hvm_iommu *hd  = domain_hvm_iommu(d);
   7.542 -
   7.543 -    if ( hd->root_table )
   7.544 -    {
   7.545 -        index = 0;
   7.546 -        do
   7.547 -        {
   7.548 -            deallocate_next_page_table(hd->root_table,
   7.549 -                                       index, hd->paging_mode);
   7.550 -            ++index;
   7.551 -        } while ( index < PTE_PER_TABLE_SIZE );
   7.552 -
   7.553 -        free_xenheap_page(hd->root_table);
   7.554 -    }
   7.555 -
   7.556 -    hd->root_table = NULL;
   7.557 -}
   7.558 -
   7.559 -void amd_iommu_domain_destroy(struct domain *d)
   7.560 -{
   7.561 -    if ( !amd_iommu_enabled )
   7.562 -        return;
   7.563 -
   7.564 -    deallocate_iommu_page_tables(d);
   7.565 -    release_domain_devices(d);
   7.566 -}
   7.567 -
   7.568 -void amd_iommu_return_device(struct domain *s, struct domain *t, u8 bus, u8 devfn)
   7.569 -{
   7.570 -    pdev_flr(bus, devfn);
   7.571 -    reassign_device(s, t, bus, devfn);
   7.572 -}
   7.573 -
   7.574 -struct iommu_ops amd_iommu_ops = {
   7.575 -    .init = amd_iommu_domain_init,
   7.576 -    .assign_device  = amd_iommu_assign_device,
   7.577 -    .teardown = amd_iommu_domain_destroy,
   7.578 -    .map_page = amd_iommu_map_page,
   7.579 -    .unmap_page = amd_iommu_unmap_page,
   7.580 -    .reassign_device = amd_iommu_return_device,
   7.581 -};
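With this table in place, arch-generic passthrough code can dispatch without knowing which vendor's IOMMU is present. A minimal sketch of such a caller; iommu_get_ops() is a hypothetical accessor, named here only for illustration:

    /* Sketch only: generic dispatch through struct iommu_ops.
     * iommu_get_ops() is hypothetical -- it stands for whatever
     * mechanism selects &amd_iommu_ops or the VT-d equivalent.
     */
    static int generic_assign_device(struct domain *d, u8 bus, u8 devfn)
    {
        struct iommu_ops *ops = iommu_get_ops();

        if ( (ops == NULL) || (ops->assign_device == NULL) )
            return -ENODEV;
        return ops->assign_device(d, bus, devfn);
    }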
     8.1 --- a/xen/arch/x86/hvm/svm/amd_iommu/pci-direct.h	Thu Feb 21 14:50:27 2008 +0000
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,48 +0,0 @@
     8.4 -#ifndef ASM_PCI_DIRECT_H
     8.5 -#define ASM_PCI_DIRECT_H 1
     8.6 -
     8.7 -#include <xen/types.h>
     8.8 -#include <asm/io.h>
     8.9 -
    8.10 -/* Direct PCI access. This is used for PCI accesses in early boot before
    8.11 -   the PCI subsystem works. */ 
    8.12 -
    8.13 -#define PDprintk(x...)
    8.14 -
    8.15 -static inline u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset)
    8.16 -{
    8.17 -    u32 v; 
    8.18 -    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
    8.19 -    v = inl(0xcfc); 
    8.20 -    if (v != 0xffffffff)
    8.21 -        PDprintk("%x reading 4 from %x: %x\n", slot, offset, v);
    8.22 -    return v;
    8.23 -}
    8.24 -
    8.25 -static inline u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset)
    8.26 -{
    8.27 -    u8 v; 
    8.28 -    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
    8.29 -    v = inb(0xcfc + (offset&3)); 
    8.30 -    PDprintk("%x reading 1 from %x: %x\n", slot, offset, v);
    8.31 -    return v;
    8.32 -}
    8.33 -
    8.34 -static inline u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset)
    8.35 -{
    8.36 -    u16 v; 
    8.37 -    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
    8.38 -    v = inw(0xcfc + (offset&2)); 
    8.39 -    PDprintk("%x reading 2 from %x: %x\n", slot, offset, v);
    8.40 -    return v;
    8.41 -}
    8.42 -
    8.43 -static inline void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
    8.44 -                    u32 val)
    8.45 -{
    8.46 -    PDprintk("%x writing to %x: %x\n", slot, offset, val); 
    8.47 -    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
    8.48 -    outl(val, 0xcfc); 
    8.49 -}
    8.50 -
    8.51 -#endif
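As a usage sketch for the helpers above (configuration mechanism #1: an address written to port 0xcf8, data transferred through 0xcfc), probing whether a function is present by reading its vendor/device ID word; PCI_VENDOR_ID is the offset defined in pci_regs.h below:

    /* Illustrative only: a function is absent if the vendor ID reads
     * back as all-ones (no device drove the data lines).
     */
    static int pci_func_present(u8 bus, u8 slot, u8 func)
    {
        u32 id = read_pci_config(bus, slot, func, PCI_VENDOR_ID);
        u16 vendor = id & 0xffff;
        u16 device = id >> 16;

        if ( vendor == 0xffff )
            return 0;
        PDprintk("found %04x:%04x at %x:%x.%x\n",
                 vendor, device, bus, slot, func);
        return 1;
    }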
     9.1 --- a/xen/arch/x86/hvm/svm/amd_iommu/pci_regs.h	Thu Feb 21 14:50:27 2008 +0000
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,513 +0,0 @@
     9.4 -/*
     9.5 - *	pci_regs.h
     9.6 - *
     9.7 - *	PCI standard defines
     9.8 - *	Copyright 1994, Drew Eckhardt
     9.9 - *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
    9.10 - *
    9.11 - *	For more information, please consult the following manuals (look at
    9.12 - *	http://www.pcisig.com/ for how to get them):
    9.13 - *
    9.14 - *	PCI BIOS Specification
    9.15 - *	PCI Local Bus Specification
    9.16 - *	PCI to PCI Bridge Specification
    9.17 - *	PCI System Design Guide
    9.18 - *
    9.19 - * 	For hypertransport information, please consult the following manuals
    9.20 - * 	from http://www.hypertransport.org
    9.21 - *
    9.22 - *	The Hypertransport I/O Link Specification
    9.23 - */
    9.24 -
    9.25 -#ifndef LINUX_PCI_REGS_H
    9.26 -#define LINUX_PCI_REGS_H
    9.27 -
    9.28 -/*
    9.29 - * Under PCI, each device has 256 bytes of configuration address space,
    9.30 - * of which the first 64 bytes are standardized as follows:
    9.31 - */
    9.32 -#define PCI_VENDOR_ID		0x00	/* 16 bits */
    9.33 -#define PCI_DEVICE_ID		0x02	/* 16 bits */
    9.34 -#define PCI_COMMAND		0x04	/* 16 bits */
    9.35 -#define  PCI_COMMAND_IO		0x1	/* Enable response in I/O space */
    9.36 -#define  PCI_COMMAND_MEMORY	0x2	/* Enable response in Memory space */
    9.37 -#define  PCI_COMMAND_MASTER	0x4	/* Enable bus mastering */
    9.38 -#define  PCI_COMMAND_SPECIAL	0x8	/* Enable response to special cycles */
    9.39 -#define  PCI_COMMAND_INVALIDATE	0x10	/* Use memory write and invalidate */
    9.40 -#define  PCI_COMMAND_VGA_PALETTE 0x20	/* Enable palette snooping */
    9.41 -#define  PCI_COMMAND_PARITY	0x40	/* Enable parity checking */
    9.42 -#define  PCI_COMMAND_WAIT 	0x80	/* Enable address/data stepping */
    9.43 -#define  PCI_COMMAND_SERR	0x100	/* Enable SERR */
    9.44 -#define  PCI_COMMAND_FAST_BACK	0x200	/* Enable back-to-back writes */
    9.45 -#define  PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
    9.46 -
    9.47 -#define PCI_STATUS		0x06	/* 16 bits */
    9.48 -#define  PCI_STATUS_CAP_LIST	0x10	/* Support Capability List */
     9.49 -#define  PCI_STATUS_66MHZ	0x20	/* Support 66 MHz PCI 2.1 bus */
    9.50 -#define  PCI_STATUS_UDF		0x40	/* Support User Definable Features [obsolete] */
    9.51 -#define  PCI_STATUS_FAST_BACK	0x80	/* Accept fast-back to back */
    9.52 -#define  PCI_STATUS_PARITY	0x100	/* Detected parity error */
    9.53 -#define  PCI_STATUS_DEVSEL_MASK	0x600	/* DEVSEL timing */
    9.54 -#define  PCI_STATUS_DEVSEL_FAST		0x000
    9.55 -#define  PCI_STATUS_DEVSEL_MEDIUM	0x200
    9.56 -#define  PCI_STATUS_DEVSEL_SLOW		0x400
    9.57 -#define  PCI_STATUS_SIG_TARGET_ABORT	0x800 /* Set on target abort */
    9.58 -#define  PCI_STATUS_REC_TARGET_ABORT	0x1000 /* Master ack of " */
    9.59 -#define  PCI_STATUS_REC_MASTER_ABORT	0x2000 /* Set on master abort */
    9.60 -#define  PCI_STATUS_SIG_SYSTEM_ERROR	0x4000 /* Set when we drive SERR */
    9.61 -#define  PCI_STATUS_DETECTED_PARITY	0x8000 /* Set on parity error */
    9.62 -
    9.63 -#define PCI_CLASS_REVISION	0x08	/* High 24 bits are class, low 8 revision */
    9.64 -#define PCI_REVISION_ID		0x08	/* Revision ID */
    9.65 -#define PCI_CLASS_PROG		0x09	/* Reg. Level Programming Interface */
    9.66 -#define PCI_CLASS_DEVICE	0x0a	/* Device class */
    9.67 -
    9.68 -#define PCI_CACHE_LINE_SIZE	0x0c	/* 8 bits */
    9.69 -#define PCI_LATENCY_TIMER	0x0d	/* 8 bits */
    9.70 -#define PCI_HEADER_TYPE		0x0e	/* 8 bits */
    9.71 -#define  PCI_HEADER_TYPE_NORMAL		0
    9.72 -#define  PCI_HEADER_TYPE_BRIDGE		1
    9.73 -#define  PCI_HEADER_TYPE_CARDBUS	2
    9.74 -
    9.75 -#define PCI_BIST		0x0f	/* 8 bits */
    9.76 -#define  PCI_BIST_CODE_MASK	0x0f	/* Return result */
    9.77 -#define  PCI_BIST_START		0x40	/* 1 to start BIST, 2 secs or less */
    9.78 -#define  PCI_BIST_CAPABLE	0x80	/* 1 if BIST capable */
    9.79 -
    9.80 -/*
    9.81 - * Base addresses specify locations in memory or I/O space.
    9.82 - * Decoded size can be determined by writing a value of
    9.83 - * 0xffffffff to the register, and reading it back.  Only
    9.84 - * 1 bits are decoded.
    9.85 - */
    9.86 -#define PCI_BASE_ADDRESS_0	0x10	/* 32 bits */
    9.87 -#define PCI_BASE_ADDRESS_1	0x14	/* 32 bits [htype 0,1 only] */
    9.88 -#define PCI_BASE_ADDRESS_2	0x18	/* 32 bits [htype 0 only] */
    9.89 -#define PCI_BASE_ADDRESS_3	0x1c	/* 32 bits */
    9.90 -#define PCI_BASE_ADDRESS_4	0x20	/* 32 bits */
    9.91 -#define PCI_BASE_ADDRESS_5	0x24	/* 32 bits */
    9.92 -#define  PCI_BASE_ADDRESS_SPACE		0x01	/* 0 = memory, 1 = I/O */
    9.93 -#define  PCI_BASE_ADDRESS_SPACE_IO	0x01
    9.94 -#define  PCI_BASE_ADDRESS_SPACE_MEMORY	0x00
    9.95 -#define  PCI_BASE_ADDRESS_MEM_TYPE_MASK	0x06
    9.96 -#define  PCI_BASE_ADDRESS_MEM_TYPE_32	0x00	/* 32 bit address */
    9.97 -#define  PCI_BASE_ADDRESS_MEM_TYPE_1M	0x02	/* Below 1M [obsolete] */
    9.98 -#define  PCI_BASE_ADDRESS_MEM_TYPE_64	0x04	/* 64 bit address */
    9.99 -#define  PCI_BASE_ADDRESS_MEM_PREFETCH	0x08	/* prefetchable? */
   9.100 -#define  PCI_BASE_ADDRESS_MEM_MASK	(~0x0fUL)
   9.101 -#define  PCI_BASE_ADDRESS_IO_MASK	(~0x03UL)
   9.102 -/* bit 1 is reserved if address_space = 1 */
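The sizing rule in the comment above corresponds to the usual write-all-ones probe. A hedged sketch for a plain 32-bit memory BAR, using the pci-direct helpers (the device must be quiescent while its BAR is temporarily clobbered, and 64-bit/IO BARs need extra handling omitted here):

    /* Sketch only: size a 32-bit memory BAR at config offset bar_off.
     * Save the original value, write 0xffffffff, read back the decoded
     * bits, restore.  The size is the two's complement of the masked
     * read-back value.
     */
    static u32 pci_bar_size(u8 bus, u8 slot, u8 func, u8 bar_off)
    {
        u32 orig, probe;

        orig = read_pci_config(bus, slot, func, bar_off);
        write_pci_config(bus, slot, func, bar_off, 0xffffffff);
        probe = read_pci_config(bus, slot, func, bar_off);
        write_pci_config(bus, slot, func, bar_off, orig);

        return ~(probe & PCI_BASE_ADDRESS_MEM_MASK) + 1;
    }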
   9.103 -
   9.104 -/* Header type 0 (normal devices) */
   9.105 -#define PCI_CARDBUS_CIS		0x28
   9.106 -#define PCI_SUBSYSTEM_VENDOR_ID	0x2c
   9.107 -#define PCI_SUBSYSTEM_ID	0x2e
   9.108 -#define PCI_ROM_ADDRESS		0x30	/* Bits 31..11 are address, 10..1 reserved */
   9.109 -#define  PCI_ROM_ADDRESS_ENABLE	0x01
   9.110 -#define PCI_ROM_ADDRESS_MASK	(~0x7ffUL)
   9.111 -
   9.112 -#define PCI_CAPABILITY_LIST	0x34	/* Offset of first capability list entry */
   9.113 -
   9.114 -/* 0x35-0x3b are reserved */
   9.115 -#define PCI_INTERRUPT_LINE	0x3c	/* 8 bits */
   9.116 -#define PCI_INTERRUPT_PIN	0x3d	/* 8 bits */
   9.117 -#define PCI_MIN_GNT		0x3e	/* 8 bits */
   9.118 -#define PCI_MAX_LAT		0x3f	/* 8 bits */
   9.119 -
   9.120 -/* Header type 1 (PCI-to-PCI bridges) */
   9.121 -#define PCI_PRIMARY_BUS		0x18	/* Primary bus number */
   9.122 -#define PCI_SECONDARY_BUS	0x19	/* Secondary bus number */
   9.123 -#define PCI_SUBORDINATE_BUS	0x1a	/* Highest bus number behind the bridge */
   9.124 -#define PCI_SEC_LATENCY_TIMER	0x1b	/* Latency timer for secondary interface */
   9.125 -#define PCI_IO_BASE		0x1c	/* I/O range behind the bridge */
   9.126 -#define PCI_IO_LIMIT		0x1d
   9.127 -#define  PCI_IO_RANGE_TYPE_MASK	0x0fUL	/* I/O bridging type */
   9.128 -#define  PCI_IO_RANGE_TYPE_16	0x00
   9.129 -#define  PCI_IO_RANGE_TYPE_32	0x01
   9.130 -#define  PCI_IO_RANGE_MASK	(~0x0fUL)
   9.131 -#define PCI_SEC_STATUS		0x1e	/* Secondary status register, only bit 14 used */
   9.132 -#define PCI_MEMORY_BASE		0x20	/* Memory range behind */
   9.133 -#define PCI_MEMORY_LIMIT	0x22
   9.134 -#define  PCI_MEMORY_RANGE_TYPE_MASK 0x0fUL
   9.135 -#define  PCI_MEMORY_RANGE_MASK	(~0x0fUL)
   9.136 -#define PCI_PREF_MEMORY_BASE	0x24	/* Prefetchable memory range behind */
   9.137 -#define PCI_PREF_MEMORY_LIMIT	0x26
   9.138 -#define  PCI_PREF_RANGE_TYPE_MASK 0x0fUL
   9.139 -#define  PCI_PREF_RANGE_TYPE_32	0x00
   9.140 -#define  PCI_PREF_RANGE_TYPE_64	0x01
   9.141 -#define  PCI_PREF_RANGE_MASK	(~0x0fUL)
   9.142 -#define PCI_PREF_BASE_UPPER32	0x28	/* Upper half of prefetchable memory range */
   9.143 -#define PCI_PREF_LIMIT_UPPER32	0x2c
   9.144 -#define PCI_IO_BASE_UPPER16	0x30	/* Upper half of I/O addresses */
   9.145 -#define PCI_IO_LIMIT_UPPER16	0x32
   9.146 -/* 0x34 same as for htype 0 */
   9.147 -/* 0x35-0x3b is reserved */
   9.148 -#define PCI_ROM_ADDRESS1	0x38	/* Same as PCI_ROM_ADDRESS, but for htype 1 */
   9.149 -/* 0x3c-0x3d are same as for htype 0 */
   9.150 -#define PCI_BRIDGE_CONTROL	0x3e
   9.151 -#define  PCI_BRIDGE_CTL_PARITY	0x01	/* Enable parity detection on secondary interface */
   9.152 -#define  PCI_BRIDGE_CTL_SERR	0x02	/* The same for SERR forwarding */
   9.153 -#define  PCI_BRIDGE_CTL_NO_ISA	0x04	/* Disable bridging of ISA ports */
   9.154 -#define  PCI_BRIDGE_CTL_VGA	0x08	/* Forward VGA addresses */
   9.155 -#define  PCI_BRIDGE_CTL_MASTER_ABORT	0x20  /* Report master aborts */
   9.156 -#define  PCI_BRIDGE_CTL_BUS_RESET	0x40	/* Secondary bus reset */
   9.157 -#define  PCI_BRIDGE_CTL_FAST_BACK	0x80	/* Fast Back2Back enabled on secondary interface */
   9.158 -
   9.159 -/* Header type 2 (CardBus bridges) */
   9.160 -#define PCI_CB_CAPABILITY_LIST	0x14
   9.161 -/* 0x15 reserved */
   9.162 -#define PCI_CB_SEC_STATUS	0x16	/* Secondary status */
   9.163 -#define PCI_CB_PRIMARY_BUS	0x18	/* PCI bus number */
   9.164 -#define PCI_CB_CARD_BUS		0x19	/* CardBus bus number */
   9.165 -#define PCI_CB_SUBORDINATE_BUS	0x1a	/* Subordinate bus number */
   9.166 -#define PCI_CB_LATENCY_TIMER	0x1b	/* CardBus latency timer */
   9.167 -#define PCI_CB_MEMORY_BASE_0	0x1c
   9.168 -#define PCI_CB_MEMORY_LIMIT_0	0x20
   9.169 -#define PCI_CB_MEMORY_BASE_1	0x24
   9.170 -#define PCI_CB_MEMORY_LIMIT_1	0x28
   9.171 -#define PCI_CB_IO_BASE_0	0x2c
   9.172 -#define PCI_CB_IO_BASE_0_HI	0x2e
   9.173 -#define PCI_CB_IO_LIMIT_0	0x30
   9.174 -#define PCI_CB_IO_LIMIT_0_HI	0x32
   9.175 -#define PCI_CB_IO_BASE_1	0x34
   9.176 -#define PCI_CB_IO_BASE_1_HI	0x36
   9.177 -#define PCI_CB_IO_LIMIT_1	0x38
   9.178 -#define PCI_CB_IO_LIMIT_1_HI	0x3a
   9.179 -#define  PCI_CB_IO_RANGE_MASK	(~0x03UL)
   9.180 -/* 0x3c-0x3d are same as for htype 0 */
   9.181 -#define PCI_CB_BRIDGE_CONTROL	0x3e
   9.182 -#define  PCI_CB_BRIDGE_CTL_PARITY	0x01	/* Similar to standard bridge control register */
   9.183 -#define  PCI_CB_BRIDGE_CTL_SERR		0x02
   9.184 -#define  PCI_CB_BRIDGE_CTL_ISA		0x04
   9.185 -#define  PCI_CB_BRIDGE_CTL_VGA		0x08
   9.186 -#define  PCI_CB_BRIDGE_CTL_MASTER_ABORT	0x20
   9.187 -#define  PCI_CB_BRIDGE_CTL_CB_RESET	0x40	/* CardBus reset */
   9.188 -#define  PCI_CB_BRIDGE_CTL_16BIT_INT	0x80	/* Enable interrupt for 16-bit cards */
   9.189 -#define  PCI_CB_BRIDGE_CTL_PREFETCH_MEM0 0x100	/* Prefetch enable for both memory regions */
   9.190 -#define  PCI_CB_BRIDGE_CTL_PREFETCH_MEM1 0x200
   9.191 -#define  PCI_CB_BRIDGE_CTL_POST_WRITES	0x400
   9.192 -#define PCI_CB_SUBSYSTEM_VENDOR_ID	0x40
   9.193 -#define PCI_CB_SUBSYSTEM_ID		0x42
   9.194 -#define PCI_CB_LEGACY_MODE_BASE		0x44	/* 16-bit PC Card legacy mode base address (ExCa) */
   9.195 -/* 0x48-0x7f reserved */
   9.196 -
   9.197 -/* Capability lists */
   9.198 -
   9.199 -#define PCI_CAP_LIST_ID		0	/* Capability ID */
   9.200 -#define  PCI_CAP_ID_PM		0x01	/* Power Management */
   9.201 -#define  PCI_CAP_ID_AGP		0x02	/* Accelerated Graphics Port */
   9.202 -#define  PCI_CAP_ID_VPD		0x03	/* Vital Product Data */
   9.203 -#define  PCI_CAP_ID_SLOTID	0x04	/* Slot Identification */
   9.204 -#define  PCI_CAP_ID_MSI		0x05	/* Message Signalled Interrupts */
   9.205 -#define  PCI_CAP_ID_CHSWP	0x06	/* CompactPCI HotSwap */
   9.206 -#define  PCI_CAP_ID_PCIX	0x07	/* PCI-X */
   9.207 -#define  PCI_CAP_ID_HT		0x08	/* HyperTransport */
   9.208 -#define  PCI_CAP_ID_VNDR	0x09	/* Vendor specific capability */
   9.209 -#define  PCI_CAP_ID_SHPC 	0x0C	/* PCI Standard Hot-Plug Controller */
   9.210 -#define  PCI_CAP_ID_EXP 	0x10	/* PCI Express */
   9.211 -#define  PCI_CAP_ID_MSIX	0x11	/* MSI-X */
   9.212 -#define PCI_CAP_LIST_NEXT	1	/* Next capability in the list */
   9.213 -#define PCI_CAP_FLAGS		2	/* Capability defined flags (16 bits) */
   9.214 -#define PCI_CAP_SIZEOF		4
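These offsets are what a capability-list walk consumes: confirm PCI_STATUS_CAP_LIST is set in the status register, start at PCI_CAPABILITY_LIST, and follow PCI_CAP_LIST_NEXT pointers. A minimal sketch with the early-boot helpers from pci-direct.h:

    /* Sketch only: return the config-space position of capability
     * cap_id, or 0 if absent.  The ttl bound guards against a
     * malformed (circular) list.
     */
    static u8 pci_find_cap(u8 bus, u8 slot, u8 func, u8 cap_id)
    {
        u16 status = read_pci_config_16(bus, slot, func, PCI_STATUS);
        u8 pos;
        int ttl = 48;

        if ( !(status & PCI_STATUS_CAP_LIST) )
            return 0;

        pos = read_pci_config_byte(bus, slot, func, PCI_CAPABILITY_LIST);
        while ( (pos >= 0x40) && ttl-- )
        {
            pos &= ~3;
            if ( read_pci_config_byte(bus, slot, func,
                                      pos + PCI_CAP_LIST_ID) == cap_id )
                return pos;
            pos = read_pci_config_byte(bus, slot, func,
                                       pos + PCI_CAP_LIST_NEXT);
        }
        return 0;
    }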
   9.215 -
   9.216 -/* Power Management Registers */
   9.217 -
   9.218 -#define PCI_PM_PMC		2	/* PM Capabilities Register */
   9.219 -#define  PCI_PM_CAP_VER_MASK	0x0007	/* Version */
   9.220 -#define  PCI_PM_CAP_PME_CLOCK	0x0008	/* PME clock required */
   9.221 -#define  PCI_PM_CAP_RESERVED    0x0010  /* Reserved field */
   9.222 -#define  PCI_PM_CAP_DSI		0x0020	/* Device specific initialization */
    9.223 -#define  PCI_PM_CAP_AUX_POWER	0x01C0	/* Auxiliary power support mask */
   9.224 -#define  PCI_PM_CAP_D1		0x0200	/* D1 power state support */
   9.225 -#define  PCI_PM_CAP_D2		0x0400	/* D2 power state support */
   9.226 -#define  PCI_PM_CAP_PME		0x0800	/* PME pin supported */
   9.227 -#define  PCI_PM_CAP_PME_MASK	0xF800	/* PME Mask of all supported states */
   9.228 -#define  PCI_PM_CAP_PME_D0	0x0800	/* PME# from D0 */
   9.229 -#define  PCI_PM_CAP_PME_D1	0x1000	/* PME# from D1 */
   9.230 -#define  PCI_PM_CAP_PME_D2	0x2000	/* PME# from D2 */
   9.231 -#define  PCI_PM_CAP_PME_D3	0x4000	/* PME# from D3 (hot) */
   9.232 -#define  PCI_PM_CAP_PME_D3cold	0x8000	/* PME# from D3 (cold) */
   9.233 -#define PCI_PM_CTRL		4	/* PM control and status register */
   9.234 -#define  PCI_PM_CTRL_STATE_MASK	0x0003	/* Current power state (D0 to D3) */
   9.235 -#define  PCI_PM_CTRL_NO_SOFT_RESET	0x0004	/* No reset for D3hot->D0 */
   9.236 -#define  PCI_PM_CTRL_PME_ENABLE	0x0100	/* PME pin enable */
   9.237 -#define  PCI_PM_CTRL_DATA_SEL_MASK	0x1e00	/* Data select (??) */
   9.238 -#define  PCI_PM_CTRL_DATA_SCALE_MASK	0x6000	/* Data scale (??) */
   9.239 -#define  PCI_PM_CTRL_PME_STATUS	0x8000	/* PME pin status */
   9.240 -#define PCI_PM_PPB_EXTENSIONS	6	/* PPB support extensions (??) */
   9.241 -#define  PCI_PM_PPB_B2_B3	0x40	/* Stop clock when in D3hot (??) */
   9.242 -#define  PCI_PM_BPCC_ENABLE	0x80	/* Bus power/clock control enable (??) */
   9.243 -#define PCI_PM_DATA_REGISTER	7	/* (??) */
   9.244 -#define PCI_PM_SIZEOF		8
   9.245 -
   9.246 -/* AGP registers */
   9.247 -
   9.248 -#define PCI_AGP_VERSION		2	/* BCD version number */
   9.249 -#define PCI_AGP_RFU		3	/* Rest of capability flags */
   9.250 -#define PCI_AGP_STATUS		4	/* Status register */
   9.251 -#define  PCI_AGP_STATUS_RQ_MASK	0xff000000	/* Maximum number of requests - 1 */
   9.252 -#define  PCI_AGP_STATUS_SBA	0x0200	/* Sideband addressing supported */
   9.253 -#define  PCI_AGP_STATUS_64BIT	0x0020	/* 64-bit addressing supported */
   9.254 -#define  PCI_AGP_STATUS_FW	0x0010	/* FW transfers supported */
   9.255 -#define  PCI_AGP_STATUS_RATE4	0x0004	/* 4x transfer rate supported */
   9.256 -#define  PCI_AGP_STATUS_RATE2	0x0002	/* 2x transfer rate supported */
   9.257 -#define  PCI_AGP_STATUS_RATE1	0x0001	/* 1x transfer rate supported */
   9.258 -#define PCI_AGP_COMMAND		8	/* Control register */
   9.259 -#define  PCI_AGP_COMMAND_RQ_MASK 0xff000000  /* Master: Maximum number of requests */
   9.260 -#define  PCI_AGP_COMMAND_SBA	0x0200	/* Sideband addressing enabled */
   9.261 -#define  PCI_AGP_COMMAND_AGP	0x0100	/* Allow processing of AGP transactions */
   9.262 -#define  PCI_AGP_COMMAND_64BIT	0x0020 	/* Allow processing of 64-bit addresses */
   9.263 -#define  PCI_AGP_COMMAND_FW	0x0010 	/* Force FW transfers */
   9.264 -#define  PCI_AGP_COMMAND_RATE4	0x0004	/* Use 4x rate */
   9.265 -#define  PCI_AGP_COMMAND_RATE2	0x0002	/* Use 2x rate */
   9.266 -#define  PCI_AGP_COMMAND_RATE1	0x0001	/* Use 1x rate */
   9.267 -#define PCI_AGP_SIZEOF		12
   9.268 -
   9.269 -/* Vital Product Data */
   9.270 -
   9.271 -#define PCI_VPD_ADDR		2	/* Address to access (15 bits!) */
   9.272 -#define  PCI_VPD_ADDR_MASK	0x7fff	/* Address mask */
   9.273 -#define  PCI_VPD_ADDR_F		0x8000	/* Write 0, 1 indicates completion */
   9.274 -#define PCI_VPD_DATA		4	/* 32-bits of data returned here */
   9.275 -
   9.276 -/* Slot Identification */
   9.277 -
   9.278 -#define PCI_SID_ESR		2	/* Expansion Slot Register */
   9.279 -#define  PCI_SID_ESR_NSLOTS	0x1f	/* Number of expansion slots available */
   9.280 -#define  PCI_SID_ESR_FIC	0x20	/* First In Chassis Flag */
   9.281 -#define PCI_SID_CHASSIS_NR	3	/* Chassis Number */
   9.282 -
   9.283 -/* Message Signalled Interrupts registers */
   9.284 -
   9.285 -#define PCI_MSI_FLAGS		2	/* Various flags */
   9.286 -#define  PCI_MSI_FLAGS_64BIT	0x80	/* 64-bit addresses allowed */
   9.287 -#define  PCI_MSI_FLAGS_QSIZE	0x70	/* Message queue size configured */
   9.288 -#define  PCI_MSI_FLAGS_QMASK	0x0e	/* Maximum queue size available */
   9.289 -#define  PCI_MSI_FLAGS_ENABLE	0x01	/* MSI feature enabled */
   9.290 -#define  PCI_MSI_FLAGS_MASKBIT	0x100	/* 64-bit mask bits allowed */
   9.291 -#define PCI_MSI_RFU		3	/* Rest of capability flags */
   9.292 -#define PCI_MSI_ADDRESS_LO	4	/* Lower 32 bits */
   9.293 -#define PCI_MSI_ADDRESS_HI	8	/* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
   9.294 -#define PCI_MSI_DATA_32		8	/* 16 bits of data for 32-bit devices */
   9.295 -#define PCI_MSI_DATA_64		12	/* 16 bits of data for 64-bit devices */
   9.296 -#define PCI_MSI_MASK_BIT	16	/* Mask bits register */
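Note that the message-data register sits at a different offset depending on whether the function advertises 64-bit message addresses, which is why both PCI_MSI_DATA_32 and PCI_MSI_DATA_64 exist. A sketch of selecting the right one, with cap being a capability position found by a walk such as the one above:

    /* Sketch only: offset of the MSI data register for the MSI
     * capability at position 'cap', honouring PCI_MSI_FLAGS_64BIT.
     */
    static u8 pci_msi_data_offset(u8 bus, u8 slot, u8 func, u8 cap)
    {
        u16 flags = read_pci_config_16(bus, slot, func,
                                       cap + PCI_MSI_FLAGS);

        return cap + ((flags & PCI_MSI_FLAGS_64BIT) ? PCI_MSI_DATA_64
                                                    : PCI_MSI_DATA_32);
    }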
   9.297 -
   9.298 -/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
   9.299 -#define PCI_MSIX_FLAGS		2
   9.300 -#define  PCI_MSIX_FLAGS_QSIZE	0x7FF
   9.301 -#define  PCI_MSIX_FLAGS_ENABLE	(1 << 15)
   9.302 -#define  PCI_MSIX_FLAGS_MASKALL	(1 << 14)
   9.303 -#define PCI_MSIX_FLAGS_BIRMASK	(7 << 0)
   9.304 -#define PCI_MSIX_FLAGS_BITMASK	(1 << 0)
   9.305 -
   9.306 -/* CompactPCI Hotswap Register */
   9.307 -
   9.308 -#define PCI_CHSWP_CSR		2	/* Control and Status Register */
   9.309 -#define  PCI_CHSWP_DHA		0x01	/* Device Hiding Arm */
   9.310 -#define  PCI_CHSWP_EIM		0x02	/* ENUM# Signal Mask */
   9.311 -#define  PCI_CHSWP_PIE		0x04	/* Pending Insert or Extract */
   9.312 -#define  PCI_CHSWP_LOO		0x08	/* LED On / Off */
   9.313 -#define  PCI_CHSWP_PI		0x30	/* Programming Interface */
   9.314 -#define  PCI_CHSWP_EXT		0x40	/* ENUM# status - extraction */
   9.315 -#define  PCI_CHSWP_INS		0x80	/* ENUM# status - insertion */
   9.316 -
   9.317 -/* PCI-X registers */
   9.318 -
   9.319 -#define PCI_X_CMD		2	/* Modes & Features */
   9.320 -#define  PCI_X_CMD_DPERR_E	0x0001	/* Data Parity Error Recovery Enable */
   9.321 -#define  PCI_X_CMD_ERO		0x0002	/* Enable Relaxed Ordering */
   9.322 -#define  PCI_X_CMD_MAX_READ	0x000c	/* Max Memory Read Byte Count */
   9.323 -#define  PCI_X_CMD_MAX_SPLIT	0x0070	/* Max Outstanding Split Transactions */
   9.324 -#define  PCI_X_CMD_VERSION(x) 	(((x) >> 12) & 3) /* Version */
   9.325 -#define PCI_X_STATUS		4	/* PCI-X capabilities */
   9.326 -#define  PCI_X_STATUS_DEVFN	0x000000ff	/* A copy of devfn */
   9.327 -#define  PCI_X_STATUS_BUS	0x0000ff00	/* A copy of bus nr */
   9.328 -#define  PCI_X_STATUS_64BIT	0x00010000	/* 64-bit device */
   9.329 -#define  PCI_X_STATUS_133MHZ	0x00020000	/* 133 MHz capable */
   9.330 -#define  PCI_X_STATUS_SPL_DISC	0x00040000	/* Split Completion Discarded */
   9.331 -#define  PCI_X_STATUS_UNX_SPL	0x00080000	/* Unexpected Split Completion */
   9.332 -#define  PCI_X_STATUS_COMPLEX	0x00100000	/* Device Complexity */
   9.333 -#define  PCI_X_STATUS_MAX_READ	0x00600000	/* Designed Max Memory Read Count */
   9.334 -#define  PCI_X_STATUS_MAX_SPLIT	0x03800000	/* Designed Max Outstanding Split Transactions */
   9.335 -#define  PCI_X_STATUS_MAX_CUM	0x1c000000	/* Designed Max Cumulative Read Size */
   9.336 -#define  PCI_X_STATUS_SPL_ERR	0x20000000	/* Rcvd Split Completion Error Msg */
   9.337 -#define  PCI_X_STATUS_266MHZ	0x40000000	/* 266 MHz capable */
   9.338 -#define  PCI_X_STATUS_533MHZ	0x80000000	/* 533 MHz capable */
   9.339 -
   9.340 -/* PCI Express capability registers */
   9.341 -
   9.342 -#define PCI_EXP_FLAGS		2	/* Capabilities register */
   9.343 -#define PCI_EXP_FLAGS_VERS	0x000f	/* Capability version */
   9.344 -#define PCI_EXP_FLAGS_TYPE	0x00f0	/* Device/Port type */
   9.345 -#define  PCI_EXP_TYPE_ENDPOINT	0x0	/* Express Endpoint */
   9.346 -#define  PCI_EXP_TYPE_LEG_END	0x1	/* Legacy Endpoint */
   9.347 -#define  PCI_EXP_TYPE_ROOT_PORT 0x4	/* Root Port */
   9.348 -#define  PCI_EXP_TYPE_UPSTREAM	0x5	/* Upstream Port */
   9.349 -#define  PCI_EXP_TYPE_DOWNSTREAM 0x6	/* Downstream Port */
   9.350 -#define  PCI_EXP_TYPE_PCI_BRIDGE 0x7	/* PCI/PCI-X Bridge */
   9.351 -#define PCI_EXP_FLAGS_SLOT	0x0100	/* Slot implemented */
   9.352 -#define PCI_EXP_FLAGS_IRQ	0x3e00	/* Interrupt message number */
   9.353 -#define PCI_EXP_DEVCAP		4	/* Device capabilities */
   9.354 -#define  PCI_EXP_DEVCAP_PAYLOAD	0x07	/* Max_Payload_Size */
   9.355 -#define  PCI_EXP_DEVCAP_PHANTOM	0x18	/* Phantom functions */
   9.356 -#define  PCI_EXP_DEVCAP_EXT_TAG	0x20	/* Extended tags */
   9.357 -#define  PCI_EXP_DEVCAP_L0S	0x1c0	/* L0s Acceptable Latency */
   9.358 -#define  PCI_EXP_DEVCAP_L1	0xe00	/* L1 Acceptable Latency */
   9.359 -#define  PCI_EXP_DEVCAP_ATN_BUT	0x1000	/* Attention Button Present */
   9.360 -#define  PCI_EXP_DEVCAP_ATN_IND	0x2000	/* Attention Indicator Present */
   9.361 -#define  PCI_EXP_DEVCAP_PWR_IND	0x4000	/* Power Indicator Present */
   9.362 -#define  PCI_EXP_DEVCAP_PWR_VAL	0x3fc0000 /* Slot Power Limit Value */
   9.363 -#define  PCI_EXP_DEVCAP_PWR_SCL	0xc000000 /* Slot Power Limit Scale */
   9.364 -#define PCI_EXP_DEVCTL		8	/* Device Control */
   9.365 -#define  PCI_EXP_DEVCTL_CERE	0x0001	/* Correctable Error Reporting En. */
   9.366 -#define  PCI_EXP_DEVCTL_NFERE	0x0002	/* Non-Fatal Error Reporting Enable */
   9.367 -#define  PCI_EXP_DEVCTL_FERE	0x0004	/* Fatal Error Reporting Enable */
   9.368 -#define  PCI_EXP_DEVCTL_URRE	0x0008	/* Unsupported Request Reporting En. */
   9.369 -#define  PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
   9.370 -#define  PCI_EXP_DEVCTL_PAYLOAD	0x00e0	/* Max_Payload_Size */
   9.371 -#define  PCI_EXP_DEVCTL_EXT_TAG	0x0100	/* Extended Tag Field Enable */
   9.372 -#define  PCI_EXP_DEVCTL_PHANTOM	0x0200	/* Phantom Functions Enable */
   9.373 -#define  PCI_EXP_DEVCTL_AUX_PME	0x0400	/* Auxiliary Power PM Enable */
   9.374 -#define  PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800  /* Enable No Snoop */
   9.375 -#define  PCI_EXP_DEVCTL_READRQ	0x7000	/* Max_Read_Request_Size */
   9.376 -#define PCI_EXP_DEVSTA		10	/* Device Status */
   9.377 -#define  PCI_EXP_DEVSTA_CED	0x01	/* Correctable Error Detected */
   9.378 -#define  PCI_EXP_DEVSTA_NFED	0x02	/* Non-Fatal Error Detected */
   9.379 -#define  PCI_EXP_DEVSTA_FED	0x04	/* Fatal Error Detected */
   9.380 -#define  PCI_EXP_DEVSTA_URD	0x08	/* Unsupported Request Detected */
   9.381 -#define  PCI_EXP_DEVSTA_AUXPD	0x10	/* AUX Power Detected */
   9.382 -#define  PCI_EXP_DEVSTA_TRPND	0x20	/* Transactions Pending */
   9.383 -#define PCI_EXP_LNKCAP		12	/* Link Capabilities */
   9.384 -#define PCI_EXP_LNKCTL		16	/* Link Control */
   9.385 -#define  PCI_EXP_LNKCTL_CLKREQ_EN 0x100	/* Enable clkreq */
   9.386 -#define PCI_EXP_LNKSTA		18	/* Link Status */
   9.387 -#define PCI_EXP_SLTCAP		20	/* Slot Capabilities */
   9.388 -#define PCI_EXP_SLTCTL		24	/* Slot Control */
   9.389 -#define PCI_EXP_SLTSTA		26	/* Slot Status */
   9.390 -#define PCI_EXP_RTCTL		28	/* Root Control */
   9.391 -#define  PCI_EXP_RTCTL_SECEE	0x01	/* System Error on Correctable Error */
   9.392 -#define  PCI_EXP_RTCTL_SENFEE	0x02	/* System Error on Non-Fatal Error */
   9.393 -#define  PCI_EXP_RTCTL_SEFEE	0x04	/* System Error on Fatal Error */
   9.394 -#define  PCI_EXP_RTCTL_PMEIE	0x08	/* PME Interrupt Enable */
   9.395 -#define  PCI_EXP_RTCTL_CRSSVE	0x10	/* CRS Software Visibility Enable */
   9.396 -#define PCI_EXP_RTCAP		30	/* Root Capabilities */
   9.397 -#define PCI_EXP_RTSTA		32	/* Root Status */
   9.398 -
   9.399 -/* Extended Capabilities (PCI-X 2.0 and Express) */
   9.400 -#define PCI_EXT_CAP_ID(header)		(header & 0x0000ffff)
   9.401 -#define PCI_EXT_CAP_VER(header)		((header >> 16) & 0xf)
   9.402 -#define PCI_EXT_CAP_NEXT(header)	((header >> 20) & 0xffc)
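The three macros above unpack one 32-bit extended-capability header; the extended walk differs from the legacy one in starting at fixed offset 0x100, with each link a 12-bit dword-aligned offset. A sketch, assuming a hypothetical read_pcie_config() helper that can reach offsets above 0xff (the 0xcf8/0xcfc helpers earlier in this change only address the first 256 bytes):

    /* Sketch only: read_pcie_config() is hypothetical -- extended
     * config space needs MMCONFIG or similar, not mechanism #1.
     */
    static int pci_find_ext_cap(u16 seg, u8 bus, u8 devfn, int cap_id)
    {
        int pos = 0x100;    /* extended capabilities start here */
        int ttl = 480;      /* generous bound against circular lists */
        u32 header;

        while ( ttl-- )
        {
            header = read_pcie_config(seg, bus, devfn, pos);
            if ( PCI_EXT_CAP_ID(header) == cap_id )
                return pos;
            pos = PCI_EXT_CAP_NEXT(header);
            if ( pos < 0x100 )
                break;      /* a next pointer of 0 ends the list */
        }
        return 0;
    }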
   9.403 -
   9.404 -#define PCI_EXT_CAP_ID_ERR	1
   9.405 -#define PCI_EXT_CAP_ID_VC	2
   9.406 -#define PCI_EXT_CAP_ID_DSN	3
   9.407 -#define PCI_EXT_CAP_ID_PWR	4
   9.408 -
   9.409 -/* Advanced Error Reporting */
   9.410 -#define PCI_ERR_UNCOR_STATUS	4	/* Uncorrectable Error Status */
   9.411 -#define  PCI_ERR_UNC_TRAIN	0x00000001	/* Training */
   9.412 -#define  PCI_ERR_UNC_DLP	0x00000010	/* Data Link Protocol */
   9.413 -#define  PCI_ERR_UNC_POISON_TLP	0x00001000	/* Poisoned TLP */
   9.414 -#define  PCI_ERR_UNC_FCP	0x00002000	/* Flow Control Protocol */
   9.415 -#define  PCI_ERR_UNC_COMP_TIME	0x00004000	/* Completion Timeout */
   9.416 -#define  PCI_ERR_UNC_COMP_ABORT	0x00008000	/* Completer Abort */
   9.417 -#define  PCI_ERR_UNC_UNX_COMP	0x00010000	/* Unexpected Completion */
   9.418 -#define  PCI_ERR_UNC_RX_OVER	0x00020000	/* Receiver Overflow */
   9.419 -#define  PCI_ERR_UNC_MALF_TLP	0x00040000	/* Malformed TLP */
   9.420 -#define  PCI_ERR_UNC_ECRC	0x00080000	/* ECRC Error Status */
   9.421 -#define  PCI_ERR_UNC_UNSUP	0x00100000	/* Unsupported Request */
   9.422 -#define PCI_ERR_UNCOR_MASK	8	/* Uncorrectable Error Mask */
   9.423 -	/* Same bits as above */
   9.424 -#define PCI_ERR_UNCOR_SEVER	12	/* Uncorrectable Error Severity */
   9.425 -	/* Same bits as above */
   9.426 -#define PCI_ERR_COR_STATUS	16	/* Correctable Error Status */
   9.427 -#define  PCI_ERR_COR_RCVR	0x00000001	/* Receiver Error Status */
   9.428 -#define  PCI_ERR_COR_BAD_TLP	0x00000040	/* Bad TLP Status */
   9.429 -#define  PCI_ERR_COR_BAD_DLLP	0x00000080	/* Bad DLLP Status */
   9.430 -#define  PCI_ERR_COR_REP_ROLL	0x00000100	/* REPLAY_NUM Rollover */
   9.431 -#define  PCI_ERR_COR_REP_TIMER	0x00001000	/* Replay Timer Timeout */
   9.432 -#define PCI_ERR_COR_MASK	20	/* Correctable Error Mask */
   9.433 -	/* Same bits as above */
   9.434 -#define PCI_ERR_CAP		24	/* Advanced Error Capabilities */
   9.435 -#define  PCI_ERR_CAP_FEP(x)	((x) & 31)	/* First Error Pointer */
   9.436 -#define  PCI_ERR_CAP_ECRC_GENC	0x00000020	/* ECRC Generation Capable */
   9.437 -#define  PCI_ERR_CAP_ECRC_GENE	0x00000040	/* ECRC Generation Enable */
   9.438 -#define  PCI_ERR_CAP_ECRC_CHKC	0x00000080	/* ECRC Check Capable */
   9.439 -#define  PCI_ERR_CAP_ECRC_CHKE	0x00000100	/* ECRC Check Enable */
   9.440 -#define PCI_ERR_HEADER_LOG	28	/* Header Log Register (16 bytes) */
   9.441 -#define PCI_ERR_ROOT_COMMAND	44	/* Root Error Command */
   9.442 -/* Correctable Err Reporting Enable */
   9.443 -#define PCI_ERR_ROOT_CMD_COR_EN		0x00000001
   9.444 -/* Non-fatal Err Reporting Enable */
   9.445 -#define PCI_ERR_ROOT_CMD_NONFATAL_EN	0x00000002
   9.446 -/* Fatal Err Reporting Enable */
   9.447 -#define PCI_ERR_ROOT_CMD_FATAL_EN	0x00000004
   9.448 -#define PCI_ERR_ROOT_STATUS	48
   9.449 -#define PCI_ERR_ROOT_COR_RCV		0x00000001	/* ERR_COR Received */
   9.450 -/* Multi ERR_COR Received */
   9.451 -#define PCI_ERR_ROOT_MULTI_COR_RCV	0x00000002
    9.452 -/* ERR_FATAL/NONFATAL Received */
    9.453 -#define PCI_ERR_ROOT_UNCOR_RCV		0x00000004
    9.454 -/* Multi ERR_FATAL/NONFATAL Received */
   9.455 -#define PCI_ERR_ROOT_MULTI_UNCOR_RCV	0x00000008
   9.456 -#define PCI_ERR_ROOT_FIRST_FATAL	0x00000010	/* First Fatal */
   9.457 -#define PCI_ERR_ROOT_NONFATAL_RCV	0x00000020	/* Non-Fatal Received */
   9.458 -#define PCI_ERR_ROOT_FATAL_RCV		0x00000040	/* Fatal Received */
   9.459 -#define PCI_ERR_ROOT_COR_SRC	52
   9.460 -#define PCI_ERR_ROOT_SRC	54
   9.461 -
   9.462 -/* Virtual Channel */
   9.463 -#define PCI_VC_PORT_REG1	4
   9.464 -#define PCI_VC_PORT_REG2	8
   9.465 -#define PCI_VC_PORT_CTRL	12
   9.466 -#define PCI_VC_PORT_STATUS	14
   9.467 -#define PCI_VC_RES_CAP		16
   9.468 -#define PCI_VC_RES_CTRL		20
   9.469 -#define PCI_VC_RES_STATUS	26
   9.470 -
   9.471 -/* Power Budgeting */
   9.472 -#define PCI_PWR_DSR		4	/* Data Select Register */
   9.473 -#define PCI_PWR_DATA		8	/* Data Register */
   9.474 -#define  PCI_PWR_DATA_BASE(x)	((x) & 0xff)	    /* Base Power */
   9.475 -#define  PCI_PWR_DATA_SCALE(x)	(((x) >> 8) & 3)    /* Data Scale */
   9.476 -#define  PCI_PWR_DATA_PM_SUB(x)	(((x) >> 10) & 7)   /* PM Sub State */
   9.477 -#define  PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */
   9.478 -#define  PCI_PWR_DATA_TYPE(x)	(((x) >> 15) & 7)   /* Type */
   9.479 -#define  PCI_PWR_DATA_RAIL(x)	(((x) >> 18) & 7)   /* Power Rail */
   9.480 -#define PCI_PWR_CAP		12	/* Capability */
   9.481 -#define  PCI_PWR_CAP_BUDGET(x)	((x) & 1)	/* Included in system budget */
   9.482 -
   9.483 -/*
   9.484 - * Hypertransport sub capability types
   9.485 - *
   9.486 - * Unfortunately there are both 3 bit and 5 bit capability types defined
   9.487 - * in the HT spec, catering for that is a little messy. You probably don't
   9.488 - * want to use these directly, just use pci_find_ht_capability() and it
   9.489 - * will do the right thing for you.
   9.490 - */
   9.491 -#define HT_3BIT_CAP_MASK	0xE0
   9.492 -#define HT_CAPTYPE_SLAVE	0x00	/* Slave/Primary link configuration */
   9.493 -#define HT_CAPTYPE_HOST		0x20	/* Host/Secondary link configuration */
   9.494 -
   9.495 -#define HT_5BIT_CAP_MASK	0xF8
   9.496 -#define HT_CAPTYPE_IRQ		0x80	/* IRQ Configuration */
   9.497 -#define HT_CAPTYPE_REMAPPING_40	0xA0	/* 40 bit address remapping */
   9.498 -#define HT_CAPTYPE_REMAPPING_64 0xA2	/* 64 bit address remapping */
   9.499 -#define HT_CAPTYPE_UNITID_CLUMP	0x90	/* Unit ID clumping */
   9.500 -#define HT_CAPTYPE_EXTCONF	0x98	/* Extended Configuration Space Access */
   9.501 -#define HT_CAPTYPE_MSI_MAPPING	0xA8	/* MSI Mapping Capability */
   9.502 -#define  HT_MSI_FLAGS		0x02		/* Offset to flags */
   9.503 -#define  HT_MSI_FLAGS_ENABLE	0x1		/* Mapping enable */
   9.504 -#define  HT_MSI_FLAGS_FIXED	0x2		/* Fixed mapping only */
   9.505 -#define  HT_MSI_FIXED_ADDR	0x00000000FEE00000ULL	/* Fixed addr */
   9.506 -#define  HT_MSI_ADDR_LO		0x04		/* Offset to low addr bits */
   9.507 -#define  HT_MSI_ADDR_LO_MASK	0xFFF00000	/* Low address bit mask */
   9.508 -#define  HT_MSI_ADDR_HI		0x08		/* Offset to high addr bits */
   9.509 -#define HT_CAPTYPE_DIRECT_ROUTE	0xB0	/* Direct routing configuration */
   9.510 -#define HT_CAPTYPE_VCSET	0xB8	/* Virtual Channel configuration */
   9.511 -#define HT_CAPTYPE_ERROR_RETRY	0xC0	/* Retry on error configuration */
   9.512 -#define HT_CAPTYPE_GEN3		0xD0	/* Generation 3 hypertransport configuration */
   9.513 -#define HT_CAPTYPE_PM		0xE0	/* Hypertransport powermanagement configuration */
   9.514 -
   9.515 -
   9.516 -#endif /* LINUX_PCI_REGS_H */
    10.1 --- a/xen/arch/x86/hvm/vmx/Makefile	Thu Feb 21 14:50:27 2008 +0000
    10.2 +++ b/xen/arch/x86/hvm/vmx/Makefile	Thu Feb 21 15:06:37 2008 +0000
    10.3 @@ -1,5 +1,3 @@
    10.4 -subdir-y += vtd
    10.5 -
    10.6  subdir-$(x86_32) += x86_32
    10.7  subdir-$(x86_64) += x86_64
    10.8  
    11.1 --- a/xen/arch/x86/hvm/vmx/vtd/Makefile	Thu Feb 21 14:50:27 2008 +0000
    11.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.3 @@ -1,6 +0,0 @@
    11.4 -obj-y += intel-iommu.o
    11.5 -obj-y += dmar.o
    11.6 -obj-y += utils.o
    11.7 -obj-y += io.o
    11.8 -obj-y += qinval.o
    11.9 -obj-y += intremap.o
    12.1 --- a/xen/arch/x86/hvm/vmx/vtd/dmar.c	Thu Feb 21 14:50:27 2008 +0000
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,625 +0,0 @@
    12.4 -/*
    12.5 - * Copyright (c) 2006, Intel Corporation.
    12.6 - *
    12.7 - * This program is free software; you can redistribute it and/or modify it
    12.8 - * under the terms and conditions of the GNU General Public License,
    12.9 - * version 2, as published by the Free Software Foundation.
   12.10 - *
   12.11 - * This program is distributed in the hope it will be useful, but WITHOUT
   12.12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12.13 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   12.14 - * more details.
   12.15 - *
   12.16 - * You should have received a copy of the GNU General Public License along with
   12.17 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   12.18 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   12.19 - *
   12.20 - * Copyright (C) Ashok Raj <ashok.raj@intel.com>
   12.21 - * Copyright (C) Shaohua Li <shaohua.li@intel.com>
   12.22 - * Copyright (C) Allen Kay <allen.m.kay@intel.com> - adapted to xen
   12.23 - */
   12.24 -
   12.25 -#include <xen/init.h>
   12.26 -#include <xen/bitmap.h>
   12.27 -#include <xen/kernel.h>
   12.28 -#include <xen/acpi.h>
   12.29 -#include <xen/mm.h>
   12.30 -#include <xen/xmalloc.h>
   12.31 -#include <asm/string.h>
   12.32 -#include "dmar.h"
   12.33 -#include "pci-direct.h"
   12.34 -#include "pci_regs.h"
   12.35 -
   12.36 -int vtd_enabled;
   12.37 -boolean_param("vtd", vtd_enabled);
   12.38 -
   12.39 -#undef PREFIX
   12.40 -#define PREFIX VTDPREFIX "ACPI DMAR:"
   12.41 -#define DEBUG
   12.42 -
   12.43 -#define MIN_SCOPE_LEN (sizeof(struct acpi_pci_path) + \
   12.44 -                       sizeof(struct acpi_dev_scope))
   12.45 -
   12.46 -LIST_HEAD(acpi_drhd_units);
   12.47 -LIST_HEAD(acpi_rmrr_units);
   12.48 -LIST_HEAD(acpi_atsr_units);
   12.49 -
   12.50 -u8 dmar_host_address_width;
   12.51 -
   12.52 -static int __init acpi_register_drhd_unit(struct acpi_drhd_unit *drhd)
   12.53 -{
   12.54 -    /*
    12.55 -     * Add the INCLUDE_ALL unit at the tail, so that a scan of the
    12.56 -     * list only finds it after all specific units have been tried.
   12.57 -     */
   12.58 -    if ( drhd->include_all )
   12.59 -        list_add_tail(&drhd->list, &acpi_drhd_units);
   12.60 -    else
   12.61 -        list_add(&drhd->list, &acpi_drhd_units);
   12.62 -    return 0;
   12.63 -}
   12.64 -
   12.65 -static int __init acpi_register_rmrr_unit(struct acpi_rmrr_unit *rmrr)
   12.66 -{
   12.67 -    list_add(&rmrr->list, &acpi_rmrr_units);
   12.68 -    return 0;
   12.69 -}
   12.70 -
   12.71 -static int acpi_ioapic_device_match(
   12.72 -    struct list_head *ioapic_list, unsigned int apic_id)
   12.73 -{
   12.74 -    struct acpi_ioapic_unit *ioapic;
   12.75 -    list_for_each_entry( ioapic, ioapic_list, list ) {
   12.76 -        if (ioapic->apic_id == apic_id)
   12.77 -            return 1;
   12.78 -    }
   12.79 -    return 0;
   12.80 -}
   12.81 -
   12.82 -struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id)
   12.83 -{
   12.84 -    struct acpi_drhd_unit *drhd;
   12.85 -    list_for_each_entry( drhd, &acpi_drhd_units, list ) {
   12.86 -        if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) {
   12.87 -            dprintk(XENLOG_INFO VTDPREFIX,
   12.88 -                    "ioapic_to_drhd: drhd->address = %lx\n",
   12.89 -                    drhd->address);
   12.90 -            return drhd;
   12.91 -        }
   12.92 -    }
   12.93 -    return NULL;
   12.94 -}
   12.95 -
   12.96 -struct iommu * ioapic_to_iommu(unsigned int apic_id)
   12.97 -{
   12.98 -    struct acpi_drhd_unit *drhd;
   12.99 -
  12.100 -    list_for_each_entry( drhd, &acpi_drhd_units, list ) {
  12.101 -        if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) {
  12.102 -            dprintk(XENLOG_INFO VTDPREFIX,
  12.103 -                    "ioapic_to_iommu: drhd->address = %lx\n",
  12.104 -                    drhd->address);
  12.105 -            return drhd->iommu;
  12.106 -        }
  12.107 -    }
   12.108 -    dprintk(XENLOG_INFO VTDPREFIX, "ioapic_to_iommu: no IOMMU found\n");
  12.109 -    return NULL;
  12.110 -}
  12.111 -
  12.112 -static int acpi_pci_device_match(struct pci_dev *devices, int cnt,
  12.113 -                                 struct pci_dev *dev)
  12.114 -{
  12.115 -    int i;
  12.116 -
  12.117 -    for ( i = 0; i < cnt; i++ )
  12.118 -    {
  12.119 -        if ( (dev->bus == devices->bus) &&
  12.120 -             (dev->devfn == devices->devfn) )
  12.121 -            return 1;
  12.122 -        devices++;
  12.123 -    }
  12.124 -    return 0;
  12.125 -}
  12.126 -
  12.127 -static int __init acpi_register_atsr_unit(struct acpi_atsr_unit *atsr)
  12.128 -{
  12.129 -    /*
   12.130 -     * Add the ALL_PORTS unit at the tail, so that a scan of the
   12.131 -     * list only finds it after all specific units have been tried.
  12.132 -     */
  12.133 -    if ( atsr->all_ports )
  12.134 -        list_add_tail(&atsr->list, &acpi_atsr_units);
  12.135 -    else
  12.136 -        list_add(&atsr->list, &acpi_atsr_units);
  12.137 -    return 0;
  12.138 -}
  12.139 -
  12.140 -struct acpi_drhd_unit * acpi_find_matched_drhd_unit(struct pci_dev *dev)
  12.141 -{
  12.142 -    struct acpi_drhd_unit *drhd;
  12.143 -    struct acpi_drhd_unit *include_all_drhd;
  12.144 -
  12.145 -    include_all_drhd = NULL;
  12.146 -    list_for_each_entry ( drhd, &acpi_drhd_units, list )
  12.147 -    {
  12.148 -        if ( drhd->include_all )
  12.149 -        {
  12.150 -            include_all_drhd = drhd;
  12.151 -            continue;
  12.152 -        }
  12.153 -
  12.154 -        if ( acpi_pci_device_match(drhd->devices,
  12.155 -                                   drhd->devices_cnt, dev) )
  12.156 -        {
  12.157 -            dprintk(XENLOG_INFO VTDPREFIX, 
  12.158 -                    "acpi_find_matched_drhd_unit: drhd->address = %lx\n",
  12.159 -                    drhd->address);
  12.160 -            return drhd;
  12.161 -        }
  12.162 -    }
  12.163 -
  12.164 -    if ( include_all_drhd )
  12.165 -    {
  12.166 -        dprintk(XENLOG_INFO VTDPREFIX, 
  12.167 -                "acpi_find_matched_drhd_unit:include_all_drhd->addr = %lx\n",
  12.168 -                include_all_drhd->address);
  12.169 -        return include_all_drhd;
  12.170 -    }
  12.171 -
  12.172 -    return NULL;
  12.173 -}
  12.174 -
  12.175 -struct acpi_rmrr_unit * acpi_find_matched_rmrr_unit(struct pci_dev *dev)
  12.176 -{
  12.177 -    struct acpi_rmrr_unit *rmrr;
  12.178 -
  12.179 -    list_for_each_entry ( rmrr, &acpi_rmrr_units, list )
  12.180 -    {
  12.181 -        if ( acpi_pci_device_match(rmrr->devices,
  12.182 -                                   rmrr->devices_cnt, dev) )
  12.183 -            return rmrr;
  12.184 -    }
  12.185 -
  12.186 -    return NULL;
  12.187 -}
  12.188 -
  12.189 -struct acpi_atsr_unit * acpi_find_matched_atsr_unit(struct pci_dev *dev)
  12.190 -{
  12.191 -    struct acpi_atsr_unit *atsru;
  12.192 -    struct acpi_atsr_unit *all_ports_atsru;
  12.193 -
  12.194 -    all_ports_atsru = NULL;
  12.195 -    list_for_each_entry ( atsru, &acpi_atsr_units, list )
  12.196 -    {
  12.197 -        if ( atsru->all_ports )
  12.198 -            all_ports_atsru = atsru;
  12.199 -        if ( acpi_pci_device_match(atsru->devices,
  12.200 -                                   atsru->devices_cnt, dev) )
  12.201 -            return atsru;
  12.202 -    }
  12.203 -
  12.204 -    if ( all_ports_atsru )
  12.205 -    {
  12.206 -        dprintk(XENLOG_INFO VTDPREFIX,
  12.207 -                "acpi_find_matched_atsr_unit: all_ports_atsru\n");
   12.208 -        return all_ports_atsru;
  12.209 -    }
  12.210 -
  12.211 -    return NULL;
  12.212 -}
  12.213 -
  12.214 -static int scope_device_count(void *start, void *end)
  12.215 -{
  12.216 -    struct acpi_dev_scope *scope;
  12.217 -    u8 bus, sub_bus, sec_bus;
  12.218 -    struct acpi_pci_path *path;
  12.219 -    int depth, count = 0;
  12.220 -    u8 dev, func;
  12.221 -    u32 l;
  12.222 -
  12.223 -    while ( start < end )
  12.224 -    {
  12.225 -        scope = start;
  12.226 -        if ( (scope->length < MIN_SCOPE_LEN) ||
  12.227 -             (scope->dev_type >= ACPI_DEV_ENTRY_COUNT) )
  12.228 -        {
  12.229 -            dprintk(XENLOG_WARNING VTDPREFIX, "Invalid device scope\n");
  12.230 -            return -EINVAL;
  12.231 -        }
  12.232 -
  12.233 -        path = (struct acpi_pci_path *)(scope + 1);
  12.234 -        bus = scope->start_bus;
  12.235 -        depth = (scope->length - sizeof(struct acpi_dev_scope))
  12.236 -		    / sizeof(struct acpi_pci_path);
  12.237 -        while ( --depth )
  12.238 -        {
  12.239 -            bus = read_pci_config_byte(
  12.240 -                bus, path->dev, path->fn, PCI_SECONDARY_BUS);
  12.241 -            path++;
  12.242 -        }
  12.243 -
  12.244 -        if ( scope->dev_type == ACPI_DEV_ENDPOINT )
  12.245 -        {
  12.246 -            dprintk(XENLOG_INFO VTDPREFIX,
  12.247 -                    "found endpoint: bdf = %x:%x:%x\n",
  12.248 -                    bus, path->dev, path->fn);
  12.249 -            count++;
  12.250 -        }
  12.251 -        else if ( scope->dev_type == ACPI_DEV_P2PBRIDGE )
  12.252 -        {
  12.253 -            dprintk(XENLOG_INFO VTDPREFIX,
  12.254 -                    "found bridge: bdf = %x:%x:%x\n",
  12.255 -                    bus, path->dev, path->fn);
  12.256 -            sec_bus = read_pci_config_byte(
  12.257 -                bus, path->dev, path->fn, PCI_SECONDARY_BUS);
  12.258 -            sub_bus = read_pci_config_byte(
  12.259 -                bus, path->dev, path->fn, PCI_SUBORDINATE_BUS);
  12.260 -
  12.261 -            while ( sec_bus <= sub_bus )
  12.262 -            {
  12.263 -                for ( dev = 0; dev < 32; dev++ )
  12.264 -                {
  12.265 -                    for ( func = 0; func < 8; func++ )
  12.266 -                    {
  12.267 -                        l = read_pci_config(
  12.268 -                            sec_bus, dev, func, PCI_VENDOR_ID);
  12.269 -
  12.270 -                        /* some broken boards return 0 or
  12.271 -                         * ~0 if a slot is empty
  12.272 -                         */
  12.273 -                        if ( l == 0xffffffff || l == 0x00000000 ||
  12.274 -                             l == 0x0000ffff || l == 0xffff0000 )
  12.275 -                            break;
  12.276 -                        count++;
  12.277 -                    }
  12.278 -                }
  12.279 -                sec_bus++;
  12.280 -            }
  12.281 -        }
  12.282 -        else if ( scope->dev_type == ACPI_DEV_IOAPIC )
  12.283 -        {
  12.284 -            dprintk(XENLOG_INFO VTDPREFIX,
  12.285 -                    "found IOAPIC: bdf = %x:%x:%x\n",
  12.286 -                    bus, path->dev, path->fn);
  12.287 -            count++;
  12.288 -        }
  12.289 -        else
  12.290 -        {
  12.291 -            dprintk(XENLOG_INFO VTDPREFIX,
  12.292 -                    "found MSI HPET: bdf = %x:%x:%x\n",
  12.293 -                    bus, path->dev, path->fn);
  12.294 -            count++;
  12.295 -        }
  12.296 -
  12.297 -        start += scope->length;
  12.298 -    }
  12.299 -
  12.300 -    return count;
  12.301 -}
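To make the path walk above concrete, consider a scope entry with start_bus 0 and two acpi_pci_path entries; depth is 2, so the loop body runs once (all values below are invented for illustration):

    /* Worked example (hypothetical values):
     *   scope->start_bus = 0
     *   path[0] = { dev = 2, fn = 0 }   -- a PCI-to-PCI bridge
     *   path[1] = { dev = 1, fn = 3 }   -- the target function
     *
     * depth = (scope->length - sizeof(*scope)) / sizeof(*path) = 2,
     * so "while ( --depth )" executes once:
     *
     *   bus = read_pci_config_byte(0, 2, 0, PCI_SECONDARY_BUS);
     *         (suppose the bridge at 0:02.0 reports secondary bus 4)
     *   path++;                          (path now names { 1, 3 })
     *
     * The scope entry therefore resolves to device 4:01.3.
     */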
  12.302 -
  12.303 -static int __init acpi_parse_dev_scope(
  12.304 -    void *start, void *end, void *acpi_entry, int type)
  12.305 -{
  12.306 -    struct acpi_dev_scope *scope;
  12.307 -    u8 bus, sub_bus, sec_bus;
  12.308 -    struct acpi_pci_path *path;
  12.309 -    struct acpi_ioapic_unit *acpi_ioapic_unit = NULL;
  12.310 -    int depth;
  12.311 -    struct pci_dev *pdev;
  12.312 -    u8 dev, func;
  12.313 -    u32 l;
  12.314 -
  12.315 -    int *cnt = NULL;
  12.316 -    struct pci_dev **devices = NULL;
  12.317 -    struct acpi_drhd_unit *dmaru = (struct acpi_drhd_unit *) acpi_entry;
  12.318 -    struct acpi_rmrr_unit *rmrru = (struct acpi_rmrr_unit *) acpi_entry;
  12.319 -    struct acpi_atsr_unit *atsru = (struct acpi_atsr_unit *) acpi_entry;
  12.320 -
  12.321 -    switch (type) {
  12.322 -        case DMAR_TYPE:
  12.323 -            cnt = &(dmaru->devices_cnt);
  12.324 -            devices = &(dmaru->devices);
  12.325 -            break;
  12.326 -        case RMRR_TYPE:
  12.327 -            cnt = &(rmrru->devices_cnt);
  12.328 -            devices = &(rmrru->devices);
  12.329 -            break;
  12.330 -        case ATSR_TYPE:
  12.331 -            cnt = &(atsru->devices_cnt);
  12.332 -            devices = &(atsru->devices);
  12.333 -            break;
  12.334 -        default:
   12.335 -            dprintk(XENLOG_ERR VTDPREFIX, "invalid vt-d acpi entry type\n");
   12.335 -            return -EINVAL; /* cnt/devices are NULL; do not fall through */
  12.336 -    }
  12.337 -
  12.338 -    *cnt = scope_device_count(start, end);
  12.339 -    if ( *cnt == 0 )
  12.340 -    {
  12.341 -        dprintk(XENLOG_INFO VTDPREFIX, "acpi_parse_dev_scope: no device\n");
  12.342 -        return 0;
  12.343 -    }
  12.344 -
  12.345 -    *devices = xmalloc_array(struct pci_dev,  *cnt);
  12.346 -    if ( !*devices )
  12.347 -        return -ENOMEM;
  12.348 -    memset(*devices, 0, sizeof(struct pci_dev) * (*cnt));
  12.349 -
  12.350 -    pdev = *devices;
  12.351 -    while ( start < end )
  12.352 -    {
  12.353 -        scope = start;
  12.354 -        path = (struct acpi_pci_path *)(scope + 1);
  12.355 -        depth = (scope->length - sizeof(struct acpi_dev_scope))
  12.356 -		    / sizeof(struct acpi_pci_path);
  12.357 -        bus = scope->start_bus;
  12.358 -
  12.359 -        while ( --depth )
  12.360 -        {
  12.361 -            bus = read_pci_config_byte(
  12.362 -                bus, path->dev, path->fn, PCI_SECONDARY_BUS);
  12.363 -            path++;
  12.364 -        }
  12.365 -
  12.366 -        if ( scope->dev_type == ACPI_DEV_ENDPOINT )
  12.367 -        {
  12.368 -            dprintk(XENLOG_INFO VTDPREFIX,
  12.369 -                    "found endpoint: bdf = %x:%x:%x\n",
  12.370 -                    bus, path->dev, path->fn);
  12.371 -            pdev->bus = bus;
  12.372 -            pdev->devfn = PCI_DEVFN(path->dev, path->fn);
  12.373 -            pdev++;
  12.374 -        }
  12.375 -        else if ( scope->dev_type == ACPI_DEV_P2PBRIDGE )
  12.376 -        {
  12.377 -            dprintk(XENLOG_INFO VTDPREFIX,
  12.378 -                    "found bridge: bus = %x dev = %x func = %x\n",
  12.379 -                    bus, path->dev, path->fn);
  12.380 -            sec_bus = read_pci_config_byte(
  12.381 -                bus, path->dev, path->fn, PCI_SECONDARY_BUS);
  12.382 -            sub_bus = read_pci_config_byte(
  12.383 -                bus, path->dev, path->fn, PCI_SUBORDINATE_BUS);
  12.384 -
  12.385 -            while ( sec_bus <= sub_bus )
  12.386 -            {
  12.387 -                for ( dev = 0; dev < 32; dev++ )
  12.388 -                {
  12.389 -                    for ( func = 0; func < 8; func++ )
  12.390 -                    {
  12.391 -                        l = read_pci_config(
  12.392 -                            sec_bus, dev, func, PCI_VENDOR_ID);
  12.393 -
  12.394 -                        /* some broken boards return 0 or
  12.395 -                         * ~0 if a slot is empty
  12.396 -                         */
  12.397 -                        if ( l == 0xffffffff || l == 0x00000000 ||
  12.398 -                             l == 0x0000ffff || l == 0xffff0000 )
  12.399 -                            break;
  12.400 -
  12.401 -                        pdev->bus = sec_bus;
  12.402 -                        pdev->devfn = PCI_DEVFN(dev, func);
  12.403 -                        pdev++;
  12.404 -                    }
  12.405 -                }
  12.406 -                sec_bus++;
  12.407 -            }
  12.408 -        }
  12.409 -        else if ( scope->dev_type == ACPI_DEV_IOAPIC )
  12.410 -        {
  12.411 -            acpi_ioapic_unit = xmalloc(struct acpi_ioapic_unit);
  12.412 -            if ( !acpi_ioapic_unit )
  12.413 -                return -ENOMEM;
  12.414 -            acpi_ioapic_unit->apic_id = scope->enum_id;
  12.415 -            acpi_ioapic_unit->ioapic.bdf.bus = bus;
  12.416 -            acpi_ioapic_unit->ioapic.bdf.dev = path->dev;
  12.417 -            acpi_ioapic_unit->ioapic.bdf.func = path->fn;
  12.418 -            list_add(&acpi_ioapic_unit->list, &dmaru->ioapic_list);
  12.419 -            dprintk(XENLOG_INFO VTDPREFIX,
  12.420 -                    "found IOAPIC: bus = %x dev = %x func = %x\n",
  12.421 -                    bus, path->dev, path->fn);
  12.422 -        }
  12.423 -        else
  12.424 -            dprintk(XENLOG_INFO VTDPREFIX,
  12.425 -                    "found MSI HPET: bus = %x dev = %x func = %x\n",
  12.426 -                    bus, path->dev, path->fn);
  12.427 -        start += scope->length;
  12.428 -    }
  12.429 -
  12.430 -    return 0;
  12.431 -}
  12.432 -
  12.433 -static int __init
  12.434 -acpi_parse_one_drhd(struct acpi_dmar_entry_header *header)
  12.435 -{
  12.436 -    struct acpi_table_drhd * drhd = (struct acpi_table_drhd *)header;
  12.437 -    struct acpi_drhd_unit *dmaru;
  12.438 -    int ret = 0;
  12.439 -    static int include_all;
  12.440 -    void *dev_scope_start, *dev_scope_end;
  12.441 -
  12.442 -    dmaru = xmalloc(struct acpi_drhd_unit);
  12.443 -    if ( !dmaru )
  12.444 -        return -ENOMEM;
  12.445 -    memset(dmaru, 0, sizeof(struct acpi_drhd_unit));
  12.446 -
  12.447 -    dmaru->address = drhd->address;
  12.448 -    dmaru->include_all = drhd->flags & 1; /* BIT0: INCLUDE_ALL */
  12.449 -    INIT_LIST_HEAD(&dmaru->ioapic_list);
  12.450 -    dprintk(XENLOG_INFO VTDPREFIX, "dmaru->address = %lx\n", dmaru->address);
  12.451 -
  12.452 -    dev_scope_start = (void *)(drhd + 1);
  12.453 -    dev_scope_end   = ((void *)drhd) + header->length;
  12.454 -    ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
  12.455 -                               dmaru, DMAR_TYPE);
  12.456 -
  12.457 -    if ( dmaru->include_all )
  12.458 -    {
  12.459 -        dprintk(XENLOG_INFO VTDPREFIX, "found INCLUDE_ALL\n");
  12.460 -        /* Only allow one INCLUDE_ALL */
  12.461 -        if ( include_all )
  12.462 -        {
  12.463 -            dprintk(XENLOG_WARNING VTDPREFIX,
  12.464 -                    "Only one INCLUDE_ALL device scope is allowed\n");
  12.465 -            ret = -EINVAL;
  12.466 -        }
  12.467 -        include_all = 1;
  12.468 -    }
  12.469 -
  12.470 -    if ( ret )
  12.471 -        xfree(dmaru);
  12.472 -    else
  12.473 -        acpi_register_drhd_unit(dmaru);
  12.474 -    return ret;
  12.475 -}
  12.476 -
  12.477 -static int __init
  12.478 -acpi_parse_one_rmrr(struct acpi_dmar_entry_header *header)
  12.479 -{
  12.480 -    struct acpi_table_rmrr *rmrr = (struct acpi_table_rmrr *)header;
  12.481 -    struct acpi_rmrr_unit *rmrru;
  12.482 -    void *dev_scope_start, *dev_scope_end;
  12.483 -    int ret = 0;
  12.484 -
  12.485 -    rmrru = xmalloc(struct acpi_rmrr_unit);
  12.486 -    if ( !rmrru )
  12.487 -        return -ENOMEM;
  12.488 -    memset(rmrru, 0, sizeof(struct acpi_rmrr_unit));
  12.489 -
  12.490 -    rmrru->base_address = rmrr->base_address;
  12.491 -    rmrru->end_address = rmrr->end_address;
  12.492 -    dev_scope_start = (void *)(rmrr + 1);
  12.493 -    dev_scope_end   = ((void *)rmrr) + header->length;
  12.494 -    ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
  12.495 -                               rmrru, RMRR_TYPE);
  12.496 -    if ( ret || (rmrru->devices_cnt == 0) )
  12.497 -        xfree(rmrru);
  12.498 -    else
  12.499 -        acpi_register_rmrr_unit(rmrru);
  12.500 -    return ret;
  12.501 -}
  12.502 -
  12.503 -static int __init
  12.504 -acpi_parse_one_atsr(struct acpi_dmar_entry_header *header)
  12.505 -{
  12.506 -    struct acpi_table_atsr *atsr = (struct acpi_table_atsr *)header;
  12.507 -    struct acpi_atsr_unit *atsru;
  12.508 -    int ret = 0;
  12.509 -    static int all_ports;
  12.510 -    void *dev_scope_start, *dev_scope_end;
  12.511 -
  12.512 -    atsru = xmalloc(struct acpi_atsr_unit);
  12.513 -    if ( !atsru )
  12.514 -        return -ENOMEM;
  12.515 -    memset(atsru, 0, sizeof(struct acpi_atsr_unit));
  12.516 -
  12.517 -    atsru->all_ports = atsr->flags & 1; /* BIT0: ALL_PORTS */
  12.518 -    if ( !atsru->all_ports )
  12.519 -    {
  12.520 -        dev_scope_start = (void *)(atsr + 1);
  12.521 -        dev_scope_end   = ((void *)atsr) + header->length;
  12.522 -        ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
  12.523 -                                   atsru, ATSR_TYPE);
  12.524 -    }
  12.525 -    else {
  12.526 -        dprintk(XENLOG_INFO VTDPREFIX, "found ALL_PORTS\n");
  12.527 -        /* Only allow one ALL_PORTS */
  12.528 -        if ( all_ports )
  12.529 -        {
  12.530 -            dprintk(XENLOG_WARNING VTDPREFIX,
  12.531 -                    "Only one ALL_PORTS device scope is allowed\n");
  12.532 -            ret = -EINVAL;
  12.533 -        }
  12.534 -        all_ports = 1;
  12.535 -    }
  12.536 -
  12.537 -    if ( ret )
   12.538 -        xfree(atsru);
  12.539 -    else
  12.540 -        acpi_register_atsr_unit(atsru);
  12.541 -    return ret;
  12.542 -}
  12.543 -
  12.544 -static int __init acpi_parse_dmar(unsigned long phys_addr,
  12.545 -                                  unsigned long size)
  12.546 -{
  12.547 -    struct acpi_table_dmar *dmar = NULL;
  12.548 -    struct acpi_dmar_entry_header *entry_header;
  12.549 -    int ret = 0;
  12.550 -
  12.551 -    if ( !phys_addr || !size )
  12.552 -        return -EINVAL;
  12.553 -
  12.554 -    dmar = (struct acpi_table_dmar *)__acpi_map_table(phys_addr, size);
  12.555 -    if ( !dmar )
  12.556 -    {
  12.557 -        dprintk(XENLOG_WARNING VTDPREFIX, "Unable to map DMAR\n");
  12.558 -        return -ENODEV;
  12.559 -    }
  12.560 -
  12.561 -    if ( !dmar->haw )
  12.562 -    {
   12.563 -        dprintk(XENLOG_WARNING VTDPREFIX, "Invalid DMAR haw (zero)\n");
  12.564 -        return -EINVAL;
  12.565 -    }
  12.566 -
  12.567 -    dmar_host_address_width = dmar->haw;
  12.568 -    dprintk(XENLOG_INFO VTDPREFIX, "Host address width %d\n",
  12.569 -            dmar_host_address_width);
  12.570 -
  12.571 -    entry_header = (struct acpi_dmar_entry_header *)(dmar + 1);
  12.572 -    while ( ((unsigned long)entry_header) <
  12.573 -            (((unsigned long)dmar) + size) )
  12.574 -    {
  12.575 -        switch ( entry_header->type )
  12.576 -        {
  12.577 -        case ACPI_DMAR_DRHD:
  12.578 -            dprintk(XENLOG_INFO VTDPREFIX, "found ACPI_DMAR_DRHD\n");
  12.579 -            ret = acpi_parse_one_drhd(entry_header);
  12.580 -            break;
  12.581 -        case ACPI_DMAR_RMRR:
  12.582 -            dprintk(XENLOG_INFO VTDPREFIX, "found ACPI_DMAR_RMRR\n");
  12.583 -            ret = acpi_parse_one_rmrr(entry_header);
  12.584 -            break;
  12.585 -        case ACPI_DMAR_ATSR:
  12.586 -            dprintk(XENLOG_INFO VTDPREFIX, "found ACPI_DMAR_ATSR\n");
  12.587 -            ret = acpi_parse_one_atsr(entry_header);
  12.588 -            break;
  12.589 -        default:
  12.590 -            dprintk(XENLOG_WARNING VTDPREFIX, "Unknown DMAR structure type\n");
  12.591 -            ret = -EINVAL;
  12.592 -            break;
  12.593 -        }
  12.594 -        if ( ret )
  12.595 -            break;
  12.596 -
  12.597 -        entry_header = ((void *)entry_header + entry_header->length);
  12.598 -    }
  12.599 -
   12.600 -    /* Zap the ACPI DMAR signature to prevent dom0 from using VT-d HW. */
  12.601 -    dmar->header.signature[0] = '\0';
  12.602 -
  12.603 -    return ret;
  12.604 -}
  12.605 -
  12.606 -int acpi_dmar_init(void)
  12.607 -{
  12.608 -    int rc;
  12.609 -
  12.610 -    if ( !vtd_enabled )
  12.611 -        return -ENODEV;
  12.612 -
  12.613 -    if ( (rc = vtd_hw_check()) != 0 )
  12.614 -        return rc;
  12.615 -
  12.616 -    acpi_table_parse(ACPI_DMAR, acpi_parse_dmar);
  12.617 -
  12.618 -    if ( list_empty(&acpi_drhd_units) )
  12.619 -    {
  12.620 -        dprintk(XENLOG_ERR VTDPREFIX, "No DMAR devices found\n");
  12.621 -        vtd_enabled = 0;
  12.622 -        return -ENODEV;
  12.623 -    }
  12.624 -
  12.625 -    printk("Intel VT-d has been enabled\n");
  12.626 -
  12.627 -    return 0;
  12.628 -}
    13.1 --- a/xen/arch/x86/hvm/vmx/vtd/dmar.h	Thu Feb 21 14:50:27 2008 +0000
    13.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.3 @@ -1,104 +0,0 @@
    13.4 -/*
    13.5 - * Copyright (c) 2006, Intel Corporation.
    13.6 - *
    13.7 - * This program is free software; you can redistribute it and/or modify it
    13.8 - * under the terms and conditions of the GNU General Public License,
    13.9 - * version 2, as published by the Free Software Foundation.
   13.10 - *
   13.11 - * This program is distributed in the hope it will be useful, but WITHOUT
   13.12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   13.13 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   13.14 - * more details.
   13.15 - *
   13.16 - * You should have received a copy of the GNU General Public License along with
   13.17 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   13.18 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   13.19 - *
   13.20 - * Copyright (C) Ashok Raj <ashok.raj@intel.com>
   13.21 - * Copyright (C) Shaohua Li <shaohua.li@intel.com>
   13.22 - */
   13.23 -
   13.24 -#ifndef _DMAR_H_
   13.25 -#define _DMAR_H_
   13.26 -
   13.27 -#include <xen/list.h>
   13.28 -#include <asm/iommu.h>
   13.29 -
   13.30 -extern u8 dmar_host_address_width;
   13.31 -
   13.32 -/* This one is for interrupt remapping */
   13.33 -struct acpi_ioapic_unit {
   13.34 -    struct list_head list;
   13.35 -    int apic_id;
   13.36 -    union {
   13.37 -        u16 info;
   13.38 -        struct {
   13.39 -            u16 func: 3,
   13.40 -                dev:  5,
   13.41 -                bus:  8;
    13.42 -        } bdf;
    13.43 -    } ioapic;
   13.44 -};
   13.45 -
   13.46 -struct acpi_drhd_unit {
   13.47 -    struct list_head list;
   13.48 -    unsigned long    address; /* register base address of the unit */
    13.49 -    struct pci_dev *devices; /* target devices */
    13.50 -    int devices_cnt;
    13.51 -    u8 include_all:1;
   13.52 -    struct iommu *iommu;
   13.53 -    struct list_head ioapic_list;
   13.54 -};
   13.55 -
   13.56 -struct acpi_rmrr_unit {
   13.57 -    struct list_head list;
   13.58 -    unsigned long base_address;
   13.59 -    unsigned long end_address;
   13.60 -    struct pci_dev *devices; /* target devices */
    13.61 -    int devices_cnt;
    13.62 -    u8 allow_all:1;
   13.63 -};
   13.64 -
   13.65 -struct acpi_atsr_unit {
   13.66 -    struct list_head list;
    13.67 -    struct pci_dev *devices; /* target devices */
    13.68 -    int devices_cnt;
    13.69 -    u8 all_ports:1;
   13.70 -};
   13.71 -
   13.72 -#define for_each_iommu(domain, iommu) \
   13.73 -    list_for_each_entry(iommu, \
   13.74 -        &(domain->arch.hvm_domain.hvm_iommu.iommu_list), list)
   13.75 -
   13.76 -#define for_each_pdev(domain, pdev) \
   13.77 -    list_for_each_entry(pdev, \
   13.78 -         &(domain->arch.hvm_domain.hvm_iommu.pdev_list), list)
   13.79 -
   13.80 -#define for_each_drhd_unit(drhd) \
   13.81 -    list_for_each_entry(drhd, &acpi_drhd_units, list)
   13.82 -#define for_each_rmrr_device(rmrr, pdev) \
   13.83 -    list_for_each_entry(rmrr, &acpi_rmrr_units, list) { \
   13.84 -        int _i; \
   13.85 -        for (_i = 0; _i < rmrr->devices_cnt; _i++) { \
   13.86 -            pdev = &(rmrr->devices[_i]);
   13.87 -#define end_for_each_rmrr_device(rmrr, pdev) \
   13.88 -        } \
   13.89 -    }
   13.90 -
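The for_each_rmrr_device / end_for_each_rmrr_device pair expands to nested loops over every RMRR unit and each device it targets, so a use must always be closed by the matching end macro. A minimal usage sketch (walk_rmrr_devices and process_rmrr_device are hypothetical, not part of this header):

    void walk_rmrr_devices(void)
    {
        struct acpi_rmrr_unit *rmrr;
        struct pci_dev *pdev;

        for_each_rmrr_device ( rmrr, pdev )
            /* body runs once per (rmrr, device) pair */
            process_rmrr_device(rmrr, pdev);
        end_for_each_rmrr_device ( rmrr, pdev )
    }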
   13.91 -struct acpi_drhd_unit * acpi_find_matched_drhd_unit(struct pci_dev *dev);
   13.92 -struct acpi_rmrr_unit * acpi_find_matched_rmrr_unit(struct pci_dev *dev);
   13.93 -
   13.94 -#define DMAR_TYPE 1
   13.95 -#define RMRR_TYPE 2
   13.96 -#define ATSR_TYPE 3
   13.97 -
    13.98 -#define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1 minute */
   13.99 -#define time_after(a,b)         \
  13.100 -        (typecheck(unsigned long, a) && \
  13.101 -         typecheck(unsigned long, b) && \
  13.102 -         ((long)(b) - (long)(a) < 0))
  13.103 -
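time_after(a, b) compares jiffies timestamps via signed subtraction rather than a direct a > b, so it stays correct across counter wraparound (for spans under half the counter range). A worked sketch, assuming a 32-bit unsigned long for the numbers:

    unsigned long b = 0xFFFFFFF0UL; /* timestamp taken just before the wrap */
    unsigned long a = 0x00000010UL; /* timestamp taken just after the wrap  */
    /* (long)b - (long)a == -0x20 < 0, so time_after(a, b) is true,
     * even though a < b as plain unsigned values. */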
  13.104 -int vtd_hw_check(void);
  13.105 -void disable_pmr(struct iommu *iommu);
  13.106 -
   13.107 -#endif /* _DMAR_H_ */
    14.1 --- a/xen/arch/x86/hvm/vmx/vtd/extern.h	Thu Feb 21 14:50:27 2008 +0000
    14.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.3 @@ -1,51 +0,0 @@
    14.4 -/*
    14.5 - * Copyright (c) 2006, Intel Corporation.
    14.6 - *
    14.7 - * This program is free software; you can redistribute it and/or modify it
    14.8 - * under the terms and conditions of the GNU General Public License,
    14.9 - * version 2, as published by the Free Software Foundation.
   14.10 - *
   14.11 - * This program is distributed in the hope it will be useful, but WITHOUT
   14.12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   14.13 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   14.14 - * more details.
   14.15 - *
   14.16 - * You should have received a copy of the GNU General Public License along with
   14.17 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   14.18 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   14.19 - *
   14.20 - * Copyright (C) Allen Kay <allen.m.kay@intel.com>
   14.21 - * Copyright (C) Weidong Han <weidong.han@intel.com>
   14.22 - */
   14.23 -
   14.24 -#ifndef _VTD_EXTERN_H_
   14.25 -#define _VTD_EXTERN_H_
   14.26 -
   14.27 -#include "dmar.h"
   14.28 -
   14.29 -extern struct qi_ctrl *qi_ctrl;
   14.30 -extern struct ir_ctrl *ir_ctrl;
   14.31 -
   14.32 -void print_iommu_regs(struct acpi_drhd_unit *drhd);
   14.33 -void print_vtd_entries(struct domain *d, struct iommu *iommu,
   14.34 -                       int bus, int devfn, unsigned long gmfn);
   14.35 -void pdev_flr(u8 bus, u8 devfn);
   14.36 -
   14.37 -int qinval_setup(struct iommu *iommu);
   14.38 -int intremap_setup(struct iommu *iommu);
   14.39 -int queue_invalidate_context(struct iommu *iommu,
   14.40 -    u16 did, u16 source_id, u8 function_mask, u8 granu);
   14.41 -int queue_invalidate_iotlb(struct iommu *iommu,
   14.42 -    u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr);
   14.43 -int queue_invalidate_iec(struct iommu *iommu,
   14.44 -    u8 granu, u8 im, u16 iidx);
   14.45 -int invalidate_sync(struct iommu *iommu);
   14.46 -int iommu_flush_iec_global(struct iommu *iommu);
   14.47 -int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx);
   14.49 -int vtd_hw_check(void);
   14.50 -struct iommu * ioapic_to_iommu(unsigned int apic_id);
   14.51 -struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id);
   14.52 -void clear_fault_bits(struct iommu *iommu);
   14.53 -
    14.54 -#endif /* _VTD_EXTERN_H_ */
    15.1 --- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c	Thu Feb 21 14:50:27 2008 +0000
    15.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.3 @@ -1,2178 +0,0 @@
    15.4 -/*
    15.5 - * Copyright (c) 2006, Intel Corporation.
    15.6 - *
    15.7 - * This program is free software; you can redistribute it and/or modify it
    15.8 - * under the terms and conditions of the GNU General Public License,
    15.9 - * version 2, as published by the Free Software Foundation.
   15.10 - *
   15.11 - * This program is distributed in the hope it will be useful, but WITHOUT
   15.12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   15.13 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   15.14 - * more details.
   15.15 - *
   15.16 - * You should have received a copy of the GNU General Public License along with
   15.17 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   15.18 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   15.19 - *
   15.20 - * Copyright (C) Ashok Raj <ashok.raj@intel.com>
   15.21 - * Copyright (C) Shaohua Li <shaohua.li@intel.com>
   15.22 - * Copyright (C) Allen Kay <allen.m.kay@intel.com> - adapted to xen
   15.23 - */
   15.24 -
   15.25 -#include <xen/init.h>
   15.26 -#include <xen/irq.h>
   15.27 -#include <xen/spinlock.h>
   15.28 -#include <xen/sched.h>
   15.29 -#include <xen/xmalloc.h>
   15.30 -#include <xen/domain_page.h>
   15.31 -#include <asm/delay.h>
   15.32 -#include <asm/string.h>
   15.33 -#include <asm/mm.h>
   15.34 -#include <asm/iommu.h>
   15.35 -#include <asm/hvm/vmx/intel-iommu.h>
   15.36 -#include "dmar.h"
   15.37 -#include "pci-direct.h"
   15.38 -#include "pci_regs.h"
   15.39 -#include "msi.h"
   15.40 -#include "extern.h"
   15.41 -
   15.42 -#define domain_iommu_domid(d) ((d)->arch.hvm_domain.hvm_iommu.iommu_domid)
   15.43 -
   15.44 -static spinlock_t domid_bitmap_lock;    /* protect domain id bitmap */
    15.45 -static int domid_bitmap_size;           /* domain id bitmap size in bits */
   15.46 -static void *domid_bitmap;              /* iommu domain id bitmap */
   15.47 -
   15.48 -#define DID_FIELD_WIDTH 16
   15.49 -#define DID_HIGH_OFFSET 8
   15.50 -static void context_set_domain_id(struct context_entry *context,
   15.51 -                                  struct domain *d)
   15.52 -{
   15.53 -    unsigned long flags;
   15.54 -    domid_t iommu_domid = domain_iommu_domid(d);
   15.55 -
   15.56 -    if ( iommu_domid == 0 )
   15.57 -    {
   15.58 -        spin_lock_irqsave(&domid_bitmap_lock, flags);
   15.59 -        iommu_domid = find_first_zero_bit(domid_bitmap, domid_bitmap_size);
   15.60 -        set_bit(iommu_domid, domid_bitmap);
   15.61 -        spin_unlock_irqrestore(&domid_bitmap_lock, flags);
   15.62 -        d->arch.hvm_domain.hvm_iommu.iommu_domid = iommu_domid;
   15.63 -    }
   15.64 -
   15.65 -    context->hi &= (1 << DID_HIGH_OFFSET) - 1;
   15.66 -    context->hi |= iommu_domid << DID_HIGH_OFFSET;
   15.67 -}
   15.68 -
   15.69 -static void iommu_domid_release(struct domain *d)
   15.70 -{
   15.71 -    domid_t iommu_domid = domain_iommu_domid(d);
   15.72 -
   15.73 -    if ( iommu_domid != 0 )
   15.74 -    {
   15.75 -        d->arch.hvm_domain.hvm_iommu.iommu_domid = 0;
   15.76 -        clear_bit(iommu_domid, domid_bitmap);
   15.77 -    }
   15.78 -}
   15.79 -
   15.80 -unsigned int x86_clflush_size;
   15.81 -void clflush_cache_range(void *adr, int size)
   15.82 -{
   15.83 -    int i;
   15.84 -    for ( i = 0; i < size; i += x86_clflush_size )
   15.85 -        clflush(adr + i);
   15.86 -}
   15.87 -
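clflush_cache_range above assumes adr is cache-line aligned; with an unaligned start, the bytes of the final partial line can escape the flush. A defensive sketch (not part of the original file) would round the start down to a line boundary first:

    void clflush_cache_range_safe(void *adr, int size)
    {
        unsigned long p   = (unsigned long)adr;
        unsigned long end = p + size;

        /* Round down so every line overlapping [adr, adr + size) is hit. */
        p &= ~((unsigned long)x86_clflush_size - 1);
        for ( ; p < end; p += x86_clflush_size )
            clflush((void *)p);
    }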
   15.88 -static void __iommu_flush_cache(struct iommu *iommu, void *addr, int size)
   15.89 -{
   15.90 -    if ( !ecap_coherent(iommu->ecap) )
   15.91 -        clflush_cache_range(addr, size);
   15.92 -}
   15.93 -
   15.94 -#define iommu_flush_cache_entry(iommu, addr) \
   15.95 -       __iommu_flush_cache(iommu, addr, 8)
   15.96 -#define iommu_flush_cache_page(iommu, addr) \
   15.97 -       __iommu_flush_cache(iommu, addr, PAGE_SIZE_4K)
   15.98 -
   15.99 -int nr_iommus;
  15.100 -/* context entry handling */
  15.101 -static struct context_entry * device_to_context_entry(struct iommu *iommu,
  15.102 -                                                      u8 bus, u8 devfn)
  15.103 -{
  15.104 -    struct root_entry *root;
  15.105 -    struct context_entry *context;
  15.106 -    unsigned long phy_addr;
  15.107 -    unsigned long flags;
  15.108 -
  15.109 -    spin_lock_irqsave(&iommu->lock, flags);
  15.110 -    root = &iommu->root_entry[bus];
  15.111 -    if ( !root_present(*root) )
  15.112 -    {
  15.113 -        phy_addr = (unsigned long) alloc_xenheap_page();
  15.114 -        if ( !phy_addr )
  15.115 -        {
  15.116 -            spin_unlock_irqrestore(&iommu->lock, flags);
  15.117 -            return NULL;
  15.118 -        }
  15.119 -        memset((void *) phy_addr, 0, PAGE_SIZE);
  15.120 -        iommu_flush_cache_page(iommu, (void *)phy_addr);
  15.121 -        phy_addr = virt_to_maddr((void *)phy_addr);
  15.122 -        set_root_value(*root, phy_addr);
  15.123 -        set_root_present(*root);
  15.124 -        iommu_flush_cache_entry(iommu, root);
  15.125 -    }
  15.126 -    phy_addr = (unsigned long) get_context_addr(*root);
  15.127 -    context = (struct context_entry *)maddr_to_virt(phy_addr);
  15.128 -    spin_unlock_irqrestore(&iommu->lock, flags);
  15.129 -    return &context[devfn];
  15.130 -}
  15.131 -
  15.132 -static int device_context_mapped(struct iommu *iommu, u8 bus, u8 devfn)
  15.133 -{
  15.134 -    struct root_entry *root;
  15.135 -    struct context_entry *context;
  15.136 -    unsigned long phy_addr;
  15.137 -    int ret;
  15.138 -    unsigned long flags;
  15.139 -
  15.140 -    spin_lock_irqsave(&iommu->lock, flags);
  15.141 -    root = &iommu->root_entry[bus];
  15.142 -    if ( !root_present(*root) )
  15.143 -    {
  15.144 -        ret = 0;
  15.145 -        goto out;
  15.146 -    }
  15.147 -    phy_addr = get_context_addr(*root);
  15.148 -    context = (struct context_entry *)maddr_to_virt(phy_addr);
  15.149 -    ret = context_present(context[devfn]);
  15.150 - out:
  15.151 -    spin_unlock_irqrestore(&iommu->lock, flags);
  15.152 -    return ret;
  15.153 -}
  15.154 -
  15.155 -static struct page_info *addr_to_dma_page(struct domain *domain, u64 addr)
  15.156 -{
  15.157 -    struct hvm_iommu *hd = domain_hvm_iommu(domain);
  15.158 -    struct acpi_drhd_unit *drhd;
  15.159 -    struct iommu *iommu;
  15.160 -    int addr_width = agaw_to_width(hd->agaw);
  15.161 -    struct dma_pte *parent, *pte = NULL, *pgd;
  15.162 -    int level = agaw_to_level(hd->agaw);
  15.163 -    int offset;
  15.164 -    unsigned long flags;
  15.165 -    struct page_info *pg = NULL;
  15.166 -    u64 *vaddr = NULL;
  15.167 -
  15.168 -    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
  15.169 -    iommu = drhd->iommu;
  15.170 -
  15.171 -    addr &= (((u64)1) << addr_width) - 1;
  15.172 -    spin_lock_irqsave(&hd->mapping_lock, flags);
  15.173 -    if ( !hd->pgd )
  15.174 -    {
  15.175 -        pgd = (struct dma_pte *)alloc_xenheap_page();
  15.176 -        if ( !pgd )
  15.177 -        {
  15.178 -            spin_unlock_irqrestore(&hd->mapping_lock, flags);
  15.179 -            return NULL;
  15.180 -        }
  15.181 -        memset(pgd, 0, PAGE_SIZE);
  15.182 -        hd->pgd = pgd;
  15.183 -    }
  15.184 -
  15.185 -    parent = hd->pgd;
  15.186 -    while ( level > 1 )
  15.187 -    {
  15.188 -        offset = address_level_offset(addr, level);
  15.189 -        pte = &parent[offset];
  15.190 -
  15.191 -        if ( dma_pte_addr(*pte) == 0 )
  15.192 -        {
   15.193 -            pg = alloc_domheap_page(NULL);
   15.194 -            if ( !pg )
   15.195 -            {
   15.196 -                spin_unlock_irqrestore(&hd->mapping_lock, flags);
   15.197 -                return NULL;
   15.198 -            }
   15.199 -            vaddr = map_domain_page(page_to_mfn(pg));
  15.200 -            memset(vaddr, 0, PAGE_SIZE);
  15.201 -            iommu_flush_cache_page(iommu, vaddr);
  15.202 -
  15.203 -            dma_set_pte_addr(*pte, page_to_maddr(pg));
  15.204 -
  15.205 -            /*
  15.206 -             * high level table always sets r/w, last level
  15.207 -             * page table control read/write
  15.208 -             */
  15.209 -            dma_set_pte_readable(*pte);
  15.210 -            dma_set_pte_writable(*pte);
  15.211 -            iommu_flush_cache_entry(iommu, pte);
  15.212 -        }
  15.213 -        else
  15.214 -        {
  15.215 -            pg = maddr_to_page(pte->val);
  15.216 -            vaddr = map_domain_page(page_to_mfn(pg));
  15.217 -            if ( !vaddr )
  15.218 -            {
  15.219 -                spin_unlock_irqrestore(&hd->mapping_lock, flags);
  15.220 -                return NULL;
  15.221 -            }
  15.222 -        }
  15.223 -
  15.224 -        if ( parent != hd->pgd )
  15.225 -            unmap_domain_page(parent);
  15.226 -
  15.227 -        if ( level == 2 && vaddr )
  15.228 -        {
  15.229 -            unmap_domain_page(vaddr);
  15.230 -            break;
  15.231 -        }
  15.232 -
  15.233 -        parent = (struct dma_pte *)vaddr;
  15.234 -        vaddr = NULL;
  15.235 -        level--;
  15.236 -    }
  15.237 -
  15.238 -    spin_unlock_irqrestore(&hd->mapping_lock, flags);
  15.239 -    return pg;
  15.240 -}
  15.241 -
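addr_to_dma_page walks (and allocates) the multi-level VT-d page table top-down, consuming 9 address bits per level. A worked example of the indexing it relies on, assuming the usual 4KB pages and 512-entry tables, i.e. address_level_offset(addr, l) == (addr >> (12 + 9 * (l - 1))) & 0x1ff:

    u64 addr = 0x40403000ULL; /* == (1 << 30) | (2 << 21) | (3 << 12) */
    /* level 3 index: (addr >> 30) & 0x1ff == 1   (bits 38:30) */
    /* level 2 index: (addr >> 21) & 0x1ff == 2   (bits 29:21) */
    /* level 1 index: (addr >> 12) & 0x1ff == 3   (bits 20:12) */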
  15.242 -/* return address's page at specific level */
  15.243 -static struct page_info *dma_addr_level_page(struct domain *domain,
  15.244 -                                             u64 addr, int level)
  15.245 -{
  15.246 -    struct hvm_iommu *hd = domain_hvm_iommu(domain);
  15.247 -    struct dma_pte *parent, *pte = NULL;
  15.248 -    int total = agaw_to_level(hd->agaw);
  15.249 -    int offset;
  15.250 -    struct page_info *pg = NULL;
  15.251 -
  15.252 -    parent = hd->pgd;
  15.253 -    while ( level <= total )
  15.254 -    {
  15.255 -        offset = address_level_offset(addr, total);
  15.256 -        pte = &parent[offset];
  15.257 -        if ( dma_pte_addr(*pte) == 0 )
  15.258 -        {
  15.259 -            if ( parent != hd->pgd )
  15.260 -                unmap_domain_page(parent);
  15.261 -            break;
  15.262 -        }
  15.263 -
  15.264 -        pg = maddr_to_page(pte->val);
  15.265 -        if ( parent != hd->pgd )
  15.266 -            unmap_domain_page(parent);
  15.267 -
  15.268 -        if ( level == total )
  15.269 -            return pg;
  15.270 -
  15.271 -        parent = map_domain_page(page_to_mfn(pg));
  15.272 -        total--;
  15.273 -    }
  15.274 -
  15.275 -    return NULL;
  15.276 -}
  15.277 -
  15.278 -static void iommu_flush_write_buffer(struct iommu *iommu)
  15.279 -{
  15.280 -    u32 val;
  15.281 -    unsigned long flag;
  15.282 -    unsigned long start_time;
  15.283 -
  15.284 -    if ( !cap_rwbf(iommu->cap) )
  15.285 -        return;
  15.286 -    val = iommu->gcmd | DMA_GCMD_WBF;
  15.287 -
  15.288 -    spin_lock_irqsave(&iommu->register_lock, flag);
  15.289 -    dmar_writel(iommu->reg, DMAR_GCMD_REG, val);
  15.290 -
   15.291 -    /* Make sure the hardware completes it */
  15.292 -    start_time = jiffies;
  15.293 -    for ( ; ; )
  15.294 -    {
  15.295 -        val = dmar_readl(iommu->reg, DMAR_GSTS_REG);
  15.296 -        if ( !(val & DMA_GSTS_WBFS) )
  15.297 -            break;
  15.298 -        if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
   15.299 -            panic("DMAR hardware is malfunctioning,"
   15.300 -                  " please disable the IOMMU\n");
  15.301 -        cpu_relax();
  15.302 -    }
  15.303 -    spin_unlock_irqrestore(&iommu->register_lock, flag);
  15.304 -}
  15.305 -
   15.306 -/* return value determines whether a write buffer flush is needed */
  15.307 -static int flush_context_reg(
  15.308 -    void *_iommu,
  15.309 -    u16 did, u16 source_id, u8 function_mask, u64 type,
  15.310 -    int non_present_entry_flush)
  15.311 -{
  15.312 -    struct iommu *iommu = (struct iommu *) _iommu;
  15.313 -    u64 val = 0;
  15.314 -    unsigned long flag;
  15.315 -    unsigned long start_time;
  15.316 -
   15.317 -    /*
   15.318 -     * In the non-present entry flush case: if the hardware doesn't cache
   15.319 -     * non-present entries, there is nothing to do; if it does, flush the
   15.320 -     * entries of domain 0 (the domain id used to cache any non-present
   15.321 -     * entries).
   15.322 -     */
  15.323 -    if ( non_present_entry_flush )
  15.324 -    {
  15.325 -        if ( !cap_caching_mode(iommu->cap) )
  15.326 -            return 1;
  15.327 -        else
  15.328 -            did = 0;
  15.329 -    }
  15.330 -
  15.331 -    /* use register invalidation */
  15.332 -    switch ( type )
  15.333 -    {
  15.334 -    case DMA_CCMD_GLOBAL_INVL:
  15.335 -        val = DMA_CCMD_GLOBAL_INVL;
  15.336 -        break;
  15.337 -    case DMA_CCMD_DOMAIN_INVL:
  15.338 -        val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
  15.339 -        break;
  15.340 -    case DMA_CCMD_DEVICE_INVL:
  15.341 -        val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
  15.342 -            |DMA_CCMD_SID(source_id)|DMA_CCMD_FM(function_mask);
  15.343 -        break;
  15.344 -    default:
  15.345 -        BUG();
  15.346 -    }
  15.347 -    val |= DMA_CCMD_ICC;
  15.348 -
  15.349 -    spin_lock_irqsave(&iommu->register_lock, flag);
  15.350 -    dmar_writeq(iommu->reg, DMAR_CCMD_REG, val);
  15.351 -
   15.352 -    /* Make sure the hardware completes it */
  15.353 -    start_time = jiffies;
  15.354 -    for ( ; ; )
  15.355 -    {
  15.356 -        val = dmar_readq(iommu->reg, DMAR_CCMD_REG);
  15.357 -        if ( !(val & DMA_CCMD_ICC) )
  15.358 -            break;
  15.359 -        if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
   15.360 -            panic("DMAR hardware is malfunctioning, please disable the IOMMU\n");
  15.361 -        cpu_relax();
  15.362 -    }
  15.363 -    spin_unlock_irqrestore(&iommu->register_lock, flag);
   15.364 -    /* flushing context entries implicitly flushes the write buffer */
  15.365 -    return 0;
  15.366 -}
  15.367 -
   15.368 -static inline int iommu_flush_context_global(
  15.369 -    struct iommu *iommu, int non_present_entry_flush)
  15.370 -{
  15.371 -    struct iommu_flush *flush = iommu_get_flush(iommu);
  15.372 -    return flush->context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
  15.373 -                                 non_present_entry_flush);
  15.374 -}
  15.375 -
   15.376 -static inline int iommu_flush_context_domain(
  15.377 -    struct iommu *iommu, u16 did, int non_present_entry_flush)
  15.378 -{
  15.379 -    struct iommu_flush *flush = iommu_get_flush(iommu);
  15.380 -    return flush->context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
  15.381 -                                 non_present_entry_flush);
  15.382 -}
  15.383 -
   15.384 -static inline int iommu_flush_context_device(
  15.385 -    struct iommu *iommu, u16 did, u16 source_id,
  15.386 -    u8 function_mask, int non_present_entry_flush)
  15.387 -{
  15.388 -    struct iommu_flush *flush = iommu_get_flush(iommu);
  15.389 -    return flush->context(iommu, did, source_id, function_mask,
  15.390 -                                 DMA_CCMD_DEVICE_INVL,
  15.391 -                                 non_present_entry_flush);
  15.392 -}
  15.393 -
   15.394 -/* return value determines whether a write buffer flush is needed */
  15.395 -static int flush_iotlb_reg(void *_iommu, u16 did,
  15.396 -                               u64 addr, unsigned int size_order, u64 type,
  15.397 -                               int non_present_entry_flush)
  15.398 -{
  15.399 -    struct iommu *iommu = (struct iommu *) _iommu;
  15.400 -    int tlb_offset = ecap_iotlb_offset(iommu->ecap);
  15.401 -    u64 val = 0, val_iva = 0;
  15.402 -    unsigned long flag;
  15.403 -    unsigned long start_time;
  15.404 -
   15.405 -    /*
   15.406 -     * In the non-present entry flush case: if the hardware doesn't cache
   15.407 -     * non-present entries, there is nothing to do; if it does, flush the
   15.408 -     * entries of domain 0 (the domain id used to cache any non-present
   15.409 -     * entries).
   15.410 -     */
  15.411 -    if ( non_present_entry_flush )
  15.412 -    {
  15.413 -        if ( !cap_caching_mode(iommu->cap) )
  15.414 -            return 1;
  15.415 -        else
  15.416 -            did = 0;
  15.417 -    }
  15.418 -
  15.419 -    /* use register invalidation */
  15.420 -    switch ( type )
  15.421 -    {
  15.422 -    case DMA_TLB_GLOBAL_FLUSH:
   15.423 -        /* a global flush doesn't need IVA_REG set */
  15.424 -        val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
  15.425 -        break;
  15.426 -    case DMA_TLB_DSI_FLUSH:
  15.427 -        val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
  15.428 -        break;
  15.429 -    case DMA_TLB_PSI_FLUSH:
  15.430 -        val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
  15.431 -        /* Note: always flush non-leaf currently */
  15.432 -        val_iva = size_order | addr;
  15.433 -        break;
  15.434 -    default:
  15.435 -        BUG();
  15.436 -    }
  15.437 -    /* Note: set drain read/write */
  15.438 -    if ( cap_read_drain(iommu->cap) )
  15.439 -        val |= DMA_TLB_READ_DRAIN;
  15.440 -    if ( cap_write_drain(iommu->cap) )
  15.441 -        val |= DMA_TLB_WRITE_DRAIN;
  15.442 -
  15.443 -    spin_lock_irqsave(&iommu->register_lock, flag);
  15.444 -    /* Note: Only uses first TLB reg currently */
  15.445 -    if ( val_iva )
  15.446 -        dmar_writeq(iommu->reg, tlb_offset, val_iva);
  15.447 -    dmar_writeq(iommu->reg, tlb_offset + 8, val);
  15.448 -
   15.449 -    /* Make sure the hardware completes it */
  15.450 -    start_time = jiffies;
  15.451 -    for ( ; ; )
  15.452 -    {
  15.453 -        val = dmar_readq(iommu->reg, tlb_offset + 8);
  15.454 -        if ( !(val & DMA_TLB_IVT) )
  15.455 -            break;
  15.456 -        if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
   15.457 -            panic("DMAR hardware is malfunctioning, please disable the IOMMU\n");
  15.458 -        cpu_relax();
  15.459 -    }
  15.460 -    spin_unlock_irqrestore(&iommu->register_lock, flag);
  15.461 -
  15.462 -    /* check IOTLB invalidation granularity */
  15.463 -    if ( DMA_TLB_IAIG(val) == 0 )
  15.464 -        printk(KERN_ERR VTDPREFIX "IOMMU: flush IOTLB failed\n");
  15.465 -    if ( DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type) )
  15.466 -        printk(KERN_ERR VTDPREFIX "IOMMU: tlb flush request %x, actual %x\n",
  15.467 -               (u32)DMA_TLB_IIRG(type), (u32)DMA_TLB_IAIG(val));
   15.468 -    /* flushing the IOTLB implicitly flushes the write buffer */
  15.469 -    return 0;
  15.470 -}
  15.471 -
   15.472 -static inline int iommu_flush_iotlb_global(struct iommu *iommu,
  15.473 -                                           int non_present_entry_flush)
  15.474 -{
  15.475 -    struct iommu_flush *flush = iommu_get_flush(iommu);
  15.476 -    return flush->iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
  15.477 -                               non_present_entry_flush);
  15.478 -}
  15.479 -
   15.480 -static inline int iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
  15.481 -                                        int non_present_entry_flush)
  15.482 -{
  15.483 -    struct iommu_flush *flush = iommu_get_flush(iommu);
  15.484 -    return flush->iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
  15.485 -                               non_present_entry_flush);
  15.486 -}
  15.487 -
   15.488 -static inline int get_alignment(u64 base, unsigned int size)
  15.489 -{
  15.490 -    int t = 0;
  15.491 -    u64 end;
  15.492 -
  15.493 -    end = base + size - 1;
  15.494 -    while ( base != end )
  15.495 -    {
  15.496 -        t++;
  15.497 -        base >>= 1;
  15.498 -        end >>= 1;
  15.499 -    }
  15.500 -    return t;
  15.501 -}
  15.502 -
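get_alignment(base, size) returns the order of the smallest naturally aligned power-of-two block covering [base, base + size), by shifting base and end right until they coincide. Two worked cases on page-frame numbers, as iommu_flush_iotlb_psi below uses it:

    /* get_alignment(4, 2): end = 5; shift once -> 2 == 2, so t = 1:
     *   an aligned 2^1 = 2-page block covers pfns 4-5.
     * get_alignment(3, 2): end = 4; 3,4 -> 1,2 -> 0,1 -> 0,0, so t = 3:
     *   the range straddles an alignment boundary, so an aligned
     *   2^3 = 8-page block is needed to cover pfns 3-4. */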
   15.503 -static inline int iommu_flush_iotlb_psi(
  15.504 -    struct iommu *iommu, u16 did,
  15.505 -    u64 addr, unsigned int pages, int non_present_entry_flush)
  15.506 -{
  15.507 -    unsigned int align;
  15.508 -    struct iommu_flush *flush = iommu_get_flush(iommu);
  15.509 -
  15.510 -    BUG_ON(addr & (~PAGE_MASK_4K));
  15.511 -    BUG_ON(pages == 0);
  15.512 -
  15.513 -    /* Fallback to domain selective flush if no PSI support */
  15.514 -    if ( !cap_pgsel_inv(iommu->cap) )
  15.515 -        return iommu_flush_iotlb_dsi(iommu, did,
  15.516 -                                     non_present_entry_flush);
  15.517 -
   15.518 -    /*
   15.519 -     * PSI requires the region size to be a power of two (2^x pages) and
   15.520 -     * the base address to be naturally aligned to that size.
   15.521 -     */
  15.522 -    align = get_alignment(addr >> PAGE_SHIFT_4K, pages);
  15.523 -    /* Fallback to domain selective flush if size is too big */
  15.524 -    if ( align > cap_max_amask_val(iommu->cap) )
  15.525 -        return iommu_flush_iotlb_dsi(iommu, did,
  15.526 -                                     non_present_entry_flush);
  15.527 -
  15.528 -    addr >>= PAGE_SHIFT_4K + align;
  15.529 -    addr <<= PAGE_SHIFT_4K + align;
  15.530 -
  15.531 -    return flush->iotlb(iommu, did, addr, align,
  15.532 -                               DMA_TLB_PSI_FLUSH, non_present_entry_flush);
  15.533 -}
  15.534 -
  15.535 -void iommu_flush_all(void)
  15.536 -{
  15.537 -    struct acpi_drhd_unit *drhd;
  15.538 -    struct iommu *iommu;
  15.539 -
  15.540 -    wbinvd();
  15.541 -    for_each_drhd_unit ( drhd )
  15.542 -    {
  15.543 -        iommu = drhd->iommu;
  15.544 -        iommu_flush_context_global(iommu, 0);
  15.545 -        iommu_flush_iotlb_global(iommu, 0);
  15.546 -    }
  15.547 -}
  15.548 -
   15.549 -/* clear the last-level pte mapping one page */
  15.550 -static void dma_pte_clear_one(struct domain *domain, u64 addr)
  15.551 -{
  15.552 -    struct acpi_drhd_unit *drhd;
  15.553 -    struct iommu *iommu;
  15.554 -    struct dma_pte *pte = NULL;
  15.555 -    struct page_info *pg = NULL;
  15.556 -
  15.557 -    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
  15.558 -
  15.559 -    /* get last level pte */
  15.560 -    pg = dma_addr_level_page(domain, addr, 1);
  15.561 -    if ( !pg )
  15.562 -        return;
  15.563 -    pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
  15.564 -    pte += address_level_offset(addr, 1);
  15.565 -    if ( pte )
  15.566 -    {
  15.567 -        dma_clear_pte(*pte);
  15.568 -        iommu_flush_cache_entry(drhd->iommu, pte);
  15.569 -
  15.570 -        for_each_drhd_unit ( drhd )
  15.571 -        {
  15.572 -            iommu = drhd->iommu;
  15.573 -            if ( cap_caching_mode(iommu->cap) )
  15.574 -                iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
  15.575 -                                      addr, 1, 0);
  15.576 -            else if (cap_rwbf(iommu->cap))
  15.577 -                iommu_flush_write_buffer(iommu);
  15.578 -        }
  15.579 -    }
  15.580 -    unmap_domain_page(pte);
  15.581 -}
  15.582 -
   15.583 -/* clear last-level ptes; a tlb flush should follow */
  15.584 -static void dma_pte_clear_range(struct domain *domain, u64 start, u64 end)
  15.585 -{
  15.586 -    struct hvm_iommu *hd = domain_hvm_iommu(domain);
  15.587 -    int addr_width = agaw_to_width(hd->agaw);
  15.588 -
  15.589 -    start &= (((u64)1) << addr_width) - 1;
  15.590 -    end &= (((u64)1) << addr_width) - 1;
   15.591 -    /* round inward in case of partial pages */
  15.592 -    start = PAGE_ALIGN_4K(start);
  15.593 -    end &= PAGE_MASK_4K;
  15.594 -
   15.595 -    /* no lock needed; nobody else touches this iova range */
  15.596 -    while ( start < end )
  15.597 -    {
  15.598 -        dma_pte_clear_one(domain, start);
  15.599 -        start += PAGE_SIZE_4K;
  15.600 -    }
  15.601 -}
  15.602 -
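Note how the rounding above only clears pages wholly inside [start, end). For example, with start = 0x1800 and end = 0x4800:

    /* PAGE_ALIGN_4K(0x1800) == 0x2000  (round start up)
     * 0x4800 & PAGE_MASK_4K == 0x4000  (round end down)
     * => the loop clears exactly the fully covered pages at
     *    0x2000 and 0x3000; the partial pages at either end stay. */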
   15.603 -/* free page-table pages; last-level ptes should already be cleared */
  15.604 -void dma_pte_free_pagetable(struct domain *domain, u64 start, u64 end)
  15.605 -{
  15.606 -    struct acpi_drhd_unit *drhd;
  15.607 -    struct hvm_iommu *hd = domain_hvm_iommu(domain);
  15.608 -    struct iommu *iommu;
  15.609 -    int addr_width = agaw_to_width(hd->agaw);
  15.610 -    struct dma_pte *pte;
  15.611 -    int total = agaw_to_level(hd->agaw);
  15.612 -    int level;
   15.613 -    u64 tmp;
  15.614 -    struct page_info *pg = NULL;
  15.615 -
  15.616 -    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
  15.617 -    iommu = drhd->iommu;
  15.618 -
  15.619 -    start &= (((u64)1) << addr_width) - 1;
  15.620 -    end &= (((u64)1) << addr_width) - 1;
  15.621 -
   15.622 -    /* no lock needed; nobody else touches this iova range */
  15.623 -    level = 2;
  15.624 -    while ( level <= total )
  15.625 -    {
  15.626 -        tmp = align_to_level(start, level);
  15.627 -        if ( (tmp >= end) || ((tmp + level_size(level)) > end) )
  15.628 -            return;
  15.629 -
  15.630 -        while ( tmp < end )
  15.631 -        {
  15.632 -            pg = dma_addr_level_page(domain, tmp, level);
  15.633 -            if ( !pg )
  15.634 -                return;
  15.635 -            pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
  15.636 -            pte += address_level_offset(tmp, level);
  15.637 -            dma_clear_pte(*pte);
  15.638 -            iommu_flush_cache_entry(iommu, pte);
  15.639 -            unmap_domain_page(pte);
  15.640 -            free_domheap_page(pg);
  15.641 -
  15.642 -            tmp += level_size(level);
  15.643 -        }
  15.644 -        level++;
  15.645 -    }
  15.646 -
  15.647 -    /* free pgd */
  15.648 -    if ( start == 0 && end == ((((u64)1) << addr_width) - 1) )
  15.649 -    {
  15.650 -        free_xenheap_page((void *)hd->pgd);
  15.651 -        hd->pgd = NULL;
  15.652 -    }
  15.653 -}
  15.654 -
  15.655 -/* iommu handling */
  15.656 -static int iommu_set_root_entry(struct iommu *iommu)
  15.657 -{
  15.658 -    void *addr;
  15.659 -    u32 cmd, sts;
  15.660 -    struct root_entry *root;
  15.661 -    unsigned long flags;
  15.662 -
  15.663 -    if ( iommu == NULL )
  15.664 -    {
  15.665 -        gdprintk(XENLOG_ERR VTDPREFIX,
  15.666 -                 "iommu_set_root_entry: iommu == NULL\n");
  15.667 -        return -EINVAL;
  15.668 -    }
  15.669 -
  15.670 -    if ( unlikely(!iommu->root_entry) )
  15.671 -    {
  15.672 -        root = (struct root_entry *)alloc_xenheap_page();
  15.673 -        if ( root == NULL )
  15.674 -            return -ENOMEM;
  15.675 -
  15.676 -        memset((u8*)root, 0, PAGE_SIZE);
  15.677 -        iommu_flush_cache_page(iommu, root);
  15.678 -
  15.679 -        if ( cmpxchg((unsigned long *)&iommu->root_entry,
  15.680 -                     0, (unsigned long)root) != 0 )
  15.681 -            free_xenheap_page((void *)root);
  15.682 -    }
  15.683 -
  15.684 -    addr = iommu->root_entry;
  15.685 -
  15.686 -    spin_lock_irqsave(&iommu->register_lock, flags);
  15.687 -
  15.688 -    dmar_writeq(iommu->reg, DMAR_RTADDR_REG, virt_to_maddr(addr));
  15.689 -    cmd = iommu->gcmd | DMA_GCMD_SRTP;
  15.690 -    dmar_writel(iommu->reg, DMAR_GCMD_REG, cmd);
  15.691 -
   15.692 -    /* Make sure the hardware completes it */
  15.693 -    for ( ; ; )
  15.694 -    {
  15.695 -        sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
  15.696 -        if ( sts & DMA_GSTS_RTPS )
  15.697 -            break;
  15.698 -        cpu_relax();
  15.699 -    }
  15.700 -
  15.701 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
  15.702 -
  15.703 -    return 0;
  15.704 -}
  15.705 -
  15.706 -static int iommu_enable_translation(struct iommu *iommu)
  15.707 -{
  15.708 -    u32 sts;
  15.709 -    unsigned long flags;
  15.710 -
  15.711 -    dprintk(XENLOG_INFO VTDPREFIX,
  15.712 -            "iommu_enable_translation: iommu->reg = %p\n", iommu->reg);
  15.713 -    spin_lock_irqsave(&iommu->register_lock, flags);
  15.714 -    iommu->gcmd |= DMA_GCMD_TE;
  15.715 -    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
   15.716 -    /* Make sure the hardware completes it */
  15.717 -    for ( ; ; )
  15.718 -    {
  15.719 -        sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
  15.720 -        if ( sts & DMA_GSTS_TES )
  15.721 -            break;
  15.722 -        cpu_relax();
  15.723 -    }
  15.724 -
  15.725 -    /* Disable PMRs when VT-d engine takes effect per spec definition */
  15.726 -    disable_pmr(iommu);
  15.727 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
  15.728 -    return 0;
  15.729 -}
  15.730 -
  15.731 -int iommu_disable_translation(struct iommu *iommu)
  15.732 -{
  15.733 -    u32 sts;
  15.734 -    unsigned long flags;
  15.735 -
  15.736 -    spin_lock_irqsave(&iommu->register_lock, flags);
  15.737 -    iommu->gcmd &= ~ DMA_GCMD_TE;
  15.738 -    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
  15.739 -
   15.740 -    /* Make sure the hardware completes it */
  15.741 -    for ( ; ; )
  15.742 -    {
  15.743 -        sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
  15.744 -        if ( !(sts & DMA_GSTS_TES) )
  15.745 -            break;
  15.746 -        cpu_relax();
  15.747 -    }
  15.748 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
  15.749 -    return 0;
  15.750 -}
  15.751 -
  15.752 -static struct iommu *vector_to_iommu[NR_VECTORS];
  15.753 -static int iommu_page_fault_do_one(struct iommu *iommu, int type,
  15.754 -                                   u8 fault_reason, u16 source_id, u32 addr)
  15.755 -{
  15.756 -    dprintk(XENLOG_WARNING VTDPREFIX,
  15.757 -            "iommu_fault:%s: %x:%x.%x addr %x REASON %x iommu->reg = %p\n",
  15.758 -            (type ? "DMA Read" : "DMA Write"), (source_id >> 8),
  15.759 -            PCI_SLOT(source_id & 0xFF), PCI_FUNC(source_id & 0xFF), addr,
  15.760 -            fault_reason, iommu->reg);
  15.761 -
   15.762 -    if ( fault_reason < 0x20 )
   15.763 -        print_vtd_entries(current->domain, iommu, (source_id >> 8),
   15.764 -                          (source_id & 0xff), (addr >> PAGE_SHIFT));
  15.765 -
  15.766 -    return 0;
  15.767 -}
  15.768 -
  15.769 -static void iommu_fault_status(u32 fault_status)
  15.770 -{
   15.771 -    if ( fault_status & DMA_FSTS_PFO )
   15.772 -        dprintk(XENLOG_ERR VTDPREFIX,
   15.773 -            "iommu_fault_status: Fault Overflow\n");
   15.774 -    else
   15.775 -    if ( fault_status & DMA_FSTS_PPF )
   15.776 -        dprintk(XENLOG_ERR VTDPREFIX,
   15.777 -            "iommu_fault_status: Primary Pending Fault\n");
   15.778 -    else
   15.779 -    if ( fault_status & DMA_FSTS_AFO )
   15.780 -        dprintk(XENLOG_ERR VTDPREFIX,
   15.781 -            "iommu_fault_status: Advanced Fault Overflow\n");
   15.782 -    else
   15.783 -    if ( fault_status & DMA_FSTS_APF )
   15.784 -        dprintk(XENLOG_ERR VTDPREFIX,
   15.785 -            "iommu_fault_status: Advanced Pending Fault\n");
   15.786 -    else
   15.787 -    if ( fault_status & DMA_FSTS_IQE )
   15.788 -        dprintk(XENLOG_ERR VTDPREFIX,
   15.789 -            "iommu_fault_status: Invalidation Queue Error\n");
   15.790 -    else
   15.791 -    if ( fault_status & DMA_FSTS_ICE )
   15.792 -        dprintk(XENLOG_ERR VTDPREFIX,
   15.793 -            "iommu_fault_status: Invalidation Completion Error\n");
   15.794 -    else
   15.795 -    if ( fault_status & DMA_FSTS_ITE )
   15.796 -        dprintk(XENLOG_ERR VTDPREFIX,
   15.797 -            "iommu_fault_status: Invalidation Time-out Error\n");
  15.798 -}
  15.799 -
  15.800 -#define PRIMARY_FAULT_REG_LEN (16)
  15.801 -static void iommu_page_fault(int vector, void *dev_id,
  15.802 -                             struct cpu_user_regs *regs)
  15.803 -{
  15.804 -    struct iommu *iommu = dev_id;
  15.805 -    int reg, fault_index;
  15.806 -    u32 fault_status;
  15.807 -    unsigned long flags;
  15.808 -
  15.809 -    dprintk(XENLOG_WARNING VTDPREFIX,
  15.810 -            "iommu_page_fault: iommu->reg = %p\n", iommu->reg);
  15.811 -
  15.812 -    spin_lock_irqsave(&iommu->register_lock, flags);
  15.813 -    fault_status = dmar_readl(iommu->reg, DMAR_FSTS_REG);
  15.814 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
  15.815 -
  15.816 -    iommu_fault_status(fault_status);
  15.817 -
  15.818 -    /* FIXME: ignore advanced fault log */
  15.819 -    if ( !(fault_status & DMA_FSTS_PPF) )
  15.820 -        return;
  15.821 -    fault_index = dma_fsts_fault_record_index(fault_status);
  15.822 -    reg = cap_fault_reg_offset(iommu->cap);
  15.823 -    for ( ; ; )
  15.824 -    {
  15.825 -        u8 fault_reason;
  15.826 -        u16 source_id;
  15.827 -        u32 guest_addr, data;
  15.828 -        int type;
  15.829 -
  15.830 -        /* highest 32 bits */
  15.831 -        spin_lock_irqsave(&iommu->register_lock, flags);
  15.832 -        data = dmar_readl(iommu->reg, reg +
  15.833 -                          fault_index * PRIMARY_FAULT_REG_LEN + 12);
  15.834 -        if ( !(data & DMA_FRCD_F) )
  15.835 -        {
  15.836 -            spin_unlock_irqrestore(&iommu->register_lock, flags);
  15.837 -            break;
  15.838 -        }
  15.839 -
  15.840 -        fault_reason = dma_frcd_fault_reason(data);
  15.841 -        type = dma_frcd_type(data);
  15.842 -
  15.843 -        data = dmar_readl(iommu->reg, reg +
  15.844 -                          fault_index * PRIMARY_FAULT_REG_LEN + 8);
  15.845 -        source_id = dma_frcd_source_id(data);
  15.846 -
  15.847 -        guest_addr = dmar_readq(iommu->reg, reg +
  15.848 -                                fault_index * PRIMARY_FAULT_REG_LEN);
  15.849 -        guest_addr = dma_frcd_page_addr(guest_addr);
  15.850 -        /* clear the fault */
  15.851 -        dmar_writel(iommu->reg, reg +
  15.852 -                    fault_index * PRIMARY_FAULT_REG_LEN + 12, DMA_FRCD_F);
  15.853 -        spin_unlock_irqrestore(&iommu->register_lock, flags);
  15.854 -
  15.855 -        iommu_page_fault_do_one(iommu, type, fault_reason,
  15.856 -                                source_id, guest_addr);
  15.857 -
  15.858 -        fault_index++;
   15.859 -        if ( fault_index >= cap_num_fault_regs(iommu->cap) )
  15.860 -            fault_index = 0;
  15.861 -    }
  15.862 -
  15.863 -    /* clear primary fault overflow */
  15.864 -    if ( fault_status & DMA_FSTS_PFO )
  15.865 -    {
  15.866 -        spin_lock_irqsave(&iommu->register_lock, flags);
  15.867 -        dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_PFO);
  15.868 -        spin_unlock_irqrestore(&iommu->register_lock, flags);
  15.869 -    }
  15.870 -}
  15.871 -
  15.872 -static void dma_msi_unmask(unsigned int vector)
  15.873 -{
  15.874 -    struct iommu *iommu = vector_to_iommu[vector];
  15.875 -    unsigned long flags;
  15.876 -
  15.877 -    /* unmask it */
  15.878 -    spin_lock_irqsave(&iommu->register_lock, flags);
  15.879 -    dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
  15.880 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
  15.881 -}
  15.882 -
  15.883 -static void dma_msi_mask(unsigned int vector)
  15.884 -{
  15.885 -    unsigned long flags;
  15.886 -    struct iommu *iommu = vector_to_iommu[vector];
  15.887 -
  15.888 -    /* mask it */
  15.889 -    spin_lock_irqsave(&iommu->register_lock, flags);
  15.890 -    dmar_writel(iommu->reg, DMAR_FECTL_REG, DMA_FECTL_IM);
  15.891 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
  15.892 -}
  15.893 -
  15.894 -static unsigned int dma_msi_startup(unsigned int vector)
  15.895 -{
  15.896 -    dma_msi_unmask(vector);
  15.897 -    return 0;
  15.898 -}
  15.899 -
  15.900 -static void dma_msi_end(unsigned int vector)
  15.901 -{
  15.902 -    dma_msi_unmask(vector);
  15.903 -    ack_APIC_irq();
  15.904 -}
  15.905 -
  15.906 -static void dma_msi_data_init(struct iommu *iommu, int vector)
  15.907 -{
  15.908 -    u32 msi_data = 0;
  15.909 -    unsigned long flags;
  15.910 -
   15.911 -    /* Fixed delivery, edge trigger, assert level; follows the MSI format */
  15.912 -    msi_data |= vector & 0xff;
   15.913 -    msi_data |= 1 << 14; /* level = assert */
  15.914 -
  15.915 -    spin_lock_irqsave(&iommu->register_lock, flags);
  15.916 -    dmar_writel(iommu->reg, DMAR_FEDATA_REG, msi_data);
  15.917 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
  15.918 -}
  15.919 -
  15.920 -static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu)
  15.921 -{
  15.922 -    u64 msi_address;
  15.923 -    unsigned long flags;
  15.924 -
   15.925 -    /* Physical destination mode, dedicated cpu; follows the MSI format */
  15.926 -    msi_address = (MSI_ADDRESS_HEADER << (MSI_ADDRESS_HEADER_SHIFT + 8));
  15.927 -    msi_address |= MSI_PHYSICAL_MODE << 2;
  15.928 -    msi_address |= MSI_REDIRECTION_HINT_MODE << 3;
  15.929 -    msi_address |= phy_cpu << MSI_TARGET_CPU_SHIFT;
  15.930 -
  15.931 -    spin_lock_irqsave(&iommu->register_lock, flags);
  15.932 -    dmar_writel(iommu->reg, DMAR_FEADDR_REG, (u32)msi_address);
  15.933 -    dmar_writel(iommu->reg, DMAR_FEUADDR_REG, (u32)(msi_address >> 32));
  15.934 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
  15.935 -}
  15.936 -
  15.937 -static void dma_msi_set_affinity(unsigned int vector, cpumask_t dest)
  15.938 -{
  15.939 -    struct iommu *iommu = vector_to_iommu[vector];
  15.940 -    dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(dest)));
  15.941 -}
  15.942 -
  15.943 -static struct hw_interrupt_type dma_msi_type = {
  15.944 -    .typename = "DMA_MSI",
  15.945 -    .startup = dma_msi_startup,
  15.946 -    .shutdown = dma_msi_mask,
  15.947 -    .enable = dma_msi_unmask,
  15.948 -    .disable = dma_msi_mask,
  15.949 -    .ack = dma_msi_mask,
  15.950 -    .end = dma_msi_end,
  15.951 -    .set_affinity = dma_msi_set_affinity,
  15.952 -};
  15.953 -
  15.954 -int iommu_set_interrupt(struct iommu *iommu)
  15.955 -{
  15.956 -    int vector, ret;
  15.957 -
   15.958 -    vector = assign_irq_vector(AUTO_ASSIGN);
   15.959 -    if ( !vector )
   15.960 -    {
   15.961 -        gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: no vectors\n");
   15.962 -        return -EINVAL;
   15.963 -    }
   15.964 -
   15.965 -    vector_to_iommu[vector] = iommu;
   15.966 -
   15.967 -    /* VT-d fault is an MSI; make irq == vector */
   15.968 -    irq_vector[vector] = vector;
   15.969 -    vector_irq[vector] = vector;
  15.970 -
  15.971 -    irq_desc[vector].handler = &dma_msi_type;
  15.972 -    ret = request_irq(vector, iommu_page_fault, 0, "dmar", iommu);
  15.973 -    if ( ret )
  15.974 -        gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: can't request irq\n");
  15.975 -    return vector;
  15.976 -}
  15.977 -
  15.978 -struct iommu *iommu_alloc(void *hw_data)
  15.979 -{
  15.980 -    struct acpi_drhd_unit *drhd = (struct acpi_drhd_unit *) hw_data;
  15.981 -    struct iommu *iommu;
  15.982 -    struct qi_ctrl *qi_ctrl;
  15.983 -    struct ir_ctrl *ir_ctrl;
  15.984 -
   15.985 -    if ( nr_iommus >= MAX_IOMMUS )
  15.986 -    {
  15.987 -        gdprintk(XENLOG_ERR VTDPREFIX,
  15.988 -                 "IOMMU: nr_iommus %d > MAX_IOMMUS\n", nr_iommus);
  15.989 -        return NULL;
  15.990 -    }
  15.991 -
  15.992 -    iommu = xmalloc(struct iommu);
  15.993 -    if ( !iommu )
  15.994 -        return NULL;
  15.995 -    memset(iommu, 0, sizeof(struct iommu));
  15.996 -
  15.997 -    set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, drhd->address);
  15.998 -    iommu->reg = (void *) fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
  15.999 -
 15.1000 -    printk("iommu_alloc: iommu->reg = %p drhd->address = %lx\n",
 15.1001 -           iommu->reg, drhd->address);
 15.1002 -
 15.1003 -    nr_iommus++;
 15.1004 -
 15.1005 -    if ( !iommu->reg )
 15.1006 -    {
  15.1007 -        printk(KERN_ERR VTDPREFIX "IOMMU: unable to map the register region\n");
 15.1008 -        goto error;
 15.1009 -    }
 15.1010 -
 15.1011 -    iommu->cap = dmar_readq(iommu->reg, DMAR_CAP_REG);
 15.1012 -    iommu->ecap = dmar_readq(iommu->reg, DMAR_ECAP_REG);
 15.1013 -
  15.1014 -    printk("iommu_alloc: cap = %"PRIx64"\n", iommu->cap);
 15.1015 -    printk("iommu_alloc: ecap = %"PRIx64"\n", iommu->ecap);
 15.1016 -
 15.1017 -    spin_lock_init(&iommu->lock);
 15.1018 -    spin_lock_init(&iommu->register_lock);
 15.1019 -
 15.1020 -    qi_ctrl = iommu_qi_ctrl(iommu);
 15.1021 -    spin_lock_init(&qi_ctrl->qinval_lock);
 15.1022 -    spin_lock_init(&qi_ctrl->qinval_poll_lock);
 15.1023 -
 15.1024 -    ir_ctrl = iommu_ir_ctrl(iommu);
 15.1025 -    spin_lock_init(&ir_ctrl->iremap_lock);
 15.1026 -
 15.1027 -    drhd->iommu = iommu;
 15.1028 -    return iommu;
 15.1029 - error:
 15.1030 -    xfree(iommu);
 15.1031 -    return NULL;
 15.1032 -}
 15.1033 -
 15.1034 -static void free_iommu(struct iommu *iommu)
 15.1035 -{
 15.1036 -    if ( !iommu )
 15.1037 -        return;
 15.1038 -    if ( iommu->root_entry )
 15.1039 -        free_xenheap_page((void *)iommu->root_entry);
 15.1040 -    if ( iommu->reg )
 15.1041 -        iounmap(iommu->reg);
 15.1042 -    free_irq(iommu->vector);
 15.1043 -    xfree(iommu);
 15.1044 -}
 15.1045 -
 15.1046 -#define guestwidth_to_adjustwidth(gaw) ({       \
 15.1047 -    int agaw, r = (gaw - 12) % 9;               \
 15.1048 -    agaw = (r == 0) ? gaw : (gaw + 9 - r);      \
 15.1049 -    if ( agaw > 64 )                            \
 15.1050 -        agaw = 64;                              \
 15.1051 -    agaw; })
 15.1052 -
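guestwidth_to_adjustwidth rounds the guest address width up until (agaw - 12) is a multiple of 9, i.e. until it corresponds to a whole number of 9-bit page-table levels. Worked values:

    /* gaw = 48: (48 - 12) % 9 == 0 -> agaw = 48              (4 levels)
     * gaw = 39: (39 - 12) % 9 == 0 -> agaw = 39              (3 levels)
     * gaw = 32: (32 - 12) % 9 == 2 -> agaw = 32 + 9 - 2 = 39
     * gaw = 57: (57 - 12) % 9 == 0 -> agaw = 57              (5 levels) */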
 15.1053 -int intel_iommu_domain_init(struct domain *domain)
 15.1054 -{
 15.1055 -    struct hvm_iommu *hd = domain_hvm_iommu(domain);
 15.1056 -    struct iommu *iommu = NULL;
 15.1057 -    int guest_width = DEFAULT_DOMAIN_ADDRESS_WIDTH;
 15.1058 -    int adjust_width, agaw;
 15.1059 -    unsigned long sagaw;
 15.1060 -    struct acpi_drhd_unit *drhd;
 15.1061 -
 15.1062 -    if ( !vtd_enabled || list_empty(&acpi_drhd_units) )
 15.1063 -        return 0;
 15.1064 -
 15.1065 -    for_each_drhd_unit ( drhd )
 15.1066 -        iommu = drhd->iommu ? : iommu_alloc(drhd);
 15.1067 -
 15.1068 -    /* calculate AGAW */
  15.1069 -    if ( guest_width > cap_mgaw(iommu->cap) )
 15.1070 -        guest_width = cap_mgaw(iommu->cap);
 15.1071 -    adjust_width = guestwidth_to_adjustwidth(guest_width);
 15.1072 -    agaw = width_to_agaw(adjust_width);
 15.1073 -    /* FIXME: hardware doesn't support it, choose a bigger one? */
 15.1074 -    sagaw = cap_sagaw(iommu->cap);
 15.1075 -    if ( !test_bit(agaw, &sagaw) )
 15.1076 -    {
 15.1077 -        gdprintk(XENLOG_ERR VTDPREFIX,
 15.1078 -                 "IOMMU: hardware doesn't support the agaw\n");
 15.1079 -        agaw = find_next_bit(&sagaw, 5, agaw);
 15.1080 -        if ( agaw >= 5 )
 15.1081 -            return -ENODEV;
 15.1082 -    }
 15.1083 -    hd->agaw = agaw;
 15.1084 -    return 0;
 15.1085 -}
 15.1086 -
 15.1087 -static int domain_context_mapping_one(
 15.1088 -    struct domain *domain,
 15.1089 -    struct iommu *iommu,
 15.1090 -    u8 bus, u8 devfn)
 15.1091 -{
 15.1092 -    struct hvm_iommu *hd = domain_hvm_iommu(domain);
 15.1093 -    struct context_entry *context;
 15.1094 -    unsigned long flags;
 15.1095 -    int ret = 0;
 15.1096 -
 15.1097 -    context = device_to_context_entry(iommu, bus, devfn);
 15.1098 -    if ( !context )
 15.1099 -    {
 15.1100 -        gdprintk(XENLOG_ERR VTDPREFIX,
 15.1101 -                 "domain_context_mapping_one:context == NULL:"
 15.1102 -                 "bdf = %x:%x:%x\n",
 15.1103 -                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 15.1104 -        return -ENOMEM;
 15.1105 -    }
 15.1106 -
 15.1107 -    if ( context_present(*context) )
 15.1108 -    {
 15.1109 -        gdprintk(XENLOG_WARNING VTDPREFIX,
 15.1110 -                 "domain_context_mapping_one:context present:bdf=%x:%x:%x\n",
 15.1111 -                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 15.1112 -        return 0;
 15.1113 -    }
 15.1114 -
 15.1115 -    spin_lock_irqsave(&iommu->lock, flags);
  15.1116 -    /*
  15.1117 -     * Domain id 0 is not valid on Intel's IOMMU: force domain ids
  15.1118 -     * to be 1-based, as the hardware requires.
  15.1119 -     */
 15.1120 -    context_set_domain_id(context, domain);
 15.1121 -    context_set_address_width(*context, hd->agaw);
 15.1122 -
 15.1123 -    if ( ecap_pass_thru(iommu->ecap) )
 15.1124 -        context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
 15.1125 -#ifdef CONTEXT_PASSTHRU
 15.1126 -    else
 15.1127 -    {
 15.1128 -#endif
 15.1129 -        if ( !hd->pgd )
 15.1130 -        {
 15.1131 -            struct dma_pte *pgd = (struct dma_pte *)alloc_xenheap_page();
 15.1132 -            if ( !pgd )
 15.1133 -            {
 15.1134 -                spin_unlock_irqrestore(&iommu->lock, flags);
 15.1135 -                return -ENOMEM;
 15.1136 -            }
 15.1137 -            memset(pgd, 0, PAGE_SIZE);
 15.1138 -            hd->pgd = pgd;
 15.1139 -        }
 15.1140 - 
 15.1141 -        context_set_address_root(*context, virt_to_maddr(hd->pgd));
 15.1142 -        context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
 15.1143 -#ifdef CONTEXT_PASSTHRU
 15.1144 -    }
 15.1145 -#endif
 15.1146 -
 15.1147 -    context_set_fault_enable(*context);
 15.1148 -    context_set_present(*context);
 15.1149 -    iommu_flush_cache_entry(iommu, context);
 15.1150 -
 15.1151 -    gdprintk(XENLOG_INFO VTDPREFIX,
 15.1152 -             "domain_context_mapping_one-%x:%x:%x-*context=%"PRIx64":%"PRIx64
 15.1153 -             " hd->pgd=%p\n",
 15.1154 -             bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
 15.1155 -             context->hi, context->lo, hd->pgd);
 15.1156 -
 15.1157 -    if ( iommu_flush_context_device(iommu, domain_iommu_domid(domain),
 15.1158 -                                    (((u16)bus) << 8) | devfn,
 15.1159 -                                    DMA_CCMD_MASK_NOBIT, 1) )
 15.1160 -        iommu_flush_write_buffer(iommu);
 15.1161 -    else
 15.1162 -        iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
 15.1163 -    spin_unlock_irqrestore(&iommu->lock, flags);
 15.1164 -    return ret;
 15.1165 -}
 15.1166 -
 15.1167 -static int __pci_find_next_cap(u8 bus, unsigned int devfn, u8 pos, int cap)
 15.1168 -{
 15.1169 -    u8 id;
 15.1170 -    int ttl = 48;
 15.1171 -
 15.1172 -    while ( ttl-- )
 15.1173 -    {
 15.1174 -        pos = read_pci_config_byte(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos);
 15.1175 -        if ( pos < 0x40 )
 15.1176 -            break;
 15.1177 -
 15.1178 -        pos &= ~3;
 15.1179 -        id = read_pci_config_byte(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
 15.1180 -                                  pos + PCI_CAP_LIST_ID);
 15.1181 -
 15.1182 -        if ( id == 0xff )
 15.1183 -            break;
 15.1184 -        if ( id == cap )
 15.1185 -            return pos;
 15.1186 -
 15.1187 -        pos += PCI_CAP_LIST_NEXT;
 15.1188 -    }
 15.1189 -    return 0;
 15.1190 -}
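
__pci_find_next_cap() above walks the standard PCI capability list with three
termination guards: a ttl budget of 48 nodes against cyclic lists, a floor of
0x40 because a next pointer into the predefined header is invalid, and the
0xff check for an absent device. A self-contained sketch of the same walk
over a fabricated in-memory config space (array contents are invented for
illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_CAP_LIST_ID   0  /* offset of capability id in a node */
    #define PCI_CAP_LIST_NEXT 1  /* offset of next pointer in a node */

    static uint8_t cfg[256];     /* fake config space */

    static int find_cap(uint8_t pos, uint8_t cap)
    {
        int ttl = 48;                     /* bound the walk: list may cycle */

        while ( ttl-- )
        {
            pos = cfg[pos];               /* follow the next pointer */
            if ( pos < 0x40 )             /* pointers into the header: stop */
                break;
            pos &= ~3;                    /* nodes are dword aligned */
            if ( cfg[pos + PCI_CAP_LIST_ID] == 0xff )  /* no device */
                break;
            if ( cfg[pos + PCI_CAP_LIST_ID] == cap )
                return pos;
            pos += PCI_CAP_LIST_NEXT;
        }
        return 0;
    }

    int main(void)
    {
        cfg[0x34] = 0x40;      /* header's capability pointer */
        cfg[0x40] = 0x05;      /* MSI capability ... */
        cfg[0x41] = 0x50;      /* ... chained to 0x50 */
        cfg[0x50] = 0x10;      /* PCI Express capability, end of list */
        cfg[0x51] = 0x00;
        printf("PCIe cap at %#x\n", find_cap(0x34, 0x10)); /* prints 0x50 */
        return 0;
    }
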
 15.1191 -
 15.1192 -#define PCI_BASE_CLASS_BRIDGE    0x06
 15.1193 -#define PCI_CLASS_BRIDGE_PCI     0x0604
 15.1194 -
 15.1195 -#define DEV_TYPE_PCIe_ENDPOINT   1
 15.1196 -#define DEV_TYPE_PCI_BRIDGE      2
 15.1197 -#define DEV_TYPE_PCI             3
 15.1198 -
 15.1199 -int pdev_type(struct pci_dev *dev)
 15.1200 -{
 15.1201 -    u16 class_device;
 15.1202 -    u16 status;
 15.1203 -
 15.1204 -    class_device = read_pci_config_16(dev->bus, PCI_SLOT(dev->devfn),
 15.1205 -                                      PCI_FUNC(dev->devfn), PCI_CLASS_DEVICE);
 15.1206 -    if ( class_device == PCI_CLASS_BRIDGE_PCI )
 15.1207 -        return DEV_TYPE_PCI_BRIDGE;
 15.1208 -
 15.1209 -    status = read_pci_config_16(dev->bus, PCI_SLOT(dev->devfn),
 15.1210 -                                PCI_FUNC(dev->devfn), PCI_STATUS);
 15.1211 -
 15.1212 -    if ( !(status & PCI_STATUS_CAP_LIST) )
 15.1213 -        return DEV_TYPE_PCI;
 15.1214 -
 15.1215 -    if ( __pci_find_next_cap(dev->bus, dev->devfn,
 15.1216 -                            PCI_CAPABILITY_LIST, PCI_CAP_ID_EXP) )
 15.1217 -        return DEV_TYPE_PCIe_ENDPOINT;
 15.1218 -
 15.1219 -    return DEV_TYPE_PCI;
 15.1220 -}
 15.1221 -
 15.1222 -#define MAX_BUSES 256
 15.1223 -struct pci_dev bus2bridge[MAX_BUSES];
 15.1224 -
 15.1225 -static int domain_context_mapping(
 15.1226 -    struct domain *domain,
 15.1227 -    struct iommu *iommu,
 15.1228 -    struct pci_dev *pdev)
 15.1229 -{
 15.1230 -    int ret = 0;
 15.1231 -    int dev, func, sec_bus, sub_bus;
 15.1232 -    u32 type;
 15.1233 -
 15.1234 -    type = pdev_type(pdev);
 15.1235 -    switch ( type )
 15.1236 -    {
 15.1237 -    case DEV_TYPE_PCI_BRIDGE:
 15.1238 -        sec_bus = read_pci_config_byte(
 15.1239 -            pdev->bus, PCI_SLOT(pdev->devfn),
 15.1240 -            PCI_FUNC(pdev->devfn), PCI_SECONDARY_BUS);
 15.1241 -
 15.1242 -        if ( bus2bridge[sec_bus].bus == 0 )
 15.1243 -        {
 15.1244 -            bus2bridge[sec_bus].bus   =  pdev->bus;
 15.1245 -            bus2bridge[sec_bus].devfn =  pdev->devfn;
 15.1246 -        }
 15.1247 -
 15.1248 -        sub_bus = read_pci_config_byte(
 15.1249 -            pdev->bus, PCI_SLOT(pdev->devfn),
 15.1250 -            PCI_FUNC(pdev->devfn), PCI_SUBORDINATE_BUS);
 15.1251 -
 15.1252 -        if ( sec_bus != sub_bus )
 15.1253 -            gdprintk(XENLOG_WARNING VTDPREFIX,
 15.1254 -                     "domain_context_mapping: nested PCI bridge not "
 15.1255 -                     "supported: bdf = %x:%x:%x sec_bus = %x sub_bus = %x\n",
 15.1256 -                     pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
 15.1257 -                     sec_bus, sub_bus);
 15.1258 -        break;
 15.1259 -    case DEV_TYPE_PCIe_ENDPOINT:
 15.1260 -        gdprintk(XENLOG_INFO VTDPREFIX,
 15.1261 -                 "domain_context_mapping:PCIe : bdf = %x:%x:%x\n",
 15.1262 -                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 15.1263 -        ret = domain_context_mapping_one(domain, iommu,
 15.1264 -                                         (u8)(pdev->bus), (u8)(pdev->devfn));
 15.1265 -        break;
 15.1266 -    case DEV_TYPE_PCI:
 15.1267 -        gdprintk(XENLOG_INFO VTDPREFIX,
 15.1268 -                 "domain_context_mapping:PCI: bdf = %x:%x:%x\n",
 15.1269 -                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 15.1270 -
 15.1271 -        if ( pdev->bus == 0 )
 15.1272 -            ret = domain_context_mapping_one(
 15.1273 -                domain, iommu, (u8)(pdev->bus), (u8)(pdev->devfn));
 15.1274 -        else
 15.1275 -        {
 15.1276 -            if ( bus2bridge[pdev->bus].bus != 0 )
 15.1277 -                gdprintk(XENLOG_WARNING VTDPREFIX,
 15.1278 -                         "domain_context_mapping:bus2bridge"
 15.1279 -                         "[%d].bus != 0\n", pdev->bus);
 15.1280 -
 15.1281 -            ret = domain_context_mapping_one(
 15.1282 -                domain, iommu,
 15.1283 -                (u8)(bus2bridge[pdev->bus].bus),
 15.1284 -                (u8)(bus2bridge[pdev->bus].devfn));
 15.1285 -
 15.1286 -            /* now map everything behind the PCI bridge */
 15.1287 -            for ( dev = 0; dev < 32; dev++ )
 15.1288 -            {
 15.1289 -                for ( func = 0; func < 8; func++ )
 15.1290 -                {
 15.1291 -                    ret = domain_context_mapping_one(
 15.1292 -                        domain, iommu,
 15.1293 -                        pdev->bus, (u8)PCI_DEVFN(dev, func));
 15.1294 -                    if ( ret )
 15.1295 -                        return ret;
 15.1296 -                }
 15.1297 -            }
 15.1298 -        }
 15.1299 -        break;
 15.1300 -    default:
 15.1301 -        gdprintk(XENLOG_ERR VTDPREFIX,
 15.1302 -                 "domain_context_mapping:unknown type : bdf = %x:%x:%x\n",
 15.1303 -                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 15.1304 -        ret = -EINVAL;
 15.1305 -        break;
 15.1306 -    }
 15.1307 -
 15.1308 -    return ret;
 15.1309 -}
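
domain_context_mapping() treats legacy PCI specially because transactions
forwarded by a PCIe-to-PCI bridge carry the bridge's requester id, so the
context entry has to be installed for the bridge BDF recorded earlier in
bus2bridge[]. A toy sketch of that record-and-lookup step (the structure and
example values are invented for illustration):

    #include <stdio.h>

    struct bdf { unsigned char bus, devfn; };

    #define MAX_BUSES 256
    static struct bdf bus2bridge[MAX_BUSES]; /* secondary bus -> bridge */

    /* Record a bridge: devices on its secondary bus are reached via it. */
    static void note_bridge(unsigned char bridge_bus,
                            unsigned char bridge_devfn,
                            unsigned char sec_bus)
    {
        if ( bus2bridge[sec_bus].bus == 0 )  /* first bridge wins, as above */
        {
            bus2bridge[sec_bus].bus   = bridge_bus;
            bus2bridge[sec_bus].devfn = bridge_devfn;
        }
    }

    int main(void)
    {
        /* Pretend a bridge at 00:1e.0 exposes secondary bus 5. */
        note_bridge(0, (0x1e << 3) | 0, 5);

        /* A legacy device at 05:02.0 is mapped via the bridge's BDF. */
        struct bdf owner = bus2bridge[5];
        printf("map 05:02.0 through %02x:%02x.%x\n",
               owner.bus, owner.devfn >> 3, owner.devfn & 7);
        return 0;
    }
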
 15.1310 -
 15.1311 -static int domain_context_unmap_one(
 15.1312 -    struct domain *domain,
 15.1313 -    struct iommu *iommu,
 15.1314 -    u8 bus, u8 devfn)
 15.1315 -{
 15.1316 -    struct context_entry *context;
 15.1317 -    unsigned long flags;
 15.1318 -
 15.1319 -    context = device_to_context_entry(iommu, bus, devfn);
 15.1320 -    if ( !context )
 15.1321 -    {
 15.1322 -        gdprintk(XENLOG_ERR VTDPREFIX,
 15.1323 -                 "domain_context_unmap_one-%x:%x:%x- context == NULL:return\n",
 15.1324 -                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 15.1325 -        return -ENOMEM;
 15.1326 -    }
 15.1327 -
 15.1328 -    if ( !context_present(*context) )
 15.1329 -    {
 15.1330 -        gdprintk(XENLOG_WARNING VTDPREFIX,
 15.1331 -                 "domain_context_unmap_one-%x:%x:%x- "
 15.1332 -                 "context NOT present:return\n",
 15.1333 -                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 15.1334 -        return 0;
 15.1335 -    }
 15.1336 -
 15.1337 -    gdprintk(XENLOG_INFO VTDPREFIX,
 15.1338 -             "domain_context_unmap_one: bdf = %x:%x:%x\n",
 15.1339 -             bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 15.1340 -
 15.1341 -    spin_lock_irqsave(&iommu->lock, flags);
 15.1342 -    context_clear_present(*context);
 15.1343 -    context_clear_entry(*context);
 15.1344 -    iommu_flush_cache_entry(iommu, context);
 15.1345 -    iommu_flush_context_global(iommu, 0);
 15.1346 -    iommu_flush_iotlb_global(iommu, 0);
 15.1347 -    spin_unlock_irqrestore(&iommu->lock, flags);
 15.1348 -
 15.1349 -    return 0;
 15.1350 -}
 15.1351 -
 15.1352 -static int domain_context_unmap(
 15.1353 -    struct domain *domain,
 15.1354 -    struct iommu *iommu,
 15.1355 -    struct pci_dev *pdev)
 15.1356 -{
 15.1357 -    int ret = 0;
 15.1358 -    int dev, func, sec_bus, sub_bus;
 15.1359 -    u32 type;
 15.1360 -
 15.1361 -    type = pdev_type(pdev);
 15.1362 -    switch ( type )
 15.1363 -    {
 15.1364 -    case DEV_TYPE_PCI_BRIDGE:
 15.1365 -        sec_bus = read_pci_config_byte(
 15.1366 -            pdev->bus, PCI_SLOT(pdev->devfn),
 15.1367 -            PCI_FUNC(pdev->devfn), PCI_SECONDARY_BUS);
 15.1368 -        sub_bus = read_pci_config_byte(
 15.1369 -            pdev->bus, PCI_SLOT(pdev->devfn),
 15.1370 -            PCI_FUNC(pdev->devfn), PCI_SUBORDINATE_BUS);
 15.1371 -
 15.1372 -        gdprintk(XENLOG_INFO VTDPREFIX,
 15.1373 -                 "domain_context_unmap:BRIDGE:%x:%x:%x "
 15.1374 -                 "sec_bus=%x sub_bus=%x\n",
 15.1375 -                 pdev->bus, PCI_SLOT(pdev->devfn),
 15.1376 -                 PCI_FUNC(pdev->devfn), sec_bus, sub_bus);
 15.1377 -        break;
 15.1378 -    case DEV_TYPE_PCIe_ENDPOINT:
 15.1379 -        gdprintk(XENLOG_INFO VTDPREFIX,
 15.1380 -                 "domain_context_unmap:PCIe : bdf = %x:%x:%x\n",
 15.1381 -                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 15.1382 -        ret = domain_context_unmap_one(domain, iommu,
 15.1383 -                                       (u8)(pdev->bus), (u8)(pdev->devfn));
 15.1384 -        break;
 15.1385 -    case DEV_TYPE_PCI:
 15.1386 -        gdprintk(XENLOG_INFO VTDPREFIX,
 15.1387 -                 "domain_context_unmap:PCI: bdf = %x:%x:%x\n",
 15.1388 -                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 15.1389 -        if ( pdev->bus == 0 )
 15.1390 -            ret = domain_context_unmap_one(
 15.1391 -                domain, iommu,
 15.1392 -                (u8)(pdev->bus), (u8)(pdev->devfn));
 15.1393 -        else
 15.1394 -        {
 15.1395 -            if ( bus2bridge[pdev->bus].bus != 0 )
 15.1396 -                gdprintk(XENLOG_WARNING VTDPREFIX,
 15.1397 -                         "domain_context_unmap:"
 15.1398 -                         "bus2bridge[%d].bus != 0\n", pdev->bus);
 15.1399 -
 15.1400 -            ret = domain_context_unmap_one(domain, iommu,
 15.1401 -                                           (u8)(bus2bridge[pdev->bus].bus),
 15.1402 -                                           (u8)(bus2bridge[pdev->bus].devfn));
 15.1403 -
 15.1404 -            /* Unmap everything behind the PCI bridge */
 15.1405 -            for ( dev = 0; dev < 32; dev++ )
 15.1406 -            {
 15.1407 -                for ( func = 0; func < 8; func++ )
 15.1408 -                {
 15.1409 -                    ret = domain_context_unmap_one(
 15.1410 -                        domain, iommu,
 15.1411 -                        pdev->bus, (u8)PCI_DEVFN(dev, func));
 15.1412 -                    if ( ret )
 15.1413 -                        return ret;
 15.1414 -                }
 15.1415 -            }
 15.1416 -        }
 15.1417 -        break;
 15.1418 -    default:
 15.1419 -        gdprintk(XENLOG_ERR VTDPREFIX,
 15.1420 -                 "domain_context_unmap:unknown type: bdf = %x:%x:%x\n",
 15.1421 -                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 15.1422 -        ret = -EINVAL;
 15.1423 -        break;
 15.1424 -    }
 15.1425 -
 15.1426 -    return ret;
 15.1427 -}
 15.1428 -
 15.1429 -void reassign_device_ownership(
 15.1430 -    struct domain *source,
 15.1431 -    struct domain *target,
 15.1432 -    u8 bus, u8 devfn)
 15.1433 -{
 15.1434 -    struct hvm_iommu *source_hd = domain_hvm_iommu(source);
 15.1435 -    struct hvm_iommu *target_hd = domain_hvm_iommu(target);
 15.1436 -    struct pci_dev *pdev;
 15.1437 -    struct acpi_drhd_unit *drhd;
 15.1438 -    struct iommu *iommu;
 15.1439 -    int status;
 15.1440 -    unsigned long flags;
 15.1441 -
 15.1442 -    gdprintk(XENLOG_INFO VTDPREFIX,
 15.1443 -             "reassign_device-%x:%x:%x- source = %d target = %d\n",
 15.1444 -             bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
 15.1445 -             source->domain_id, target->domain_id);
 15.1446 -
 15.1447 -    pdev_flr(bus, devfn);
 15.1448 -
 15.1449 -    for_each_pdev( source, pdev )
 15.1450 -    {
 15.1451 -        if ( (pdev->bus != bus) || (pdev->devfn != devfn) )
 15.1452 -            continue;
 15.1453 -
 15.1454 -        drhd = acpi_find_matched_drhd_unit(pdev);
 15.1455 -        iommu = drhd->iommu;
 15.1456 -        domain_context_unmap(source, iommu, pdev);
 15.1457 -
 15.1458 -        /* Move pci device from the source domain to target domain. */
 15.1459 -        spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
 15.1460 -        spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
 15.1461 -        list_move(&pdev->list, &target_hd->pdev_list);
 15.1462 -        spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
 15.1463 -        spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
 15.1464 -
 15.1465 -        status = domain_context_mapping(target, iommu, pdev);
 15.1466 -        if ( status != 0 )
 15.1467 -            gdprintk(XENLOG_ERR VTDPREFIX, "domain_context_mapping failed\n");
 15.1468 -
 15.1469 -        break;
 15.1470 -    }
 15.1471 -}
 15.1472 -
 15.1473 -void return_devices_to_dom0(struct domain *d)
 15.1474 -{
 15.1475 -    struct hvm_iommu *hd  = domain_hvm_iommu(d);
 15.1476 -    struct pci_dev *pdev;
 15.1477 -
 15.1478 -    while ( !list_empty(&hd->pdev_list) )
 15.1479 -    {
 15.1480 -        pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
 15.1481 -        dprintk(XENLOG_INFO VTDPREFIX,
 15.1482 -                "return_devices_to_dom0: bdf = %x:%x:%x\n",
 15.1483 -                pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 15.1484 -        reassign_device_ownership(d, dom0, pdev->bus, pdev->devfn);
 15.1485 -    }
 15.1486 -
 15.1487 -#ifdef VTD_DEBUG
 15.1488 -    for_each_pdev ( dom0, pdev )
 15.1489 -        dprintk(XENLOG_INFO VTDPREFIX,
 15.1490 -                "return_devices_to_dom0:%x: bdf = %x:%x:%x\n",
 15.1491 -                dom0->domain_id, pdev->bus,
 15.1492 -                PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 15.1493 -#endif
 15.1494 -}
 15.1495 -
 15.1496 -void iommu_domain_teardown(struct domain *d)
 15.1497 -{
 15.1498 -    if ( list_empty(&acpi_drhd_units) )
 15.1499 -        return;
 15.1500 -
 15.1501 -    iommu_domid_release(d);
 15.1502 -
 15.1503 -#if CONFIG_PAGING_LEVELS == 3
 15.1504 -    {
 15.1505 -        struct hvm_iommu *hd  = domain_hvm_iommu(d);
 15.1506 -        int level = agaw_to_level(hd->agaw);
 15.1507 -        struct dma_pte *pgd = NULL;
 15.1508 -
 15.1509 -        switch ( level )
 15.1510 -        {
 15.1511 -        case VTD_PAGE_TABLE_LEVEL_3:
 15.1512 -            if ( hd->pgd )
 15.1513 -                free_xenheap_page((void *)hd->pgd);
 15.1514 -            break;
 15.1515 -        case VTD_PAGE_TABLE_LEVEL_4:
 15.1516 -            if ( hd->pgd )
 15.1517 -            {
 15.1518 -                pgd = hd->pgd;
 15.1519 -                if ( pgd[0].val != 0 )
 15.1520 -                    free_xenheap_page((void*)maddr_to_virt(
 15.1521 -                        dma_pte_addr(pgd[0])));
 15.1522 -                free_xenheap_page((void *)hd->pgd);
 15.1523 -            }
 15.1524 -            break;
 15.1525 -        default:
 15.1526 -            gdprintk(XENLOG_ERR VTDPREFIX,
 15.1527 -                     "Unsupported p2m table sharing level!\n");
 15.1528 -            break;
 15.1529 -        }
 15.1530 -    }
 15.1531 -#endif
 15.1532 -    return_devices_to_dom0(d);
 15.1533 -}
 15.1534 -
 15.1535 -static int domain_context_mapped(struct pci_dev *pdev)
 15.1536 -{
 15.1537 -    struct acpi_drhd_unit *drhd;
 15.1538 -    struct iommu *iommu;
 15.1539 -    int ret;
 15.1540 -
 15.1541 -    for_each_drhd_unit ( drhd )
 15.1542 -    {
 15.1543 -        iommu = drhd->iommu;
 15.1544 -        ret = device_context_mapped(iommu, pdev->bus, pdev->devfn);
 15.1545 -        if ( ret )
 15.1546 -            return ret;
 15.1547 -    }
 15.1548 -
 15.1549 -    return 0;
 15.1550 -}
 15.1551 -
 15.1552 -int intel_iommu_map_page(
 15.1553 -    struct domain *d, unsigned long gfn, unsigned long mfn)
 15.1554 -{
 15.1555 -    struct acpi_drhd_unit *drhd;
 15.1556 -    struct iommu *iommu;
 15.1557 -    struct dma_pte *pte = NULL;
 15.1558 -    struct page_info *pg = NULL;
 15.1559 -
 15.1560 -    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
 15.1561 -    iommu = drhd->iommu;
 15.1562 -
 15.1563 -#ifdef CONTEXT_PASSTHRU
 15.1564 -    /* do nothing for dom0 if the iommu supports pass-through */
 15.1565 -    if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
 15.1566 -        return 0;
 15.1567 -#endif
 15.1568 -
 15.1569 -    pg = addr_to_dma_page(d, (paddr_t)gfn << PAGE_SHIFT_4K);
 15.1570 -    if ( !pg )
 15.1571 -        return -ENOMEM;
 15.1572 -    pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
 15.1573 -    pte += gfn & LEVEL_MASK;
 15.1574 -    dma_set_pte_addr(*pte, (paddr_t)mfn << PAGE_SHIFT_4K);
 15.1575 -    dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
 15.1576 -    iommu_flush_cache_entry(iommu, pte);
 15.1577 -    unmap_domain_page(pte);
 15.1578 -
 15.1579 -    for_each_drhd_unit ( drhd )
 15.1580 -    {
 15.1581 -        iommu = drhd->iommu;
 15.1582 -        if ( cap_caching_mode(iommu->cap) )
 15.1583 -            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
 15.1584 -                                  (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
 15.1585 -        else if ( cap_rwbf(iommu->cap) )
 15.1586 -            iommu_flush_write_buffer(iommu);
 15.1587 -    }
 15.1588 -
 15.1589 -    return 0;
 15.1590 -}
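
intel_iommu_map_page() locates the leaf page-table page for the gfn, then
indexes into it with gfn & LEVEL_MASK. Assuming the usual VT-d layout of 512
eight-byte PTEs per 4K table page (so LEVEL_MASK = 511; these constants are
assumptions, not taken from the patch), the address arithmetic looks like:

    #include <stdio.h>

    #define PAGE_SHIFT_4K 12
    #define LEVEL_STRIDE  9                  /* 512 entries per table page */
    #define LEVEL_MASK    ((1UL << LEVEL_STRIDE) - 1)

    int main(void)
    {
        unsigned long gfn = 0x12345;

        /* DMA address the guest frame corresponds to. */
        unsigned long long iova = (unsigned long long)gfn << PAGE_SHIFT_4K;

        /* Slot within the 4K leaf table holding this gfn's PTE. */
        unsigned long slot = gfn & LEVEL_MASK;

        printf("gfn %#lx -> iova %#llx, leaf slot %lu (byte offset %#lx)\n",
               gfn, iova, slot, slot * 8);
        return 0;
    }
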
 15.1591 -
 15.1592 -int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
 15.1593 -{
 15.1594 -    struct acpi_drhd_unit *drhd;
 15.1595 -    struct iommu *iommu;
 15.1596 -
 15.1597 -    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
 15.1598 -    iommu = drhd->iommu;
 15.1599 -
 15.1600 -#ifdef CONTEXT_PASSTHRU
 15.1601 -    /* do nothing for dom0 if the iommu supports pass-through */
 15.1602 -    if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
 15.1603 -        return 0;
 15.1604 -#endif
 15.1605 -
 15.1606 -    dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
 15.1607 -
 15.1608 -    return 0;
 15.1609 -}
 15.1610 -
 15.1611 -int iommu_page_mapping(struct domain *domain, paddr_t iova,
 15.1612 -                       void *hpa, size_t size, int prot)
 15.1613 -{
 15.1614 -    struct acpi_drhd_unit *drhd;
 15.1615 -    struct iommu *iommu;
 15.1616 -    unsigned long start_pfn, end_pfn;
 15.1617 -    struct dma_pte *pte = NULL;
 15.1618 -    int index;
 15.1619 -    struct page_info *pg = NULL;
 15.1620 -
 15.1621 -    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
 15.1622 -    iommu = drhd->iommu;
 15.1623 -    if ( (prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0 )
 15.1624 -        return -EINVAL;
 15.1625 -    iova = (iova >> PAGE_SHIFT_4K) << PAGE_SHIFT_4K;
 15.1626 -    start_pfn = (unsigned long)(((unsigned long) hpa) >> PAGE_SHIFT_4K);
 15.1627 -    end_pfn = (unsigned long)
 15.1628 -        ((PAGE_ALIGN_4K(((unsigned long)hpa) + size)) >> PAGE_SHIFT_4K);
 15.1629 -    index = 0;
 15.1630 -    while ( start_pfn < end_pfn )
 15.1631 -    {
 15.1632 -        pg = addr_to_dma_page(domain, iova + PAGE_SIZE_4K * index);
 15.1633 -        if ( !pg )
 15.1634 -            return -ENOMEM;
 15.1635 -        pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
 15.1636 -        pte += start_pfn & LEVEL_MASK;
 15.1637 -        dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
 15.1638 -        dma_set_pte_prot(*pte, prot);
 15.1639 -        iommu_flush_cache_entry(iommu, pte);
 15.1640 -        unmap_domain_page(pte);
 15.1641 -        start_pfn++;
 15.1642 -        index++;
 15.1643 -    }
 15.1644 -
 15.1645 -    for_each_drhd_unit ( drhd )
 15.1646 -    {
 15.1647 -        iommu = drhd->iommu;
 15.1648 -        if ( cap_caching_mode(iommu->cap) )
 15.1649 -            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
 15.1650 -                                  iova, index, 0);
 15.1651 -        else if ( cap_rwbf(iommu->cap) )
 15.1652 -            iommu_flush_write_buffer(iommu);
 15.1653 -    }
 15.1654 -
 15.1655 -    return 0;
 15.1656 -}
 15.1657 -
 15.1658 -int iommu_page_unmapping(struct domain *domain, paddr_t addr, size_t size)
 15.1659 -{
 15.1660 -    dma_pte_clear_range(domain, addr, addr + size);
 15.1661 -
 15.1662 -    return 0;
 15.1663 -}
 15.1664 -
 15.1665 -void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry)
 15.1666 -{
 15.1667 -    struct acpi_drhd_unit *drhd;
 15.1668 -    struct iommu *iommu = NULL;
 15.1669 -    struct dma_pte *pte = (struct dma_pte *) p2m_entry;
 15.1670 -
 15.1671 -    for_each_drhd_unit ( drhd )
 15.1672 -    {
 15.1673 -        iommu = drhd->iommu;
 15.1674 -        if ( cap_caching_mode(iommu->cap) )
 15.1675 -            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
 15.1676 -                                  (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
 15.1677 -        else if ( cap_rwbf(iommu->cap) )
 15.1678 -            iommu_flush_write_buffer(iommu);
 15.1679 -    }
 15.1680 -
 15.1681 -    iommu_flush_cache_entry(iommu, pte);
 15.1682 -}
 15.1683 -
 15.1684 -static int iommu_prepare_rmrr_dev(
 15.1685 -    struct domain *d,
 15.1686 -    struct acpi_rmrr_unit *rmrr,
 15.1687 -    struct pci_dev *pdev)
 15.1688 -{
 15.1689 -    struct acpi_drhd_unit *drhd;
 15.1690 -    unsigned long size;
 15.1691 -    int ret;
 15.1692 -
 15.1693 -    /* page table init */
 15.1694 -    size = rmrr->end_address - rmrr->base_address + 1;
 15.1695 -    ret = iommu_page_mapping(d, rmrr->base_address,
 15.1696 -                             (void *)rmrr->base_address, size,
 15.1697 -                             DMA_PTE_READ|DMA_PTE_WRITE);
 15.1698 -    if ( ret )
 15.1699 -        return ret;
 15.1700 -
 15.1701 -    if ( domain_context_mapped(pdev) == 0 )
 15.1702 -    {
 15.1703 -        drhd = acpi_find_matched_drhd_unit(pdev);
 15.1704 -        ret = domain_context_mapping(d, drhd->iommu, pdev);
 15.1705 -        if ( !ret )
 15.1706 -            return 0;
 15.1707 -    }
 15.1708 -
 15.1709 -    return ret;
 15.1710 -}
 15.1711 -
 15.1712 -void __init setup_dom0_devices(void)
 15.1713 -{
 15.1714 -    struct hvm_iommu *hd  = domain_hvm_iommu(dom0);
 15.1715 -    struct acpi_drhd_unit *drhd;
 15.1716 -    struct pci_dev *pdev;
 15.1717 -    int bus, dev, func, ret;
 15.1718 -    u32 l;
 15.1719 -
 15.1720 -#ifdef DEBUG_VTD_CONTEXT_ENTRY
 15.1721 -    for ( bus = 0; bus < 256; bus++ )
 15.1722 -    {
 15.1723 -        for ( dev = 0; dev < 32; dev++ )
 15.1724 -        { 
 15.1725 -            for ( func = 0; func < 8; func++ )
 15.1726 -            {
 15.1727 -                struct context_entry *context;
 15.1728 -                struct pci_dev device;
 15.1729 -
 15.1730 -                device.bus = bus; 
 15.1731 -                device.devfn = PCI_DEVFN(dev, func); 
 15.1732 -                drhd = acpi_find_matched_drhd_unit(&device);
 15.1733 -                context = device_to_context_entry(drhd->iommu,
 15.1734 -                                                  bus, PCI_DEVFN(dev, func));
 15.1735 -                if ( (context->lo != 0) || (context->hi != 0) )
 15.1736 -                    dprintk(XENLOG_INFO VTDPREFIX,
 15.1737 -                            "setup_dom0_devices-%x:%x:%x- context not 0\n",
 15.1738 -                            bus, dev, func);
 15.1739 -            }
 15.1740 -        }    
 15.1741 -    }        
 15.1742 -#endif
 15.1743 -
 15.1744 -    for ( bus = 0; bus < 256; bus++ )
 15.1745 -    {
 15.1746 -        for ( dev = 0; dev < 32; dev++ )
 15.1747 -        {
 15.1748 -            for ( func = 0; func < 8; func++ )
 15.1749 -            {
 15.1750 -                l = read_pci_config(bus, dev, func, PCI_VENDOR_ID);
 15.1751 -                /* some broken boards return 0 or ~0 if a slot is empty: */
 15.1752 -                if ( (l == 0xffffffff) || (l == 0x00000000) ||
 15.1753 -                     (l == 0x0000ffff) || (l == 0xffff0000) )
 15.1754 -                    continue;
 15.1755 -                pdev = xmalloc(struct pci_dev);
 15.1756 -                pdev->bus = bus;
 15.1757 -                pdev->devfn = PCI_DEVFN(dev, func);
 15.1758 -                list_add_tail(&pdev->list, &hd->pdev_list);
 15.1759 -
 15.1760 -                drhd = acpi_find_matched_drhd_unit(pdev);
 15.1761 -                ret = domain_context_mapping(dom0, drhd->iommu, pdev);
 15.1762 -                if ( ret != 0 )
 15.1763 -                    gdprintk(XENLOG_ERR VTDPREFIX,
 15.1764 -                             "domain_context_mapping failed\n");
 15.1765 -            }
 15.1766 -        }
 15.1767 -    }
 15.1768 -
 15.1769 -    for_each_pdev ( dom0, pdev )
 15.1770 -        dprintk(XENLOG_INFO VTDPREFIX,
 15.1771 -                "setup_dom0_devices: bdf = %x:%x:%x\n",
 15.1772 -                pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 15.1773 -}
 15.1774 -
 15.1775 -void clear_fault_bits(struct iommu *iommu)
 15.1776 -{
 15.1777 -    u64 val;
 15.1778 -
 15.1779 -    val = dmar_readq(
 15.1780 -        iommu->reg,
 15.1781 -        cap_fault_reg_offset(dmar_readq(iommu->reg, DMAR_CAP_REG)) + 0x8);
 15.1782 -    dmar_writeq(
 15.1783 -        iommu->reg,
 15.1784 -        cap_fault_reg_offset(dmar_readq(iommu->reg, DMAR_CAP_REG)) + 0x8,
 15.1785 -        val);
 15.1786 -    dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_FAULTS);
 15.1787 -}
 15.1788 -
 15.1789 -static int init_vtd_hw(void)
 15.1790 -{
 15.1791 -    struct acpi_drhd_unit *drhd;
 15.1792 -    struct iommu *iommu;
 15.1793 -    struct iommu_flush *flush = NULL;
 15.1794 -    int vector;
 15.1795 -    int ret;
 15.1796 -
 15.1797 -    for_each_drhd_unit ( drhd )
 15.1798 -    {
 15.1799 -        iommu = drhd->iommu;
 15.1800 -        ret = iommu_set_root_entry(iommu);
 15.1801 -        if ( ret )
 15.1802 -        {
 15.1803 -            gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: set root entry failed\n");
 15.1804 -            return -EIO;
 15.1805 -        }
 15.1806 -
 15.1807 -        vector = iommu_set_interrupt(iommu);
 15.1808 -        dma_msi_data_init(iommu, vector);
 15.1809 -        dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
 15.1810 -        iommu->vector = vector;
 15.1811 -        clear_fault_bits(iommu);
 15.1812 -        dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
 15.1813 -
 15.1814 -        /* initialize flush functions */
 15.1815 -        flush = iommu_get_flush(iommu);
 15.1816 -        flush->context = flush_context_reg;
 15.1817 -        flush->iotlb = flush_iotlb_reg;
 15.1818 -    }
 15.1819 -    return 0;
 15.1820 -}
 15.1821 -
 15.1822 -static int init_vtd2_hw(void)
 15.1823 -{
 15.1824 -    struct acpi_drhd_unit *drhd;
 15.1825 -    struct iommu *iommu;
 15.1826 -
 15.1827 -    for_each_drhd_unit ( drhd )
 15.1828 -    {
 15.1829 -        iommu = drhd->iommu;
 15.1830 -        if ( qinval_setup(iommu) != 0 )
 15.1831 -            dprintk(XENLOG_ERR VTDPREFIX,
 15.1832 -                    "Queued Invalidation hardware not found\n");
 15.1833 -
 15.1834 -        if ( intremap_setup(iommu) != 0 )
 15.1835 -            dprintk(XENLOG_ERR VTDPREFIX,
 15.1836 -                    "Interrupt Remapping hardware not found\n");
 15.1837 -    }
 15.1838 -    return 0;
 15.1839 -}
 15.1840 -
 15.1841 -static int enable_vtd_translation(void)
 15.1842 -{
 15.1843 -    struct acpi_drhd_unit *drhd;
 15.1844 -    struct iommu *iommu;
 15.1845 -
 15.1846 -    for_each_drhd_unit ( drhd )
 15.1847 -    {
 15.1848 -        iommu = drhd->iommu;
 15.1849 -        if ( iommu_enable_translation(iommu) )
 15.1850 -            return -EIO;
 15.1851 -    }
 15.1852 -    return 0;
 15.1853 -}
 15.1854 -
 15.1855 -static void setup_dom0_rmrr(void)
 15.1856 -{
 15.1857 -    struct acpi_rmrr_unit *rmrr;
 15.1858 -    struct pci_dev *pdev;
 15.1859 -    int ret;
 15.1860 -
 15.1861 -    for_each_rmrr_device ( rmrr, pdev )
 15.1862 -        ret = iommu_prepare_rmrr_dev(dom0, rmrr, pdev);
 15.1863 -        if ( ret )
 15.1864 -            gdprintk(XENLOG_ERR VTDPREFIX,
 15.1865 -                     "IOMMU: mapping reserved region failed\n");
 15.1866 -    end_for_each_rmrr_device ( rmrr, pdev )
 15.1867 -}
 15.1868 -
 15.1869 -int iommu_setup(void)
 15.1870 -{
 15.1871 -    struct hvm_iommu *hd  = domain_hvm_iommu(dom0);
 15.1872 -    struct acpi_drhd_unit *drhd;
 15.1873 -    struct iommu *iommu;
 15.1874 -    unsigned long i;
 15.1875 -
 15.1876 -    if ( !vtd_enabled )
 15.1877 -        return 0;
 15.1878 -
 15.1879 -    spin_lock_init(&domid_bitmap_lock);
 15.1880 -    INIT_LIST_HEAD(&hd->pdev_list);
 15.1881 -
 15.1882 -    /* setup clflush size */
 15.1883 -    x86_clflush_size = ((cpuid_ebx(1) >> 8) & 0xff) * 8;
 15.1884 -
 15.1885 -    /* Use the first IOMMU's capabilities to size the domid bitmap. */
 15.1886 -    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
 15.1887 -    iommu = drhd->iommu;
 15.1888 -
 15.1889 -    /* Allocate domain id bitmap, and set bit 0 as reserved */
 15.1890 -    domid_bitmap_size = cap_ndoms(iommu->cap);
 15.1891 -    domid_bitmap = xmalloc_bytes(domid_bitmap_size / 8);
 15.1892 -    if ( domid_bitmap == NULL )
 15.1893 -        goto error;
 15.1894 -    memset(domid_bitmap, 0, domid_bitmap_size / 8);
 15.1895 -    set_bit(0, domid_bitmap);
 15.1896 -
 15.1897 -    /* setup 1:1 page table for dom0 */
 15.1898 -    for ( i = 0; i < max_page; i++ )
 15.1899 -        iommu_map_page(dom0, i, i);
 15.1900 -
 15.1901 -    init_vtd_hw();
 15.1902 -    setup_dom0_devices();
 15.1903 -    setup_dom0_rmrr();
 15.1904 -    iommu_flush_all();
 15.1905 -    enable_vtd_translation();
 15.1906 -    init_vtd2_hw();
 15.1907 -
 15.1908 -    return 0;
 15.1909 -
 15.1910 - error:
 15.1911 -    printk("iommu_setup() failed\n");
 15.1912 -    for_each_drhd_unit ( drhd )
 15.1913 -    {
 15.1914 -        iommu = drhd->iommu;
 15.1915 -        free_iommu(iommu);
 15.1916 -    }
 15.1917 -    return -EIO;
 15.1918 -}
 15.1919 -
 15.1920 -/*
 15.1921 - * If the device isn't owned by dom0, it has already been
 15.1922 - * assigned to another domain, or it doesn't exist.
 15.1923 - */
 15.1924 -int device_assigned(u8 bus, u8 devfn)
 15.1925 -{
 15.1926 -    struct pci_dev *pdev;
 15.1927 -
 15.1928 -    for_each_pdev( dom0, pdev )
 15.1929 -        if ( (pdev->bus == bus ) && (pdev->devfn == devfn) )
 15.1930 -            return 0;
 15.1931 -
 15.1932 -    return 1;
 15.1933 -}
 15.1934 -
 15.1935 -int intel_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
 15.1936 -{
 15.1937 -    struct acpi_rmrr_unit *rmrr;
 15.1938 -    struct pci_dev *pdev;
 15.1939 -    int ret = 0;
 15.1940 -
 15.1941 -    if ( list_empty(&acpi_drhd_units) )
 15.1942 -        return ret;
 15.1943 -
 15.1944 -    gdprintk(XENLOG_INFO VTDPREFIX,
 15.1945 -             "assign_device: bus = %x dev = %x func = %x\n",
 15.1946 -             bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 15.1947 -
 15.1948 -    reassign_device_ownership(dom0, d, bus, devfn);
 15.1949 -
 15.1950 -    /* Setup rmrr identity mapping */
 15.1951 -    for_each_rmrr_device( rmrr, pdev )
 15.1952 -        if ( pdev->bus == bus && pdev->devfn == devfn )
 15.1953 -        {
 15.1954 -            ret = iommu_prepare_rmrr_dev(d, rmrr, pdev);
 15.1955 -            if ( ret )
 15.1956 -            {
 15.1957 -                gdprintk(XENLOG_ERR VTDPREFIX,
 15.1958 -                         "IOMMU: mapping reserved region failed\n");
 15.1959 -                return ret;
 15.1960 -            }
 15.1961 -        }
 15.1962 -    end_for_each_rmrr_device(rmrr, pdev)
 15.1963 -
 15.1964 -    return ret;
 15.1965 -}
 15.1966 -
 15.1967 -void iommu_set_pgd(struct domain *d)
 15.1968 -{
 15.1969 -    struct hvm_iommu *hd  = domain_hvm_iommu(d);
 15.1970 -    unsigned long p2m_table;
 15.1971 -
 15.1972 -    if ( hd->pgd )
 15.1973 -    {
 15.1974 -        gdprintk(XENLOG_INFO VTDPREFIX,
 15.1975 -                 "iommu_set_pgd_1: hd->pgd = %p\n", hd->pgd);
 15.1976 -        hd->pgd = NULL;
 15.1977 -    }
 15.1978 -    p2m_table = mfn_x(pagetable_get_mfn(d->arch.phys_table));
 15.1979 -
 15.1980 -#if CONFIG_PAGING_LEVELS == 3
 15.1981 -    if ( !hd->pgd )
 15.1982 -    {
 15.1983 -        int level = agaw_to_level(hd->agaw);
 15.1984 -        struct dma_pte *pmd = NULL;
 15.1985 -        struct dma_pte *pgd = NULL;
 15.1986 -        struct dma_pte *pte = NULL;
 15.1987 -        l3_pgentry_t *l3e;
 15.1988 -        unsigned long flags;
 15.1989 -        int i;
 15.1990 -
 15.1991 -        spin_lock_irqsave(&hd->mapping_lock, flags);
 15.1992 -        if ( !hd->pgd )
 15.1993 -        {
 15.1994 -            pgd = (struct dma_pte *)alloc_xenheap_page();
 15.1995 -            if ( !pgd )
 15.1996 -            {
 15.1997 -                spin_unlock_irqrestore(&hd->mapping_lock, flags);
 15.1998 -                gdprintk(XENLOG_ERR VTDPREFIX,
 15.1999 -                         "Failed to allocate pgd memory!\n");
 15.2000 -                return;
 15.2001 -            }
 15.2002 -            memset(pgd, 0, PAGE_SIZE);
 15.2003 -            hd->pgd = pgd;
 15.2004 -        }
 15.2005 -
 15.2006 -        l3e = map_domain_page(p2m_table);
 15.2007 -        switch ( level )
 15.2008 -        {
 15.2009 -        case VTD_PAGE_TABLE_LEVEL_3:        /* Weybridge */
 15.2010 -            /* We only support 8 entries for the PAE L3 p2m table */
 15.2011 -            for ( i = 0; i < 8 ; i++ )
 15.2012 -            {
 15.2013 -                /* Don't create new L2 entry, use ones from p2m table */
 15.2014 -                pgd[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW;
 15.2015 -            }
 15.2016 -            break;
 15.2017 -
 15.2018 -        case VTD_PAGE_TABLE_LEVEL_4:        /* Stoakley */
 15.2019 -            /* We allocate one more page for the top vtd page table. */
 15.2020 -            pmd = (struct dma_pte *)alloc_xenheap_page();
 15.2021 -            if ( !pmd )
 15.2022 -            {
 15.2023 -                unmap_domain_page(l3e);
 15.2024 -                spin_unlock_irqrestore(&hd->mapping_lock, flags);
 15.2025 -                gdprintk(XENLOG_ERR VTDPREFIX,
 15.2026 -                         "Failed to allocate pmd memory!\n");
 15.2027 -                return;
 15.2028 -            }
 15.2029 -            memset((u8*)pmd, 0, PAGE_SIZE);
 15.2030 -            pte = &pgd[0];
 15.2031 -            dma_set_pte_addr(*pte, virt_to_maddr(pmd));
 15.2032 -            dma_set_pte_readable(*pte);
 15.2033 -            dma_set_pte_writable(*pte);
 15.2034 -
 15.2035 -            for ( i = 0; i < 8; i++ )
 15.2036 -            {
 15.2037 -                /* Don't create new L2 entry, use ones from p2m table */
 15.2038 -                pmd[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW;
 15.2039 -            }
 15.2040 -            break;
 15.2041 -        default:
 15.2042 -            gdprintk(XENLOG_ERR VTDPREFIX,
 15.2043 -                     "iommu_set_pgd:Unsupported p2m table sharing level!\n");
 15.2044 -            break;
 15.2045 -        }
 15.2046 -        unmap_domain_page(l3e);
 15.2047 -        spin_unlock_irqrestore(&hd->mapping_lock, flags);
 15.2048 -    }
 15.2049 -#elif CONFIG_PAGING_LEVELS == 4
 15.2050 -    if ( !hd->pgd )
 15.2051 -    {
 15.2052 -        int level = agaw_to_level(hd->agaw);
 15.2053 -        l3_pgentry_t *l3e;
 15.2054 -        mfn_t pgd_mfn;
 15.2055 -
 15.2056 -        switch ( level )
 15.2057 -        {
 15.2058 -        case VTD_PAGE_TABLE_LEVEL_3:
 15.2059 -            l3e = map_domain_page(p2m_table);
 15.2060 -            if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
 15.2061 -            {
 15.2062 -                gdprintk(XENLOG_ERR VTDPREFIX,
 15.2063 -                         "iommu_set_pgd: second-level table not present\n");
 15.2064 -                unmap_domain_page(l3e);
 15.2065 -                return;
 15.2066 -            }
 15.2067 -            pgd_mfn = _mfn(l3e_get_pfn(*l3e));
 15.2068 -            unmap_domain_page(l3e);
 15.2069 -            hd->pgd = maddr_to_virt(pagetable_get_paddr(
 15.2070 -                pagetable_from_mfn(pgd_mfn)));
 15.2071 -            break;
 15.2072 -
 15.2073 -        case VTD_PAGE_TABLE_LEVEL_4:
 15.2074 -            pgd_mfn = _mfn(p2m_table);
 15.2075 -            hd->pgd = maddr_to_virt(pagetable_get_paddr(
 15.2076 -                pagetable_from_mfn(pgd_mfn)));
 15.2077 -            break;
 15.2078 -        default:
 15.2079 -            gdprintk(XENLOG_ERR VTDPREFIX,
 15.2080 -                     "iommu_set_pgd:Unsupported p2m table sharing level!\n");
 15.2081 -            break;
 15.2082 -        }
 15.2083 -    }
 15.2084 -#endif
 15.2085 -    gdprintk(XENLOG_INFO VTDPREFIX,
 15.2086 -             "iommu_set_pgd: hd->pgd = %p\n", hd->pgd);
 15.2087 -}
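
Both branches of iommu_set_pgd() share the p2m's lower-level page tables with
the IOMMU rather than copying them: each top-level VT-d entry is built from
the corresponding p2m entry's machine address with the present and writable
bits forced on. A minimal sketch of that aliasing (types and flag values are
illustrative, not the real Xen definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define PRESENT_RW 0x3ULL   /* illustrative present + writable bits */

    int main(void)
    {
        /* Pretend these are the 8 PAE L3 p2m entries (machine addrs). */
        uint64_t l3e[8] = { 0x100000, 0x101000, 0x102000, 0x103000,
                            0x104000, 0x105000, 0x106000, 0x107000 };
        uint64_t vtd_pgd[8];

        /* Alias, don't copy: both tables now point at the same L2 pages. */
        for ( int i = 0; i < 8; i++ )
            vtd_pgd[i] = l3e[i] | PRESENT_RW;

        printf("pgd[0] = %#llx\n", (unsigned long long)vtd_pgd[0]);
        return 0;
    }
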
 15.2088 -
 15.2089 -
 15.2090 -u64 iommu_state[MAX_IOMMUS][MAX_IOMMU_REGS];
 15.2091 -int iommu_suspend(void)
 15.2092 -{
 15.2093 -    struct acpi_drhd_unit *drhd;
 15.2094 -    struct iommu *iommu;
 15.2095 -    int i = 0;
 15.2096 -
 15.2097 -    iommu_flush_all();
 15.2098 -
 15.2099 -    for_each_drhd_unit ( drhd )
 15.2100 -    {
 15.2101 -        iommu = drhd->iommu;
 15.2102 -        iommu_state[i][DMAR_RTADDR_REG] =
 15.2103 -            (u64) dmar_readq(iommu->reg, DMAR_RTADDR_REG);
 15.2104 -        iommu_state[i][DMAR_FECTL_REG] =
 15.2105 -            (u32) dmar_readl(iommu->reg, DMAR_FECTL_REG);
 15.2106 -        iommu_state[i][DMAR_FEDATA_REG] =
 15.2107 -            (u32) dmar_readl(iommu->reg, DMAR_FEDATA_REG);
 15.2108 -        iommu_state[i][DMAR_FEADDR_REG] =
 15.2109 -            (u32) dmar_readl(iommu->reg, DMAR_FEADDR_REG);
 15.2110 -        iommu_state[i][DMAR_FEUADDR_REG] =
 15.2111 -            (u32) dmar_readl(iommu->reg, DMAR_FEUADDR_REG);
 15.2112 -        iommu_state[i][DMAR_PLMBASE_REG] =
 15.2113 -            (u32) dmar_readl(iommu->reg, DMAR_PLMBASE_REG);
 15.2114 -        iommu_state[i][DMAR_PLMLIMIT_REG] =
 15.2115 -            (u32) dmar_readl(iommu->reg, DMAR_PLMLIMIT_REG);
 15.2116 -        iommu_state[i][DMAR_PHMBASE_REG] =
 15.2117 -            (u64) dmar_readq(iommu->reg, DMAR_PHMBASE_REG);
 15.2118 -        iommu_state[i][DMAR_PHMLIMIT_REG] =
 15.2119 -            (u64) dmar_readq(iommu->reg, DMAR_PHMLIMIT_REG);
 15.2120 -        i++;
 15.2121 -    }
 15.2122 -
 15.2123 -    return 0;
 15.2124 -}
 15.2125 -
 15.2126 -int iommu_resume(void)
 15.2127 -{
 15.2128 -    struct acpi_drhd_unit *drhd;
 15.2129 -    struct iommu *iommu;
 15.2130 -    int i = 0;
 15.2131 -
 15.2132 -    iommu_flush_all();
 15.2133 -
 15.2134 -    init_vtd_hw();
 15.2135 -    for_each_drhd_unit ( drhd )
 15.2136 -    {
 15.2137 -        iommu = drhd->iommu;
 15.2138 -        dmar_writeq(iommu->reg, DMAR_RTADDR_REG,
 15.2139 -                    (u64) iommu_state[i][DMAR_RTADDR_REG]);
 15.2140 -        dmar_writel(iommu->reg, DMAR_FECTL_REG,
 15.2141 -                    (u32) iommu_state[i][DMAR_FECTL_REG]);
 15.2142 -        dmar_writel(iommu->reg, DMAR_FEDATA_REG,
 15.2143 -                    (u32) iommu_state[i][DMAR_FEDATA_REG]);
 15.2144 -        dmar_writel(iommu->reg, DMAR_FEADDR_REG,
 15.2145 -                    (u32) iommu_state[i][DMAR_FEADDR_REG]);
 15.2146 -        dmar_writel(iommu->reg, DMAR_FEUADDR_REG,
 15.2147 -                    (u32) iommu_state[i][DMAR_FEUADDR_REG]);
 15.2148 -        dmar_writel(iommu->reg, DMAR_PLMBASE_REG,
 15.2149 -                    (u32) iommu_state[i][DMAR_PLMBASE_REG]);
 15.2150 -        dmar_writel(iommu->reg, DMAR_PLMLIMIT_REG,
 15.2151 -                    (u32) iommu_state[i][DMAR_PLMLIMIT_REG]);
 15.2152 -        dmar_writeq(iommu->reg, DMAR_PHMBASE_REG,
 15.2153 -                    (u64) iommu_state[i][DMAR_PHMBASE_REG]);
 15.2154 -        dmar_writeq(iommu->reg, DMAR_PHMLIMIT_REG,
 15.2155 -                    (u64) iommu_state[i][DMAR_PHMLIMIT_REG]);
 15.2156 -
 15.2157 -        if ( iommu_enable_translation(iommu) )
 15.2158 -            return -EIO;
 15.2159 -        i++;
 15.2160 -    }
 15.2161 -    return 0;
 15.2162 -}
 15.2163 -
 15.2164 -struct iommu_ops intel_iommu_ops = {
 15.2165 -    .init = intel_iommu_domain_init,
 15.2166 -    .assign_device  = intel_iommu_assign_device,
 15.2167 -    .teardown = iommu_domain_teardown,
 15.2168 -    .map_page = intel_iommu_map_page,
 15.2169 -    .unmap_page = intel_iommu_unmap_page,
 15.2170 -    .reassign_device = reassign_device_ownership,
 15.2171 -};
 15.2172 -
 15.2173 -/*
 15.2174 - * Local variables:
 15.2175 - * mode: C
 15.2176 - * c-set-style: "BSD"
 15.2177 - * c-basic-offset: 4
 15.2178 - * tab-width: 4
 15.2179 - * indent-tabs-mode: nil
 15.2180 - * End:
 15.2181 - */
    16.1 --- a/xen/arch/x86/hvm/vmx/vtd/intremap.c	Thu Feb 21 14:50:27 2008 +0000
    16.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.3 @@ -1,340 +0,0 @@
    16.4 -/*
    16.5 - * Copyright (c) 2006, Intel Corporation.
    16.6 - *
    16.7 - * This program is free software; you can redistribute it and/or modify it
    16.8 - * under the terms and conditions of the GNU General Public License,
    16.9 - * version 2, as published by the Free Software Foundation.
   16.10 - *
   16.11 - * This program is distributed in the hope it will be useful, but WITHOUT
   16.12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   16.13 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   16.14 - * more details.
   16.15 - *
   16.16 - * You should have received a copy of the GNU General Public License along with
   16.17 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   16.18 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   16.19 - *
   16.20 - * Copyright (C) Allen Kay <allen.m.kay@intel.com>
   16.21 - * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
   16.22 - */
   16.23 -
   16.24 -#include <xen/config.h>
   16.25 -#include <xen/lib.h>
   16.26 -#include <xen/init.h>
   16.27 -#include <xen/irq.h>
   16.28 -#include <xen/delay.h>
   16.29 -#include <xen/sched.h>
   16.30 -#include <xen/acpi.h>
   16.31 -#include <xen/keyhandler.h>
   16.32 -#include <xen/spinlock.h>
   16.33 -#include <asm/io.h>
   16.34 -#include <asm/mc146818rtc.h>
   16.35 -#include <asm/smp.h>
   16.36 -#include <asm/desc.h>
   16.37 -#include <mach_apic.h>
   16.38 -#include <io_ports.h>
   16.39 -
   16.40 -#include <xen/spinlock.h>
   16.41 -#include <xen/xmalloc.h>
   16.42 -#include <xen/domain_page.h>
   16.43 -#include <asm/delay.h>
   16.44 -#include <asm/string.h>
   16.45 -#include <asm/iommu.h>
   16.46 -#include <asm/hvm/vmx/intel-iommu.h>
   16.47 -#include "dmar.h"
   16.48 -#include "vtd.h"
   16.49 -#include "pci-direct.h"
   16.50 -#include "pci_regs.h"
   16.51 -#include "msi.h"
   16.52 -#include "extern.h"
   16.53 -
   16.54 -u16 apicid_to_bdf(int apic_id)
   16.55 -{
   16.56 -    struct acpi_drhd_unit *drhd = ioapic_to_drhd(apic_id);
   16.57 -    struct acpi_ioapic_unit *acpi_ioapic_unit;
   16.58 -
   16.59 -    list_for_each_entry ( acpi_ioapic_unit, &drhd->ioapic_list, list )
   16.60 -        if ( acpi_ioapic_unit->apic_id == apic_id )
   16.61 -            return acpi_ioapic_unit->ioapic.info;
   16.62 -
   16.63 -    dprintk(XENLOG_ERR VTDPREFIX, "Didn't find the bdf for the apic_id!\n");
   16.64 -    return 0;
   16.65 -}
   16.66 -
   16.67 -static void remap_entry_to_ioapic_rte(
   16.68 -    struct iommu *iommu, struct IO_APIC_route_entry *old_rte)
   16.69 -{
   16.70 -    struct iremap_entry *iremap_entry = NULL;
   16.71 -    struct IO_APIC_route_remap_entry *remap_rte;
   16.72 -    unsigned int index;
   16.73 -    unsigned long flags;
   16.74 -    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
   16.75 -
   16.76 -    if ( ir_ctrl == NULL )
   16.77 -    {
   16.78 -        dprintk(XENLOG_ERR VTDPREFIX,
   16.79 -                "remap_entry_to_ioapic_rte: ir_ctrl == NULL\n");
   16.80 -        return;
   16.81 -    }
   16.82 -
   16.83 -    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
   16.84 -    index = (remap_rte->index_15 << 15) + remap_rte->index_0_14;
   16.85 -
   16.86 -    if ( index > ir_ctrl->iremap_index )
   16.87 -    {
   16.88 -        dprintk(XENLOG_ERR VTDPREFIX,
   16.89 -            "Index exceeds the number of allocated remap entries!\n");
   16.90 -        return;
   16.91 -    }
   16.92 -
   16.93 -    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
   16.94 -
   16.95 -    iremap_entry = &ir_ctrl->iremap[index];
   16.96 -
   16.97 -    old_rte->vector = iremap_entry->lo.vector;
   16.98 -    old_rte->delivery_mode = iremap_entry->lo.dlm;
   16.99 -    old_rte->dest_mode = iremap_entry->lo.dm;
  16.100 -    old_rte->trigger = iremap_entry->lo.tm;
  16.101 -    old_rte->__reserved_2 = 0;
  16.102 -    old_rte->dest.logical.__reserved_1 = 0;
  16.103 -    old_rte->dest.logical.logical_dest = iremap_entry->lo.dst;
  16.104 -
  16.105 -    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
  16.106 -}
  16.107 -
  16.108 -static void ioapic_rte_to_remap_entry(struct iommu *iommu,
  16.109 -    int apic_id, struct IO_APIC_route_entry *old_rte)
  16.110 -{
  16.111 -    struct iremap_entry *iremap_entry = NULL;
  16.112 -    struct IO_APIC_route_remap_entry *remap_rte;
  16.113 -    unsigned int index;
  16.114 -    unsigned long flags;
  16.115 -    int ret = 0;
  16.116 -    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
  16.117 -
  16.118 -    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
  16.119 -    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
  16.120 -    index = ir_ctrl->iremap_index;
  16.121 -    if ( index > IREMAP_ENTRY_NR - 1 )
  16.122 -    {
  16.123 -        dprintk(XENLOG_ERR VTDPREFIX,
  16.124 -               "The interrupt remap table is full (256 entries)!\n");
  16.125 -        goto out;
  16.126 -    }
  16.127 -
  16.128 -    iremap_entry = &(ir_ctrl->iremap[index]);
  16.129 -    if ( *(u64 *)iremap_entry != 0 )
  16.130 -        dprintk(XENLOG_WARNING VTDPREFIX,
  16.131 -               "Interrupt remapping entry is in use already!\n");
  16.132 -    iremap_entry->lo.fpd = 0;
  16.133 -    iremap_entry->lo.dm = old_rte->dest_mode;
  16.134 -    iremap_entry->lo.rh = 0;
  16.135 -    iremap_entry->lo.tm = old_rte->trigger;
  16.136 -    iremap_entry->lo.dlm = old_rte->delivery_mode;
  16.137 -    iremap_entry->lo.avail = 0;
  16.138 -    iremap_entry->lo.res_1 = 0;
  16.139 -    iremap_entry->lo.vector = old_rte->vector;
  16.140 -    iremap_entry->lo.res_2 = 0;
  16.141 -    iremap_entry->lo.dst = (old_rte->dest.logical.logical_dest << 8);
  16.142 -    iremap_entry->hi.sid = apicid_to_bdf(apic_id);
  16.143 -    iremap_entry->hi.sq = 0;    /* compare all 16 bits of the SID */
  16.144 -    iremap_entry->hi.svt = 1;   /* verify requester id against SID/SQ */
  16.145 -    iremap_entry->hi.res_1 = 0;
  16.146 -    iremap_entry->lo.p = 1;    /* finally, set present bit */
  16.147 -    ir_ctrl->iremap_index++;
  16.148 -
  16.149 -    iommu_flush_iec_index(iommu, 0, index);
  16.150 -    ret = invalidate_sync(iommu);
  16.151 -
  16.152 -    /* now construct new ioapic rte entry */ 
  16.153 -    remap_rte->vector = old_rte->vector;
  16.154 -    remap_rte->delivery_mode = 0;    /* has to be 0 for remap format */ 
  16.155 -    remap_rte->index_15 = (index >> 15) & 0x1;
  16.156 -    remap_rte->index_0_14 = index & 0x7fff;
  16.157 -    remap_rte->delivery_status = old_rte->delivery_status;
  16.158 -    remap_rte->polarity = old_rte->polarity;
  16.159 -    remap_rte->irr = old_rte->irr;
  16.160 -    remap_rte->trigger = old_rte->trigger;
  16.161 -    remap_rte->mask = 1;
  16.162 -    remap_rte->reserved = 0;
  16.163 -    remap_rte->format = 1;    /* indicate remap format */
  16.164 -out:
  16.165 -    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
  16.166 -    return;
  16.167 -}
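
ioapic_rte_to_remap_entry() rewrites the IO-APIC RTE into remap format, where
the 16-bit handle into the remap table is split into index_0_14 and a single
index_15 bit. A small sketch of packing and unpacking that handle (the
bitfield layout is illustrative, mirroring the fields used above):

    #include <stdint.h>
    #include <stdio.h>

    /* Remap-format RTE fields that carry the table handle. */
    struct remap_handle {
        uint16_t index_0_14 : 15;
        uint16_t index_15   : 1;
    };

    int main(void)
    {
        unsigned int index = 0x8123;          /* example handle */
        struct remap_handle h;

        h.index_0_14 = index & 0x7fff;        /* low 15 bits */
        h.index_15   = (index >> 15) & 0x1;   /* top bit */

        unsigned int back = (h.index_15 << 15) + h.index_0_14;
        printf("packed %#x -> unpacked %#x\n", index, back); /* round-trips */
        return 0;
    }
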
  16.168 -
  16.169 -unsigned int
  16.170 -io_apic_read_remap_rte(
  16.171 -    unsigned int apic, unsigned int reg)
  16.172 -{
  16.173 -    struct IO_APIC_route_entry old_rte = { 0 };
  16.174 -    struct IO_APIC_route_remap_entry *remap_rte;
  16.175 -    int rte_upper = (reg & 1) ? 1 : 0;
  16.176 -    struct iommu *iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
  16.177 -    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
  16.178 -
  16.179 -    if ( !iommu || !(ir_ctrl->iremap) )
  16.180 -    {
  16.181 -        *IO_APIC_BASE(apic) = reg;
  16.182 -        return *(IO_APIC_BASE(apic)+4);
  16.183 -    }
  16.184 -
  16.185 -    if ( rte_upper )
  16.186 -        reg--;
  16.187 -
  16.188 -    /* read lower and upper 32-bits of rte entry */
  16.189 -    *IO_APIC_BASE(apic) = reg;
  16.190 -    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
  16.191 -    *IO_APIC_BASE(apic) = reg + 1;
  16.192 -    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);
  16.193 -
  16.194 -    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;
  16.195 -
  16.196 -    if ( remap_rte->mask || (remap_rte->format == 0) )
  16.197 -    {
  16.198 -        *IO_APIC_BASE(apic) = reg;
  16.199 -        return *(IO_APIC_BASE(apic)+4);
  16.200 -    }
  16.201 -
  16.202 -    remap_entry_to_ioapic_rte(iommu, &old_rte);
  16.203 -    if ( rte_upper )
  16.204 -    {
  16.205 -        *IO_APIC_BASE(apic) = reg + 1;
  16.206 -        return (*(((u32 *)&old_rte) + 1));
  16.207 -    }
  16.208 -    else
  16.209 -    {
  16.210 -        *IO_APIC_BASE(apic) = reg;
  16.211 -        return (*(((u32 *)&old_rte) + 0));
  16.212 -    }
  16.213 -}
  16.214 -
  16.215 -void
  16.216 -io_apic_write_remap_rte(
  16.217 -    unsigned int apic, unsigned int reg, unsigned int value)
  16.218 -{
  16.219 -    struct IO_APIC_route_entry old_rte = { 0 };
  16.220 -    struct IO_APIC_route_remap_entry *remap_rte;
  16.221 -    int rte_upper = (reg & 1) ? 1 : 0;
  16.222 -    struct iommu *iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
  16.223 -    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
  16.224 -
  16.225 -    if ( !iommu || !(ir_ctrl->iremap) )
  16.226 -    {
  16.227 -        *IO_APIC_BASE(apic) = reg;
  16.228 -        *(IO_APIC_BASE(apic)+4) = value;
  16.229 -        return;
  16.230 -    }
  16.231 -
  16.232 -    if ( rte_upper )
  16.233 -        reg--;
  16.234 -
  16.235 -    /* read both lower and upper 32-bits of rte entry */
  16.236 -    *IO_APIC_BASE(apic) = reg;
  16.237 -    *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
  16.238 -    *IO_APIC_BASE(apic) = reg + 1;
  16.239 -    *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);
  16.240 -
  16.241 -    remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;
  16.242 -    if ( remap_rte->mask || (remap_rte->format == 0) )
  16.243 -    {
  16.244 -        *IO_APIC_BASE(apic) = rte_upper ? ++reg : reg;
  16.245 -        *(IO_APIC_BASE(apic)+4) = value;
  16.246 -        return;
  16.247 -    }
  16.248 -
  16.249 -    *(((u32 *)&old_rte) + rte_upper) = value;
  16.250 -    ioapic_rte_to_remap_entry(iommu, mp_ioapics[apic].mpc_apicid, &old_rte);
  16.251 -
  16.252 -    /* write new entry to ioapic */
  16.253 -    *IO_APIC_BASE(apic) = reg;
  16.254 -    *(IO_APIC_BASE(apic)+4) = *(((int *)&old_rte)+0);
  16.255 -    *IO_APIC_BASE(apic) = reg + 1;
  16.256 -    *(IO_APIC_BASE(apic)+4) = *(((int *)&old_rte)+1);
  16.257 -}
  16.258 -
  16.259 -int intremap_setup(struct iommu *iommu)
  16.260 -{
  16.261 -    struct ir_ctrl *ir_ctrl;
  16.262 -    unsigned long start_time;
  16.263 -    u64 paddr;
  16.264 -
  16.265 -    if ( !ecap_intr_remap(iommu->ecap) )
  16.266 -        return -ENODEV;
  16.267 -
  16.268 -    ir_ctrl = iommu_ir_ctrl(iommu);
  16.269 -    if ( ir_ctrl->iremap == NULL )
  16.270 -    {
  16.271 -        ir_ctrl->iremap = alloc_xenheap_page();
  16.272 -        if ( ir_ctrl->iremap == NULL )
  16.273 -        {
  16.274 -            dprintk(XENLOG_WARNING VTDPREFIX,
  16.275 -                    "Cannot allocate memory for ir_ctrl->iremap\n");
  16.276 -            return -ENODEV;
  16.277 -        }
  16.278 -        memset(ir_ctrl->iremap, 0, PAGE_SIZE);
  16.279 -    }
  16.280 -
  16.281 -    paddr = virt_to_maddr(ir_ctrl->iremap);
  16.282 -#if defined(ENABLED_EXTENDED_INTERRUPT_SUPPORT)
  16.283 -    /* set extended interrupt mode bit */
  16.284 -    paddr |= ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIMI_SHIFT) : 0;
  16.285 -#endif
  16.286 -    /* size field = 7: 2^(7+1) = 256 16-byte entries = one 4K page */
  16.287 -    paddr |= 7;
  16.288 -    dmar_writeq(iommu->reg, DMAR_IRTA_REG, paddr);
  16.289 -
  16.290 -    /* set SIRTP */
  16.291 -    iommu->gcmd |= DMA_GCMD_SIRTP;
  16.292 -    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
  16.293 -
  16.294 -    /* Wait for the hardware to complete it */
  16.295 -    start_time = jiffies;
  16.296 -    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_SIRTPS) )
  16.297 -    {
  16.298 -        if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
  16.299 -        {
  16.300 -            dprintk(XENLOG_ERR VTDPREFIX,
  16.301 -                    "Cannot set SIRTP field for interrupt remapping\n");
  16.302 -            return -ENODEV;
  16.303 -        }
  16.304 -        cpu_relax();
  16.305 -    }
  16.306 -
  16.307 -    /* enable compatibility format interrupt pass-through */
  16.308 -    iommu->gcmd |= DMA_GCMD_CFI;
  16.309 -    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
  16.310 -
  16.311 -    start_time = jiffies;
  16.312 -    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_CFIS) )
  16.313 -    {
  16.314 -        if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
  16.315 -        {
  16.316 -            dprintk(XENLOG_ERR VTDPREFIX,
  16.317 -                    "Cannot set CFI field for interrupt remapping\n");
  16.318 -            return -ENODEV;
  16.319 -        }
  16.320 -        cpu_relax();
  16.321 -    }
  16.322 -
  16.323 -    /* enable interrupt remapping hardware */
  16.324 -    iommu->gcmd |= DMA_GCMD_IRE;
  16.325 -    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
  16.326 -
  16.327 -    start_time = jiffies;
  16.328 -    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_IRES) )
  16.329 -    {
  16.330 -        if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
  16.331 -        {
  16.332 -            dprintk(XENLOG_ERR VTDPREFIX,
  16.333 -                    "Cannot set IRE field for interrupt remapping\n");
  16.334 -            return -ENODEV;
  16.335 -        }
  16.336 -        cpu_relax();
  16.337 -    }
  16.338 -
  16.339 -    /* After setting SIRTP, globally invalidate the IEC */
  16.340 -    iommu_flush_iec_global(iommu);
  16.341 -
  16.342 -    return 0;
  16.343 -}
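
intremap_setup() ORs 7 into the table address written to DMAR_IRTA_REG.
Assuming the VT-d encoding where a size-field value S denotes 2^(S+1) table
entries (an assumption from the spec, not stated in this patch), S = 7 gives
the 256 16-byte entries that exactly fill the one 4K page allocated above. A
quick standalone check of that arithmetic:

    #include <stdio.h>

    #define IREMAP_ENTRY_BYTES 16      /* each iremap_entry is 128 bits */
    #define PAGE_SIZE_4K       4096

    int main(void)
    {
        unsigned int entries = PAGE_SIZE_4K / IREMAP_ENTRY_BYTES; /* 256 */
        unsigned int s = 0;

        /* Find S such that 2^(S+1) == entries (the IRTA size encoding). */
        while ( (2u << s) < entries )
            s++;

        printf("%u entries -> size field %u\n", entries, s); /* 256 -> 7 */
        return 0;
    }
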
    17.1 --- a/xen/arch/x86/hvm/vmx/vtd/io.c	Thu Feb 21 14:50:27 2008 +0000
    17.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.3 @@ -1,296 +0,0 @@
    17.4 -/*
    17.5 - * Copyright (c) 2006, Intel Corporation.
    17.6 - *
    17.7 - * This program is free software; you can redistribute it and/or modify it
    17.8 - * under the terms and conditions of the GNU General Public License,
    17.9 - * version 2, as published by the Free Software Foundation.
   17.10 - *
   17.11 - * This program is distributed in the hope it will be useful, but WITHOUT
   17.12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   17.13 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   17.14 - * more details.
   17.15 - *
   17.16 - * You should have received a copy of the GNU General Public License along with
   17.17 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   17.18 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   17.19 - *
   17.20 - * Copyright (C) Allen Kay <allen.m.kay@intel.com>
   17.21 - * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
   17.22 - */
   17.23 -
   17.24 -#include <xen/init.h>
   17.25 -#include <xen/config.h>
   17.26 -#include <xen/init.h>
   17.27 -#include <xen/mm.h>
   17.28 -#include <xen/lib.h>
   17.29 -#include <xen/errno.h>
   17.30 -#include <xen/trace.h>
   17.31 -#include <xen/event.h>
   17.32 -#include <xen/hypercall.h>
   17.33 -#include <asm/current.h>
   17.34 -#include <asm/cpufeature.h>
   17.35 -#include <asm/processor.h>
   17.36 -#include <asm/msr.h>
   17.37 -#include <asm/apic.h>
   17.38 -#include <asm/paging.h>
   17.39 -#include <asm/shadow.h>
   17.40 -#include <asm/p2m.h>
   17.41 -#include <asm/hvm/hvm.h>
   17.42 -#include <asm/hvm/support.h>
   17.43 -#include <asm/hvm/vpt.h>
   17.44 -#include <asm/hvm/vpic.h>
   17.45 -#include <asm/hvm/vlapic.h>
   17.46 -#include <public/sched.h>
   17.47 -#include <xen/iocap.h>
   17.48 -#include <public/hvm/ioreq.h>
   17.49 -#include <public/domctl.h>
   17.50 -
   17.51 -static void pt_irq_time_out(void *data)
   17.52 -{
   17.53 -    struct hvm_mirq_dpci_mapping *irq_map = data;
   17.54 -    unsigned int guest_gsi, machine_gsi = 0;
   17.55 -    struct hvm_irq_dpci *dpci = irq_map->dom->arch.hvm_domain.irq.dpci;
   17.56 -    struct dev_intx_gsi_link *digl;
   17.57 -    uint32_t device, intx;
   17.58 -
   17.59 -    list_for_each_entry ( digl, &irq_map->digl_list, list )
   17.60 -    {
   17.61 -        guest_gsi = digl->gsi;
   17.62 -        machine_gsi = dpci->girq[guest_gsi].machine_gsi;
   17.63 -        device = digl->device;
   17.64 -        intx = digl->intx;
   17.65 -        hvm_pci_intx_deassert(irq_map->dom, device, intx);
   17.66 -    }
   17.67 -
   17.68 -    clear_bit(machine_gsi, dpci->dirq_mask);
   17.69 -    stop_timer(&dpci->hvm_timer[irq_to_vector(machine_gsi)]);
   17.70 -    spin_lock(&dpci->dirq_lock);
   17.71 -    dpci->mirq[machine_gsi].pending = 0;
   17.72 -    spin_unlock(&dpci->dirq_lock);
   17.73 -    pirq_guest_eoi(irq_map->dom, machine_gsi);
   17.74 -}
   17.75 -
   17.76 -int pt_irq_create_bind_vtd(
   17.77 -    struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
   17.78 -{
   17.79 -    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
   17.80 -    uint32_t machine_gsi, guest_gsi;
   17.81 -    uint32_t device, intx, link;
   17.82 -    struct dev_intx_gsi_link *digl;
   17.83 -
   17.84 -    if ( hvm_irq_dpci == NULL )
   17.85 -    {
   17.86 -        hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
   17.87 -        if ( hvm_irq_dpci == NULL )
   17.88 -            return -ENOMEM;
   17.89 -
   17.90 -        memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
   17.91 -        spin_lock_init(&hvm_irq_dpci->dirq_lock);
   17.92 -        for ( int i = 0; i < NR_IRQS; i++ )
   17.93 -            INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
   17.94 -
   17.95 -        if ( cmpxchg((unsigned long *)&d->arch.hvm_domain.irq.dpci,
   17.96 -                     0, (unsigned long)hvm_irq_dpci) != 0 )
   17.97 -            xfree(hvm_irq_dpci);
   17.98 -
   17.99 -        hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
  17.100 -    }
  17.101 -
  17.102 -    machine_gsi = pt_irq_bind->machine_irq;
  17.103 -    device = pt_irq_bind->u.pci.device;
  17.104 -    intx = pt_irq_bind->u.pci.intx;
  17.105 -    guest_gsi = hvm_pci_intx_gsi(device, intx);
  17.106 -    link = hvm_pci_intx_link(device, intx);
  17.107 -    hvm_irq_dpci->link_cnt[link]++;
  17.108 -
  17.109 -    digl = xmalloc(struct dev_intx_gsi_link);
  17.110 -    if ( !digl )
  17.111 -        return -ENOMEM;
  17.112 -
  17.113 -    digl->device = device;
  17.114 -    digl->intx = intx;
  17.115 -    digl->gsi = guest_gsi;
  17.116 -    digl->link = link;
  17.117 -    list_add_tail(&digl->list,
  17.118 -                  &hvm_irq_dpci->mirq[machine_gsi].digl_list);
  17.119 -
  17.120 -    hvm_irq_dpci->girq[guest_gsi].valid = 1;
  17.121 -    hvm_irq_dpci->girq[guest_gsi].device = device;
  17.122 -    hvm_irq_dpci->girq[guest_gsi].intx = intx;
  17.123 -    hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
  17.124 -
   17.125 -    /* Bind the same mirq only once per domain */
  17.126 -    if ( !hvm_irq_dpci->mirq[machine_gsi].valid )
  17.127 -    {
  17.128 -        hvm_irq_dpci->mirq[machine_gsi].valid = 1;
  17.129 -        hvm_irq_dpci->mirq[machine_gsi].dom = d;
  17.130 -
  17.131 -        init_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)],
  17.132 -                   pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
  17.133 -        /* Deal with gsi for legacy devices */
  17.134 -        pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
  17.135 -    }
  17.136 -
  17.137 -    gdprintk(XENLOG_INFO VTDPREFIX,
  17.138 -             "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
  17.139 -             machine_gsi, device, intx);
  17.140 -    return 0;
  17.141 -}
  17.142 -
  17.143 -int pt_irq_destroy_bind_vtd(
  17.144 -    struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
  17.145 -{
  17.146 -    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
  17.147 -    uint32_t machine_gsi, guest_gsi;
  17.148 -    uint32_t device, intx, link;
  17.149 -    struct list_head *digl_list, *tmp;
  17.150 -    struct dev_intx_gsi_link *digl;
  17.151 -
  17.152 -    if ( hvm_irq_dpci == NULL )
  17.153 -        return 0;
  17.154 -
  17.155 -    machine_gsi = pt_irq_bind->machine_irq;
  17.156 -    device = pt_irq_bind->u.pci.device;
  17.157 -    intx = pt_irq_bind->u.pci.intx;
  17.158 -    guest_gsi = hvm_pci_intx_gsi(device, intx);
  17.159 -    link = hvm_pci_intx_link(device, intx);
  17.160 -    hvm_irq_dpci->link_cnt[link]--;
  17.161 -
  17.162 -    gdprintk(XENLOG_INFO,
  17.163 -            "pt_irq_destroy_bind_vtd: machine_gsi=%d, guest_gsi=%d, device=%d, intx=%d.\n",
  17.164 -            machine_gsi, guest_gsi, device, intx);
  17.165 -    memset(&hvm_irq_dpci->girq[guest_gsi], 0, sizeof(struct hvm_girq_dpci_mapping));
  17.166 -
  17.167 -    /* clear the mirq info */
  17.168 -    if ( hvm_irq_dpci->mirq[machine_gsi].valid )
  17.169 -    {
  17.170 -
  17.171 -        list_for_each_safe ( digl_list, tmp,
  17.172 -                &hvm_irq_dpci->mirq[machine_gsi].digl_list )
  17.173 -        {
  17.174 -            digl = list_entry(digl_list,
  17.175 -                    struct dev_intx_gsi_link, list);
  17.176 -            if ( digl->device == device &&
  17.177 -                 digl->intx   == intx &&
  17.178 -                 digl->link   == link &&
  17.179 -                 digl->gsi    == guest_gsi )
  17.180 -            {
  17.181 -                list_del(&digl->list);
  17.182 -                xfree(digl);
  17.183 -            }
  17.184 -        }
  17.185 -
  17.186 -        if ( list_empty(&hvm_irq_dpci->mirq[machine_gsi].digl_list) )
  17.187 -        {
  17.188 -            pirq_guest_unbind(d, machine_gsi);
  17.189 -            kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
  17.190 -            hvm_irq_dpci->mirq[machine_gsi].dom   = NULL;
  17.191 -            hvm_irq_dpci->mirq[machine_gsi].valid = 0;
  17.192 -        }
  17.193 -    }
  17.194 -
  17.195 -    gdprintk(XENLOG_INFO,
  17.196 -             "XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
  17.197 -             machine_gsi, device, intx);
  17.198 -
  17.199 -    return 0;
  17.200 -}
  17.201 -
  17.202 -int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
  17.203 -{
  17.204 -    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  17.205 -
  17.206 -    if ( !iommu_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
  17.207 -         !hvm_irq->dpci->mirq[mirq].valid )
  17.208 -        return 0;
  17.209 -
  17.210 -    /*
  17.211 -     * Set a timer here to avoid situations where the IRQ line is shared, and
  17.212 -     * the device belonging to the pass-through guest is not yet active. In
  17.213 -     * this case the guest may not pick up the interrupt (e.g., masked at the
  17.214 -     * PIC) and we need to detect that.
  17.215 -     */
  17.216 -    set_bit(mirq, hvm_irq->dpci->dirq_mask);
  17.217 -    set_timer(&hvm_irq->dpci->hvm_timer[irq_to_vector(mirq)],
  17.218 -              NOW() + PT_IRQ_TIME_OUT);
  17.219 -    vcpu_kick(d->vcpu[0]);
  17.220 -
  17.221 -    return 1;
  17.222 -}
  17.223 -
  17.224 -static void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
  17.225 -{
  17.226 -    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  17.227 -    struct hvm_irq_dpci *dpci = hvm_irq->dpci;
  17.228 -    struct dev_intx_gsi_link *digl, *tmp;
  17.229 -    int i;
  17.230 -
  17.231 -    ASSERT(isairq < NR_ISAIRQS);
  17.232 -    if ( !iommu_enabled || !dpci ||
  17.233 -         !test_bit(isairq, dpci->isairq_map) )
  17.234 -        return;
  17.235 -
   17.236 -    /* Multiple mirqs may be mapped to one ISA IRQ */
  17.237 -    for ( i = 0; i < NR_IRQS; i++ )
  17.238 -    {
  17.239 -        if ( !dpci->mirq[i].valid )
  17.240 -            continue;
  17.241 -
  17.242 -        list_for_each_entry_safe ( digl, tmp,
  17.243 -            &dpci->mirq[i].digl_list, list )
  17.244 -        {
  17.245 -            if ( hvm_irq->pci_link.route[digl->link] == isairq )
  17.246 -            {
  17.247 -                hvm_pci_intx_deassert(d, digl->device, digl->intx);
  17.248 -                spin_lock(&dpci->dirq_lock);
  17.249 -                if ( --dpci->mirq[i].pending == 0 )
  17.250 -                {
  17.251 -                    spin_unlock(&dpci->dirq_lock);
  17.252 -                    gdprintk(XENLOG_INFO VTDPREFIX,
  17.253 -                             "hvm_dpci_isairq_eoi:: mirq = %x\n", i);
  17.254 -                    stop_timer(&dpci->hvm_timer[irq_to_vector(i)]);
  17.255 -                    pirq_guest_eoi(d, i);
  17.256 -                }
  17.257 -                else
  17.258 -                    spin_unlock(&dpci->dirq_lock);
  17.259 -            }
  17.260 -        }
  17.261 -    }
  17.262 -}
  17.263 -
  17.264 -void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
  17.265 -                  union vioapic_redir_entry *ent)
  17.266 -{
  17.267 -    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
  17.268 -    uint32_t device, intx, machine_gsi;
  17.269 -
  17.270 -    if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
  17.271 -         (guest_gsi >= NR_ISAIRQS &&
  17.272 -          !hvm_irq_dpci->girq[guest_gsi].valid) )
  17.273 -        return;
  17.274 -
  17.275 -    if ( guest_gsi < NR_ISAIRQS )
  17.276 -    {
  17.277 -        hvm_dpci_isairq_eoi(d, guest_gsi);
  17.278 -        return;
  17.279 -    }
  17.280 -
  17.281 -    machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
  17.282 -    device = hvm_irq_dpci->girq[guest_gsi].device;
  17.283 -    intx = hvm_irq_dpci->girq[guest_gsi].intx;
  17.284 -    hvm_pci_intx_deassert(d, device, intx);
  17.285 -
  17.286 -    spin_lock(&hvm_irq_dpci->dirq_lock);
  17.287 -    if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
  17.288 -    {
  17.289 -        spin_unlock(&hvm_irq_dpci->dirq_lock);
  17.290 -
  17.291 -        gdprintk(XENLOG_INFO VTDPREFIX,
  17.292 -                 "hvm_dpci_eoi:: mirq = %x\n", machine_gsi);
  17.293 -        stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
  17.294 -        if ( (ent == NULL) || !ent->fields.mask )
  17.295 -            pirq_guest_eoi(d, machine_gsi);
  17.296 -    }
  17.297 -    else
  17.298 -        spin_unlock(&hvm_irq_dpci->dirq_lock);
  17.299 -}
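
For reference, a caller of pt_irq_create_bind_vtd() above fills a xen_domctl_bind_pt_irq_t with the machine GSI and the guest device/INTx pair. A hedged sketch; the field layout is taken from the code above, but the values are examples only, and d/rc are assumed in scope:

    xen_domctl_bind_pt_irq_t bind;

    memset(&bind, 0, sizeof(bind));
    bind.machine_irq  = 11;   /* example: machine GSI of the assigned device */
    bind.u.pci.device = 3;    /* example: guest-visible PCI slot */
    bind.u.pci.intx   = 0;    /* example: INTA */

    rc = pt_irq_create_bind_vtd(d, &bind);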
    18.1 --- a/xen/arch/x86/hvm/vmx/vtd/msi.h	Thu Feb 21 14:50:27 2008 +0000
    18.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.3 @@ -1,127 +0,0 @@
    18.4 -/*
    18.5 - * Copyright (C) 2003-2004 Intel
    18.6 - * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
    18.7 - */
    18.8 -
    18.9 -#ifndef MSI_H
   18.10 -#define MSI_H
   18.11 -
   18.12 -/*
    18.13 - * Assume the maximum number of hot-plug slots supported by the system is
    18.14 - * about ten. The worst case is that each of these slots is hot-added with a
    18.15 - * device that has two MSI/MSI-X capable functions. To guard against an
    18.16 - * MSI-X driver that attempts to request all available vectors,
    18.17 - * NR_HP_RESERVED_VECTORS is defined as below to ensure at least one message
    18.18 - * is assigned to each detected MSI/MSI-X device function.
   18.19 - */
   18.20 -#define NR_HP_RESERVED_VECTORS 	20
   18.21 -
   18.22 -extern int vector_irq[NR_VECTORS];
   18.23 -extern int pci_vector_resources(int last, int nr_released);
   18.24 -
   18.25 -/*
   18.26 - * MSI-X Address Register
   18.27 - */
   18.28 -#define PCI_MSIX_FLAGS_QSIZE		0x7FF
   18.29 -#define PCI_MSIX_FLAGS_ENABLE		(1 << 15)
   18.30 -#define PCI_MSIX_FLAGS_BIRMASK		(7 << 0)
   18.31 -#define PCI_MSIX_FLAGS_BITMASK		(1 << 0)
   18.32 -
   18.33 -#define PCI_MSIX_ENTRY_SIZE			16
   18.34 -#define  PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET	0
   18.35 -#define  PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET	4
   18.36 -#define  PCI_MSIX_ENTRY_DATA_OFFSET		8
   18.37 -#define  PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET	12
   18.38 -
   18.39 -#define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
   18.40 -#define msi_lower_address_reg(base)	(base + PCI_MSI_ADDRESS_LO)
   18.41 -#define msi_upper_address_reg(base)	(base + PCI_MSI_ADDRESS_HI)
   18.42 -#define msi_data_reg(base, is64bit)	\
   18.43 -	( (is64bit == 1) ? base+PCI_MSI_DATA_64 : base+PCI_MSI_DATA_32 )
   18.44 -#define msi_mask_bits_reg(base, is64bit) \
   18.45 -	( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4)
   18.46 -#define msi_disable(control)		control &= ~PCI_MSI_FLAGS_ENABLE
   18.47 -#define multi_msi_capable(control) \
   18.48 -	(1 << ((control & PCI_MSI_FLAGS_QMASK) >> 1))
   18.49 -#define multi_msi_enable(control, num) \
   18.50 -	control |= (((num >> 1) << 4) & PCI_MSI_FLAGS_QSIZE);
   18.51 -#define is_64bit_address(control)	(control & PCI_MSI_FLAGS_64BIT)
   18.52 -#define is_mask_bit_support(control)	(control & PCI_MSI_FLAGS_MASKBIT)
   18.53 -#define msi_enable(control, num) multi_msi_enable(control, num); \
   18.54 -	control |= PCI_MSI_FLAGS_ENABLE
   18.55 -
   18.56 -#define msix_table_offset_reg(base)	(base + 0x04)
   18.57 -#define msix_pba_offset_reg(base)	(base + 0x08)
   18.58 -#define msix_enable(control)	 	control |= PCI_MSIX_FLAGS_ENABLE
   18.59 -#define msix_disable(control)	 	control &= ~PCI_MSIX_FLAGS_ENABLE
   18.60 -#define msix_table_size(control) 	((control & PCI_MSIX_FLAGS_QSIZE)+1)
   18.61 -#define multi_msix_capable		msix_table_size
   18.62 -#define msix_unmask(address)	 	(address & ~PCI_MSIX_FLAGS_BITMASK)
   18.63 -#define msix_mask(address)		(address | PCI_MSIX_FLAGS_BITMASK)
   18.64 -#define msix_is_pending(address) 	(address & PCI_MSIX_FLAGS_PENDMASK)
   18.65 -
   18.66 -/*
   18.67 - * MSI Defined Data Structures
   18.68 - */
   18.69 -#define MSI_ADDRESS_HEADER		0xfee
   18.70 -#define MSI_ADDRESS_HEADER_SHIFT	12
   18.71 -#define MSI_ADDRESS_HEADER_MASK		0xfff000
   18.72 -#define MSI_ADDRESS_DEST_ID_MASK	0xfff0000f
   18.73 -#define MSI_TARGET_CPU_MASK		0xff
   18.74 -#define MSI_TARGET_CPU_SHIFT		12
   18.75 -#define MSI_DELIVERY_MODE		0
   18.76 -#define MSI_LEVEL_MODE			1	/* Edge always assert */
   18.77 -#define MSI_TRIGGER_MODE		0	/* MSI is edge sensitive */
   18.78 -#define MSI_PHYSICAL_MODE		0
   18.79 -#define MSI_LOGICAL_MODE		1
   18.80 -#define MSI_REDIRECTION_HINT_MODE	0
   18.81 -
   18.82 -#define __LITTLE_ENDIAN_BITFIELD	1
   18.83 -
   18.84 -struct msg_data {
   18.85 -#if defined(__LITTLE_ENDIAN_BITFIELD)
   18.86 -	__u32	vector		:  8;
   18.87 -	__u32	delivery_mode	:  3;	/* 000b: FIXED | 001b: lowest prior */
   18.88 -	__u32	reserved_1	:  3;
   18.89 -	__u32	level		:  1;	/* 0: deassert | 1: assert */
   18.90 -	__u32	trigger		:  1;	/* 0: edge | 1: level */
   18.91 -	__u32	reserved_2	: 16;
   18.92 -#elif defined(__BIG_ENDIAN_BITFIELD)
   18.93 -	__u32	reserved_2	: 16;
   18.94 -	__u32	trigger		:  1;	/* 0: edge | 1: level */
   18.95 -	__u32	level		:  1;	/* 0: deassert | 1: assert */
   18.96 -	__u32	reserved_1	:  3;
   18.97 -	__u32	delivery_mode	:  3;	/* 000b: FIXED | 001b: lowest prior */
   18.98 -	__u32	vector		:  8;
   18.99 -#else
  18.100 -#error "Bitfield endianness not defined! Check your byteorder.h"
  18.101 -#endif
  18.102 -} __attribute__ ((packed));
  18.103 -
  18.104 -struct msg_address {
  18.105 -	union {
  18.106 -		struct {
  18.107 -#if defined(__LITTLE_ENDIAN_BITFIELD)
  18.108 -			__u32	reserved_1	:  2;
  18.109 -			__u32	dest_mode	:  1;	/*0:physic | 1:logic */
  18.110 -			__u32	redirection_hint:  1;  	/*0: dedicated CPU
  18.111 -							  1: lowest priority */
  18.112 -			__u32	reserved_2	:  4;
  18.113 - 			__u32	dest_id		: 24;	/* Destination ID */
  18.114 -#elif defined(__BIG_ENDIAN_BITFIELD)
  18.115 - 			__u32	dest_id		: 24;	/* Destination ID */
  18.116 -			__u32	reserved_2	:  4;
  18.117 -			__u32	redirection_hint:  1;  	/*0: dedicated CPU
  18.118 -							  1: lowest priority */
   18.119 -			__u32	dest_mode	:  1;	/* 0: physical | 1: logical */
  18.120 -			__u32	reserved_1	:  2;
  18.121 -#else
  18.122 -#error "Bitfield endianness not defined! Check your byteorder.h"
  18.123 -#endif
  18.124 -      		}u;
  18.125 -       		__u32  value;
  18.126 -	}lo_address;
  18.127 -	__u32 	hi_address;
  18.128 -} __attribute__ ((packed));
  18.129 -
  18.130 -#endif /* MSI_H */
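
The control-register helpers above compose with the direct config-space accessors from pci-direct.h (below in this changeset). A sketch, assuming bus/slot/func and the MSI capability offset pos were already located; note that is_64bit_address() returns the raw flag bit while msi_data_reg() compares against 1 exactly, so the flag must be normalised first:

    u16 control = read_pci_config_16(bus, slot, func, msi_control_reg(pos));
    int is64 = is_64bit_address(control) ? 1 : 0;   /* normalise 0x80 -> 1 */
    unsigned int data_off = msi_data_reg(pos, is64);

    msi_enable(control, 1);   /* request one message and set the enable bit */
    /* the updated control value must then be written back with a 16-bit
       config-space write; pci-direct.h below only provides a 32-bit write */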
    19.1 --- a/xen/arch/x86/hvm/vmx/vtd/pci-direct.h	Thu Feb 21 14:50:27 2008 +0000
    19.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.3 @@ -1,48 +0,0 @@
    19.4 -#ifndef ASM_PCI_DIRECT_H
    19.5 -#define ASM_PCI_DIRECT_H 1
    19.6 -
    19.7 -#include <xen/types.h>
    19.8 -#include <asm/io.h>
    19.9 -
   19.10 -/* Direct PCI access. This is used for PCI accesses in early boot before
   19.11 -   the PCI subsystem works. */
   19.12 -
   19.13 -#define PDprintk(x...)
   19.14 -
   19.15 -static inline u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset)
   19.16 -{
   19.17 -    u32 v;
   19.18 -    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
   19.19 -    v = inl(0xcfc);
   19.20 -    if (v != 0xffffffff)
   19.21 -        PDprintk("%x reading 4 from %x: %x\n", slot, offset, v);
   19.22 -    return v;
   19.23 -}
   19.24 -
   19.25 -static inline u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset)
   19.26 -{
   19.27 -    u8 v;
   19.28 -    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
   19.29 -    v = inb(0xcfc + (offset&3));
   19.30 -    PDprintk("%x reading 1 from %x: %x\n", slot, offset, v);
   19.31 -    return v;
   19.32 -}
   19.33 -
   19.34 -static inline u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset)
   19.35 -{
   19.36 -    u16 v;
   19.37 -    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
   19.38 -    v = inw(0xcfc + (offset&2));
   19.39 -    PDprintk("%x reading 2 from %x: %x\n", slot, offset, v);
   19.40 -    return v;
   19.41 -}
   19.42 -
   19.43 -static inline void write_pci_config(
   19.44 -    u8 bus, u8 slot, u8 func, u8 offset, u32 val)
   19.45 -{
   19.46 -    PDprintk("%x writing to %x: %x\n", slot, offset, val);
   19.47 -    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
   19.48 -    outl(val, 0xcfc);
   19.49 -}
   19.50 -
   19.51 -#endif
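
A quick usage sketch of the accessors above, probing for a device during early boot; the bus/slot/function values are examples, and PCI_VENDOR_ID comes from pci_regs.h below:

    u32 id = read_pci_config(0, 2, 0, PCI_VENDOR_ID);   /* bus 0, slot 2, func 0 */
    u16 vendor = id & 0xffff;
    u16 device = id >> 16;

    if ( vendor != 0xffff )   /* all-ones means nothing responded */
        printk("found device %04x:%04x\n", vendor, device);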
    20.1 --- a/xen/arch/x86/hvm/vmx/vtd/pci_regs.h	Thu Feb 21 14:50:27 2008 +0000
    20.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.3 @@ -1,449 +0,0 @@
    20.4 -/*
    20.5 - *	pci_regs.h
    20.6 - *
    20.7 - *	PCI standard defines
    20.8 - *	Copyright 1994, Drew Eckhardt
    20.9 - *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
   20.10 - *
   20.11 - *	For more information, please consult the following manuals (look at
   20.12 - *	http://www.pcisig.com/ for how to get them):
   20.13 - *
   20.14 - *	PCI BIOS Specification
   20.15 - *	PCI Local Bus Specification
   20.16 - *	PCI to PCI Bridge Specification
   20.17 - *	PCI System Design Guide
   20.18 - */
   20.19 -
   20.20 -#ifndef LINUX_PCI_REGS_H
   20.21 -#define LINUX_PCI_REGS_H
   20.22 -
   20.23 -/*
   20.24 - * Under PCI, each device has 256 bytes of configuration address space,
   20.25 - * of which the first 64 bytes are standardized as follows:
   20.26 - */
   20.27 -#define PCI_VENDOR_ID		0x00	/* 16 bits */
   20.28 -#define PCI_DEVICE_ID		0x02	/* 16 bits */
   20.29 -#define PCI_COMMAND		0x04	/* 16 bits */
   20.30 -#define  PCI_COMMAND_IO		0x1	/* Enable response in I/O space */
   20.31 -#define  PCI_COMMAND_MEMORY	0x2	/* Enable response in Memory space */
   20.32 -#define  PCI_COMMAND_MASTER	0x4	/* Enable bus mastering */
   20.33 -#define  PCI_COMMAND_SPECIAL	0x8	/* Enable response to special cycles */
   20.34 -#define  PCI_COMMAND_INVALIDATE	0x10	/* Use memory write and invalidate */
   20.35 -#define  PCI_COMMAND_VGA_PALETTE 0x20	/* Enable palette snooping */
   20.36 -#define  PCI_COMMAND_PARITY	0x40	/* Enable parity checking */
   20.37 -#define  PCI_COMMAND_WAIT 	0x80	/* Enable address/data stepping */
   20.38 -#define  PCI_COMMAND_SERR	0x100	/* Enable SERR */
   20.39 -#define  PCI_COMMAND_FAST_BACK	0x200	/* Enable back-to-back writes */
   20.40 -#define  PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
   20.41 -
   20.42 -#define PCI_STATUS		0x06	/* 16 bits */
   20.43 -#define  PCI_STATUS_CAP_LIST	0x10	/* Support Capability List */
    20.44 -#define  PCI_STATUS_66MHZ	0x20	/* Support 66 MHz PCI 2.1 bus */
   20.45 -#define  PCI_STATUS_UDF		0x40	/* Support User Definable Features [obsolete] */
   20.46 -#define  PCI_STATUS_FAST_BACK	0x80	/* Accept fast-back to back */
   20.47 -#define  PCI_STATUS_PARITY	0x100	/* Detected parity error */
   20.48 -#define  PCI_STATUS_DEVSEL_MASK	0x600	/* DEVSEL timing */
   20.49 -#define  PCI_STATUS_DEVSEL_FAST		0x000
   20.50 -#define  PCI_STATUS_DEVSEL_MEDIUM	0x200
   20.51 -#define  PCI_STATUS_DEVSEL_SLOW		0x400
   20.52 -#define  PCI_STATUS_SIG_TARGET_ABORT	0x800 /* Set on target abort */
   20.53 -#define  PCI_STATUS_REC_TARGET_ABORT	0x1000 /* Master ack of " */
   20.54 -#define  PCI_STATUS_REC_MASTER_ABORT	0x2000 /* Set on master abort */
   20.55 -#define  PCI_STATUS_SIG_SYSTEM_ERROR	0x4000 /* Set when we drive SERR */
   20.56 -#define  PCI_STATUS_DETECTED_PARITY	0x8000 /* Set on parity error */
   20.57 -
   20.58 -#define PCI_CLASS_REVISION	0x08	/* High 24 bits are class, low 8 revision */
   20.59 -#define PCI_REVISION_ID		0x08	/* Revision ID */
   20.60 -#define PCI_CLASS_PROG		0x09	/* Reg. Level Programming Interface */
   20.61 -#define PCI_CLASS_DEVICE	0x0a	/* Device class */
   20.62 -
   20.63 -#define PCI_CACHE_LINE_SIZE	0x0c	/* 8 bits */
   20.64 -#define PCI_LATENCY_TIMER	0x0d	/* 8 bits */
   20.65 -#define PCI_HEADER_TYPE		0x0e	/* 8 bits */
   20.66 -#define  PCI_HEADER_TYPE_NORMAL		0
   20.67 -#define  PCI_HEADER_TYPE_BRIDGE		1
   20.68 -#define  PCI_HEADER_TYPE_CARDBUS	2
   20.69 -
   20.70 -#define PCI_BIST		0x0f	/* 8 bits */
   20.71 -#define  PCI_BIST_CODE_MASK	0x0f	/* Return result */
   20.72 -#define  PCI_BIST_START		0x40	/* 1 to start BIST, 2 secs or less */
   20.73 -#define  PCI_BIST_CAPABLE	0x80	/* 1 if BIST capable */
   20.74 -
   20.75 -/*
   20.76 - * Base addresses specify locations in memory or I/O space.
   20.77 - * Decoded size can be determined by writing a value of
   20.78 - * 0xffffffff to the register, and reading it back.  Only
   20.79 - * 1 bits are decoded.
   20.80 - */
   20.81 -#define PCI_BASE_ADDRESS_0	0x10	/* 32 bits */
   20.82 -#define PCI_BASE_ADDRESS_1	0x14	/* 32 bits [htype 0,1 only] */
   20.83 -#define PCI_BASE_ADDRESS_2	0x18	/* 32 bits [htype 0 only] */
   20.84 -#define PCI_BASE_ADDRESS_3	0x1c	/* 32 bits */
   20.85 -#define PCI_BASE_ADDRESS_4	0x20	/* 32 bits */
   20.86 -#define PCI_BASE_ADDRESS_5	0x24	/* 32 bits */
   20.87 -#define  PCI_BASE_ADDRESS_SPACE		0x01	/* 0 = memory, 1 = I/O */
   20.88 -#define  PCI_BASE_ADDRESS_SPACE_IO	0x01
   20.89 -#define  PCI_BASE_ADDRESS_SPACE_MEMORY	0x00
   20.90 -#define  PCI_BASE_ADDRESS_MEM_TYPE_MASK	0x06
   20.91 -#define  PCI_BASE_ADDRESS_MEM_TYPE_32	0x00	/* 32 bit address */
   20.92 -#define  PCI_BASE_ADDRESS_MEM_TYPE_1M	0x02	/* Below 1M [obsolete] */
   20.93 -#define  PCI_BASE_ADDRESS_MEM_TYPE_64	0x04	/* 64 bit address */
   20.94 -#define  PCI_BASE_ADDRESS_MEM_PREFETCH	0x08	/* prefetchable? */
   20.95 -#define  PCI_BASE_ADDRESS_MEM_MASK	(~0x0fUL)
   20.96 -#define  PCI_BASE_ADDRESS_IO_MASK	(~0x03UL)
   20.97 -/* bit 1 is reserved if address_space = 1 */
   20.98 -
   20.99 -/* Header type 0 (normal devices) */
  20.100 -#define PCI_CARDBUS_CIS		0x28
  20.101 -#define PCI_SUBSYSTEM_VENDOR_ID	0x2c
  20.102 -#define PCI_SUBSYSTEM_ID	0x2e
  20.103 -#define PCI_ROM_ADDRESS		0x30	/* Bits 31..11 are address, 10..1 reserved */
  20.104 -#define  PCI_ROM_ADDRESS_ENABLE	0x01
  20.105 -#define PCI_ROM_ADDRESS_MASK	(~0x7ffUL)
  20.106 -
  20.107 -#define PCI_CAPABILITY_LIST	0x34	/* Offset of first capability list entry */
  20.108 -
  20.109 -/* 0x35-0x3b are reserved */
  20.110 -#define PCI_INTERRUPT_LINE	0x3c	/* 8 bits */
  20.111 -#define PCI_INTERRUPT_PIN	0x3d	/* 8 bits */
  20.112 -#define PCI_MIN_GNT		0x3e	/* 8 bits */
  20.113 -#define PCI_MAX_LAT		0x3f	/* 8 bits */
  20.114 -
  20.115 -/* Header type 1 (PCI-to-PCI bridges) */
  20.116 -#define PCI_PRIMARY_BUS		0x18	/* Primary bus number */
  20.117 -#define PCI_SECONDARY_BUS	0x19	/* Secondary bus number */
  20.118 -#define PCI_SUBORDINATE_BUS	0x1a	/* Highest bus number behind the bridge */
  20.119 -#define PCI_SEC_LATENCY_TIMER	0x1b	/* Latency timer for secondary interface */
  20.120 -#define PCI_IO_BASE		0x1c	/* I/O range behind the bridge */
  20.121 -#define PCI_IO_LIMIT		0x1d
  20.122 -#define  PCI_IO_RANGE_TYPE_MASK	0x0fUL	/* I/O bridging type */
  20.123 -#define  PCI_IO_RANGE_TYPE_16	0x00
  20.124 -#define  PCI_IO_RANGE_TYPE_32	0x01
  20.125 -#define  PCI_IO_RANGE_MASK	(~0x0fUL)
  20.126 -#define PCI_SEC_STATUS		0x1e	/* Secondary status register, only bit 14 used */
  20.127 -#define PCI_MEMORY_BASE		0x20	/* Memory range behind */
  20.128 -#define PCI_MEMORY_LIMIT	0x22
  20.129 -#define  PCI_MEMORY_RANGE_TYPE_MASK 0x0fUL
  20.130 -#define  PCI_MEMORY_RANGE_MASK	(~0x0fUL)
  20.131 -#define PCI_PREF_MEMORY_BASE	0x24	/* Prefetchable memory range behind */
  20.132 -#define PCI_PREF_MEMORY_LIMIT	0x26
  20.133 -#define  PCI_PREF_RANGE_TYPE_MASK 0x0fUL
  20.134 -#define  PCI_PREF_RANGE_TYPE_32	0x00
  20.135 -#define  PCI_PREF_RANGE_TYPE_64	0x01
  20.136 -#define  PCI_PREF_RANGE_MASK	(~0x0fUL)
  20.137 -#define PCI_PREF_BASE_UPPER32	0x28	/* Upper half of prefetchable memory range */
  20.138 -#define PCI_PREF_LIMIT_UPPER32	0x2c
  20.139 -#define PCI_IO_BASE_UPPER16	0x30	/* Upper half of I/O addresses */
  20.140 -#define PCI_IO_LIMIT_UPPER16	0x32
  20.141 -/* 0x34 same as for htype 0 */
  20.142 -/* 0x35-0x3b is reserved */
  20.143 -#define PCI_ROM_ADDRESS1	0x38	/* Same as PCI_ROM_ADDRESS, but for htype 1 */
  20.144 -/* 0x3c-0x3d are same as for htype 0 */
  20.145 -#define PCI_BRIDGE_CONTROL	0x3e
  20.146 -#define  PCI_BRIDGE_CTL_PARITY	0x01	/* Enable parity detection on secondary interface */
  20.147 -#define  PCI_BRIDGE_CTL_SERR	0x02	/* The same for SERR forwarding */
  20.148 -#define  PCI_BRIDGE_CTL_NO_ISA	0x04	/* Disable bridging of ISA ports */
  20.149 -#define  PCI_BRIDGE_CTL_VGA	0x08	/* Forward VGA addresses */
  20.150 -#define  PCI_BRIDGE_CTL_MASTER_ABORT	0x20  /* Report master aborts */
  20.151 -#define  PCI_BRIDGE_CTL_BUS_RESET	0x40	/* Secondary bus reset */
  20.152 -#define  PCI_BRIDGE_CTL_FAST_BACK	0x80	/* Fast Back2Back enabled on secondary interface */
  20.153 -
  20.154 -/* Header type 2 (CardBus bridges) */
  20.155 -#define PCI_CB_CAPABILITY_LIST	0x14
  20.156 -/* 0x15 reserved */
  20.157 -#define PCI_CB_SEC_STATUS	0x16	/* Secondary status */
  20.158 -#define PCI_CB_PRIMARY_BUS	0x18	/* PCI bus number */
  20.159 -#define PCI_CB_CARD_BUS		0x19	/* CardBus bus number */
  20.160 -#define PCI_CB_SUBORDINATE_BUS	0x1a	/* Subordinate bus number */
  20.161 -#define PCI_CB_LATENCY_TIMER	0x1b	/* CardBus latency timer */
  20.162 -#define PCI_CB_MEMORY_BASE_0	0x1c
  20.163 -#define PCI_CB_MEMORY_LIMIT_0	0x20
  20.164 -#define PCI_CB_MEMORY_BASE_1	0x24
  20.165 -#define PCI_CB_MEMORY_LIMIT_1	0x28
  20.166 -#define PCI_CB_IO_BASE_0	0x2c
  20.167 -#define PCI_CB_IO_BASE_0_HI	0x2e
  20.168 -#define PCI_CB_IO_LIMIT_0	0x30
  20.169 -#define PCI_CB_IO_LIMIT_0_HI	0x32
  20.170 -#define PCI_CB_IO_BASE_1	0x34
  20.171 -#define PCI_CB_IO_BASE_1_HI	0x36
  20.172 -#define PCI_CB_IO_LIMIT_1	0x38
  20.173 -#define PCI_CB_IO_LIMIT_1_HI	0x3a
  20.174 -#define  PCI_CB_IO_RANGE_MASK	(~0x03UL)
  20.175 -/* 0x3c-0x3d are same as for htype 0 */
  20.176 -#define PCI_CB_BRIDGE_CONTROL	0x3e
  20.177 -#define  PCI_CB_BRIDGE_CTL_PARITY	0x01	/* Similar to standard bridge control register */
  20.178 -#define  PCI_CB_BRIDGE_CTL_SERR		0x02
  20.179 -#define  PCI_CB_BRIDGE_CTL_ISA		0x04
  20.180 -#define  PCI_CB_BRIDGE_CTL_VGA		0x08
  20.181 -#define  PCI_CB_BRIDGE_CTL_MASTER_ABORT	0x20
  20.182 -#define  PCI_CB_BRIDGE_CTL_CB_RESET	0x40	/* CardBus reset */
  20.183 -#define  PCI_CB_BRIDGE_CTL_16BIT_INT	0x80	/* Enable interrupt for 16-bit cards */
  20.184 -#define  PCI_CB_BRIDGE_CTL_PREFETCH_MEM0 0x100	/* Prefetch enable for both memory regions */
  20.185 -#define  PCI_CB_BRIDGE_CTL_PREFETCH_MEM1 0x200
  20.186 -#define  PCI_CB_BRIDGE_CTL_POST_WRITES	0x400
  20.187 -#define PCI_CB_SUBSYSTEM_VENDOR_ID	0x40
  20.188 -#define PCI_CB_SUBSYSTEM_ID		0x42
  20.189 -#define PCI_CB_LEGACY_MODE_BASE		0x44	/* 16-bit PC Card legacy mode base address (ExCa) */
  20.190 -/* 0x48-0x7f reserved */
  20.191 -
  20.192 -/* Capability lists */
  20.193 -
  20.194 -#define PCI_CAP_LIST_ID		0	/* Capability ID */
  20.195 -#define  PCI_CAP_ID_PM		0x01	/* Power Management */
  20.196 -#define  PCI_CAP_ID_AGP		0x02	/* Accelerated Graphics Port */
  20.197 -#define  PCI_CAP_ID_VPD		0x03	/* Vital Product Data */
  20.198 -#define  PCI_CAP_ID_SLOTID	0x04	/* Slot Identification */
  20.199 -#define  PCI_CAP_ID_MSI		0x05	/* Message Signalled Interrupts */
  20.200 -#define  PCI_CAP_ID_CHSWP	0x06	/* CompactPCI HotSwap */
  20.201 -#define  PCI_CAP_ID_PCIX	0x07	/* PCI-X */
  20.202 -#define  PCI_CAP_ID_HT_IRQCONF	0x08	/* HyperTransport IRQ Configuration */
  20.203 -#define  PCI_CAP_ID_SHPC 	0x0C	/* PCI Standard Hot-Plug Controller */
  20.204 -#define  PCI_CAP_ID_EXP 	0x10	/* PCI Express */
  20.205 -#define  PCI_CAP_ID_MSIX	0x11	/* MSI-X */
  20.206 -#define PCI_CAP_LIST_NEXT	1	/* Next capability in the list */
  20.207 -#define PCI_CAP_FLAGS		2	/* Capability defined flags (16 bits) */
  20.208 -#define PCI_CAP_SIZEOF		4
  20.209 -
  20.210 -/* Power Management Registers */
  20.211 -
  20.212 -#define PCI_PM_PMC		2	/* PM Capabilities Register */
  20.213 -#define  PCI_PM_CAP_VER_MASK	0x0007	/* Version */
  20.214 -#define  PCI_PM_CAP_PME_CLOCK	0x0008	/* PME clock required */
  20.215 -#define  PCI_PM_CAP_RESERVED    0x0010  /* Reserved field */
  20.216 -#define  PCI_PM_CAP_DSI		0x0020	/* Device specific initialization */
   20.217 -#define  PCI_PM_CAP_AUX_POWER	0x01C0	/* Auxiliary power support mask */
  20.218 -#define  PCI_PM_CAP_D1		0x0200	/* D1 power state support */
  20.219 -#define  PCI_PM_CAP_D2		0x0400	/* D2 power state support */
  20.220 -#define  PCI_PM_CAP_PME		0x0800	/* PME pin supported */
  20.221 -#define  PCI_PM_CAP_PME_MASK	0xF800	/* PME Mask of all supported states */
  20.222 -#define  PCI_PM_CAP_PME_D0	0x0800	/* PME# from D0 */
  20.223 -#define  PCI_PM_CAP_PME_D1	0x1000	/* PME# from D1 */
  20.224 -#define  PCI_PM_CAP_PME_D2	0x2000	/* PME# from D2 */
  20.225 -#define  PCI_PM_CAP_PME_D3	0x4000	/* PME# from D3 (hot) */
  20.226 -#define  PCI_PM_CAP_PME_D3cold	0x8000	/* PME# from D3 (cold) */
  20.227 -#define PCI_PM_CTRL		4	/* PM control and status register */
  20.228 -#define  PCI_PM_CTRL_STATE_MASK	0x0003	/* Current power state (D0 to D3) */
  20.229 -#define  PCI_PM_CTRL_NO_SOFT_RESET	0x0004	/* No reset for D3hot->D0 */
  20.230 -#define  PCI_PM_CTRL_PME_ENABLE	0x0100	/* PME pin enable */
  20.231 -#define  PCI_PM_CTRL_DATA_SEL_MASK	0x1e00	/* Data select (??) */
  20.232 -#define  PCI_PM_CTRL_DATA_SCALE_MASK	0x6000	/* Data scale (??) */
  20.233 -#define  PCI_PM_CTRL_PME_STATUS	0x8000	/* PME pin status */
  20.234 -#define PCI_PM_PPB_EXTENSIONS	6	/* PPB support extensions (??) */
  20.235 -#define  PCI_PM_PPB_B2_B3	0x40	/* Stop clock when in D3hot (??) */
  20.236 -#define  PCI_PM_BPCC_ENABLE	0x80	/* Bus power/clock control enable (??) */
  20.237 -#define PCI_PM_DATA_REGISTER	7	/* (??) */
  20.238 -#define PCI_PM_SIZEOF		8
  20.239 -
  20.240 -/* AGP registers */
  20.241 -
  20.242 -#define PCI_AGP_VERSION		2	/* BCD version number */
  20.243 -#define PCI_AGP_RFU		3	/* Rest of capability flags */
  20.244 -#define PCI_AGP_STATUS		4	/* Status register */
  20.245 -#define  PCI_AGP_STATUS_RQ_MASK	0xff000000	/* Maximum number of requests - 1 */
  20.246 -#define  PCI_AGP_STATUS_SBA	0x0200	/* Sideband addressing supported */
  20.247 -#define  PCI_AGP_STATUS_64BIT	0x0020	/* 64-bit addressing supported */
  20.248 -#define  PCI_AGP_STATUS_FW	0x0010	/* FW transfers supported */
  20.249 -#define  PCI_AGP_STATUS_RATE4	0x0004	/* 4x transfer rate supported */
  20.250 -#define  PCI_AGP_STATUS_RATE2	0x0002	/* 2x transfer rate supported */
  20.251 -#define  PCI_AGP_STATUS_RATE1	0x0001	/* 1x transfer rate supported */
  20.252 -#define PCI_AGP_COMMAND		8	/* Control register */
  20.253 -#define  PCI_AGP_COMMAND_RQ_MASK 0xff000000  /* Master: Maximum number of requests */
  20.254 -#define  PCI_AGP_COMMAND_SBA	0x0200	/* Sideband addressing enabled */
  20.255 -#define  PCI_AGP_COMMAND_AGP	0x0100	/* Allow processing of AGP transactions */
  20.256 -#define  PCI_AGP_COMMAND_64BIT	0x0020 	/* Allow processing of 64-bit addresses */
  20.257 -#define  PCI_AGP_COMMAND_FW	0x0010 	/* Force FW transfers */
  20.258 -#define  PCI_AGP_COMMAND_RATE4	0x0004	/* Use 4x rate */
  20.259 -#define  PCI_AGP_COMMAND_RATE2	0x0002	/* Use 2x rate */
  20.260 -#define  PCI_AGP_COMMAND_RATE1	0x0001	/* Use 1x rate */
  20.261 -#define PCI_AGP_SIZEOF		12
  20.262 -
  20.263 -/* Vital Product Data */
  20.264 -
  20.265 -#define PCI_VPD_ADDR		2	/* Address to access (15 bits!) */
  20.266 -#define  PCI_VPD_ADDR_MASK	0x7fff	/* Address mask */
  20.267 -#define  PCI_VPD_ADDR_F		0x8000	/* Write 0, 1 indicates completion */
  20.268 -#define PCI_VPD_DATA		4	/* 32-bits of data returned here */
  20.269 -
  20.270 -/* Slot Identification */
  20.271 -
  20.272 -#define PCI_SID_ESR		2	/* Expansion Slot Register */
  20.273 -#define  PCI_SID_ESR_NSLOTS	0x1f	/* Number of expansion slots available */
  20.274 -#define  PCI_SID_ESR_FIC	0x20	/* First In Chassis Flag */
  20.275 -#define PCI_SID_CHASSIS_NR	3	/* Chassis Number */
  20.276 -
  20.277 -/* Message Signalled Interrupts registers */
  20.278 -
  20.279 -#define PCI_MSI_FLAGS		2	/* Various flags */
  20.280 -#define  PCI_MSI_FLAGS_64BIT	0x80	/* 64-bit addresses allowed */
  20.281 -#define  PCI_MSI_FLAGS_QSIZE	0x70	/* Message queue size configured */
  20.282 -#define  PCI_MSI_FLAGS_QMASK	0x0e	/* Maximum queue size available */
  20.283 -#define  PCI_MSI_FLAGS_ENABLE	0x01	/* MSI feature enabled */
  20.284 -#define  PCI_MSI_FLAGS_MASKBIT	0x100	/* 64-bit mask bits allowed */
  20.285 -#define PCI_MSI_RFU		3	/* Rest of capability flags */
  20.286 -#define PCI_MSI_ADDRESS_LO	4	/* Lower 32 bits */
  20.287 -#define PCI_MSI_ADDRESS_HI	8	/* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
  20.288 -#define PCI_MSI_DATA_32		8	/* 16 bits of data for 32-bit devices */
  20.289 -#define PCI_MSI_DATA_64		12	/* 16 bits of data for 64-bit devices */
  20.290 -#define PCI_MSI_MASK_BIT	16	/* Mask bits register */
  20.291 -
  20.292 -/* CompactPCI Hotswap Register */
  20.293 -
  20.294 -#define PCI_CHSWP_CSR		2	/* Control and Status Register */
  20.295 -#define  PCI_CHSWP_DHA		0x01	/* Device Hiding Arm */
  20.296 -#define  PCI_CHSWP_EIM		0x02	/* ENUM# Signal Mask */
  20.297 -#define  PCI_CHSWP_PIE		0x04	/* Pending Insert or Extract */
  20.298 -#define  PCI_CHSWP_LOO		0x08	/* LED On / Off */
  20.299 -#define  PCI_CHSWP_PI		0x30	/* Programming Interface */
  20.300 -#define  PCI_CHSWP_EXT		0x40	/* ENUM# status - extraction */
  20.301 -#define  PCI_CHSWP_INS		0x80	/* ENUM# status - insertion */
  20.302 -
  20.303 -/* PCI-X registers */
  20.304 -
  20.305 -#define PCI_X_CMD		2	/* Modes & Features */
  20.306 -#define  PCI_X_CMD_DPERR_E	0x0001	/* Data Parity Error Recovery Enable */
  20.307 -#define  PCI_X_CMD_ERO		0x0002	/* Enable Relaxed Ordering */
  20.308 -#define  PCI_X_CMD_MAX_READ	0x000c	/* Max Memory Read Byte Count */
  20.309 -#define  PCI_X_CMD_MAX_SPLIT	0x0070	/* Max Outstanding Split Transactions */
  20.310 -#define  PCI_X_CMD_VERSION(x) 	(((x) >> 12) & 3) /* Version */
  20.311 -#define PCI_X_STATUS		4	/* PCI-X capabilities */
  20.312 -#define  PCI_X_STATUS_DEVFN	0x000000ff	/* A copy of devfn */
  20.313 -#define  PCI_X_STATUS_BUS	0x0000ff00	/* A copy of bus nr */
  20.314 -#define  PCI_X_STATUS_64BIT	0x00010000	/* 64-bit device */
  20.315 -#define  PCI_X_STATUS_133MHZ	0x00020000	/* 133 MHz capable */
  20.316 -#define  PCI_X_STATUS_SPL_DISC	0x00040000	/* Split Completion Discarded */
  20.317 -#define  PCI_X_STATUS_UNX_SPL	0x00080000	/* Unexpected Split Completion */
  20.318 -#define  PCI_X_STATUS_COMPLEX	0x00100000	/* Device Complexity */
  20.319 -#define  PCI_X_STATUS_MAX_READ	0x00600000	/* Designed Max Memory Read Count */
  20.320 -#define  PCI_X_STATUS_MAX_SPLIT	0x03800000	/* Designed Max Outstanding Split Transactions */
  20.321 -#define  PCI_X_STATUS_MAX_CUM	0x1c000000	/* Designed Max Cumulative Read Size */
  20.322 -#define  PCI_X_STATUS_SPL_ERR	0x20000000	/* Rcvd Split Completion Error Msg */
  20.323 -#define  PCI_X_STATUS_266MHZ	0x40000000	/* 266 MHz capable */
  20.324 -#define  PCI_X_STATUS_533MHZ	0x80000000	/* 533 MHz capable */
  20.325 -
  20.326 -/* PCI Express capability registers */
  20.327 -
  20.328 -#define PCI_EXP_FLAGS		2	/* Capabilities register */
  20.329 -#define PCI_EXP_FLAGS_VERS	0x000f	/* Capability version */
  20.330 -#define PCI_EXP_FLAGS_TYPE	0x00f0	/* Device/Port type */
  20.331 -#define  PCI_EXP_TYPE_ENDPOINT	0x0	/* Express Endpoint */
  20.332 -#define  PCI_EXP_TYPE_LEG_END	0x1	/* Legacy Endpoint */
  20.333 -#define  PCI_EXP_TYPE_ROOT_PORT 0x4	/* Root Port */
  20.334 -#define  PCI_EXP_TYPE_UPSTREAM	0x5	/* Upstream Port */
  20.335 -#define  PCI_EXP_TYPE_DOWNSTREAM 0x6	/* Downstream Port */
  20.336 -#define  PCI_EXP_TYPE_PCI_BRIDGE 0x7	/* PCI/PCI-X Bridge */
  20.337 -#define PCI_EXP_FLAGS_SLOT	0x0100	/* Slot implemented */
  20.338 -#define PCI_EXP_FLAGS_IRQ	0x3e00	/* Interrupt message number */
  20.339 -#define PCI_EXP_DEVCAP		4	/* Device capabilities */
  20.340 -#define  PCI_EXP_DEVCAP_PAYLOAD	0x07	/* Max_Payload_Size */
  20.341 -#define  PCI_EXP_DEVCAP_PHANTOM	0x18	/* Phantom functions */
  20.342 -#define  PCI_EXP_DEVCAP_EXT_TAG	0x20	/* Extended tags */
  20.343 -#define  PCI_EXP_DEVCAP_L0S	0x1c0	/* L0s Acceptable Latency */
  20.344 -#define  PCI_EXP_DEVCAP_L1	0xe00	/* L1 Acceptable Latency */
  20.345 -#define  PCI_EXP_DEVCAP_ATN_BUT	0x1000	/* Attention Button Present */
  20.346 -#define  PCI_EXP_DEVCAP_ATN_IND	0x2000	/* Attention Indicator Present */
  20.347 -#define  PCI_EXP_DEVCAP_PWR_IND	0x4000	/* Power Indicator Present */
  20.348 -#define  PCI_EXP_DEVCAP_PWR_VAL	0x3fc0000 /* Slot Power Limit Value */
  20.349 -#define  PCI_EXP_DEVCAP_PWR_SCL	0xc000000 /* Slot Power Limit Scale */
  20.350 -#define PCI_EXP_DEVCTL		8	/* Device Control */
  20.351 -#define  PCI_EXP_DEVCTL_CERE	0x0001	/* Correctable Error Reporting En. */
  20.352 -#define  PCI_EXP_DEVCTL_NFERE	0x0002	/* Non-Fatal Error Reporting Enable */
  20.353 -#define  PCI_EXP_DEVCTL_FERE	0x0004	/* Fatal Error Reporting Enable */
  20.354 -#define  PCI_EXP_DEVCTL_URRE	0x0008	/* Unsupported Request Reporting En. */
  20.355 -#define  PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
  20.356 -#define  PCI_EXP_DEVCTL_PAYLOAD	0x00e0	/* Max_Payload_Size */
  20.357 -#define  PCI_EXP_DEVCTL_EXT_TAG	0x0100	/* Extended Tag Field Enable */
  20.358 -#define  PCI_EXP_DEVCTL_PHANTOM	0x0200	/* Phantom Functions Enable */
  20.359 -#define  PCI_EXP_DEVCTL_AUX_PME	0x0400	/* Auxiliary Power PM Enable */
  20.360 -#define  PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800  /* Enable No Snoop */
  20.361 -#define  PCI_EXP_DEVCTL_READRQ	0x7000	/* Max_Read_Request_Size */
  20.362 -#define PCI_EXP_DEVSTA		10	/* Device Status */
  20.363 -#define  PCI_EXP_DEVSTA_CED	0x01	/* Correctable Error Detected */
  20.364 -#define  PCI_EXP_DEVSTA_NFED	0x02	/* Non-Fatal Error Detected */
  20.365 -#define  PCI_EXP_DEVSTA_FED	0x04	/* Fatal Error Detected */
  20.366 -#define  PCI_EXP_DEVSTA_URD	0x08	/* Unsupported Request Detected */
  20.367 -#define  PCI_EXP_DEVSTA_AUXPD	0x10	/* AUX Power Detected */
  20.368 -#define  PCI_EXP_DEVSTA_TRPND	0x20	/* Transactions Pending */
  20.369 -#define PCI_EXP_LNKCAP		12	/* Link Capabilities */
  20.370 -#define PCI_EXP_LNKCTL		16	/* Link Control */
  20.371 -#define PCI_EXP_LNKSTA		18	/* Link Status */
  20.372 -#define PCI_EXP_SLTCAP		20	/* Slot Capabilities */
  20.373 -#define PCI_EXP_SLTCTL		24	/* Slot Control */
  20.374 -#define PCI_EXP_SLTSTA		26	/* Slot Status */
  20.375 -#define PCI_EXP_RTCTL		28	/* Root Control */
  20.376 -#define  PCI_EXP_RTCTL_SECEE	0x01	/* System Error on Correctable Error */
  20.377 -#define  PCI_EXP_RTCTL_SENFEE	0x02	/* System Error on Non-Fatal Error */
  20.378 -#define  PCI_EXP_RTCTL_SEFEE	0x04	/* System Error on Fatal Error */
  20.379 -#define  PCI_EXP_RTCTL_PMEIE	0x08	/* PME Interrupt Enable */
  20.380 -#define  PCI_EXP_RTCTL_CRSSVE	0x10	/* CRS Software Visibility Enable */
  20.381 -#define PCI_EXP_RTCAP		30	/* Root Capabilities */
  20.382 -#define PCI_EXP_RTSTA		32	/* Root Status */
  20.383 -
  20.384 -/* Extended Capabilities (PCI-X 2.0 and Express) */
  20.385 -#define PCI_EXT_CAP_ID(header)		(header & 0x0000ffff)
  20.386 -#define PCI_EXT_CAP_VER(header)		((header >> 16) & 0xf)
  20.387 -#define PCI_EXT_CAP_NEXT(header)	((header >> 20) & 0xffc)
  20.388 -
  20.389 -#define PCI_EXT_CAP_ID_ERR	1
  20.390 -#define PCI_EXT_CAP_ID_VC	2
  20.391 -#define PCI_EXT_CAP_ID_DSN	3
  20.392 -#define PCI_EXT_CAP_ID_PWR	4
  20.393 -
  20.394 -/* Advanced Error Reporting */
  20.395 -#define PCI_ERR_UNCOR_STATUS	4	/* Uncorrectable Error Status */
  20.396 -#define  PCI_ERR_UNC_TRAIN	0x00000001	/* Training */
  20.397 -#define  PCI_ERR_UNC_DLP	0x00000010	/* Data Link Protocol */
  20.398 -#define  PCI_ERR_UNC_POISON_TLP	0x00001000	/* Poisoned TLP */
  20.399 -#define  PCI_ERR_UNC_FCP	0x00002000	/* Flow Control Protocol */
  20.400 -#define  PCI_ERR_UNC_COMP_TIME	0x00004000	/* Completion Timeout */
  20.401 -#define  PCI_ERR_UNC_COMP_ABORT	0x00008000	/* Completer Abort */
  20.402 -#define  PCI_ERR_UNC_UNX_COMP	0x00010000	/* Unexpected Completion */
  20.403 -#define  PCI_ERR_UNC_RX_OVER	0x00020000	/* Receiver Overflow */
  20.404 -#define  PCI_ERR_UNC_MALF_TLP	0x00040000	/* Malformed TLP */
  20.405 -#define  PCI_ERR_UNC_ECRC	0x00080000	/* ECRC Error Status */
  20.406 -#define  PCI_ERR_UNC_UNSUP	0x00100000	/* Unsupported Request */
  20.407 -#define PCI_ERR_UNCOR_MASK	8	/* Uncorrectable Error Mask */
  20.408 -	/* Same bits as above */
  20.409 -#define PCI_ERR_UNCOR_SEVER	12	/* Uncorrectable Error Severity */
  20.410 -	/* Same bits as above */
  20.411 -#define PCI_ERR_COR_STATUS	16	/* Correctable Error Status */
  20.412 -#define  PCI_ERR_COR_RCVR	0x00000001	/* Receiver Error Status */
  20.413 -#define  PCI_ERR_COR_BAD_TLP	0x00000040	/* Bad TLP Status */
  20.414 -#define  PCI_ERR_COR_BAD_DLLP	0x00000080	/* Bad DLLP Status */
  20.415 -#define  PCI_ERR_COR_REP_ROLL	0x00000100	/* REPLAY_NUM Rollover */
  20.416 -#define  PCI_ERR_COR_REP_TIMER	0x00001000	/* Replay Timer Timeout */
  20.417 -#define PCI_ERR_COR_MASK	20	/* Correctable Error Mask */
  20.418 -	/* Same bits as above */
  20.419 -#define PCI_ERR_CAP		24	/* Advanced Error Capabilities */
  20.420 -#define  PCI_ERR_CAP_FEP(x)	((x) & 31)	/* First Error Pointer */
  20.421 -#define  PCI_ERR_CAP_ECRC_GENC	0x00000020	/* ECRC Generation Capable */
  20.422 -#define  PCI_ERR_CAP_ECRC_GENE	0x00000040	/* ECRC Generation Enable */
  20.423 -#define  PCI_ERR_CAP_ECRC_CHKC	0x00000080	/* ECRC Check Capable */
  20.424 -#define  PCI_ERR_CAP_ECRC_CHKE	0x00000100	/* ECRC Check Enable */
  20.425 -#define PCI_ERR_HEADER_LOG	28	/* Header Log Register (16 bytes) */
  20.426 -#define PCI_ERR_ROOT_COMMAND	44	/* Root Error Command */
  20.427 -#define PCI_ERR_ROOT_STATUS	48
  20.428 -#define PCI_ERR_ROOT_COR_SRC	52
  20.429 -#define PCI_ERR_ROOT_SRC	54
  20.430 -
  20.431 -/* Virtual Channel */
  20.432 -#define PCI_VC_PORT_REG1	4
  20.433 -#define PCI_VC_PORT_REG2	8
  20.434 -#define PCI_VC_PORT_CTRL	12
  20.435 -#define PCI_VC_PORT_STATUS	14
  20.436 -#define PCI_VC_RES_CAP		16
  20.437 -#define PCI_VC_RES_CTRL		20
  20.438 -#define PCI_VC_RES_STATUS	26
  20.439 -
  20.440 -/* Power Budgeting */
  20.441 -#define PCI_PWR_DSR		4	/* Data Select Register */
  20.442 -#define PCI_PWR_DATA		8	/* Data Register */
  20.443 -#define  PCI_PWR_DATA_BASE(x)	((x) & 0xff)	    /* Base Power */
  20.444 -#define  PCI_PWR_DATA_SCALE(x)	(((x) >> 8) & 3)    /* Data Scale */
  20.445 -#define  PCI_PWR_DATA_PM_SUB(x)	(((x) >> 10) & 7)   /* PM Sub State */
  20.446 -#define  PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */
  20.447 -#define  PCI_PWR_DATA_TYPE(x)	(((x) >> 15) & 7)   /* Type */
  20.448 -#define  PCI_PWR_DATA_RAIL(x)	(((x) >> 18) & 7)   /* Power Rail */
  20.449 -#define PCI_PWR_CAP		12	/* Capability */
  20.450 -#define  PCI_PWR_CAP_BUDGET(x)	((x) & 1)	/* Included in system budget */
  20.451 -
  20.452 -#endif /* LINUX_PCI_REGS_H */
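
The capability-list constants above combine with the early-boot accessors from pci-direct.h; a hedged sketch of the standard walk (find_cap() is an illustrative name, not a helper in this tree):

    static u8 find_cap(u8 bus, u8 slot, u8 func, u8 cap_id)
    {
        u16 status = read_pci_config_16(bus, slot, func, PCI_STATUS);
        u8 pos, ttl = 48;   /* bound the walk against malformed lists */

        if ( !(status & PCI_STATUS_CAP_LIST) )
            return 0;       /* device advertises no capability list */

        pos = read_pci_config_byte(bus, slot, func, PCI_CAPABILITY_LIST);
        while ( pos >= 0x40 && ttl-- )
        {
            pos &= ~3;      /* capability pointers are dword-aligned */
            if ( read_pci_config_byte(bus, slot, func,
                                      pos + PCI_CAP_LIST_ID) == cap_id )
                return pos;
            pos = read_pci_config_byte(bus, slot, func, pos + PCI_CAP_LIST_NEXT);
        }
        return 0;
    }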
    21.1 --- a/xen/arch/x86/hvm/vmx/vtd/qinval.c	Thu Feb 21 14:50:27 2008 +0000
    21.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.3 @@ -1,456 +0,0 @@
    21.4 -/*
    21.5 - * Copyright (c) 2006, Intel Corporation.
    21.6 - *
    21.7 - * This program is free software; you can redistribute it and/or modify it
    21.8 - * under the terms and conditions of the GNU General Public License,
    21.9 - * version 2, as published by the Free Software Foundation.
   21.10 - *
   21.11 - * This program is distributed in the hope it will be useful, but WITHOUT
   21.12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   21.13 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   21.14 - * more details.
   21.15 - *
   21.16 - * You should have received a copy of the GNU General Public License along with
   21.17 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   21.18 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   21.19 - *
   21.20 - * Copyright (C) Allen Kay <allen.m.kay@intel.com>
   21.21 - * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
   21.22 - */
   21.23 -
   21.24 -
   21.25 -#include <xen/init.h>
   21.26 -#include <xen/irq.h>
   21.27 -#include <xen/spinlock.h>
   21.28 -#include <xen/sched.h>
   21.29 -#include <xen/xmalloc.h>
   21.30 -#include <xen/domain_page.h>
   21.31 -#include <asm/delay.h>
   21.32 -#include <asm/string.h>
   21.33 -#include <asm/iommu.h>
   21.34 -#include <asm/hvm/vmx/intel-iommu.h>
   21.35 -#include "dmar.h"
   21.36 -#include "vtd.h"
   21.37 -#include "pci-direct.h"
   21.38 -#include "pci_regs.h"
   21.39 -#include "msi.h"
   21.40 -#include "extern.h"
   21.41 -
   21.42 -static void print_qi_regs(struct iommu *iommu)
   21.43 -{
   21.44 -    u64 val;
   21.45 -
   21.46 -    val = dmar_readq(iommu->reg, DMAR_IQA_REG);
    21.47 -    printk("DMAR_IQA_REG = %"PRIx64"\n", val);
   21.48 -
   21.49 -    val = dmar_readq(iommu->reg, DMAR_IQH_REG);
    21.50 -    printk("DMAR_IQH_REG = %"PRIx64"\n", val);
   21.51 -
   21.52 -    val = dmar_readq(iommu->reg, DMAR_IQT_REG);
    21.53 -    printk("DMAR_IQT_REG = %"PRIx64"\n", val);
   21.54 -}
   21.55 -
   21.56 -static int qinval_next_index(struct iommu *iommu)
   21.57 -{
   21.58 -    u64 val;
   21.59 -    val = dmar_readq(iommu->reg, DMAR_IQT_REG);
   21.60 -    return (val >> 4);
   21.61 -}
   21.62 -
   21.63 -static int qinval_update_qtail(struct iommu *iommu, int index)
   21.64 -{
   21.65 -    u64 val;
   21.66 -
    21.67 -    /* Need an ASSERT to ensure that we hold the register lock */
   21.68 -    val = (index < (QINVAL_ENTRY_NR-1)) ? (index + 1) : 0;
   21.69 -    dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << 4));
   21.70 -    return 0;
   21.71 -}
   21.72 -
   21.73 -static int gen_cc_inv_dsc(struct iommu *iommu, int index,
   21.74 -    u16 did, u16 source_id, u8 function_mask, u8 granu)
   21.75 -{
   21.76 -    u64 *ptr64;
   21.77 -    unsigned long flags;
   21.78 -    struct qinval_entry * qinval_entry = NULL;
   21.79 -    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
   21.80 -
   21.81 -    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
   21.82 -    qinval_entry = &qi_ctrl->qinval[index];
   21.83 -    qinval_entry->q.cc_inv_dsc.lo.type = TYPE_INVAL_CONTEXT;
   21.84 -    qinval_entry->q.cc_inv_dsc.lo.granu = granu;
   21.85 -    qinval_entry->q.cc_inv_dsc.lo.res_1 = 0;
   21.86 -    qinval_entry->q.cc_inv_dsc.lo.did = did;
   21.87 -    qinval_entry->q.cc_inv_dsc.lo.sid = source_id;
   21.88 -    qinval_entry->q.cc_inv_dsc.lo.fm = function_mask;
   21.89 -    qinval_entry->q.cc_inv_dsc.lo.res_2 = 0;
   21.90 -    qinval_entry->q.cc_inv_dsc.hi.res = 0;
   21.91 -    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
   21.92 -
   21.93 -    ptr64 = (u64 *)qinval_entry;
   21.94 -    return 0;
   21.95 -}
   21.96 -
   21.97 -int queue_invalidate_context(struct iommu *iommu,
   21.98 -    u16 did, u16 source_id, u8 function_mask, u8 granu)
   21.99 -{
  21.100 -    int ret = -1;
  21.101 -    unsigned long flags;
  21.102 -    int index = -1;
  21.103 -
  21.104 -    spin_lock_irqsave(&iommu->register_lock, flags);
  21.105 -    index = qinval_next_index(iommu);
   21.106 -    if ( index == -1 )
   21.107 -        { spin_unlock_irqrestore(&iommu->register_lock, flags); return -EBUSY; }
  21.108 -    ret = gen_cc_inv_dsc(iommu, index, did, source_id,
  21.109 -                         function_mask, granu);
  21.110 -    ret |= qinval_update_qtail(iommu, index);
  21.111 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
  21.112 -    return ret;
  21.113 -}
  21.114 -
  21.115 -static int gen_iotlb_inv_dsc(struct iommu *iommu, int index,
  21.116 -    u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
  21.117 -{
  21.118 -    unsigned long flags;
  21.119 -    struct qinval_entry * qinval_entry = NULL;
  21.120 -    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
  21.121 -
  21.122 -    if ( index == -1 )
  21.123 -        return -1;
  21.124 -    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
  21.125 -
  21.126 -    qinval_entry = &qi_ctrl->qinval[index];
  21.127 -    qinval_entry->q.iotlb_inv_dsc.lo.type = TYPE_INVAL_IOTLB;
  21.128 -    qinval_entry->q.iotlb_inv_dsc.lo.granu = granu;
  21.129 -    qinval_entry->q.iotlb_inv_dsc.lo.dr = 0;
  21.130 -    qinval_entry->q.iotlb_inv_dsc.lo.dw = 0;
  21.131 -    qinval_entry->q.iotlb_inv_dsc.lo.res_1 = 0;
  21.132 -    qinval_entry->q.iotlb_inv_dsc.lo.did = did;
  21.133 -    qinval_entry->q.iotlb_inv_dsc.lo.res_2 = 0;
  21.134 -
  21.135 -    qinval_entry->q.iotlb_inv_dsc.hi.am = am;
  21.136 -    qinval_entry->q.iotlb_inv_dsc.hi.ih = ih;
  21.137 -    qinval_entry->q.iotlb_inv_dsc.hi.res_1 = 0;
  21.138 -    qinval_entry->q.iotlb_inv_dsc.hi.addr = addr;
  21.139 -
  21.140 -    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
  21.141 -    return 0;
  21.142 -}
  21.143 -
  21.144 -int queue_invalidate_iotlb(struct iommu *iommu,
  21.145 -    u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
  21.146 -{
  21.147 -    int ret = -1;
  21.148 -    unsigned long flags;
  21.149 -    int index = -1;
  21.150 -
  21.151 -    spin_lock_irqsave(&iommu->register_lock, flags);
  21.152 -
  21.153 -    index = qinval_next_index(iommu);
  21.154 -    ret = gen_iotlb_inv_dsc(iommu, index, granu, dr, dw, did,
  21.155 -                            am, ih, addr);
  21.156 -    ret |= qinval_update_qtail(iommu, index);
  21.157 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
  21.158 -    return ret;
  21.159 -}
  21.160 -
  21.161 -static int gen_wait_dsc(struct iommu *iommu, int index,
  21.162 -    u8 iflag, u8 sw, u8 fn, u32 sdata, volatile u32 *saddr)
  21.163 -{
  21.164 -    u64 *ptr64;
  21.165 -    unsigned long flags;
  21.166 -    struct qinval_entry * qinval_entry = NULL;
  21.167 -    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
  21.168 -
  21.169 -    if ( index == -1 )
  21.170 -        return -1;
  21.171 -    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
  21.172 -    qinval_entry = &qi_ctrl->qinval[index];
  21.173 -    qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
  21.174 -    qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
  21.175 -    qinval_entry->q.inv_wait_dsc.lo.sw = sw;
  21.176 -    qinval_entry->q.inv_wait_dsc.lo.fn = fn;
  21.177 -    qinval_entry->q.inv_wait_dsc.lo.res_1 = 0;
  21.178 -    qinval_entry->q.inv_wait_dsc.lo.sdata = sdata;
  21.179 -    qinval_entry->q.inv_wait_dsc.hi.res_1 = 0;
  21.180 -    qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(saddr) >> 2;
  21.181 -    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
  21.182 -    ptr64 = (u64 *)qinval_entry;
  21.183 -    return 0;
  21.184 -}
  21.185 -
  21.186 -static int queue_invalidate_wait(struct iommu *iommu,
  21.187 -    u8 iflag, u8 sw, u8 fn, u32 sdata, volatile u32 *saddr)
  21.188 -{
  21.189 -    unsigned long flags;
  21.190 -    unsigned long start_time;
  21.191 -    int index = -1;
  21.192 -    int ret = -1;
  21.193 -    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
  21.194 -
  21.195 -    spin_lock_irqsave(&qi_ctrl->qinval_poll_lock, flags);
  21.196 -    spin_lock_irqsave(&iommu->register_lock, flags);
  21.197 -    index = qinval_next_index(iommu);
  21.198 -    if (*saddr == 1)
  21.199 -        *saddr = 0;
  21.200 -    ret = gen_wait_dsc(iommu, index, iflag, sw, fn, sdata, saddr);
  21.201 -    ret |= qinval_update_qtail(iommu, index);
  21.202 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
  21.203 -
   21.204 -    /* The interrupt completion method is not supported yet */
  21.205 -    if ( sw )
  21.206 -    {
   21.207 -        /* All wait descriptors write the same data to the same address */
  21.208 -        start_time = jiffies;
  21.209 -        while ( *saddr != 1 ) {
  21.210 -            if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT)) {
  21.211 -                print_qi_regs(iommu);
  21.212 -                panic("queue invalidate wait descriptor was not executed\n");
  21.213 -            }
  21.214 -            cpu_relax();
  21.215 -        }
  21.216 -    }
  21.217 -    spin_unlock_irqrestore(&qi_ctrl->qinval_poll_lock, flags);
  21.218 -    return ret;
  21.219 -}
  21.220 -
  21.221 -int invalidate_sync(struct iommu *iommu)
  21.222 -{
  21.223 -    int ret = -1;
  21.224 -    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
  21.225 -
  21.226 -    if (qi_ctrl->qinval)
  21.227 -    {
  21.228 -        ret = queue_invalidate_wait(iommu,
  21.229 -            0, 1, 1, 1, &qi_ctrl->qinval_poll_status);
  21.230 -        return ret;
  21.231 -    }
  21.232 -    return 0;
  21.233 -}
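
Callers pair a queued descriptor with invalidate_sync() to make the flush synchronous: the wait descriptor's status write is what queue_invalidate_wait() above polls for. A sketch of the typical sequence, with did/sid/fm/granu assumed in scope:

    ret  = queue_invalidate_context(iommu, did, sid, fm, granu);
    ret |= invalidate_sync(iommu);   /* blocks until hardware drains the queue */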
  21.234 -
  21.235 -static int gen_dev_iotlb_inv_dsc(struct iommu *iommu, int index,
  21.236 -    u32 max_invs_pend, u16 sid, u16 size, u64 addr)
  21.237 -{
  21.238 -    unsigned long flags;
  21.239 -    struct qinval_entry * qinval_entry = NULL;
  21.240 -    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
  21.241 -
  21.242 -    if ( index == -1 )
  21.243 -        return -1;
  21.244 -    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
  21.245 -
  21.246 -    qinval_entry = &qi_ctrl->qinval[index];
  21.247 -    qinval_entry->q.dev_iotlb_inv_dsc.lo.type = TYPE_INVAL_DEVICE_IOTLB;
  21.248 -    qinval_entry->q.dev_iotlb_inv_dsc.lo.res_1 = 0;
  21.249 -    qinval_entry->q.dev_iotlb_inv_dsc.lo.max_invs_pend = max_invs_pend;
  21.250 -    qinval_entry->q.dev_iotlb_inv_dsc.lo.res_2 = 0;
  21.251 -    qinval_entry->q.dev_iotlb_inv_dsc.lo.sid = sid;
  21.252 -    qinval_entry->q.dev_iotlb_inv_dsc.lo.res_3 = 0;
  21.253 -
  21.254 -    qinval_entry->q.dev_iotlb_inv_dsc.hi.size = size;
  21.255 -    qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr;
  21.256 -
  21.257 -    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
  21.258 -    return 0;
  21.259 -}
  21.260 -
  21.261 -int queue_invalidate_device_iotlb(struct iommu *iommu,
  21.262 -    u32 max_invs_pend, u16 sid, u16 size, u64 addr)
  21.263 -{
  21.264 -    int ret = -1;
  21.265 -    unsigned long flags;
  21.266 -    int index = -1;
  21.267 -
  21.268 -    spin_lock_irqsave(&iommu->register_lock, flags);
  21.269 -    index = qinval_next_index(iommu);
  21.270 -    ret = gen_dev_iotlb_inv_dsc(iommu, index, max_invs_pend,
  21.271 -                                sid, size, addr);
  21.272 -    ret |= qinval_update_qtail(iommu, index);
  21.273 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
  21.274 -    return ret;
  21.275 -}
  21.276 -
  21.277 -static int gen_iec_inv_dsc(struct iommu *iommu, int index,
  21.278 -    u8 granu, u8 im, u16 iidx)
  21.279 -{
  21.280 -    unsigned long flags;
  21.281 -    struct qinval_entry * qinval_entry = NULL;
  21.282 -    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
  21.283 -
  21.284 -    if ( index == -1 )
  21.285 -        return -1;
  21.286 -    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
  21.287 -
  21.288 -    qinval_entry = &qi_ctrl->qinval[index];
  21.289 -    qinval_entry->q.iec_inv_dsc.lo.type = TYPE_INVAL_IEC;
  21.290 -    qinval_entry->q.iec_inv_dsc.lo.granu = granu;
  21.291 -    qinval_entry->q.iec_inv_dsc.lo.res_1 = 0;
  21.292 -    qinval_entry->q.iec_inv_dsc.lo.im = im;
  21.293 -    qinval_entry->q.iec_inv_dsc.lo.iidx = iidx;
  21.294 -    qinval_entry->q.iec_inv_dsc.lo.res_2 = 0;
  21.295 -    qinval_entry->q.iec_inv_dsc.hi.res = 0;
  21.296 -
  21.297 -    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
  21.298 -    return 0;
  21.299 -}
  21.300 -
  21.301 -int queue_invalidate_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
  21.302 -{
  21.303 -    int ret;
  21.304 -    unsigned long flags;
  21.305 -    int index = -1;
  21.306 -
  21.307 -    spin_lock_irqsave(&iommu->register_lock, flags);
  21.308 -    index = qinval_next_index(iommu);
  21.309 -    ret = gen_iec_inv_dsc(iommu, index, granu, im, iidx);
  21.310 -    ret |= qinval_update_qtail(iommu, index);
  21.311 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
  21.312 -    return ret;
  21.313 -}
  21.314 -
  21.315 -u64 iec_cap;
  21.316 -int __iommu_flush_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
  21.317 -{
  21.318 -    int ret;
  21.319 -    ret = queue_invalidate_iec(iommu, granu, im, iidx);
  21.320 -    ret |= invalidate_sync(iommu);
  21.321 -
  21.322 -    /*
   21.323 -     * Reading a VT-d architected register ensures that draining
   21.324 -     * happens in an implementation-independent way.
  21.325 -     */
  21.326 -    iec_cap = dmar_readq(iommu->reg, DMAR_CAP_REG);
  21.327 -    return ret;
  21.328 -}
  21.329 -
  21.330 -int iommu_flush_iec_global(struct iommu *iommu)
  21.331 -{
  21.332 -    return __iommu_flush_iec(iommu, IEC_GLOBAL_INVL, 0, 0);
  21.333 -}
  21.334 -
  21.335 -int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx)
  21.336 -{
  21.337 -   return __iommu_flush_iec(iommu, IEC_INDEX_INVL, im, iidx);
  21.338 -}
  21.339 -
  21.340 -static int flush_context_qi(
  21.341 -    void *_iommu, u16 did, u16 sid, u8 fm, u64 type,
  21.342 -    int non_present_entry_flush)
  21.343 -{
  21.344 -    int ret = 0;
  21.345 -    struct iommu *iommu = (struct iommu *)_iommu;
  21.346 -    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
  21.347 -
  21.348 -    /*
   21.349 -     * In the non-present entry flush case: if the hardware doesn't
   21.350 -     * cache non-present entries, there is nothing to flush; if it
   21.351 -     * does, flush the entries of domain 0 (the domain id under which
   21.352 -     * non-present entries are cached)
  21.353 -     */
  21.354 -    if ( non_present_entry_flush )
  21.355 -    {
  21.356 -        if ( !cap_caching_mode(iommu->cap) )
  21.357 -            return 1;
  21.358 -        else
  21.359 -            did = 0;
  21.360 -    }
  21.361 -
  21.362 -    if (qi_ctrl->qinval)
  21.363 -    {
  21.364 -        ret = queue_invalidate_context(iommu, did, sid, fm,
  21.365 -                                       type >> DMA_CCMD_INVL_GRANU_OFFSET);
  21.366 -        ret |= invalidate_sync(iommu);
  21.367 -    }
  21.368 -    return ret;
  21.369 -}
  21.370 -
  21.371 -static int flush_iotlb_qi(
  21.372 -    void *_iommu, u16 did,
  21.373 -    u64 addr, unsigned int size_order, u64 type,
  21.374 -    int non_present_entry_flush)
  21.375 -{
  21.376 -    u8 dr = 0, dw = 0;
  21.377 -    int ret = 0;
  21.378 -    struct iommu *iommu = (struct iommu *)_iommu;
  21.379 -    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
  21.380 -
  21.381 -    /*
   21.382 -     * In the non-present entry flush case: if the hardware doesn't
   21.383 -     * cache non-present entries, there is nothing to flush; if it
   21.384 -     * does, flush the entries of domain 0 (the domain id under which
   21.385 -     * non-present entries are cached)
  21.386 -     */
  21.387 -    if ( non_present_entry_flush )
  21.388 -    {
  21.389 -        if ( !cap_caching_mode(iommu->cap) )
  21.390 -            return 1;
  21.391 -        else
  21.392 -            did = 0;
  21.393 -    }
  21.394 -
  21.395 -    if (qi_ctrl->qinval) {
  21.396 -        /* use queued invalidation */
  21.397 -        if (cap_write_drain(iommu->cap))
  21.398 -            dw = 1;
  21.399 -        if (cap_read_drain(iommu->cap))
  21.400 -            dr = 1;
   21.401 -        /* Need to consider the IH bit later */
  21.402 -        ret = queue_invalidate_iotlb(iommu,
  21.403 -                  (type >> DMA_TLB_FLUSH_GRANU_OFFSET), dr,
  21.404 -                  dw, did, (u8)size_order, 0, addr);
  21.405 -        ret |= invalidate_sync(iommu);
  21.406 -    }
  21.407 -    return ret;
  21.408 -}
  21.409 -
  21.410 -int qinval_setup(struct iommu *iommu)
  21.411 -{
  21.412 -    unsigned long start_time;
  21.413 -    u64 paddr;
  21.414 -    u32 status = 0;
  21.415 -    struct qi_ctrl *qi_ctrl;
  21.416 -    struct iommu_flush *flush;
  21.417 -
  21.418 -    qi_ctrl = iommu_qi_ctrl(iommu);
  21.419 -    flush = iommu_get_flush(iommu);
  21.420 -
  21.421 -    if ( !ecap_queued_inval(iommu->ecap) )
  21.422 -        return -ENODEV;
  21.423 -
  21.424 -    if (qi_ctrl->qinval == NULL) {
  21.425 -        qi_ctrl->qinval = alloc_xenheap_page();
  21.426 -        if (qi_ctrl->qinval == NULL)
  21.427 -            panic("Cannot allocate memory for qi_ctrl->qinval\n");
  21.428 -        memset((u8*)qi_ctrl->qinval, 0, PAGE_SIZE_4K);
  21.429 -        flush->context = flush_context_qi;
  21.430 -        flush->iotlb = flush_iotlb_qi;
  21.431 -    }
  21.432 -    paddr = virt_to_maddr(qi_ctrl->qinval);
  21.433 -
   21.434 -    /* Set up the Invalidation Queue Address (IQA) register with the
   21.435 -     * address of the page we just allocated.  The QS field at
   21.436 -     * bits[2:0] indicates that the queue size is one 4KB page,
   21.437 -     * i.e. 256 entries.  The Queue Head (IQH) and Queue Tail (IQT)
   21.438 -     * registers are automatically reset to 0 by a write
   21.439 -     * to the IQA register.
  21.440 -     */
  21.441 -    dmar_writeq(iommu->reg, DMAR_IQA_REG, paddr);
  21.442 -
  21.443 -    /* enable queued invalidation hardware */
  21.444 -    iommu->gcmd |= DMA_GCMD_QIE;
  21.445 -    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
  21.446 -
   21.447 -    /* Make sure hardware completes it */
  21.448 -    start_time = jiffies;
  21.449 -    while (1) {
  21.450 -        status = dmar_readl(iommu->reg, DMAR_GSTS_REG);
  21.451 -        if (status & DMA_GSTS_QIES)
  21.452 -            break;
  21.453 -        if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT))
  21.454 -            panic("Cannot set QIE field for queue invalidation\n");
  21.455 -        cpu_relax();
  21.456 -    }
  21.457 -    status = 0;
  21.458 -    return status;
  21.459 -}
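
The queue sizing in qinval_setup() above is straightforward arithmetic:
the QS field selects 2^QS 4KB pages for the queue, and each invalidation
descriptor is 16 bytes, so QS=0 yields 4096/16 = 256 entries. A standalone
sketch of that calculation (macro names are made up for illustration):

    #include <stdio.h>

    #define QINVAL_PAGE_SIZE  4096u   /* queue is sized in 4KB pages */
    #define QINVAL_ENTRY_SIZE   16u   /* each descriptor is 128 bits */

    /* Number of queue entries implied by the IQA QS field (2^qs pages). */
    static unsigned int qinval_entries(unsigned int qs)
    {
        return (QINVAL_PAGE_SIZE << qs) / QINVAL_ENTRY_SIZE;
    }

    int main(void)
    {
        printf("QS=0 -> %u entries\n", qinval_entries(0));   /* 256 */
        printf("QS=3 -> %u entries\n", qinval_entries(3));   /* 2048 */
        return 0;
    }
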
    22.1 --- a/xen/arch/x86/hvm/vmx/vtd/utils.c	Thu Feb 21 14:50:27 2008 +0000
    22.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.3 @@ -1,338 +0,0 @@
    22.4 -/*
    22.5 - * Copyright (c) 2006, Intel Corporation.
    22.6 - *
    22.7 - * This program is free software; you can redistribute it and/or modify it
    22.8 - * under the terms and conditions of the GNU General Public License,
    22.9 - * version 2, as published by the Free Software Foundation.
   22.10 - *
   22.11 - * This program is distributed in the hope it will be useful, but WITHOUT
   22.12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   22.13 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   22.14 - * more details.
   22.15 - *
   22.16 - * You should have received a copy of the GNU General Public License along with
   22.17 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   22.18 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   22.19 - *
   22.20 - * Copyright (C) Allen Kay <allen.m.kay@intel.com>
   22.21 - */
   22.22 -
   22.23 -#include <xen/init.h>
   22.24 -#include <xen/bitmap.h>
   22.25 -#include <xen/irq.h>
   22.26 -#include <xen/spinlock.h>
   22.27 -#include <xen/sched.h>
   22.28 -#include <xen/delay.h>
   22.29 -#include <asm/iommu.h>
   22.30 -#include <asm/hvm/vmx/intel-iommu.h>
   22.31 -#include "dmar.h"
   22.32 -#include "pci-direct.h"
   22.33 -#include "pci_regs.h"
   22.34 -#include "msi.h"
   22.35 -
   22.36 -#include <xen/mm.h>
   22.37 -#include <xen/xmalloc.h>
   22.38 -#include <xen/inttypes.h>
   22.39 -
   22.40 -#define INTEL   0x8086
   22.41 -#define SEABURG 0x4000
   22.42 -#define C_STEP  2
   22.43 -
   22.44 -int vtd_hw_check(void)
   22.45 -{
   22.46 -    u16 vendor, device;
   22.47 -    u8 revision, stepping;
   22.48 -
   22.49 -    vendor   = read_pci_config_16(0, 0, 0, PCI_VENDOR_ID);
   22.50 -    device   = read_pci_config_16(0, 0, 0, PCI_DEVICE_ID);
   22.51 -    revision = read_pci_config_byte(0, 0, 0, PCI_REVISION_ID);
   22.52 -    stepping = revision & 0xf;
   22.53 -
   22.54 -    if ( (vendor == INTEL) && (device == SEABURG) )
   22.55 -    {
   22.56 -        if ( stepping < C_STEP )
   22.57 -        {
   22.58 -            dprintk(XENLOG_WARNING VTDPREFIX,
   22.59 -                    "*** VT-d disabled - pre C0-step Seaburg found\n");
   22.60 -            dprintk(XENLOG_WARNING VTDPREFIX,
   22.61 -                    "***  vendor = %x device = %x revision = %x\n",
   22.62 -                    vendor, device, revision);
   22.63 -            vtd_enabled = 0;
   22.64 -            return -ENODEV;
   22.65 -        }
   22.66 -    }
   22.67 -    return 0;
   22.68 -}
   22.69 -
   22.70 -/* Disable vt-d protected memory registers. */
   22.71 -void disable_pmr(struct iommu *iommu)
   22.72 -{
   22.73 -    unsigned long start_time;
   22.74 -    unsigned int val;
   22.75 -
   22.76 -    val = dmar_readl(iommu->reg, DMAR_PMEN_REG);
   22.77 -    if ( !(val & DMA_PMEN_PRS) )
   22.78 -        return;
   22.79 -
   22.80 -    dmar_writel(iommu->reg, DMAR_PMEN_REG, val & ~DMA_PMEN_EPM);
   22.81 -    start_time = jiffies;
   22.82 -
   22.83 -    for ( ; ; )
   22.84 -    {
   22.85 -        val = dmar_readl(iommu->reg, DMAR_PMEN_REG);
   22.86 -        if ( (val & DMA_PMEN_PRS) == 0 )
   22.87 -            break;
   22.88 -
   22.89 -        if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
   22.90 -            panic("Disable PMRs timeout\n");
   22.91 -
   22.92 -        cpu_relax();
   22.93 -    }
   22.94 -
   22.95 -    dprintk(XENLOG_INFO VTDPREFIX,
   22.96 -            "Disabled protected memory registers\n");
   22.97 -}
   22.98 -
   22.99 -static u8 find_cap_offset(u8 bus, u8 dev, u8 func, u8 cap)
  22.100 -{
  22.101 -    u8 id;
  22.102 -    int max_cap = 48;
  22.103 -    u8 pos = PCI_CAPABILITY_LIST;
  22.104 -    u16 status;
  22.105 -
  22.106 -    status = read_pci_config_16(bus, dev, func, PCI_STATUS);
  22.107 -    if ( (status & PCI_STATUS_CAP_LIST) == 0 )
  22.108 -        return 0;
  22.109 -
  22.110 -    while ( max_cap-- )
  22.111 -    {
  22.112 -        pos = read_pci_config_byte(bus, dev, func, pos);
  22.113 -        if ( pos < 0x40 )
  22.114 -            break;
  22.115 -
  22.116 -        pos &= ~3;
  22.117 -        id = read_pci_config_byte(bus, dev, func, pos + PCI_CAP_LIST_ID);
  22.118 -
  22.119 -        if ( id == 0xff )
  22.120 -            break;
  22.121 -        else if ( id == cap )
  22.122 -            return pos;
  22.123 -
  22.124 -        pos += PCI_CAP_LIST_NEXT;
  22.125 -    }
  22.126 -
  22.127 -    return 0;
  22.128 -}
  22.129 -
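find_cap_offset() above is the standard PCI capability-list walk: start
from the Capabilities Pointer at config offset 0x34, follow the next
pointers, and stop on a match, an out-of-range pointer, or after a fixed
hop budget. A self-contained sketch against an in-memory mock of config
space; read_cfg8() and the populated offsets are invented for the example:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t cfg[256];                 /* mock PCI config space */

    static uint8_t read_cfg8(uint8_t off) { return cfg[off]; }

    /* Walk the capability list; return offset of 'cap', or 0 if absent. */
    static uint8_t find_cap(uint8_t cap)
    {
        uint8_t pos = read_cfg8(0x34);       /* Capabilities Pointer */
        int budget = 48;                     /* guard against list loops */

        while ( budget-- && pos >= 0x40 )
        {
            pos &= ~3;                       /* pointers are dword aligned */
            uint8_t id = read_cfg8(pos);
            if ( id == 0xff )
                break;
            if ( id == cap )
                return pos;
            pos = read_cfg8(pos + 1);        /* next capability pointer */
        }
        return 0;
    }

    int main(void)
    {
        cfg[0x34] = 0x40;                    /* head of list */
        cfg[0x40] = 0x01; cfg[0x41] = 0x50;  /* PM cap at 0x40, next 0x50 */
        cfg[0x50] = 0x10; cfg[0x51] = 0x00;  /* PCIe cap at 0x50, list end */
        printf("PCIe cap at 0x%x\n", find_cap(0x10));
        return 0;
    }
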
  22.130 -#define PCI_D3hot   (3)
  22.131 -#define PCI_CONFIG_DWORD_SIZE   (64)
  22.132 -#define PCI_EXP_DEVCAP_FLR      (1 << 28)
  22.133 -#define PCI_EXP_DEVCTL_FLR      (1 << 15)
  22.134 -
  22.135 -void pdev_flr(u8 bus, u8 devfn)
  22.136 -{
  22.137 -    u8 pos;
  22.138 -    u32 dev_cap, dev_status, pm_ctl;
  22.139 -    int flr = 0;
  22.140 -    u8 dev = PCI_SLOT(devfn);
  22.141 -    u8 func = PCI_FUNC(devfn);
  22.142 -
  22.143 -    pos = find_cap_offset(bus, dev, func, PCI_CAP_ID_EXP);
  22.144 -    if ( pos != 0 )
  22.145 -    {
  22.146 -        dev_cap = read_pci_config(bus, dev, func, pos + PCI_EXP_DEVCAP);
  22.147 -        if ( dev_cap & PCI_EXP_DEVCAP_FLR )
  22.148 -        {
  22.149 -            write_pci_config(bus, dev, func,
  22.150 -                             pos + PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_FLR);
  22.151 -            do {
  22.152 -                dev_status = read_pci_config(bus, dev, func,
  22.153 -                                             pos + PCI_EXP_DEVSTA);
  22.154 -            } while ( dev_status & PCI_EXP_DEVSTA_TRPND );
  22.155 -
  22.156 -            flr = 1;
  22.157 -        }
  22.158 -    }
  22.159 -
  22.160 -    /* If this device doesn't support function level reset,
   22.161 -     * program the device from D0 to D3hot, and then return to D0,
   22.162 -     * to approximate a function level reset
  22.163 -     */
  22.164 -    if ( flr == 0 )
  22.165 -    {
  22.166 -        pos = find_cap_offset(bus, dev, func, PCI_CAP_ID_PM);
  22.167 -        if ( pos != 0 )
  22.168 -        {
  22.169 -            int i;
  22.170 -            u32 config[PCI_CONFIG_DWORD_SIZE];
  22.171 -            for ( i = 0; i < PCI_CONFIG_DWORD_SIZE; i++ )
  22.172 -                config[i] = read_pci_config(bus, dev, func, i*4);
  22.173 -
  22.174 -            /* Enter D3hot without soft reset */
  22.175 -            pm_ctl = read_pci_config(bus, dev, func, pos + PCI_PM_CTRL);
  22.176 -            pm_ctl |= PCI_PM_CTRL_NO_SOFT_RESET;
  22.177 -            pm_ctl &= ~PCI_PM_CTRL_STATE_MASK;
  22.178 -            pm_ctl |= PCI_D3hot;
  22.179 -            write_pci_config(bus, dev, func, pos + PCI_PM_CTRL, pm_ctl);
  22.180 -            mdelay(10);
  22.181 -
  22.182 -            /* From D3hot to D0 */
  22.183 -            write_pci_config(bus, dev, func, pos + PCI_PM_CTRL, 0);
  22.184 -            mdelay(10);
  22.185 -
  22.186 -            /* Write saved configurations to device */
  22.187 -            for ( i = 0; i < PCI_CONFIG_DWORD_SIZE; i++ )
  22.188 -                write_pci_config(bus, dev, func, i*4, config[i]);
  22.189 -
  22.190 -            flr = 1;
  22.191 -        }
  22.192 -    }
  22.193 -}
  22.194 -
  22.195 -void print_iommu_regs(struct acpi_drhd_unit *drhd)
  22.196 -{
  22.197 -    struct iommu *iommu = drhd->iommu;
  22.198 -
  22.199 -    printk("---- print_iommu_regs ----\n");
  22.200 -    printk("print_iommu_regs: drhd->address = %lx\n", drhd->address);
  22.201 -    printk("print_iommu_regs: DMAR_VER_REG = %x\n",
  22.202 -           dmar_readl(iommu->reg,DMAR_VER_REG));
  22.203 -    printk("print_iommu_regs: DMAR_CAP_REG = %"PRIx64"\n",
  22.204 -           dmar_readq(iommu->reg,DMAR_CAP_REG));
  22.205 -    printk("print_iommu_regs: n_fault_reg = %"PRIx64"\n",
  22.206 -           cap_num_fault_regs(dmar_readq(iommu->reg, DMAR_CAP_REG)));
  22.207 -    printk("print_iommu_regs: fault_recording_offset_l = %"PRIx64"\n",
  22.208 -           cap_fault_reg_offset(dmar_readq(iommu->reg, DMAR_CAP_REG)));
  22.209 -    printk("print_iommu_regs: fault_recording_offset_h = %"PRIx64"\n",
  22.210 -           cap_fault_reg_offset(dmar_readq(iommu->reg, DMAR_CAP_REG)) + 8);
  22.211 -    printk("print_iommu_regs: fault_recording_reg_l = %"PRIx64"\n",
  22.212 -           dmar_readq(iommu->reg,
  22.213 -               cap_fault_reg_offset(dmar_readq(iommu->reg, DMAR_CAP_REG))));
  22.214 -    printk("print_iommu_regs: fault_recording_reg_h = %"PRIx64"\n",
  22.215 -           dmar_readq(iommu->reg,
  22.216 -               cap_fault_reg_offset(dmar_readq(iommu->reg, DMAR_CAP_REG)) + 8));
  22.217 -    printk("print_iommu_regs: DMAR_ECAP_REG = %"PRIx64"\n",
  22.218 -           dmar_readq(iommu->reg,DMAR_ECAP_REG));
  22.219 -    printk("print_iommu_regs: DMAR_GCMD_REG = %x\n",
  22.220 -           dmar_readl(iommu->reg,DMAR_GCMD_REG));
  22.221 -    printk("print_iommu_regs: DMAR_GSTS_REG = %x\n",
  22.222 -           dmar_readl(iommu->reg,DMAR_GSTS_REG));
  22.223 -    printk("print_iommu_regs: DMAR_RTADDR_REG = %"PRIx64"\n",
  22.224 -           dmar_readq(iommu->reg,DMAR_RTADDR_REG));
  22.225 -    printk("print_iommu_regs: DMAR_CCMD_REG = %"PRIx64"\n",
  22.226 -           dmar_readq(iommu->reg,DMAR_CCMD_REG));
  22.227 -    printk("print_iommu_regs: DMAR_FSTS_REG = %x\n",
  22.228 -           dmar_readl(iommu->reg,DMAR_FSTS_REG));
  22.229 -    printk("print_iommu_regs: DMAR_FECTL_REG = %x\n",
  22.230 -           dmar_readl(iommu->reg,DMAR_FECTL_REG));
  22.231 -    printk("print_iommu_regs: DMAR_FEDATA_REG = %x\n",
  22.232 -           dmar_readl(iommu->reg,DMAR_FEDATA_REG));
  22.233 -    printk("print_iommu_regs: DMAR_FEADDR_REG = %x\n",
  22.234 -           dmar_readl(iommu->reg,DMAR_FEADDR_REG));
  22.235 -    printk("print_iommu_regs: DMAR_FEUADDR_REG = %x\n",
  22.236 -           dmar_readl(iommu->reg,DMAR_FEUADDR_REG));
  22.237 -}
  22.238 -
  22.239 -u32 get_level_index(unsigned long gmfn, int level)
  22.240 -{
  22.241 -    while ( --level )
  22.242 -        gmfn = gmfn >> LEVEL_STRIDE;
  22.243 -
  22.244 -    return gmfn & LEVEL_MASK;
  22.245 -}
  22.246 -
  22.247 -void print_vtd_entries(
  22.248 -    struct domain *d, 
  22.249 -    struct iommu *iommu,
  22.250 -    int bus, int devfn,
  22.251 -    unsigned long gmfn)
  22.252 -{
  22.253 -    struct hvm_iommu *hd = domain_hvm_iommu(d);
  22.254 -    struct acpi_drhd_unit *drhd;
  22.255 -    struct context_entry *ctxt_entry;
  22.256 -    struct root_entry *root_entry;
  22.257 -    struct dma_pte pte;
  22.258 -    u64 *l;
  22.259 -    u32 l_index;
  22.260 -    u32 i = 0;
  22.261 -    int level = agaw_to_level(hd->agaw);
  22.262 -
  22.263 -    printk("print_vtd_entries: domain_id = %x bdf = %x:%x:%x gmfn = %lx\n",
  22.264 -           d->domain_id, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), gmfn);
  22.265 -
  22.266 -    if ( hd->pgd == NULL )
  22.267 -    {
   22.268 -        printk("    hd->pgd == NULL\n");
  22.269 -        return;
  22.270 -    }
   22.271 -    printk("    hd->pgd = %p virt_to_maddr(hd->pgd) = %lx\n",
  22.272 -           hd->pgd, virt_to_maddr(hd->pgd));
  22.273 -
  22.274 -    for_each_drhd_unit ( drhd )
  22.275 -    {
  22.276 -        printk("---- print_vtd_entries %d ----\n", i++);
  22.277 -
  22.278 -        root_entry = iommu->root_entry;
  22.279 -        if ( root_entry == NULL )
  22.280 -        {
  22.281 -            printk("    root_entry == NULL\n");
  22.282 -            continue;
  22.283 -        }
  22.284 -
  22.285 -        printk("    root_entry = %p\n", root_entry);
  22.286 -        printk("    root_entry[%x] = %"PRIx64"\n", bus, root_entry[bus].val);
  22.287 -        if ( !root_present(root_entry[bus]) )
  22.288 -        {
  22.289 -            printk("    root_entry[%x] not present\n", bus);
  22.290 -            continue;
  22.291 -        }
  22.292 -
  22.293 -        ctxt_entry =
  22.294 -            maddr_to_virt((root_entry[bus].val >> PAGE_SHIFT) << PAGE_SHIFT);
  22.295 -        if ( ctxt_entry == NULL )
  22.296 -        {
  22.297 -            printk("    ctxt_entry == NULL\n");
  22.298 -            continue;
  22.299 -        }
  22.300 -
  22.301 -        printk("    context = %p\n", ctxt_entry);
  22.302 -        printk("    context[%x] = %"PRIx64" %"PRIx64"\n",
  22.303 -               devfn, ctxt_entry[devfn].hi, ctxt_entry[devfn].lo);
  22.304 -        if ( !context_present(ctxt_entry[devfn]) )
  22.305 -        {
  22.306 -            printk("    ctxt_entry[%x] not present\n", devfn);
  22.307 -            continue;
  22.308 -        }
  22.309 -
  22.310 -        if ( level != VTD_PAGE_TABLE_LEVEL_3 &&
   22.311 -             level != VTD_PAGE_TABLE_LEVEL_4 )
  22.312 -        {
  22.313 -            printk("Unsupported VTD page table level (%d)!\n", level);
  22.314 -            continue;
  22.315 -        }
  22.316 -
  22.317 -        l = maddr_to_virt(ctxt_entry[devfn].lo);
  22.318 -        do
  22.319 -        {
  22.320 -            l = (u64*)(((unsigned long)l >> PAGE_SHIFT_4K) << PAGE_SHIFT_4K);
  22.321 -            printk("    l%d = %p\n", level, l);
  22.322 -            if ( l == NULL )
  22.323 -            {
  22.324 -                printk("    l%d == NULL\n", level);
  22.325 -                break;
  22.326 -            }
  22.327 -            l_index = get_level_index(gmfn, level);
  22.328 -            printk("    l%d_index = %x\n", level, l_index);
  22.329 -            printk("    l%d[%x] = %"PRIx64"\n", level, l_index, l[l_index]);
  22.330 -
  22.331 -            pte.val = l[l_index];
  22.332 -            if ( !dma_pte_present(pte) )
  22.333 -            {
  22.334 -                printk("    l%d[%x] not present\n", level, l_index);
  22.335 -                break;
  22.336 -            }
  22.337 -
  22.338 -            l = maddr_to_virt(l[l_index]);
  22.339 -        } while ( --level );
  22.340 -    }
  22.341 -}
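
get_level_index() above peels LEVEL_STRIDE bits off the guest frame number
per remaining level. A standalone illustration, assuming the conventional
VT-d values LEVEL_STRIDE = 9 and LEVEL_MASK = 0x1ff (512 entries per
table):

    #include <stdio.h>

    #define LEVEL_STRIDE 9          /* 512 entries per table level */
    #define LEVEL_MASK   ((1u << LEVEL_STRIDE) - 1)

    static unsigned int level_index(unsigned long gmfn, int level)
    {
        while ( --level )
            gmfn >>= LEVEL_STRIDE;  /* drop the indexes of lower levels */
        return gmfn & LEVEL_MASK;
    }

    int main(void)
    {
        unsigned long gmfn = 0x12345;
        /* A 3-level walk consumes bits [26:18], [17:9] and [8:0] of gmfn. */
        for ( int level = 3; level >= 1; level-- )
            printf("l%d index = 0x%x\n", level, level_index(gmfn, level));
        return 0;
    }
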
    23.1 --- a/xen/arch/x86/hvm/vmx/vtd/vtd.h	Thu Feb 21 14:50:27 2008 +0000
    23.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.3 @@ -1,54 +0,0 @@
    23.4 -/*
    23.5 - * Copyright (c) 2006, Intel Corporation.
    23.6 - *
    23.7 - * This program is free software; you can redistribute it and/or modify it
    23.8 - * under the terms and conditions of the GNU General Public License,
    23.9 - * version 2, as published by the Free Software Foundation.
   23.10 - *
   23.11 - * This program is distributed in the hope it will be useful, but WITHOUT
   23.12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   23.13 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   23.14 - * more details.
   23.15 - *
   23.16 - * You should have received a copy of the GNU General Public License along with
   23.17 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   23.18 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   23.19 - *
   23.20 - * Copyright (C) Allen Kay <allen.m.kay@intel.com>
   23.21 - * Copyright (C) Weidong Han <weidong.han@intel.com>
   23.22 - */
   23.23 -
   23.24 -#ifndef _VTD_H_
   23.25 -#define _VTD_H_
   23.26 -
   23.27 -#include <xen/list.h>
   23.28 -#include <asm/iommu.h>
   23.29 -
   23.30 -#define VTDPREFIX "[VT-D]" 
   23.31 -
    23.32 -#define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1 minute */
   23.33 -#define time_after(a,b)         \
   23.34 -        (typecheck(unsigned long, a) && \
   23.35 -         typecheck(unsigned long, b) && \
   23.36 -         ((long)(b) - (long)(a) < 0))
   23.37 -
   23.38 -struct IO_APIC_route_remap_entry {
   23.39 -    union {
   23.40 -        u64 val;
   23.41 -        struct {
   23.42 -            u64 vector:8,
   23.43 -            delivery_mode:3,
   23.44 -            index_15:1,
   23.45 -            delivery_status:1,
   23.46 -            polarity:1,
   23.47 -            irr:1,
   23.48 -            trigger:1,
   23.49 -            mask:1,
   23.50 -            reserved:31,
   23.51 -            format:1,
   23.52 -            index_0_14:15;
   23.53 -        };
   23.54 -    };
   23.55 -};
   23.56 -
   23.57 -#endif // _VTD_H_
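
time_after() above is the classic jiffies idiom: compare via the sign of a
signed difference so the result stays correct across counter wraparound. A
minimal demonstration, with the typecheck() guards dropped for brevity:

    #include <stdio.h>

    /* 'a' is after 'b' iff the signed difference b - a is negative;
     * this remains true across unsigned wraparound. */
    #define time_after(a, b) ((long)(b) - (long)(a) < 0)

    int main(void)
    {
        unsigned long before = (unsigned long)-2; /* 2 ticks before wrap */
        unsigned long after  = 3;                 /* 3 ticks after wrap */

        /* A plain 'after > before' comparison would say 0 here. */
        printf("time_after = %d\n", time_after(after, before)); /* prints 1 */
        return 0;
    }
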
    24.1 --- a/xen/drivers/Makefile	Thu Feb 21 14:50:27 2008 +0000
    24.2 +++ b/xen/drivers/Makefile	Thu Feb 21 15:06:37 2008 +0000
    24.3 @@ -1,3 +1,4 @@
    24.4  subdir-y += char
    24.5 +subdir-$(x86) += passthrough
    24.6  subdir-$(HAS_ACPI) += acpi
    24.7  subdir-$(HAS_VGA) += video
    25.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.2 +++ b/xen/drivers/passthrough/Makefile	Thu Feb 21 15:06:37 2008 +0000
    25.3 @@ -0,0 +1,2 @@
    25.4 +subdir-$(x86) += vtd
    25.5 +subdir-$(x86) += amd
    26.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.2 +++ b/xen/drivers/passthrough/amd/Makefile	Thu Feb 21 15:06:37 2008 +0000
    26.3 @@ -0,0 +1,4 @@
    26.4 +obj-y += iommu_detect.o
    26.5 +obj-y += iommu_init.o
    26.6 +obj-y += iommu_map.o
    26.7 +obj-y += pci_amd_iommu.o
    27.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.2 +++ b/xen/drivers/passthrough/amd/iommu_detect.c	Thu Feb 21 15:06:37 2008 +0000
    27.3 @@ -0,0 +1,215 @@
    27.4 +/*
    27.5 + * Copyright (C) 2007 Advanced Micro Devices, Inc.
    27.6 + * Author: Leo Duran <leo.duran@amd.com>
    27.7 + * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
    27.8 + *
    27.9 + * This program is free software; you can redistribute it and/or modify
   27.10 + * it under the terms of the GNU General Public License as published by
   27.11 + * the Free Software Foundation; either version 2 of the License, or
   27.12 + * (at your option) any later version.
   27.13 + *
   27.14 + * This program is distributed in the hope that it will be useful,
   27.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   27.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   27.17 + * GNU General Public License for more details.
   27.18 + *
   27.19 + * You should have received a copy of the GNU General Public License
   27.20 + * along with this program; if not, write to the Free Software
   27.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
   27.22 + */
   27.23 +
   27.24 +#include <xen/config.h>
   27.25 +#include <xen/errno.h>
   27.26 +#include <asm/iommu.h>
   27.27 +#include <asm/amd-iommu.h>
   27.28 +#include <asm/hvm/svm/amd-iommu-proto.h>
   27.29 +#include "../pci-direct.h"
   27.30 +#include "../pci_regs.h"
   27.31 +
   27.32 +static int __init valid_bridge_bus_config(int bus, int dev, int func,
   27.33 +            int *sec_bus, int *sub_bus)
   27.34 +{
   27.35 +    int pri_bus;
   27.36 +
   27.37 +    pri_bus = read_pci_config_byte(bus, dev, func, PCI_PRIMARY_BUS);
   27.38 +    *sec_bus = read_pci_config_byte(bus, dev, func, PCI_SECONDARY_BUS);
   27.39 +    *sub_bus = read_pci_config_byte(bus, dev, func, PCI_SUBORDINATE_BUS);
   27.40 +
   27.41 +    return ( pri_bus == bus && *sec_bus > bus && *sub_bus >= *sec_bus );
   27.42 +}
   27.43 +
   27.44 +int __init get_iommu_last_downstream_bus(struct amd_iommu *iommu)
   27.45 +{
   27.46 +    int bus, dev, func;
   27.47 +    int devfn, hdr_type;
   27.48 +    int sec_bus, sub_bus;
   27.49 +    int multi_func;
   27.50 +
   27.51 +    bus = iommu->last_downstream_bus = iommu->root_bus;
   27.52 +    iommu->downstream_bus_present[bus] = 1;
   27.53 +    dev = PCI_SLOT(iommu->first_devfn);
   27.54 +    multi_func = PCI_FUNC(iommu->first_devfn) > 0;
   27.55 +    for ( devfn = iommu->first_devfn; devfn <= iommu->last_devfn; ++devfn ) {
    27.56 +        /* moved on to the next device number? */
   27.57 +        if ( dev != PCI_SLOT(devfn) ) {
   27.58 +            dev = PCI_SLOT(devfn);
   27.59 +            multi_func = 0;
   27.60 +        }
   27.61 +        func = PCI_FUNC(devfn);
   27.62 + 
   27.63 +        if ( !VALID_PCI_VENDOR_ID(
   27.64 +            read_pci_config_16(bus, dev, func, PCI_VENDOR_ID)) )
   27.65 +            continue;
   27.66 +
   27.67 +        hdr_type = read_pci_config_byte(bus, dev, func,
   27.68 +                PCI_HEADER_TYPE);
   27.69 +        if ( func == 0 )
   27.70 +            multi_func = IS_PCI_MULTI_FUNCTION(hdr_type);
   27.71 +
   27.72 +        if ( (func == 0 || multi_func) &&
   27.73 +            IS_PCI_TYPE1_HEADER(hdr_type) ) {
   27.74 +            if (!valid_bridge_bus_config(bus, dev, func,
   27.75 +                &sec_bus, &sub_bus))
   27.76 +                return -ENODEV;
   27.77 +
   27.78 +            if ( sub_bus > iommu->last_downstream_bus )
   27.79 +                iommu->last_downstream_bus = sub_bus;
   27.80 +            do {
   27.81 +                iommu->downstream_bus_present[sec_bus] = 1;
   27.82 +            } while ( sec_bus++ < sub_bus );
   27.83 +        }
   27.84 +    }
   27.85 +
   27.86 +    return 0;
   27.87 +}
   27.88 +
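The bridge scan above relies on the PCI invariant primary < secondary <=
subordinate: every bus number in [secondary, subordinate] lies behind the
bridge, which is what the do/while marks. The same marking written as a
plain loop for clarity (illustrative only):

    #include <stdio.h>

    static unsigned char downstream[256];

    /* Mark every bus behind a bridge whose config reports sec..sub. */
    static void mark_downstream(int sec_bus, int sub_bus)
    {
        for ( int b = sec_bus; b <= sub_bus; b++ )
            downstream[b] = 1;
    }

    int main(void)
    {
        mark_downstream(2, 5);   /* a bridge forwarding buses 2-5 */
        printf("bus 4 downstream? %d\n", downstream[4]);   /* 1 */
        return 0;
    }
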
   27.89 +int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
   27.90 +            struct amd_iommu *iommu)
   27.91 +{
   27.92 +    u32 cap_header, cap_range;
   27.93 +    u64 mmio_bar;
   27.94 +
   27.95 +#if HACK_BIOS_SETTINGS
    27.96 +    /* remove this once BIOS support is available */
   27.97 +    write_pci_config(bus, dev, func,
   27.98 +        cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, 0x00000000);
   27.99 +    write_pci_config(bus, dev, func,
  27.100 +        cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, 0x40000001);
   27.101 +    /* remove this once BIOS support is available */
  27.102 +#endif
  27.103 +
  27.104 +    mmio_bar = (u64)read_pci_config(bus, dev, func,
  27.105 +             cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
  27.106 +    mmio_bar |= read_pci_config(bus, dev, func,
  27.107 +            cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET) &
  27.108 +            PCI_CAP_MMIO_BAR_LOW_MASK;
  27.109 +    iommu->mmio_base_phys = (unsigned long)mmio_bar;
  27.110 +
  27.111 +    if ( (mmio_bar == 0) || ( (mmio_bar & 0x3FFF) != 0 ) ) {
   27.112 +        dprintk(XENLOG_ERR,
  27.113 +                "AMD IOMMU: Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
  27.114 +        return -ENODEV;
  27.115 +    }
  27.116 +
  27.117 +    cap_header = read_pci_config(bus, dev, func, cap_ptr);
  27.118 +    iommu->revision = get_field_from_reg_u32(cap_header,
  27.119 +                  PCI_CAP_REV_MASK, PCI_CAP_REV_SHIFT);
  27.120 +    iommu->iotlb_support = get_field_from_reg_u32(cap_header,
  27.121 +                PCI_CAP_IOTLB_MASK, PCI_CAP_IOTLB_SHIFT);
  27.122 +    iommu->ht_tunnel_support = get_field_from_reg_u32(cap_header,
  27.123 +                    PCI_CAP_HT_TUNNEL_MASK,
  27.124 +                    PCI_CAP_HT_TUNNEL_SHIFT);
  27.125 +    iommu->not_present_cached = get_field_from_reg_u32(cap_header,
  27.126 +                    PCI_CAP_NP_CACHE_MASK,
  27.127 +                    PCI_CAP_NP_CACHE_SHIFT);
  27.128 +
  27.129 +    cap_range = read_pci_config(bus, dev, func,
  27.130 +            cap_ptr + PCI_CAP_RANGE_OFFSET);
  27.131 +    iommu->root_bus = get_field_from_reg_u32(cap_range,
  27.132 +                PCI_CAP_BUS_NUMBER_MASK,
  27.133 +                PCI_CAP_BUS_NUMBER_SHIFT);
  27.134 +    iommu->first_devfn = get_field_from_reg_u32(cap_range,
  27.135 +                PCI_CAP_FIRST_DEVICE_MASK,
  27.136 +                PCI_CAP_FIRST_DEVICE_SHIFT);
  27.137 +    iommu->last_devfn = get_field_from_reg_u32(cap_range,
  27.138 +                PCI_CAP_LAST_DEVICE_MASK,
  27.139 +                PCI_CAP_LAST_DEVICE_SHIFT);
  27.140 +
  27.141 +    return 0;
  27.142 +}
  27.143 +
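Almost every register and table-entry access in this code goes through
get_field_from_reg_u32()/set_field_in_reg_u32(), plain mask-and-shift
helpers defined in the AMD IOMMU headers. A self-contained sketch of the
behaviour these call sites assume (definitions here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Extract the field selected by mask/shift from a 32-bit register. */
    static uint32_t get_field(uint32_t reg, uint32_t mask, int shift)
    {
        return (reg & mask) >> shift;
    }

    /* Merge 'val' into the field selected by mask/shift, keeping other bits. */
    static void set_field(uint32_t val, uint32_t reg,
                          uint32_t mask, int shift, uint32_t *out)
    {
        *out = (reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
        uint32_t entry = 0;

        set_field(0x2a, entry, 0x0000ff00, 8, &entry);  /* write an 8-bit field */
        printf("entry = 0x%08x field = 0x%x\n",
               entry, get_field(entry, 0x0000ff00, 8)); /* 0x00002a00, 0x2a */
        return 0;
    }
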
  27.144 +static int __init scan_caps_for_iommu(int bus, int dev, int func,
  27.145 +            iommu_detect_callback_ptr_t iommu_detect_callback)
  27.146 +{
  27.147 +    int cap_ptr, cap_id, cap_type;
  27.148 +    u32 cap_header;
  27.149 +    int count, error = 0;
  27.150 +
  27.151 +    count = 0;
  27.152 +    cap_ptr = read_pci_config_byte(bus, dev, func,
  27.153 +            PCI_CAPABILITY_LIST);
  27.154 +    while ( cap_ptr >= PCI_MIN_CAP_OFFSET &&
  27.155 +        count < PCI_MAX_CAP_BLOCKS && !error ) {
  27.156 +        cap_ptr &= PCI_CAP_PTR_MASK;
  27.157 +        cap_header = read_pci_config(bus, dev, func, cap_ptr);
  27.158 +        cap_id = get_field_from_reg_u32(cap_header,
  27.159 +                PCI_CAP_ID_MASK, PCI_CAP_ID_SHIFT);
  27.160 +
  27.161 +        if ( cap_id == PCI_CAP_ID_SECURE_DEVICE ) {
  27.162 +            cap_type = get_field_from_reg_u32(cap_header,
  27.163 +                    PCI_CAP_TYPE_MASK, PCI_CAP_TYPE_SHIFT);
  27.164 +            if ( cap_type == PCI_CAP_TYPE_IOMMU ) {
  27.165 +                error = iommu_detect_callback(
  27.166 +                        bus, dev, func, cap_ptr);
  27.167 +            }
  27.168 +        }
  27.169 +
  27.170 +        cap_ptr = get_field_from_reg_u32(cap_header,
  27.171 +                PCI_CAP_NEXT_PTR_MASK, PCI_CAP_NEXT_PTR_SHIFT);
   27.172 +        ++count;
   27.173 +    }
  27.174 +    return error;
  27.175 +}
  27.176 +
  27.177 +static int __init scan_functions_for_iommu(int bus, int dev,
  27.178 +            iommu_detect_callback_ptr_t iommu_detect_callback)
  27.179 +{
  27.180 +    int func, hdr_type;
  27.181 +    int count, error = 0;
  27.182 +
  27.183 +    func = 0;
  27.184 +    count = 1;
  27.185 +    while ( VALID_PCI_VENDOR_ID(read_pci_config_16(bus, dev, func,
  27.186 +            PCI_VENDOR_ID)) && !error && func < count ) {
  27.187 +        hdr_type = read_pci_config_byte(bus, dev, func,
  27.188 +                PCI_HEADER_TYPE);
  27.189 +
  27.190 +        if ( func == 0 && IS_PCI_MULTI_FUNCTION(hdr_type) )
  27.191 +            count = PCI_MAX_FUNC_COUNT;
  27.192 +
  27.193 +        if ( IS_PCI_TYPE0_HEADER(hdr_type) ||
  27.194 +            IS_PCI_TYPE1_HEADER(hdr_type) ) {
   27.195 +            error = scan_caps_for_iommu(bus, dev, func,
  27.196 +                    iommu_detect_callback);
  27.197 +        }
  27.198 +        ++func;
  27.199 +    }
  27.200 +
  27.201 +    return error;
  27.202 +}
  27.203 +
  27.204 +
  27.205 +int __init scan_for_iommu(iommu_detect_callback_ptr_t iommu_detect_callback)
  27.206 +{
  27.207 +    int bus, dev, error = 0;
  27.208 +
  27.209 +    for ( bus = 0; bus < PCI_MAX_BUS_COUNT && !error; ++bus ) {
  27.210 +        for ( dev = 0; dev < PCI_MAX_DEV_COUNT && !error; ++dev ) {
   27.211 +            error = scan_functions_for_iommu(bus, dev,
  27.212 +                  iommu_detect_callback);
  27.213 +        }
  27.214 +    }
  27.215 +
  27.216 +    return error;
  27.217 +}
  27.218 +
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/xen/drivers/passthrough/amd/iommu_init.c	Thu Feb 21 15:06:37 2008 +0000
    28.3 @@ -0,0 +1,147 @@
    28.4 +/*
    28.5 + * Copyright (C) 2007 Advanced Micro Devices, Inc.
    28.6 + * Author: Leo Duran <leo.duran@amd.com>
    28.7 + * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
    28.8 + *
    28.9 + * This program is free software; you can redistribute it and/or modify
   28.10 + * it under the terms of the GNU General Public License as published by
   28.11 + * the Free Software Foundation; either version 2 of the License, or
   28.12 + * (at your option) any later version.
   28.13 + *
   28.14 + * This program is distributed in the hope that it will be useful,
   28.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   28.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   28.17 + * GNU General Public License for more details.
   28.18 + *
   28.19 + * You should have received a copy of the GNU General Public License
   28.20 + * along with this program; if not, write to the Free Software
   28.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
   28.22 + */
   28.23 +
   28.24 +#include <xen/config.h>
   28.25 +#include <xen/errno.h>
   28.26 +#include <asm/amd-iommu.h>
   28.27 +#include <asm/hvm/svm/amd-iommu-proto.h>
   28.28 +#include <asm-x86/fixmap.h>
   28.29 +#include "../pci-direct.h"
   28.30 +#include "../pci_regs.h"
   28.31 +
   28.32 +extern int nr_amd_iommus;
   28.33 +
   28.34 +int __init map_iommu_mmio_region(struct amd_iommu *iommu)
   28.35 +{
   28.36 +    unsigned long mfn;
   28.37 +
   28.38 +    if ( nr_amd_iommus > MAX_AMD_IOMMUS ) {
   28.39 +        gdprintk(XENLOG_ERR,
   28.40 +            "IOMMU: nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
   28.41 +        return -ENOMEM;
   28.42 +    }
   28.43 +
   28.44 +    iommu->mmio_base = (void *) fix_to_virt(FIX_IOMMU_MMIO_BASE_0 +
   28.45 +                       nr_amd_iommus * MMIO_PAGES_PER_IOMMU);
   28.46 +    mfn = (unsigned long)iommu->mmio_base_phys >> PAGE_SHIFT;
   28.47 +    map_pages_to_xen((unsigned long)iommu->mmio_base, mfn,
   28.48 +                    MMIO_PAGES_PER_IOMMU, PAGE_HYPERVISOR_NOCACHE);
   28.49 +
   28.50 +    memset((u8*)iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH);
   28.51 +
   28.52 +    return 0;
   28.53 +}
   28.54 +
   28.55 +void __init unmap_iommu_mmio_region(struct amd_iommu *iommu)
   28.56 +{
   28.57 +    if ( iommu->mmio_base ) {
   28.58 +        iounmap(iommu->mmio_base);
   28.59 +        iommu->mmio_base = NULL;
   28.60 +    }
   28.61 +}
   28.62 +
   28.63 +void __init register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
   28.64 +{
   28.65 +    u64 addr_64, addr_lo, addr_hi;
   28.66 +    u32 entry;
   28.67 +
   28.68 +    addr_64 = (u64)virt_to_maddr(iommu->dev_table.buffer);
   28.69 +    addr_lo = addr_64 & DMA_32BIT_MASK;
   28.70 +    addr_hi = addr_64 >> 32;
   28.71 +
   28.72 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
   28.73 +        IOMMU_DEV_TABLE_BASE_LOW_MASK,
   28.74 +        IOMMU_DEV_TABLE_BASE_LOW_SHIFT, &entry);
   28.75 +    set_field_in_reg_u32((iommu->dev_table.alloc_size / PAGE_SIZE) - 1,
   28.76 +        entry, IOMMU_DEV_TABLE_SIZE_MASK,
   28.77 +        IOMMU_DEV_TABLE_SIZE_SHIFT, &entry);
   28.78 +    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_LOW_OFFSET);
   28.79 +
   28.80 +    set_field_in_reg_u32((u32)addr_hi, 0,
   28.81 +        IOMMU_DEV_TABLE_BASE_HIGH_MASK,
   28.82 +        IOMMU_DEV_TABLE_BASE_HIGH_SHIFT, &entry);
   28.83 +    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_HIGH_OFFSET);
   28.84 +}
   28.85 +
   28.86 +void __init register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
   28.87 +{
   28.88 +    u64 addr_64, addr_lo, addr_hi;
   28.89 +    u32 power_of2_entries;
   28.90 +    u32 entry;
   28.91 +
   28.92 +    addr_64 = (u64)virt_to_maddr(iommu->cmd_buffer.buffer);
   28.93 +    addr_lo = addr_64 & DMA_32BIT_MASK;
   28.94 +    addr_hi = addr_64 >> 32;
   28.95 +
   28.96 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
   28.97 +        IOMMU_CMD_BUFFER_BASE_LOW_MASK,
   28.98 +        IOMMU_CMD_BUFFER_BASE_LOW_SHIFT, &entry);
   28.99 +    writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);
  28.100 +
  28.101 +    power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
  28.102 +        IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;
  28.103 +
  28.104 +    set_field_in_reg_u32((u32)addr_hi, 0,
  28.105 +        IOMMU_CMD_BUFFER_BASE_HIGH_MASK,
  28.106 +        IOMMU_CMD_BUFFER_BASE_HIGH_SHIFT, &entry);
  28.107 +    set_field_in_reg_u32(power_of2_entries, entry,
  28.108 +        IOMMU_CMD_BUFFER_LENGTH_MASK,
  28.109 +        IOMMU_CMD_BUFFER_LENGTH_SHIFT, &entry);
  28.110 +    writel(entry, iommu->mmio_base+IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET);
  28.111 +}
  28.112 +
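The command-buffer length above is encoded as a power of two: the register
holds n for a buffer of 2^n entries, and one 4KB page of 16-byte entries
already contributes 2^8, which is what
IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE accounts for. A sketch of the
arithmetic; unlike get_order_from_bytes(), this toy takes the page count
directly, and its names are illustrative:

    #include <stdio.h>

    #define ENTRIES_PER_PAGE_LOG2 8   /* 4096-byte page / 16-byte entry = 2^8 */

    /* log2 of a page count that is itself a power of two. */
    static int order_from_pages(unsigned int pages)
    {
        int order = 0;
        while ( pages >>= 1 )
            order++;
        return order;
    }

    int main(void)
    {
        unsigned int pages = 4;   /* e.g. a 16KB command buffer */
        int len = order_from_pages(pages) + ENTRIES_PER_PAGE_LOG2;
        printf("length field = %d -> %u entries\n", len, 1u << len); /* 10 -> 1024 */
        return 0;
    }
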
  28.113 +static void __init set_iommu_translation_control(struct amd_iommu *iommu,
  28.114 +            int enable)
  28.115 +{
  28.116 +    u32 entry;
  28.117 +
  28.118 +    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
  28.119 +    set_field_in_reg_u32(iommu->ht_tunnel_support ? IOMMU_CONTROL_ENABLED :
   28.120 +        IOMMU_CONTROL_DISABLED, entry,
  28.121 +        IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
  28.122 +        IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
  28.123 +    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
   28.124 +        IOMMU_CONTROL_DISABLED, entry,
  28.125 +        IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
  28.126 +        IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
  28.127 +    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
  28.128 +}
  28.129 +
  28.130 +static void __init set_iommu_command_buffer_control(struct amd_iommu *iommu,
  28.131 +            int enable)
  28.132 +{
  28.133 +    u32 entry;
  28.134 +
  28.135 +    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
  28.136 +    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
   28.137 +        IOMMU_CONTROL_DISABLED, entry,
  28.138 +        IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
  28.139 +        IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);
  28.140 +    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
  28.141 +}
  28.142 +
  28.143 +void __init enable_iommu(struct amd_iommu *iommu)
  28.144 +{
  28.145 +    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
  28.146 +    set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
  28.147 +    printk("AMD IOMMU %d: Enabled\n", nr_amd_iommus);
  28.148 +}
  28.149 +
  28.150 +
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/xen/drivers/passthrough/amd/iommu_map.c	Thu Feb 21 15:06:37 2008 +0000
    29.3 @@ -0,0 +1,450 @@
    29.4 +/*
    29.5 + * Copyright (C) 2007 Advanced Micro Devices, Inc.
    29.6 + * Author: Leo Duran <leo.duran@amd.com>
    29.7 + * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
    29.8 + *
    29.9 + * This program is free software; you can redistribute it and/or modify
   29.10 + * it under the terms of the GNU General Public License as published by
   29.11 + * the Free Software Foundation; either version 2 of the License, or
   29.12 + * (at your option) any later version.
   29.13 + *
   29.14 + * This program is distributed in the hope that it will be useful,
   29.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   29.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   29.17 + * GNU General Public License for more details.
   29.18 + *
   29.19 + * You should have received a copy of the GNU General Public License
   29.20 + * along with this program; if not, write to the Free Software
   29.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
   29.22 + */
   29.23 +
   29.24 +#include <xen/sched.h>
   29.25 +#include <asm/hvm/iommu.h>
   29.26 +#include <asm/amd-iommu.h>
   29.27 +#include <asm/hvm/svm/amd-iommu-proto.h>
   29.28 +
   29.29 +extern long amd_iommu_poll_comp_wait;
   29.30 +
   29.31 +static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
   29.32 +{
   29.33 +    u32 tail, head, *cmd_buffer;
   29.34 +    int i;
   29.35 +
   29.36 +    tail = iommu->cmd_buffer_tail;
   29.37 +    if ( ++tail == iommu->cmd_buffer.entries )
   29.38 +        tail = 0;
   29.39 +    head = get_field_from_reg_u32(
   29.40 +        readl(iommu->mmio_base+IOMMU_CMD_BUFFER_HEAD_OFFSET),
   29.41 +        IOMMU_CMD_BUFFER_HEAD_MASK,
   29.42 +        IOMMU_CMD_BUFFER_HEAD_SHIFT);
   29.43 +    if ( head != tail )
   29.44 +    {
   29.45 +        cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer +
   29.46 +                             (iommu->cmd_buffer_tail *
   29.47 +                              IOMMU_CMD_BUFFER_ENTRY_SIZE));
   29.48 +        for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; i++ )
   29.49 +            cmd_buffer[i] = cmd[i];
   29.50 +
   29.51 +        iommu->cmd_buffer_tail = tail;
   29.52 +        return 1;
   29.53 +    }
   29.54 +
   29.55 +    return 0;
   29.56 +}
   29.57 +
   29.58 +static void commit_iommu_command_buffer(struct amd_iommu *iommu)
   29.59 +{
   29.60 +    u32 tail;
   29.61 +
   29.62 +    set_field_in_reg_u32(iommu->cmd_buffer_tail, 0,
   29.63 +                         IOMMU_CMD_BUFFER_TAIL_MASK,
   29.64 +                         IOMMU_CMD_BUFFER_TAIL_SHIFT, &tail);
   29.65 +    writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET);
   29.66 +}
   29.67 +
   29.68 +int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
   29.69 +{
   29.70 +    if ( queue_iommu_command(iommu, cmd) )
   29.71 +    {
   29.72 +        commit_iommu_command_buffer(iommu);
   29.73 +        return 1;
   29.74 +    }
   29.75 +
   29.76 +    return 0;
   29.77 +}
   29.78 +
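queue_iommu_command() above is a one-slot-open ring: advance the tail
modulo the entry count, and treat a collision with the hardware-owned head
pointer as a full queue, in which case the command is rejected. The same
index discipline in miniature (sizes and names are illustrative):

    #include <stdio.h>

    #define RING_ENTRIES 8
    static unsigned int head, tail;   /* head is consumed by "hardware" */

    /* Returns 1 if a slot was claimed, 0 if the ring is full. */
    static int ring_push(void)
    {
        unsigned int next = tail + 1;
        if ( next == RING_ENTRIES )
            next = 0;                 /* wrap around */
        if ( next == head )
            return 0;                 /* full: one slot is kept open */
        tail = next;
        return 1;
    }

    int main(void)
    {
        int pushed = 0;
        while ( ring_push() )
            pushed++;
        printf("accepted %d of %d slots\n", pushed, RING_ENTRIES); /* 7 of 8 */
        return 0;
    }
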
   29.79 +static void invalidate_iommu_page(struct amd_iommu *iommu,
   29.80 +                                  u64 io_addr, u16 domain_id)
   29.81 +{
   29.82 +    u64 addr_lo, addr_hi;
   29.83 +    u32 cmd[4], entry;
   29.84 +
   29.85 +    addr_lo = io_addr & DMA_32BIT_MASK;
   29.86 +    addr_hi = io_addr >> 32;
   29.87 +
   29.88 +    set_field_in_reg_u32(domain_id, 0,
   29.89 +                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
   29.90 +                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
   29.91 +    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
   29.92 +                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
   29.93 +                         &entry);
   29.94 +    cmd[1] = entry;
   29.95 +
   29.96 +    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, 0,
   29.97 +                         IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
   29.98 +                         IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
   29.99 +    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
  29.100 +                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
  29.101 +                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
  29.102 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
  29.103 +                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
  29.104 +                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
  29.105 +    cmd[2] = entry;
  29.106 +
  29.107 +    set_field_in_reg_u32((u32)addr_hi, 0,
  29.108 +                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
  29.109 +                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
  29.110 +    cmd[3] = entry;
  29.111 +
  29.112 +    cmd[0] = 0;
  29.113 +    send_iommu_command(iommu, cmd);
  29.114 +}
  29.115 +
  29.116 +void flush_command_buffer(struct amd_iommu *iommu)
  29.117 +{
  29.118 +    u32 cmd[4], status;
  29.119 +    int loop_count, comp_wait;
  29.120 +
  29.121 +    /* clear 'ComWaitInt' in status register (WIC) */
  29.122 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
  29.123 +                         IOMMU_STATUS_COMP_WAIT_INT_MASK,
  29.124 +                         IOMMU_STATUS_COMP_WAIT_INT_SHIFT, &status);
  29.125 +    writel(status, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
  29.126 +
  29.127 +    /* send an empty COMPLETION_WAIT command to flush command buffer */
  29.128 +    cmd[3] = cmd[2] = 0;
  29.129 +    set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0,
  29.130 +                         IOMMU_CMD_OPCODE_MASK,
  29.131 +                         IOMMU_CMD_OPCODE_SHIFT, &cmd[1]);
  29.132 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
  29.133 +                         IOMMU_COMP_WAIT_I_FLAG_MASK,
  29.134 +                         IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]);
  29.135 +    send_iommu_command(iommu, cmd);
  29.136 +
   29.137 +    /* wait for 'ComWaitInt' to signal completion */
  29.138 +    if ( amd_iommu_poll_comp_wait ) {
  29.139 +        loop_count = amd_iommu_poll_comp_wait;
  29.140 +        do {
  29.141 +            status = readl(iommu->mmio_base +
  29.142 +                           IOMMU_STATUS_MMIO_OFFSET);
  29.143 +            comp_wait = get_field_from_reg_u32(
  29.144 +                status,
  29.145 +                IOMMU_STATUS_COMP_WAIT_INT_MASK,
  29.146 +                IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
  29.147 +            --loop_count;
  29.148 +        } while ( loop_count && !comp_wait );
  29.149 +
  29.150 +        if ( comp_wait )
  29.151 +        {
  29.152 +            /* clear 'ComWaitInt' in status register (WIC) */
  29.153 +            status &= IOMMU_STATUS_COMP_WAIT_INT_MASK;
  29.154 +            writel(status, iommu->mmio_base +
  29.155 +                   IOMMU_STATUS_MMIO_OFFSET);
  29.156 +        }
  29.157 +        else
  29.158 +            dprintk(XENLOG_WARNING, "AMD IOMMU: Warning:"
  29.159 +                    " ComWaitInt bit did not assert!\n");
  29.160 +    }
  29.161 +}
  29.162 +
  29.163 +static void clear_page_table_entry_present(u32 *pte)
  29.164 +{
  29.165 +    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, pte[0],
  29.166 +                         IOMMU_PTE_PRESENT_MASK,
  29.167 +                         IOMMU_PTE_PRESENT_SHIFT, &pte[0]);
  29.168 +}
  29.169 +
  29.170 +static void set_page_table_entry_present(u32 *pte, u64 page_addr,
  29.171 +                                         int iw, int ir)
  29.172 +{
  29.173 +    u64 addr_lo, addr_hi;
  29.174 +    u32 entry;
  29.175 +
  29.176 +    addr_lo = page_addr & DMA_32BIT_MASK;
  29.177 +    addr_hi = page_addr >> 32;
  29.178 +
  29.179 +    set_field_in_reg_u32((u32)addr_hi, 0,
  29.180 +                         IOMMU_PTE_ADDR_HIGH_MASK,
  29.181 +                         IOMMU_PTE_ADDR_HIGH_SHIFT, &entry);
  29.182 +    set_field_in_reg_u32(iw ? IOMMU_CONTROL_ENABLED :
  29.183 +                         IOMMU_CONTROL_DISABLED, entry,
  29.184 +                         IOMMU_PTE_IO_WRITE_PERMISSION_MASK,
  29.185 +                         IOMMU_PTE_IO_WRITE_PERMISSION_SHIFT, &entry);
  29.186 +    set_field_in_reg_u32(ir ? IOMMU_CONTROL_ENABLED :
  29.187 +                         IOMMU_CONTROL_DISABLED, entry,
  29.188 +                         IOMMU_PTE_IO_READ_PERMISSION_MASK,
  29.189 +                         IOMMU_PTE_IO_READ_PERMISSION_SHIFT, &entry);
  29.190 +    pte[1] = entry;
  29.191 +
  29.192 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
  29.193 +                         IOMMU_PTE_ADDR_LOW_MASK,
  29.194 +                         IOMMU_PTE_ADDR_LOW_SHIFT, &entry);
  29.195 +    set_field_in_reg_u32(IOMMU_PAGING_MODE_LEVEL_0, entry,
  29.196 +                         IOMMU_PTE_NEXT_LEVEL_MASK,
  29.197 +                         IOMMU_PTE_NEXT_LEVEL_SHIFT, &entry);
  29.198 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
  29.199 +                         IOMMU_PTE_PRESENT_MASK,
  29.200 +                         IOMMU_PTE_PRESENT_SHIFT, &entry);
  29.201 +    pte[0] = entry;
  29.202 +}
  29.203 +
  29.204 +
  29.205 +static void amd_iommu_set_page_directory_entry(u32 *pde, 
  29.206 +                                               u64 next_ptr, u8 next_level)
  29.207 +{
  29.208 +    u64 addr_lo, addr_hi;
  29.209 +    u32 entry;
  29.210 +
  29.211 +    addr_lo = next_ptr & DMA_32BIT_MASK;
  29.212 +    addr_hi = next_ptr >> 32;
  29.213 +
   29.214 +    /* enable read/write permissions, which will be enforced at the PTE */
  29.215 +    set_field_in_reg_u32((u32)addr_hi, 0,
  29.216 +                         IOMMU_PDE_ADDR_HIGH_MASK,
  29.217 +                         IOMMU_PDE_ADDR_HIGH_SHIFT, &entry);
  29.218 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
  29.219 +                         IOMMU_PDE_IO_WRITE_PERMISSION_MASK,
  29.220 +                         IOMMU_PDE_IO_WRITE_PERMISSION_SHIFT, &entry);
  29.221 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
  29.222 +                         IOMMU_PDE_IO_READ_PERMISSION_MASK,
  29.223 +                         IOMMU_PDE_IO_READ_PERMISSION_SHIFT, &entry);
  29.224 +    pde[1] = entry;
  29.225 +
  29.226 +    /* mark next level as 'present' */
  29.227 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
  29.228 +                         IOMMU_PDE_ADDR_LOW_MASK,
  29.229 +                         IOMMU_PDE_ADDR_LOW_SHIFT, &entry);
  29.230 +    set_field_in_reg_u32(next_level, entry,
  29.231 +                         IOMMU_PDE_NEXT_LEVEL_MASK,
  29.232 +                         IOMMU_PDE_NEXT_LEVEL_SHIFT, &entry);
  29.233 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
  29.234 +                         IOMMU_PDE_PRESENT_MASK,
  29.235 +                         IOMMU_PDE_PRESENT_SHIFT, &entry);
  29.236 +    pde[0] = entry;
  29.237 +}
  29.238 +
  29.239 +void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u16 domain_id,
  29.240 +                                   u8 paging_mode)
  29.241 +{
  29.242 +    u64 addr_hi, addr_lo;
  29.243 +    u32 entry;
  29.244 +
  29.245 +    dte[6] = dte[5] = dte[4] = 0;
  29.246 +
  29.247 +    set_field_in_reg_u32(IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED, 0,
  29.248 +                         IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
  29.249 +                         IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT, &entry);
  29.250 +    dte[3] = entry;
  29.251 +
  29.252 +    set_field_in_reg_u32(domain_id, 0,
  29.253 +                         IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
  29.254 +                         IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry);
  29.255 +    dte[2] = entry;
  29.256 +
  29.257 +    addr_lo = root_ptr & DMA_32BIT_MASK;
  29.258 +    addr_hi = root_ptr >> 32;
  29.259 +    set_field_in_reg_u32((u32)addr_hi, 0,
  29.260 +                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
  29.261 +                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT, &entry);
  29.262 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
  29.263 +                         IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK,
  29.264 +                         IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_SHIFT, &entry);
  29.265 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
  29.266 +                         IOMMU_DEV_TABLE_IO_READ_PERMISSION_MASK,
  29.267 +                         IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT, &entry);
  29.268 +    dte[1] = entry;
  29.269 +
  29.270 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
  29.271 +                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
  29.272 +                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT, &entry);
  29.273 +    set_field_in_reg_u32(paging_mode, entry,
  29.274 +                         IOMMU_DEV_TABLE_PAGING_MODE_MASK,
  29.275 +                         IOMMU_DEV_TABLE_PAGING_MODE_SHIFT, &entry);
  29.276 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
  29.277 +                         IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
  29.278 +                         IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry);
  29.279 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
  29.280 +                         IOMMU_DEV_TABLE_VALID_MASK,
  29.281 +                         IOMMU_DEV_TABLE_VALID_SHIFT, &entry);
  29.282 +    dte[0] = entry;
  29.283 +}
  29.284 +
  29.285 +void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry)
  29.286 +{
  29.287 +    u64 addr_lo, addr_hi, ptr;
  29.288 +
  29.289 +    addr_lo = get_field_from_reg_u32(
  29.290 +        entry[0],
  29.291 +        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
  29.292 +        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT);
  29.293 +
  29.294 +    addr_hi = get_field_from_reg_u32(
  29.295 +        entry[1],
  29.296 +        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
  29.297 +        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT);
  29.298 +
  29.299 +    ptr = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
  29.300 +    return ptr ? maddr_to_virt((unsigned long)ptr) : NULL;
  29.301 +}
  29.302 +
  29.303 +static int amd_iommu_is_pte_present(u32 *entry)
  29.304 +{
  29.305 +    return (get_field_from_reg_u32(entry[0],
  29.306 +                                   IOMMU_PDE_PRESENT_MASK,
  29.307 +                                   IOMMU_PDE_PRESENT_SHIFT));
  29.308 +}
  29.309 +
  29.310 +void invalidate_dev_table_entry(struct amd_iommu *iommu,
  29.311 +                                u16 device_id)
  29.312 +{
  29.313 +    u32 cmd[4], entry;
  29.314 +
  29.315 +    cmd[3] = cmd[2] = 0;
  29.316 +    set_field_in_reg_u32(device_id, 0,
  29.317 +                         IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK,
  29.318 +                         IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT, &entry);
  29.319 +    cmd[0] = entry;
  29.320 +
  29.321 +    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY, 0,
  29.322 +                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
  29.323 +                         &entry);
  29.324 +    cmd[1] = entry;
  29.325 +
  29.326 +    send_iommu_command(iommu, cmd);
  29.327 +}
  29.328 +
  29.329 +int amd_iommu_is_dte_page_translation_valid(u32 *entry)
  29.330 +{
  29.331 +    return (get_field_from_reg_u32(entry[0],
  29.332 +                                   IOMMU_DEV_TABLE_VALID_MASK,
  29.333 +                                   IOMMU_DEV_TABLE_VALID_SHIFT) &&
  29.334 +            get_field_from_reg_u32(entry[0],
  29.335 +                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
  29.336 +                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT));
  29.337 +}
  29.338 +
  29.339 +static void *get_pte_from_page_tables(void *table, int level,
  29.340 +                                      unsigned long io_pfn)
  29.341 +{
  29.342 +    unsigned long offset;
  29.343 +    void *pde = NULL;
  29.344 +
  29.345 +    BUG_ON(table == NULL);
  29.346 +
  29.347 +    while ( level > 0 )
  29.348 +    {
  29.349 +        offset = io_pfn >> ((PTE_PER_TABLE_SHIFT *
  29.350 +                             (level - IOMMU_PAGING_MODE_LEVEL_1)));
  29.351 +        offset &= ~PTE_PER_TABLE_MASK;
  29.352 +        pde = table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);
  29.353 +
  29.354 +        if ( level == 1 )
  29.355 +            break;
  29.356 +        if ( !pde )
  29.357 +            return NULL;
  29.358 +        if ( !amd_iommu_is_pte_present(pde) )
  29.359 +        {
  29.360 +            void *next_table = alloc_xenheap_page();
  29.361 +            if ( next_table == NULL )
  29.362 +                return NULL;
  29.363 +            memset(next_table, 0, PAGE_SIZE);
  29.364 +            if ( *(u64 *)pde == 0 )
  29.365 +            {
  29.366 +                unsigned long next_ptr = (u64)virt_to_maddr(next_table);
  29.367 +                amd_iommu_set_page_directory_entry(
  29.368 +                    (u32 *)pde, next_ptr, level - 1);
  29.369 +            }
  29.370 +            else
  29.371 +            {
  29.372 +                free_xenheap_page(next_table);
  29.373 +            }
  29.374 +        }
  29.375 +        table = amd_iommu_get_vptr_from_page_table_entry(pde);
  29.376 +        level--;
  29.377 +    }
  29.378 +
  29.379 +    return pde;
  29.380 +}
  29.381 +
  29.382 +int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
  29.383 +{
  29.384 +    void *pte;
  29.385 +    unsigned long flags;
  29.386 +    u64 maddr;
  29.387 +    struct hvm_iommu *hd = domain_hvm_iommu(d);
  29.388 +    int iw, ir;
  29.389 +
  29.390 +    BUG_ON( !hd->root_table );
  29.391 +
  29.392 +    maddr = (u64)mfn << PAGE_SHIFT;
  29.393 +
  29.394 +    iw = IOMMU_IO_WRITE_ENABLED;
  29.395 +    ir = IOMMU_IO_READ_ENABLED;
  29.396 +
  29.397 +    spin_lock_irqsave(&hd->mapping_lock, flags);
  29.398 +
  29.399 +    pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
  29.400 +    if ( pte == 0 )
  29.401 +    {
  29.402 +        dprintk(XENLOG_ERR,
  29.403 +                "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
  29.404 +        spin_unlock_irqrestore(&hd->mapping_lock, flags);
  29.405 +        return -EIO;
  29.406 +    }
  29.407 +
  29.408 +    set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
  29.409 +
  29.410 +    spin_unlock_irqrestore(&hd->mapping_lock, flags);
  29.411 +    return 0;
  29.412 +}
  29.413 +
  29.414 +int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
  29.415 +{
  29.416 +    void *pte;
  29.417 +    unsigned long flags;
  29.418 +    u64 io_addr = gfn;
  29.419 +    int requestor_id;
  29.420 +    struct amd_iommu *iommu;
  29.421 +    struct hvm_iommu *hd = domain_hvm_iommu(d);
  29.422 +
  29.423 +    BUG_ON( !hd->root_table );
  29.424 +
  29.425 +    requestor_id = hd->domain_id;
  29.426 +    io_addr = (u64)gfn << PAGE_SHIFT;
  29.427 +
  29.428 +    spin_lock_irqsave(&hd->mapping_lock, flags);
  29.429 +
  29.430 +    pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
  29.431 +    if ( pte == 0 )
  29.432 +    {
  29.433 +        dprintk(XENLOG_ERR,
  29.434 +                "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
  29.435 +        spin_unlock_irqrestore(&hd->mapping_lock, flags);
  29.436 +        return -EIO;
  29.437 +    }
  29.438 +
  29.439 +    /* mark PTE as 'page not present' */
  29.440 +    clear_page_table_entry_present((u32 *)pte);
  29.441 +    spin_unlock_irqrestore(&hd->mapping_lock, flags);
  29.442 +
  29.443 +    /* send INVALIDATE_IOMMU_PAGES command */
  29.444 +    for_each_amd_iommu(iommu)
  29.445 +    {
  29.446 +        spin_lock_irqsave(&iommu->lock, flags);
  29.447 +        invalidate_iommu_page(iommu, io_addr, requestor_id);
  29.448 +        flush_command_buffer(iommu);
  29.449 +        spin_unlock_irqrestore(&iommu->lock, flags);
  29.450 +    }
  29.451 +
  29.452 +    return 0;
  29.453 +}
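A note on the walk above: get_pte_from_page_tables() peels PTE_PER_TABLE_SHIFT
(9) bits off the I/O page frame number per level, top level first, so every
table holds 512 entries. A minimal sketch of that index math, assuming the
standard AMD IOMMU geometry (IOMMU_PAGING_MODE_LEVEL_1 == 1); the helper name
is illustrative, not part of this changeset:

    /* Index used at 'level' for a given io_pfn, assuming 512-entry tables
     * (PTE_PER_TABLE_SHIFT == 9): level 1 consumes pfn bits 0..8, level 2
     * bits 9..17, and so on. */
    static unsigned int table_index(unsigned long io_pfn, int level)
    {
        return (io_pfn >> (9 * (level - 1))) & 0x1ff;
    }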
    30.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.2 +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Thu Feb 21 15:06:37 2008 +0000
    30.3 @@ -0,0 +1,578 @@
    30.4 +/*
    30.5 + * Copyright (C) 2007 Advanced Micro Devices, Inc.
    30.6 + * Author: Leo Duran <leo.duran@amd.com>
    30.7 + * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
    30.8 + *
    30.9 + * This program is free software; you can redistribute it and/or modify
   30.10 + * it under the terms of the GNU General Public License as published by
   30.11 + * the Free Software Foundation; either version 2 of the License, or
   30.12 + * (at your option) any later version.
   30.13 + *
   30.14 + * This program is distributed in the hope that it will be useful,
   30.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   30.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   30.17 + * GNU General Public License for more details.
   30.18 + *
   30.19 + * You should have received a copy of the GNU General Public License
   30.20 + * along with this program; if not, write to the Free Software
   30.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
   30.22 + */
   30.23 +
   30.24 +#include <asm/amd-iommu.h>
   30.25 +#include <asm/hvm/svm/amd-iommu-proto.h>
   30.26 +#include <xen/sched.h>
   30.27 +#include <asm/mm.h>
   30.28 +#include "../pci-direct.h"
   30.29 +#include "../pci_regs.h"
   30.30 +
   30.31 +struct list_head amd_iommu_head;
   30.32 +long amd_iommu_poll_comp_wait = COMPLETION_WAIT_DEFAULT_POLLING_COUNT;
   30.33 +static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
   30.34 +int nr_amd_iommus = 0;
   30.35 +
   30.36 +/* will be set if amd-iommu HW is found */
   30.37 +int amd_iommu_enabled = 0;
   30.38 +
   30.39 +static int enable_amd_iommu = 0;
   30.40 +boolean_param("enable_amd_iommu", enable_amd_iommu);
   30.41 +
   30.42 +static void deallocate_domain_page_tables(struct hvm_iommu *hd)
   30.43 +{
   30.44 +    if ( hd->root_table )
   30.45 +        free_xenheap_page(hd->root_table);
   30.46 +}
   30.47 +
   30.48 +static void deallocate_domain_resources(struct hvm_iommu *hd)
   30.49 +{
   30.50 +    deallocate_domain_page_tables(hd);
   30.51 +}
   30.52 +
   30.53 +static void __init init_cleanup(void)
   30.54 +{
   30.55 +    struct amd_iommu *iommu;
   30.56 +
   30.57 +    for_each_amd_iommu ( iommu )
   30.58 +        unmap_iommu_mmio_region(iommu);
   30.59 +}
   30.60 +
   30.61 +static void __init deallocate_iommu_table_struct(
   30.62 +    struct table_struct *table)
   30.63 +{
   30.64 +    if ( table->buffer )
   30.65 +    {
   30.66 +        free_xenheap_pages(table->buffer,
   30.67 +                           get_order_from_bytes(table->alloc_size));
   30.68 +        table->buffer = NULL;
   30.69 +    }
   30.70 +}
   30.71 +
   30.72 +static void __init deallocate_iommu_resources(struct amd_iommu *iommu)
   30.73 +{
   30.74 +    deallocate_iommu_table_struct(&iommu->dev_table);
   30.75 +    deallocate_iommu_table_struct(&iommu->cmd_buffer);
   30.76 +}
   30.77 +
   30.78 +static void __init detect_cleanup(void)
   30.79 +{
   30.80 +    struct amd_iommu *iommu, *next;
   30.81 +
   30.82 +    list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list )
   30.83 +    {
   30.84 +        list_del(&iommu->list);
   30.85 +        deallocate_iommu_resources(iommu);
   30.86 +        xfree(iommu);
   30.87 +    }
   30.88 +}
   30.89 +
   30.90 +static int requestor_id_from_bdf(int bdf)
   30.91 +{
   30.92 +    /* HACK - HACK */
   30.93 +    /* account for possible 'aliasing' by parent device */
   30.94 +    return bdf;
   30.95 +}
   30.96 +
   30.97 +static int __init allocate_iommu_table_struct(struct table_struct *table,
   30.98 +                                              const char *name)
   30.99 +{
  30.100 +    table->buffer = (void *) alloc_xenheap_pages(
  30.101 +        get_order_from_bytes(table->alloc_size));
  30.102 +
  30.103 +    if ( !table->buffer )
  30.104 +    {
  30.105 +        dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating %s\n", name);
  30.106 +        return -ENOMEM;
  30.107 +    }
  30.108 +
  30.109 +    memset(table->buffer, 0, table->alloc_size);
  30.110 +
  30.111 +    return 0;
  30.112 +}
  30.113 +
  30.114 +static int __init allocate_iommu_resources(struct amd_iommu *iommu)
  30.115 +{
  30.116 +    /* allocate 'device table' on a 4K boundary */
  30.117 +    iommu->dev_table.alloc_size =
  30.118 +        PAGE_ALIGN(((iommu->last_downstream_bus + 1) *
  30.119 +                    IOMMU_DEV_TABLE_ENTRIES_PER_BUS) *
  30.120 +                   IOMMU_DEV_TABLE_ENTRY_SIZE);
  30.121 +    iommu->dev_table.entries =
  30.122 +        iommu->dev_table.alloc_size / IOMMU_DEV_TABLE_ENTRY_SIZE;
  30.123 +
  30.124 +    if ( allocate_iommu_table_struct(&iommu->dev_table,
  30.125 +                                     "Device Table") != 0 )
  30.126 +        goto error_out;
  30.127 +
  30.128 +    /* allocate 'command buffer' in power of 2 increments of 4K */
  30.129 +    iommu->cmd_buffer_tail = 0;
  30.130 +    iommu->cmd_buffer.alloc_size =
  30.131 +        PAGE_SIZE << get_order_from_bytes(
  30.132 +            PAGE_ALIGN(amd_iommu_cmd_buffer_entries *
  30.133 +                       IOMMU_CMD_BUFFER_ENTRY_SIZE));
  30.134 +
  30.135 +    iommu->cmd_buffer.entries =
  30.136 +        iommu->cmd_buffer.alloc_size / IOMMU_CMD_BUFFER_ENTRY_SIZE;
  30.137 +
  30.138 +    if ( allocate_iommu_table_struct(&iommu->cmd_buffer,
  30.139 +                                     "Command Buffer") != 0 )
  30.140 +        goto error_out;
  30.141 +
  30.142 +    return 0;
  30.143 +
  30.144 + error_out:
  30.145 +    deallocate_iommu_resources(iommu);
  30.146 +    return -ENOMEM;
  30.147 +}
  30.148 +
  30.149 +int iommu_detect_callback(u8 bus, u8 dev, u8 func, u8 cap_ptr)
  30.150 +{
  30.151 +    struct amd_iommu *iommu;
  30.152 +
  30.153 +    iommu = (struct amd_iommu *) xmalloc(struct amd_iommu);
  30.154 +    if ( !iommu )
  30.155 +    {
  30.156 +        dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating amd_iommu\n");
  30.157 +        return -ENOMEM;
  30.158 +    }
  30.159 +    memset(iommu, 0, sizeof(struct amd_iommu));
  30.160 +    spin_lock_init(&iommu->lock);
  30.161 +
  30.162 +    /* get capability and topology information */
  30.163 +    if ( get_iommu_capabilities(bus, dev, func, cap_ptr, iommu) != 0 )
  30.164 +        goto error_out;
  30.165 +    if ( get_iommu_last_downstream_bus(iommu) != 0 )
  30.166 +        goto error_out;
  30.167 +
  30.168 +    list_add_tail(&iommu->list, &amd_iommu_head);
  30.169 +
  30.170 +    /* allocate resources for this IOMMU */
  30.171 +    if ( allocate_iommu_resources(iommu) != 0 )
  30.172 +        goto error_out;
  30.173 +
  30.174 +    return 0;
  30.175 +
  30.176 + error_out:
  30.177 +    xfree(iommu);
  30.178 +    return -ENODEV;
  30.179 +}
  30.180 +
  30.181 +static int __init amd_iommu_init(void)
  30.182 +{
  30.183 +    struct amd_iommu *iommu;
  30.184 +    unsigned long flags;
  30.185 +
  30.186 +    for_each_amd_iommu ( iommu )
  30.187 +    {
  30.188 +        spin_lock_irqsave(&iommu->lock, flags);
  30.189 +
  30.190 +        /* register IOMMU data structures in MMIO space */
  30.191 +        if ( map_iommu_mmio_region(iommu) != 0 )
  30.192 +            goto error_out;
  30.193 +        register_iommu_dev_table_in_mmio_space(iommu);
  30.194 +        register_iommu_cmd_buffer_in_mmio_space(iommu);
  30.195 +
  30.196 +        /* enable IOMMU translation services */
  30.197 +        enable_iommu(iommu);
  30.198 +        nr_amd_iommus++;
  30.199 +
  30.200 +        spin_unlock_irqrestore(&iommu->lock, flags);
  30.201 +    }
  30.202 +
  30.203 +    amd_iommu_enabled = 1;
  30.204 +
  30.205 +    return 0;
  30.206 +
  30.207 + error_out:
  30.208 +    init_cleanup();
  30.209 +    return -ENODEV;
  30.210 +}
  30.211 +
  30.212 +struct amd_iommu *find_iommu_for_device(int bus, int devfn)
  30.213 +{
  30.214 +    struct amd_iommu *iommu;
  30.215 +
  30.216 +    for_each_amd_iommu ( iommu )
  30.217 +    {
  30.218 +        if ( bus == iommu->root_bus )
  30.219 +        {
  30.220 +            if ( (devfn >= iommu->first_devfn) &&
  30.221 +                 (devfn <= iommu->last_devfn) )
  30.222 +                return iommu;
  30.223 +        }
  30.224 +        else if ( bus <= iommu->last_downstream_bus )
  30.225 +        {
  30.226 +            if ( iommu->downstream_bus_present[bus] )
  30.227 +                return iommu;
  30.228 +        }
  30.229 +    }
  30.230 +
  30.231 +    return NULL;
  30.232 +}
  30.233 +
  30.234 +void amd_iommu_setup_domain_device(
  30.235 +    struct domain *domain, struct amd_iommu *iommu, int requestor_id)
  30.236 +{
  30.237 +    void *dte;
  30.238 +    u64 root_ptr;
  30.239 +    unsigned long flags;
  30.240 +    struct hvm_iommu *hd = domain_hvm_iommu(domain);
  30.241 +
  30.242 +    BUG_ON( !hd->root_table || !hd->paging_mode );
  30.243 +
  30.244 +    root_ptr = (u64)virt_to_maddr(hd->root_table);
  30.245 +    dte = iommu->dev_table.buffer +
  30.246 +        (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
  30.247 +
  30.248 +    if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
  30.249 +    {
  30.250 +        spin_lock_irqsave(&iommu->lock, flags); 
  30.251 +
  30.252 +        amd_iommu_set_dev_table_entry(
  30.253 +            (u32 *)dte,
  30.254 +            root_ptr, hd->domain_id, hd->paging_mode);
  30.255 +        invalidate_dev_table_entry(iommu, requestor_id);
  30.256 +        flush_command_buffer(iommu);
  30.257 +        dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
  30.258 +                "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
  30.259 +                requestor_id, root_ptr, hd->domain_id, hd->paging_mode);
  30.260 +
  30.261 +        spin_unlock_irqrestore(&iommu->lock, flags);
  30.262 +    }
  30.263 +}
  30.264 +
  30.265 +void __init amd_iommu_setup_dom0_devices(void)
  30.266 +{
  30.267 +    struct hvm_iommu *hd = domain_hvm_iommu(dom0);
  30.268 +    struct amd_iommu *iommu;
  30.269 +    struct pci_dev *pdev;
  30.270 +    int bus, dev, func;
  30.271 +    u32 l;
  30.272 +    int req_id, bdf;
  30.273 +
  30.274 +    for ( bus = 0; bus < 256; bus++ )
  30.275 +    {
  30.276 +        for ( dev = 0; dev < 32; dev++ )
  30.277 +        {
  30.278 +            for ( func = 0; func < 8; func++ )
  30.279 +            {
  30.280 +                l = read_pci_config(bus, dev, func, PCI_VENDOR_ID);
  30.281 +                /* some broken boards return 0 or ~0 if a slot is empty: */
  30.282 +                if ( l == 0xffffffff || l == 0x00000000 ||
  30.283 +                     l == 0x0000ffff || l == 0xffff0000 )
  30.284 +                    continue;
  30.285 +
  30.286 +                pdev = xmalloc(struct pci_dev);
  30.287 +                pdev->bus = bus;
  30.288 +                pdev->devfn = PCI_DEVFN(dev, func);
  30.289 +                list_add_tail(&pdev->list, &hd->pdev_list);
  30.290 +
  30.291 +                bdf = (bus << 8) | pdev->devfn;
  30.292 +                req_id = requestor_id_from_bdf(bdf);
  30.293 +                iommu = find_iommu_for_device(bus, pdev->devfn);
  30.294 +
  30.295 +                if ( iommu )
  30.296 +                    amd_iommu_setup_domain_device(dom0, iommu, req_id);
  30.297 +            }
  30.298 +        }
  30.299 +    }
  30.300 +}
  30.301 +
  30.302 +int amd_iommu_detect(void)
  30.303 +{
  30.304 +    unsigned long i;
  30.305 +
  30.306 +    if ( !enable_amd_iommu )
  30.307 +    {
  30.308 +        printk("AMD IOMMU: Disabled\n");
  30.309 +        return 0;
  30.310 +    }
  30.311 +
  30.312 +    INIT_LIST_HEAD(&amd_iommu_head);
  30.313 +
  30.314 +    if ( scan_for_iommu(iommu_detect_callback) != 0 )
  30.315 +    {
  30.316 +        dprintk(XENLOG_ERR, "AMD IOMMU: Error during detection\n");
  30.317 +        goto error_out;
  30.318 +    }
  30.319 +
  30.320 +    if ( !iommu_found() )
  30.321 +    {
  30.322 +        printk("AMD IOMMU: Not found!\n");
  30.323 +        return 0;
  30.324 +    }
  30.325 +
  30.326 +    if ( amd_iommu_init() != 0 )
  30.327 +    {
  30.328 +        dprintk(XENLOG_ERR, "AMD IOMMU: Error during initialization\n");
  30.329 +        goto error_out;
  30.330 +    }
  30.331 +
  30.332 +    if ( iommu_domain_init(dom0) != 0 )
  30.333 +        goto error_out;
  30.334 +
  30.335 +    /* setup 1:1 page table for dom0 */
  30.336 +    for ( i = 0; i < max_page; i++ )
  30.337 +        amd_iommu_map_page(dom0, i, i);
  30.338 +
  30.339 +    amd_iommu_setup_dom0_devices();
  30.340 +    return 0;
  30.341 +
  30.342 + error_out:
  30.343 +    detect_cleanup();
  30.344 +    return -ENODEV;
  30.345 +
  30.346 +}
  30.347 +
  30.348 +static int allocate_domain_resources(struct hvm_iommu *hd)
  30.349 +{
  30.350 +    /* allocate root table */
  30.351 +    unsigned long flags;
  30.352 +
  30.353 +    spin_lock_irqsave(&hd->mapping_lock, flags);
  30.354 +    if ( !hd->root_table )
  30.355 +    {
  30.356 +        hd->root_table = (void *)alloc_xenheap_page();
  30.357 +        if ( !hd->root_table )
  30.358 +            goto error_out;
  30.359 +        memset((u8*)hd->root_table, 0, PAGE_SIZE);
  30.360 +    }
  30.361 +    spin_unlock_irqrestore(&hd->mapping_lock, flags);
  30.362 +
  30.363 +    return 0;
  30.364 + error_out:
  30.365 +    spin_unlock_irqrestore(&hd->mapping_lock, flags);
  30.366 +    return -ENOMEM;
  30.367 +}
  30.368 +
  30.369 +static int get_paging_mode(unsigned long entries)
  30.370 +{
  30.371 +    int level = 1;
  30.372 +
  30.373 +    BUG_ON( !max_page );
  30.374 +
  30.375 +    if ( entries > max_page )
  30.376 +        entries = max_page;
  30.377 +
  30.378 +    while ( entries > PTE_PER_TABLE_SIZE )
  30.379 +    {
  30.380 +        entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT;
  30.381 +        ++level;
  30.382 +        if ( level > 6 )
  30.383 +            return -ENOMEM;
  30.384 +    }
  30.385 +
  30.386 +    dprintk(XENLOG_INFO, "AMD IOMMU: paging mode = %d\n", level);
  30.387 +
  30.388 +    return level;
  30.389 +}
  30.390 +
  30.391 +int amd_iommu_domain_init(struct domain *domain)
  30.392 +{
  30.393 +    struct hvm_iommu *hd = domain_hvm_iommu(domain);
  30.394 +
  30.395 +    /* allocate page directory */
  30.396 +    if ( allocate_domain_resources(hd) != 0 )
  30.397 +    {
  30.398 +        deallocate_domain_resources(hd);
  30.399 +        return -ENOMEM;
  30.400 +    }
  30.401 +
  30.402 +    if ( is_hvm_domain(domain) )
  30.403 +        hd->paging_mode = IOMMU_PAGE_TABLE_LEVEL_4;
  30.404 +    else
  30.405 +        hd->paging_mode = get_paging_mode(max_page);
  30.406 +
  30.407 +    hd->domain_id = domain->domain_id;
  30.408 +
  30.409 +    return 0;
  30.410 +}
  30.411 +
  30.412 +static void amd_iommu_disable_domain_device(
  30.413 +    struct domain *domain, struct amd_iommu *iommu, u16 requestor_id)
  30.414 +{
  30.415 +    void *dte;
  30.416 +    unsigned long flags;
  30.417 +
  30.418 +    dte = iommu->dev_table.buffer +
  30.419 +        (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
  30.420 +
  30.421 +    if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
  30.422 +    {
  30.423 +        spin_lock_irqsave(&iommu->lock, flags); 
  30.424 +        memset(dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
  30.425 +        invalidate_dev_table_entry(iommu, requestor_id);
  30.426 +        flush_command_buffer(iommu);
  30.427 +        dprintk(XENLOG_INFO, "AMD IOMMU: disable DTE 0x%x,"
  30.428 +                " domain_id:%d, paging_mode:%d\n",
  30.429 +                requestor_id, domain_hvm_iommu(domain)->domain_id,
  30.430 +                domain_hvm_iommu(domain)->paging_mode);
  30.431 +        spin_unlock_irqrestore(&iommu->lock, flags);
  30.432 +    }
  30.433 +}
  30.434 +
  30.435 +extern void pdev_flr(u8 bus, u8 devfn);
  30.436 +
  30.437 +static int reassign_device( struct domain *source, struct domain *target,
  30.438 +                            u8 bus, u8 devfn)
  30.439 +{
  30.440 +    struct hvm_iommu *source_hd = domain_hvm_iommu(source);
  30.441 +    struct hvm_iommu *target_hd = domain_hvm_iommu(target);
  30.442 +    struct pci_dev *pdev;
  30.443 +    struct amd_iommu *iommu;
  30.444 +    int req_id, bdf;
  30.445 +    unsigned long flags;
  30.446 +
  30.447 +    for_each_pdev( source, pdev )
  30.448 +    {
  30.449 +        if ( (pdev->bus != bus) || (pdev->devfn != devfn) )
  30.450 +            continue;
  30.451 +
  30.452 +        pdev->bus = bus;
  30.453 +        pdev->devfn = devfn;
  30.454 +
  30.455 +        bdf = (bus << 8) | devfn;
  30.456 +        req_id = requestor_id_from_bdf(bdf);
  30.457 +        iommu = find_iommu_for_device(bus, devfn);
  30.458 +
  30.459 +        if ( iommu )
  30.460 +        {
  30.461 +            amd_iommu_disable_domain_device(source, iommu, req_id);
  30.462 +            /* Move pci device from the source domain to target domain. */
  30.463 +            spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
  30.464 +            spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
  30.465 +            list_move(&pdev->list, &target_hd->pdev_list);
  30.466 +            spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
  30.467 +            spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
  30.468 +
  30.469 +            amd_iommu_setup_domain_device(target, iommu, req_id);
  30.470 +            gdprintk(XENLOG_INFO,
  30.471 +                     "AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
  30.472 +                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
  30.473 +                     source->domain_id, target->domain_id);
  30.474 +        }
  30.475 +        else
  30.476 +        {
  30.477 +            gdprintk(XENLOG_ERR, "AMD IOMMU: failed to find iommu."
  30.478 +                     " %x:%x.%x cannot be assigned to domain %d\n",
  30.479 +                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn), target->domain_id);
  30.480 +            return -ENODEV;
  30.481 +        }
  30.482 +
  30.483 +        break;
  30.484 +    }
  30.485 +    return 0;
  30.486 +}
  30.487 +
  30.488 +int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
  30.489 +{
  30.490 +    pdev_flr(bus, devfn);
  30.491 +    return reassign_device(dom0, d, bus, devfn);
  30.492 +}
  30.493 +
  30.494 +static void release_domain_devices(struct domain *d)
  30.495 +{
  30.496 +    struct hvm_iommu *hd = domain_hvm_iommu(d);
  30.497 +    struct pci_dev *pdev;
  30.498 +
  30.499 +    while ( !list_empty(&hd->pdev_list) )
  30.500 +    {
  30.501 +        pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
  30.502 +        pdev_flr(pdev->bus, pdev->devfn);
  30.503 +        gdprintk(XENLOG_INFO,
  30.504 +                 "AMD IOMMU: releasing device %x:%x.%x\n",
  30.505 +                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  30.506 +        reassign_device(d, dom0, pdev->bus, pdev->devfn);
  30.507 +    }
  30.508 +}
  30.509 +
  30.510 +static void deallocate_next_page_table(void *table, unsigned long index,
  30.511 +                                       int level)
  30.512 +{
  30.513 +    unsigned long next_index;
  30.514 +    void *next_table, *pde;
  30.515 +    int next_level;
  30.516 +
  30.517 +    pde = table + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
  30.518 +    next_table = amd_iommu_get_vptr_from_page_table_entry((u32 *)pde);
  30.519 +
  30.520 +    if ( next_table )
  30.521 +    {
  30.522 +        next_level = level - 1;
  30.523 +        if ( next_level > 1 )
  30.524 +        {
  30.525 +            next_index = 0;
  30.526 +            do
  30.527 +            {
  30.528 +                deallocate_next_page_table(next_table,
  30.529 +                                           next_index, next_level);
  30.530 +                ++next_index;
  30.531 +            } while ( next_index < PTE_PER_TABLE_SIZE );
  30.532 +        }
  30.533 +
  30.534 +        free_xenheap_page(next_table);
  30.535 +    }
  30.536 +}
  30.537 +
  30.538 +static void deallocate_iommu_page_tables(struct domain *d)
  30.539 +{
  30.540 +    unsigned long index;
  30.541 +    struct hvm_iommu *hd = domain_hvm_iommu(d);
  30.542 +
  30.543 +    if ( hd->root_table )
  30.544 +    {
  30.545 +        index = 0;
  30.546 +        do
  30.547 +        {
  30.548 +            deallocate_next_page_table(hd->root_table,
  30.549 +                                       index, hd->paging_mode);
  30.550 +            ++index;
  30.551 +        } while ( index < PTE_PER_TABLE_SIZE );
  30.552 +
  30.553 +        free_xenheap_page(hd->root_table);
  30.554 +    }
  30.555 +
  30.556 +    hd->root_table = NULL;
  30.557 +}
  30.558 +
  30.559 +void amd_iommu_domain_destroy(struct domain *d)
  30.560 +{
  30.561 +    if ( !amd_iommu_enabled )
  30.562 +        return;
  30.563 +
  30.564 +    deallocate_iommu_page_tables(d);
  30.565 +    release_domain_devices(d);
  30.566 +}
  30.567 +
  30.568 +void amd_iommu_return_device(struct domain *s, struct domain *t, u8 bus, u8 devfn)
  30.569 +{
  30.570 +    pdev_flr(bus, devfn);
  30.571 +    reassign_device(s, t, bus, devfn);
  30.572 +}
  30.573 +
  30.574 +struct iommu_ops amd_iommu_ops = {
  30.575 +    .init = amd_iommu_domain_init,
  30.576 +    .assign_device  = amd_iommu_assign_device,
  30.577 +    .teardown = amd_iommu_domain_destroy,
  30.578 +    .map_page = amd_iommu_map_page,
  30.579 +    .unmap_page = amd_iommu_unmap_page,
  30.580 +    .reassign_device = amd_iommu_return_device,
  30.581 +};
    31.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.2 +++ b/xen/drivers/passthrough/pci-direct.h	Thu Feb 21 15:06:37 2008 +0000
    31.3 @@ -0,0 +1,48 @@
    31.4 +#ifndef ASM_PCI_DIRECT_H
    31.5 +#define ASM_PCI_DIRECT_H 1
    31.6 +
    31.7 +#include <xen/types.h>
    31.8 +#include <asm/io.h>
    31.9 +
   31.10 +/* Direct PCI access. This is used for PCI accesses in early boot before
   31.11 +   the PCI subsystem works. */
   31.12 +
   31.13 +#define PDprintk(x...)
   31.14 +
   31.15 +static inline u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset)
   31.16 +{
   31.17 +    u32 v;
   31.18 +    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
   31.19 +    v = inl(0xcfc);
   31.20 +    if (v != 0xffffffff)
   31.21 +        PDprintk("%x reading 4 from %x: %x\n", slot, offset, v);
   31.22 +    return v;
   31.23 +}
   31.24 +
   31.25 +static inline u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset)
   31.26 +{
   31.27 +    u8 v;
   31.28 +    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
   31.29 +    v = inb(0xcfc + (offset&3));
   31.30 +    PDprintk("%x reading 1 from %x: %x\n", slot, offset, v);
   31.31 +    return v;
   31.32 +}
   31.33 +
   31.34 +static inline u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset)
   31.35 +{
   31.36 +    u16 v;
   31.37 +    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
   31.38 +    v = inw(0xcfc + (offset&2));
   31.39 +    PDprintk("%x reading 2 from %x: %x\n", slot, offset, v);
   31.40 +    return v;
   31.41 +}
   31.42 +
   31.43 +static inline void write_pci_config(
   31.44 +    u8 bus, u8 slot, u8 func, u8 offset, u32 val)
   31.45 +{
   31.46 +    PDprintk("%x writing to %x: %x\n", slot, offset, val);
   31.47 +    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
   31.48 +    outl(val, 0xcfc);
   31.49 +}
   31.50 +
   31.51 +#endif
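The helpers above implement the legacy type-1 configuration mechanism: a dword
written to port 0xcf8 (enable bit 31 plus bus/slot/function/register) selects
which configuration dword appears at port 0xcfc. A hedged usage sketch,
mirroring the probing loop this changeset uses for dom0 device setup (bus and
slot are caller-supplied; PCI_VENDOR_ID comes from pci_regs.h):

    /* Probe function 0 of one slot; empty slots read back as all-ones. */
    u32 id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID);
    if ( id != 0xffffffff && id != 0x00000000 )
        printk("%02x:%02x.0 -> vendor %04x, device %04x\n",
               bus, slot, id & 0xffff, id >> 16);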
    32.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    32.2 +++ b/xen/drivers/passthrough/pci_regs.h	Thu Feb 21 15:06:37 2008 +0000
    32.3 @@ -0,0 +1,530 @@
    32.4 +/*
    32.5 + *	pci_regs.h
    32.6 + *
    32.7 + *	PCI standard defines
    32.8 + *	Copyright 1994, Drew Eckhardt
    32.9 + *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
   32.10 + *
   32.11 + *	For more information, please consult the following manuals (look at
   32.12 + *	http://www.pcisig.com/ for how to get them):
   32.13 + *
   32.14 + *	PCI BIOS Specification
   32.15 + *	PCI Local Bus Specification
   32.16 + *	PCI to PCI Bridge Specification
   32.17 + *	PCI System Design Guide
   32.18 + *
   32.19 + * 	For hypertransport information, please consult the following manuals
   32.20 + * 	from http://www.hypertransport.org
   32.21 + *
   32.22 + *	The Hypertransport I/O Link Specification
   32.23 + */
   32.24 +
   32.25 +#ifndef LINUX_PCI_REGS_H
   32.26 +#define LINUX_PCI_REGS_H
   32.27 +
   32.28 +/*
   32.29 + * Under PCI, each device has 256 bytes of configuration address space,
   32.30 + * of which the first 64 bytes are standardized as follows:
   32.31 + */
   32.32 +#define PCI_VENDOR_ID		0x00	/* 16 bits */
   32.33 +#define PCI_DEVICE_ID		0x02	/* 16 bits */
   32.34 +#define PCI_COMMAND		0x04	/* 16 bits */
   32.35 +#define  PCI_COMMAND_IO		0x1	/* Enable response in I/O space */
   32.36 +#define  PCI_COMMAND_MEMORY	0x2	/* Enable response in Memory space */
   32.37 +#define  PCI_COMMAND_MASTER	0x4	/* Enable bus mastering */
   32.38 +#define  PCI_COMMAND_SPECIAL	0x8	/* Enable response to special cycles */
   32.39 +#define  PCI_COMMAND_INVALIDATE	0x10	/* Use memory write and invalidate */
   32.40 +#define  PCI_COMMAND_VGA_PALETTE 0x20	/* Enable palette snooping */
   32.41 +#define  PCI_COMMAND_PARITY	0x40	/* Enable parity checking */
   32.42 +#define  PCI_COMMAND_WAIT 	0x80	/* Enable address/data stepping */
   32.43 +#define  PCI_COMMAND_SERR	0x100	/* Enable SERR */
   32.44 +#define  PCI_COMMAND_FAST_BACK	0x200	/* Enable back-to-back writes */
   32.45 +#define  PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
   32.46 +
   32.47 +#define PCI_STATUS		0x06	/* 16 bits */
   32.48 +#define  PCI_STATUS_CAP_LIST	0x10	/* Support Capability List */
   32.49 +#define  PCI_STATUS_66MHZ	0x20	/* Support 66 MHz PCI 2.1 bus */
   32.50 +#define  PCI_STATUS_UDF		0x40	/* Support User Definable Features [obsolete] */
   32.51 +#define  PCI_STATUS_FAST_BACK	0x80	/* Accept fast back-to-back */
   32.52 +#define  PCI_STATUS_PARITY	0x100	/* Detected parity error */
   32.53 +#define  PCI_STATUS_DEVSEL_MASK	0x600	/* DEVSEL timing */
   32.54 +#define  PCI_STATUS_DEVSEL_FAST		0x000
   32.55 +#define  PCI_STATUS_DEVSEL_MEDIUM	0x200
   32.56 +#define  PCI_STATUS_DEVSEL_SLOW		0x400
   32.57 +#define  PCI_STATUS_SIG_TARGET_ABORT	0x800 /* Set on target abort */
   32.58 +#define  PCI_STATUS_REC_TARGET_ABORT	0x1000 /* Master ack of " */
   32.59 +#define  PCI_STATUS_REC_MASTER_ABORT	0x2000 /* Set on master abort */
   32.60 +#define  PCI_STATUS_SIG_SYSTEM_ERROR	0x4000 /* Set when we drive SERR */
   32.61 +#define  PCI_STATUS_DETECTED_PARITY	0x8000 /* Set on parity error */
   32.62 +
   32.63 +#define PCI_CLASS_REVISION	0x08	/* High 24 bits are class, low 8 revision */
   32.64 +#define PCI_REVISION_ID		0x08	/* Revision ID */
   32.65 +#define PCI_CLASS_PROG		0x09	/* Reg. Level Programming Interface */
   32.66 +#define PCI_CLASS_DEVICE	0x0a	/* Device class */
   32.67 +
   32.68 +#define PCI_CACHE_LINE_SIZE	0x0c	/* 8 bits */
   32.69 +#define PCI_LATENCY_TIMER	0x0d	/* 8 bits */
   32.70 +#define PCI_HEADER_TYPE		0x0e	/* 8 bits */
   32.71 +#define  PCI_HEADER_TYPE_NORMAL		0
   32.72 +#define  PCI_HEADER_TYPE_BRIDGE		1
   32.73 +#define  PCI_HEADER_TYPE_CARDBUS	2
   32.74 +
   32.75 +#define PCI_BIST		0x0f	/* 8 bits */
   32.76 +#define  PCI_BIST_CODE_MASK	0x0f	/* Return result */
   32.77 +#define  PCI_BIST_START		0x40	/* 1 to start BIST, 2 secs or less */
   32.78 +#define  PCI_BIST_CAPABLE	0x80	/* 1 if BIST capable */
   32.79 +
   32.80 +/*
   32.81 + * Base addresses specify locations in memory or I/O space.
   32.82 + * Decoded size can be determined by writing a value of
   32.83 + * 0xffffffff to the register, and reading it back.  Only
   32.84 + * 1 bits are decoded.
   32.85 + */
   32.86 +#define PCI_BASE_ADDRESS_0	0x10	/* 32 bits */
   32.87 +#define PCI_BASE_ADDRESS_1	0x14	/* 32 bits [htype 0,1 only] */
   32.88 +#define PCI_BASE_ADDRESS_2	0x18	/* 32 bits [htype 0 only] */
   32.89 +#define PCI_BASE_ADDRESS_3	0x1c	/* 32 bits */
   32.90 +#define PCI_BASE_ADDRESS_4	0x20	/* 32 bits */
   32.91 +#define PCI_BASE_ADDRESS_5	0x24	/* 32 bits */
   32.92 +#define  PCI_BASE_ADDRESS_SPACE		0x01	/* 0 = memory, 1 = I/O */
   32.93 +#define  PCI_BASE_ADDRESS_SPACE_IO	0x01
   32.94 +#define  PCI_BASE_ADDRESS_SPACE_MEMORY	0x00
   32.95 +#define  PCI_BASE_ADDRESS_MEM_TYPE_MASK	0x06
   32.96 +#define  PCI_BASE_ADDRESS_MEM_TYPE_32	0x00	/* 32 bit address */
   32.97 +#define  PCI_BASE_ADDRESS_MEM_TYPE_1M	0x02	/* Below 1M [obsolete] */
   32.98 +#define  PCI_BASE_ADDRESS_MEM_TYPE_64	0x04	/* 64 bit address */
   32.99 +#define  PCI_BASE_ADDRESS_MEM_PREFETCH	0x08	/* prefetchable? */
  32.100 +#define  PCI_BASE_ADDRESS_MEM_MASK	(~0x0fUL)
  32.101 +#define  PCI_BASE_ADDRESS_IO_MASK	(~0x03UL)
  32.102 +/* bit 1 is reserved if address_space = 1 */
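The sizing procedure the comment above describes can be spelled out with the
pci-direct.h accessors; a sketch only, assuming a 32-bit memory BAR (the
helper is illustrative, not part of this changeset):

    /* Write all-ones, read back, restore, then isolate the size bits:
     * the lowest writable address bit gives the decoded window size. */
    static u32 bar_size(u8 bus, u8 slot, u8 func, u8 bar_off)
    {
        u32 saved = read_pci_config(bus, slot, func, bar_off);
        u32 probe;

        write_pci_config(bus, slot, func, bar_off, 0xffffffff);
        probe = read_pci_config(bus, slot, func, bar_off);
        write_pci_config(bus, slot, func, bar_off, saved);

        probe &= PCI_BASE_ADDRESS_MEM_MASK;  /* drop the type bits */
        return probe ? (~probe + 1) : 0;
    }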
  32.103 +
  32.104 +/* Header type 0 (normal devices) */
  32.105 +#define PCI_CARDBUS_CIS		0x28
  32.106 +#define PCI_SUBSYSTEM_VENDOR_ID	0x2c
  32.107 +#define PCI_SUBSYSTEM_ID	0x2e
  32.108 +#define PCI_ROM_ADDRESS		0x30	/* Bits 31..11 are address, 10..1 reserved */
  32.109 +#define  PCI_ROM_ADDRESS_ENABLE	0x01
  32.110 +#define PCI_ROM_ADDRESS_MASK	(~0x7ffUL)
  32.111 +
  32.112 +#define PCI_CAPABILITY_LIST	0x34	/* Offset of first capability list entry */
  32.113 +
  32.114 +/* 0x35-0x3b are reserved */
  32.115 +#define PCI_INTERRUPT_LINE	0x3c	/* 8 bits */
  32.116 +#define PCI_INTERRUPT_PIN	0x3d	/* 8 bits */
  32.117 +#define PCI_MIN_GNT		0x3e	/* 8 bits */
  32.118 +#define PCI_MAX_LAT		0x3f	/* 8 bits */
  32.119 +
  32.120 +/* Header type 1 (PCI-to-PCI bridges) */
  32.121 +#define PCI_PRIMARY_BUS		0x18	/* Primary bus number */
  32.122 +#define PCI_SECONDARY_BUS	0x19	/* Secondary bus number */
  32.123 +#define PCI_SUBORDINATE_BUS	0x1a	/* Highest bus number behind the bridge */
  32.124 +#define PCI_SEC_LATENCY_TIMER	0x1b	/* Latency timer for secondary interface */
  32.125 +#define PCI_IO_BASE		0x1c	/* I/O range behind the bridge */
  32.126 +#define PCI_IO_LIMIT		0x1d
  32.127 +#define  PCI_IO_RANGE_TYPE_MASK	0x0fUL	/* I/O bridging type */
  32.128 +#define  PCI_IO_RANGE_TYPE_16	0x00
  32.129 +#define  PCI_IO_RANGE_TYPE_32	0x01
  32.130 +#define  PCI_IO_RANGE_MASK	(~0x0fUL)
  32.131 +#define PCI_SEC_STATUS		0x1e	/* Secondary status register, only bit 14 used */
  32.132 +#define PCI_MEMORY_BASE		0x20	/* Memory range behind */
  32.133 +#define PCI_MEMORY_LIMIT	0x22
  32.134 +#define  PCI_MEMORY_RANGE_TYPE_MASK 0x0fUL
  32.135 +#define  PCI_MEMORY_RANGE_MASK	(~0x0fUL)
  32.136 +#define PCI_PREF_MEMORY_BASE	0x24	/* Prefetchable memory range behind */
  32.137 +#define PCI_PREF_MEMORY_LIMIT	0x26
  32.138 +#define  PCI_PREF_RANGE_TYPE_MASK 0x0fUL
  32.139 +#define  PCI_PREF_RANGE_TYPE_32	0x00
  32.140 +#define  PCI_PREF_RANGE_TYPE_64	0x01
  32.141 +#define  PCI_PREF_RANGE_MASK	(~0x0fUL)
  32.142 +#define PCI_PREF_BASE_UPPER32	0x28	/* Upper half of prefetchable memory range */
  32.143 +#define PCI_PREF_LIMIT_UPPER32	0x2c
  32.144 +#define PCI_IO_BASE_UPPER16	0x30	/* Upper half of I/O addresses */
  32.145 +#define PCI_IO_LIMIT_UPPER16	0x32
  32.146 +/* 0x34 same as for htype 0 */
  32.147 +/* 0x35-0x3b is reserved */
  32.148 +#define PCI_ROM_ADDRESS1	0x38	/* Same as PCI_ROM_ADDRESS, but for htype 1 */
  32.149 +/* 0x3c-0x3d are same as for htype 0 */
  32.150 +#define PCI_BRIDGE_CONTROL	0x3e
  32.151 +#define  PCI_BRIDGE_CTL_PARITY	0x01	/* Enable parity detection on secondary interface */
  32.152 +#define  PCI_BRIDGE_CTL_SERR	0x02	/* The same for SERR forwarding */
  32.153 +#define  PCI_BRIDGE_CTL_ISA	0x04	/* Enable ISA mode */
  32.154 +#define  PCI_BRIDGE_CTL_VGA	0x08	/* Forward VGA addresses */
  32.155 +#define  PCI_BRIDGE_CTL_MASTER_ABORT	0x20  /* Report master aborts */
  32.156 +#define  PCI_BRIDGE_CTL_BUS_RESET	0x40	/* Secondary bus reset */
  32.157 +#define  PCI_BRIDGE_CTL_FAST_BACK	0x80	/* Fast Back2Back enabled on secondary interface */
  32.158 +
  32.159 +/* Header type 2 (CardBus bridges) */
  32.160 +#define PCI_CB_CAPABILITY_LIST	0x14
  32.161 +/* 0x15 reserved */
  32.162 +#define PCI_CB_SEC_STATUS	0x16	/* Secondary status */
  32.163 +#define PCI_CB_PRIMARY_BUS	0x18	/* PCI bus number */
  32.164 +#define PCI_CB_CARD_BUS		0x19	/* CardBus bus number */
  32.165 +#define PCI_CB_SUBORDINATE_BUS	0x1a	/* Subordinate bus number */
  32.166 +#define PCI_CB_LATENCY_TIMER	0x1b	/* CardBus latency timer */
  32.167 +#define PCI_CB_MEMORY_BASE_0	0x1c
  32.168 +#define PCI_CB_MEMORY_LIMIT_0	0x20
  32.169 +#define PCI_CB_MEMORY_BASE_1	0x24
  32.170 +#define PCI_CB_MEMORY_LIMIT_1	0x28
  32.171 +#define PCI_CB_IO_BASE_0	0x2c
  32.172 +#define PCI_CB_IO_BASE_0_HI	0x2e
  32.173 +#define PCI_CB_IO_LIMIT_0	0x30
  32.174 +#define PCI_CB_IO_LIMIT_0_HI	0x32
  32.175 +#define PCI_CB_IO_BASE_1	0x34
  32.176 +#define PCI_CB_IO_BASE_1_HI	0x36
  32.177 +#define PCI_CB_IO_LIMIT_1	0x38
  32.178 +#define PCI_CB_IO_LIMIT_1_HI	0x3a
  32.179 +#define  PCI_CB_IO_RANGE_MASK	(~0x03UL)
  32.180 +/* 0x3c-0x3d are same as for htype 0 */
  32.181 +#define PCI_CB_BRIDGE_CONTROL	0x3e
  32.182 +#define  PCI_CB_BRIDGE_CTL_PARITY	0x01	/* Similar to standard bridge control register */
  32.183 +#define  PCI_CB_BRIDGE_CTL_SERR		0x02
  32.184 +#define  PCI_CB_BRIDGE_CTL_ISA		0x04
  32.185 +#define  PCI_CB_BRIDGE_CTL_VGA		0x08
  32.186 +#define  PCI_CB_BRIDGE_CTL_MASTER_ABORT	0x20
  32.187 +#define  PCI_CB_BRIDGE_CTL_CB_RESET	0x40	/* CardBus reset */
  32.188 +#define  PCI_CB_BRIDGE_CTL_16BIT_INT	0x80	/* Enable interrupt for 16-bit cards */
  32.189 +#define  PCI_CB_BRIDGE_CTL_PREFETCH_MEM0 0x100	/* Prefetch enable for both memory regions */
  32.190 +#define  PCI_CB_BRIDGE_CTL_PREFETCH_MEM1 0x200
  32.191 +#define  PCI_CB_BRIDGE_CTL_POST_WRITES	0x400
  32.192 +#define PCI_CB_SUBSYSTEM_VENDOR_ID	0x40
  32.193 +#define PCI_CB_SUBSYSTEM_ID		0x42
  32.194 +#define PCI_CB_LEGACY_MODE_BASE		0x44	/* 16-bit PC Card legacy mode base address (ExCa) */
  32.195 +/* 0x48-0x7f reserved */
  32.196 +
  32.197 +/* Capability lists */
  32.198 +
  32.199 +#define PCI_CAP_LIST_ID		0	/* Capability ID */
  32.200 +#define  PCI_CAP_ID_PM		0x01	/* Power Management */
  32.201 +#define  PCI_CAP_ID_AGP		0x02	/* Accelerated Graphics Port */
  32.202 +#define  PCI_CAP_ID_VPD		0x03	/* Vital Product Data */
  32.203 +#define  PCI_CAP_ID_SLOTID	0x04	/* Slot Identification */
  32.204 +#define  PCI_CAP_ID_MSI		0x05	/* Message Signalled Interrupts */
  32.205 +#define  PCI_CAP_ID_CHSWP	0x06	/* CompactPCI HotSwap */
  32.206 +#define  PCI_CAP_ID_PCIX	0x07	/* PCI-X */
  32.207 +#define  PCI_CAP_ID_HT		0x08	/* HyperTransport */
  32.208 +#define  PCI_CAP_ID_VNDR	0x09	/* Vendor specific */
  32.209 +#define  PCI_CAP_ID_DBG		0x0A	/* Debug port */
  32.210 +#define  PCI_CAP_ID_CCRC	0x0B	/* CompactPCI Central Resource Control */
  32.211 +#define  PCI_CAP_ID_SHPC 	0x0C	/* PCI Standard Hot-Plug Controller */
  32.212 +#define  PCI_CAP_ID_SSVID	0x0D	/* Bridge subsystem vendor/device ID */
  32.213 +#define  PCI_CAP_ID_AGP3	0x0E	/* AGP Target PCI-PCI bridge */
  32.214 +#define  PCI_CAP_ID_EXP 	0x10	/* PCI Express */
  32.215 +#define  PCI_CAP_ID_MSIX	0x11	/* MSI-X */
  32.216 +#define PCI_CAP_LIST_NEXT	1	/* Next capability in the list */
  32.217 +#define PCI_CAP_FLAGS		2	/* Capability defined flags (16 bits) */
  32.218 +#define PCI_CAP_SIZEOF		4
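These constants combine into the usual list walk anchored at
PCI_CAPABILITY_LIST (0x34) and guarded by PCI_STATUS_CAP_LIST; a hedged sketch
using the pci-direct.h readers (the helper and its hop bound are illustrative):

    /* Return the config-space offset of capability 'cap_id', or 0. */
    static u8 find_cap(u8 bus, u8 slot, u8 func, u8 cap_id)
    {
        u8 pos;
        int hops = 48;  /* bound against malformed or looping lists */

        if ( !(read_pci_config_16(bus, slot, func, PCI_STATUS) &
               PCI_STATUS_CAP_LIST) )
            return 0;
        pos = read_pci_config_byte(bus, slot, func, PCI_CAPABILITY_LIST) & ~3;
        while ( pos >= 0x40 && hops-- )
        {
            if ( read_pci_config_byte(bus, slot, func,
                                      pos + PCI_CAP_LIST_ID) == cap_id )
                return pos;
            pos = read_pci_config_byte(bus, slot, func,
                                       pos + PCI_CAP_LIST_NEXT) & ~3;
        }
        return 0;
    }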
  32.219 +
  32.220 +/* Power Management Registers */
  32.221 +
  32.222 +#define PCI_PM_PMC		2	/* PM Capabilities Register */
  32.223 +#define  PCI_PM_CAP_VER_MASK	0x0007	/* Version */
  32.224 +#define  PCI_PM_CAP_PME_CLOCK	0x0008	/* PME clock required */
  32.225 +#define  PCI_PM_CAP_RESERVED    0x0010  /* Reserved field */
  32.226 +#define  PCI_PM_CAP_DSI		0x0020	/* Device specific initialization */
  32.227 +#define  PCI_PM_CAP_AUX_POWER	0x01C0	/* Auxiliary power support mask */
  32.228 +#define  PCI_PM_CAP_D1		0x0200	/* D1 power state support */
  32.229 +#define  PCI_PM_CAP_D2		0x0400	/* D2 power state support */
  32.230 +#define  PCI_PM_CAP_PME		0x0800	/* PME pin supported */
  32.231 +#define  PCI_PM_CAP_PME_MASK	0xF800	/* PME Mask of all supported states */
  32.232 +#define  PCI_PM_CAP_PME_D0	0x0800	/* PME# from D0 */
  32.233 +#define  PCI_PM_CAP_PME_D1	0x1000	/* PME# from D1 */
  32.234 +#define  PCI_PM_CAP_PME_D2	0x2000	/* PME# from D2 */
  32.235 +#define  PCI_PM_CAP_PME_D3	0x4000	/* PME# from D3 (hot) */
  32.236 +#define  PCI_PM_CAP_PME_D3cold	0x8000	/* PME# from D3 (cold) */
  32.237 +#define PCI_PM_CTRL		4	/* PM control and status register */
  32.238 +#define  PCI_PM_CTRL_STATE_MASK	0x0003	/* Current power state (D0 to D3) */
  32.239 +#define  PCI_PM_CTRL_NO_SOFT_RESET	0x0004	/* No reset for D3hot->D0 */
  32.240 +#define  PCI_PM_CTRL_PME_ENABLE	0x0100	/* PME pin enable */
  32.241 +#define  PCI_PM_CTRL_DATA_SEL_MASK	0x1e00	/* Data select (??) */
  32.242 +#define  PCI_PM_CTRL_DATA_SCALE_MASK	0x6000	/* Data scale (??) */
  32.243 +#define  PCI_PM_CTRL_PME_STATUS	0x8000	/* PME pin status */
  32.244 +#define PCI_PM_PPB_EXTENSIONS	6	/* PPB support extensions (??) */
  32.245 +#define  PCI_PM_PPB_B2_B3	0x40	/* Stop clock when in D3hot (??) */
  32.246 +#define  PCI_PM_BPCC_ENABLE	0x80	/* Bus power/clock control enable (??) */
  32.247 +#define PCI_PM_DATA_REGISTER	7	/* (??) */
  32.248 +#define PCI_PM_SIZEOF		8
  32.249 +
  32.250 +/* AGP registers */
  32.251 +
  32.252 +#define PCI_AGP_VERSION		2	/* BCD version number */
  32.253 +#define PCI_AGP_RFU		3	/* Rest of capability flags */
  32.254 +#define PCI_AGP_STATUS		4	/* Status register */
  32.255 +#define  PCI_AGP_STATUS_RQ_MASK	0xff000000	/* Maximum number of requests - 1 */
  32.256 +#define  PCI_AGP_STATUS_SBA	0x0200	/* Sideband addressing supported */
  32.257 +#define  PCI_AGP_STATUS_64BIT	0x0020	/* 64-bit addressing supported */
  32.258 +#define  PCI_AGP_STATUS_FW	0x0010	/* FW transfers supported */
  32.259 +#define  PCI_AGP_STATUS_RATE4	0x0004	/* 4x transfer rate supported */
  32.260 +#define  PCI_AGP_STATUS_RATE2	0x0002	/* 2x transfer rate supported */
  32.261 +#define  PCI_AGP_STATUS_RATE1	0x0001	/* 1x transfer rate supported */
  32.262 +#define PCI_AGP_COMMAND		8	/* Control register */
  32.263 +#define  PCI_AGP_COMMAND_RQ_MASK 0xff000000  /* Master: Maximum number of requests */
  32.264 +#define  PCI_AGP_COMMAND_SBA	0x0200	/* Sideband addressing enabled */
  32.265 +#define  PCI_AGP_COMMAND_AGP	0x0100	/* Allow processing of AGP transactions */
  32.266 +#define  PCI_AGP_COMMAND_64BIT	0x0020 	/* Allow processing of 64-bit addresses */
  32.267 +#define  PCI_AGP_COMMAND_FW	0x0010 	/* Force FW transfers */
  32.268 +#define  PCI_AGP_COMMAND_RATE4	0x0004	/* Use 4x rate */
  32.269 +#define  PCI_AGP_COMMAND_RATE2	0x0002	/* Use 2x rate */
  32.270 +#define  PCI_AGP_COMMAND_RATE1	0x0001	/* Use 1x rate */
  32.271 +#define PCI_AGP_SIZEOF		12
  32.272 +
  32.273 +/* Vital Product Data */
  32.274 +
  32.275 +#define PCI_VPD_ADDR		2	/* Address to access (15 bits!) */
  32.276 +#define  PCI_VPD_ADDR_MASK	0x7fff	/* Address mask */
  32.277 +#define  PCI_VPD_ADDR_F		0x8000	/* Write 0, 1 indicates completion */
  32.278 +#define PCI_VPD_DATA		4	/* 32-bits of data returned here */
  32.279 +
  32.280 +/* Slot Identification */
  32.281 +
  32.282 +#define PCI_SID_ESR		2	/* Expansion Slot Register */
  32.283 +#define  PCI_SID_ESR_NSLOTS	0x1f	/* Number of expansion slots available */
  32.284 +#define  PCI_SID_ESR_FIC	0x20	/* First In Chassis Flag */
  32.285 +#define PCI_SID_CHASSIS_NR	3	/* Chassis Number */
  32.286 +
  32.287 +/* Message Signalled Interrupts registers */
  32.288 +
  32.289 +#define PCI_MSI_FLAGS		2	/* Various flags */
  32.290 +#define  PCI_MSI_FLAGS_64BIT	0x80	/* 64-bit addresses allowed */
  32.291 +#define  PCI_MSI_FLAGS_QSIZE	0x70	/* Message queue size configured */
  32.292 +#define  PCI_MSI_FLAGS_QMASK	0x0e	/* Maximum queue size available */
  32.293 +#define  PCI_MSI_FLAGS_ENABLE	0x01	/* MSI feature enabled */
  32.294 +#define  PCI_MSI_FLAGS_MASKBIT	0x100	/* 64-bit mask bits allowed */
  32.295 +#define PCI_MSI_RFU		3	/* Rest of capability flags */
  32.296 +#define PCI_MSI_ADDRESS_LO	4	/* Lower 32 bits */
  32.297 +#define PCI_MSI_ADDRESS_HI	8	/* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
  32.298 +#define PCI_MSI_DATA_32		8	/* 16 bits of data for 32-bit devices */
  32.299 +#define PCI_MSI_DATA_64		12	/* 16 bits of data for 64-bit devices */
  32.300 +#define PCI_MSI_MASK_BIT	16	/* Mask bits register */
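Note the layout subtlety these offsets encode: the message-data register sits
at PCI_MSI_DATA_32 unless the function advertises 64-bit addressing, in which
case an extra address dword pushes it to PCI_MSI_DATA_64. A sketch
(msi_cap_base is assumed to be the capability offset found via the list walk):

    /* Offset of the MSI data register within config space. */
    static u8 msi_data_off(u8 bus, u8 slot, u8 func, u8 msi_cap_base)
    {
        u16 flags = read_pci_config_16(bus, slot, func,
                                       msi_cap_base + PCI_MSI_FLAGS);
        return msi_cap_base +
               ((flags & PCI_MSI_FLAGS_64BIT) ? PCI_MSI_DATA_64
                                              : PCI_MSI_DATA_32);
    }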
  32.301 +
  32.302 +/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
  32.303 +#define PCI_MSIX_FLAGS		2
  32.304 +#define  PCI_MSIX_FLAGS_QSIZE	0x7FF
  32.305 +#define  PCI_MSIX_FLAGS_ENABLE	(1 << 15)
  32.306 +#define  PCI_MSIX_FLAGS_MASKALL	(1 << 14)
  32.307 +#define PCI_MSIX_FLAGS_BIRMASK	(7 << 0)
  32.308 +#define PCI_MSIX_FLAGS_BITMASK	(1 << 0)
  32.309 +
  32.310 +/* CompactPCI Hotswap Register */
  32.311 +
  32.312 +#define PCI_CHSWP_CSR		2	/* Control and Status Register */
  32.313 +#define  PCI_CHSWP_DHA		0x01	/* Device Hiding Arm */
  32.314 +#define  PCI_CHSWP_EIM		0x02	/* ENUM# Signal Mask */
  32.315 +#define  PCI_CHSWP_PIE		0x04	/* Pending Insert or Extract */
  32.316 +#define  PCI_CHSWP_LOO		0x08	/* LED On / Off */
  32.317 +#define  PCI_CHSWP_PI		0x30	/* Programming Interface */
  32.318 +#define  PCI_CHSWP_EXT		0x40	/* ENUM# status - extraction */
  32.319 +#define  PCI_CHSWP_INS		0x80	/* ENUM# status - insertion */
  32.320 +
  32.321 +/* PCI-X registers */
  32.322 +
  32.323 +#define PCI_X_CMD		2	/* Modes & Features */
  32.324 +#define  PCI_X_CMD_DPERR_E	0x0001	/* Data Parity Error Recovery Enable */
  32.325 +#define  PCI_X_CMD_ERO		0x0002	/* Enable Relaxed Ordering */
  32.326 +#define  PCI_X_CMD_READ_512	0x0000	/* 512 byte maximum read byte count */
  32.327 +#define  PCI_X_CMD_READ_1K	0x0004	/* 1Kbyte maximum read byte count */
  32.328 +#define  PCI_X_CMD_READ_2K	0x0008	/* 2Kbyte maximum read byte count */
  32.329 +#define  PCI_X_CMD_READ_4K	0x000c	/* 4Kbyte maximum read byte count */
  32.330 +#define  PCI_X_CMD_MAX_READ	0x000c	/* Max Memory Read Byte Count */
  32.331 +				/* Max # of outstanding split transactions */
  32.332 +#define  PCI_X_CMD_SPLIT_1	0x0000	/* Max 1 */
  32.333 +#define  PCI_X_CMD_SPLIT_2	0x0010	/* Max 2 */
  32.334 +#define  PCI_X_CMD_SPLIT_3	0x0020	/* Max 3 */
  32.335 +#define  PCI_X_CMD_SPLIT_4	0x0030	/* Max 4 */
  32.336 +#define  PCI_X_CMD_SPLIT_8	0x0040	/* Max 8 */
  32.337 +#define  PCI_X_CMD_SPLIT_12	0x0050	/* Max 12 */
  32.338 +#define  PCI_X_CMD_SPLIT_16	0x0060	/* Max 16 */
  32.339 +#define  PCI_X_CMD_SPLIT_32	0x0070	/* Max 32 */
  32.340 +#define  PCI_X_CMD_MAX_SPLIT	0x0070	/* Max Outstanding Split Transactions */
  32.341 +#define  PCI_X_CMD_VERSION(x) 	(((x) >> 12) & 3) /* Version */
  32.342 +#define PCI_X_STATUS		4	/* PCI-X capabilities */
  32.343 +#define  PCI_X_STATUS_DEVFN	0x000000ff	/* A copy of devfn */
  32.344 +#define  PCI_X_STATUS_BUS	0x0000ff00	/* A copy of bus nr */
  32.345 +#define  PCI_X_STATUS_64BIT	0x00010000	/* 64-bit device */
  32.346 +#define  PCI_X_STATUS_133MHZ	0x00020000	/* 133 MHz capable */
  32.347 +#define  PCI_X_STATUS_SPL_DISC	0x00040000	/* Split Completion Discarded */
  32.348 +#define  PCI_X_STATUS_UNX_SPL	0x00080000	/* Unexpected Split Completion */
  32.349 +#define  PCI_X_STATUS_COMPLEX	0x00100000	/* Device Complexity */
  32.350 +#define  PCI_X_STATUS_MAX_READ	0x00600000	/* Designed Max Memory Read Count */
  32.351 +#define  PCI_X_STATUS_MAX_SPLIT	0x03800000	/* Designed Max Outstanding Split Transactions */
  32.352 +#define  PCI_X_STATUS_MAX_CUM	0x1c000000	/* Designed Max Cumulative Read Size */
  32.353 +#define  PCI_X_STATUS_SPL_ERR	0x20000000	/* Rcvd Split Completion Error Msg */
  32.354 +#define  PCI_X_STATUS_266MHZ	0x40000000	/* 266 MHz capable */
  32.355 +#define  PCI_X_STATUS_533MHZ	0x80000000	/* 533 MHz capable */
  32.356 +
  32.357 +/* PCI Express capability registers */
  32.358 +
  32.359 +#define PCI_EXP_FLAGS		2	/* Capabilities register */
  32.360 +#define PCI_EXP_FLAGS_VERS	0x000f	/* Capability version */
  32.361 +#define PCI_EXP_FLAGS_TYPE	0x00f0	/* Device/Port type */
  32.362 +#define  PCI_EXP_TYPE_ENDPOINT	0x0	/* Express Endpoint */
  32.363 +#define  PCI_EXP_TYPE_LEG_END	0x1	/* Legacy Endpoint */
  32.364 +#define  PCI_EXP_TYPE_ROOT_PORT 0x4	/* Root Port */
  32.365 +#define  PCI_EXP_TYPE_UPSTREAM	0x5	/* Upstream Port */
  32.366 +#define  PCI_EXP_TYPE_DOWNSTREAM 0x6	/* Downstream Port */
  32.367 +#define  PCI_EXP_TYPE_PCI_BRIDGE 0x7	/* PCI/PCI-X Bridge */
  32.368 +#define PCI_EXP_FLAGS_SLOT	0x0100	/* Slot implemented */
  32.369 +#define PCI_EXP_FLAGS_IRQ	0x3e00	/* Interrupt message number */
  32.370 +#define PCI_EXP_DEVCAP		4	/* Device capabilities */
  32.371 +#define  PCI_EXP_DEVCAP_PAYLOAD	0x07	/* Max_Payload_Size */
  32.372 +#define  PCI_EXP_DEVCAP_PHANTOM	0x18	/* Phantom functions */
  32.373 +#define  PCI_EXP_DEVCAP_EXT_TAG	0x20	/* Extended tags */
  32.374 +#define  PCI_EXP_DEVCAP_L0S	0x1c0	/* L0s Acceptable Latency */
  32.375 +#define  PCI_EXP_DEVCAP_L1	0xe00	/* L1 Acceptable Latency */
  32.376 +#define  PCI_EXP_DEVCAP_ATN_BUT	0x1000	/* Attention Button Present */
  32.377 +#define  PCI_EXP_DEVCAP_ATN_IND	0x2000	/* Attention Indicator Present */
  32.378 +#define  PCI_EXP_DEVCAP_PWR_IND	0x4000	/* Power Indicator Present */
  32.379 +#define  PCI_EXP_DEVCAP_PWR_VAL	0x3fc0000 /* Slot Power Limit Value */
  32.380 +#define  PCI_EXP_DEVCAP_PWR_SCL	0xc000000 /* Slot Power Limit Scale */
  32.381 +#define PCI_EXP_DEVCTL		8	/* Device Control */
  32.382 +#define  PCI_EXP_DEVCTL_CERE	0x0001	/* Correctable Error Reporting En. */
  32.383 +#define  PCI_EXP_DEVCTL_NFERE	0x0002	/* Non-Fatal Error Reporting Enable */
  32.384 +#define  PCI_EXP_DEVCTL_FERE	0x0004	/* Fatal Error Reporting Enable */
  32.385 +#define  PCI_EXP_DEVCTL_URRE	0x0008	/* Unsupported Request Reporting En. */
  32.386 +#define  PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
  32.387 +#define  PCI_EXP_DEVCTL_PAYLOAD	0x00e0	/* Max_Payload_Size */
  32.388 +#define  PCI_EXP_DEVCTL_EXT_TAG	0x0100	/* Extended Tag Field Enable */
  32.389 +#define  PCI_EXP_DEVCTL_PHANTOM	0x0200	/* Phantom Functions Enable */
  32.390 +#define  PCI_EXP_DEVCTL_AUX_PME	0x0400	/* Auxiliary Power PM Enable */
  32.391 +#define  PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800  /* Enable No Snoop */
  32.392 +#define  PCI_EXP_DEVCTL_READRQ	0x7000	/* Max_Read_Request_Size */
  32.393 +#define PCI_EXP_DEVSTA		10	/* Device Status */
  32.394 +#define  PCI_EXP_DEVSTA_CED	0x01	/* Correctable Error Detected */
  32.395 +#define  PCI_EXP_DEVSTA_NFED	0x02	/* Non-Fatal Error Detected */
  32.396 +#define  PCI_EXP_DEVSTA_FED	0x04	/* Fatal Error Detected */
  32.397 +#define  PCI_EXP_DEVSTA_URD	0x08	/* Unsupported Request Detected */
  32.398 +#define  PCI_EXP_DEVSTA_AUXPD	0x10	/* AUX Power Detected */
  32.399 +#define  PCI_EXP_DEVSTA_TRPND	0x20	/* Transactions Pending */
  32.400 +#define PCI_EXP_LNKCAP		12	/* Link Capabilities */
  32.401 +#define PCI_EXP_LNKCTL		16	/* Link Control */
  32.402 +#define  PCI_EXP_LNKCTL_CLKREQ_EN 0x100	/* Enable clkreq */
  32.403 +#define PCI_EXP_LNKSTA		18	/* Link Status */
  32.404 +#define PCI_EXP_SLTCAP		20	/* Slot Capabilities */
  32.405 +#define PCI_EXP_SLTCTL		24	/* Slot Control */
  32.406 +#define PCI_EXP_SLTSTA		26	/* Slot Status */
  32.407 +#define PCI_EXP_RTCTL		28	/* Root Control */
  32.408 +#define  PCI_EXP_RTCTL_SECEE	0x01	/* System Error on Correctable Error */
  32.409 +#define  PCI_EXP_RTCTL_SENFEE	0x02	/* System Error on Non-Fatal Error */
  32.410 +#define  PCI_EXP_RTCTL_SEFEE	0x04	/* System Error on Fatal Error */
  32.411 +#define  PCI_EXP_RTCTL_PMEIE	0x08	/* PME Interrupt Enable */
  32.412 +#define  PCI_EXP_RTCTL_CRSSVE	0x10	/* CRS Software Visibility Enable */
  32.413 +#define PCI_EXP_RTCAP		30	/* Root Capabilities */
  32.414 +#define PCI_EXP_RTSTA		32	/* Root Status */
  32.415 +
  32.416 +/* Extended Capabilities (PCI-X 2.0 and Express) */
  32.417 +#define PCI_EXT_CAP_ID(header)		(header & 0x0000ffff)
  32.418 +#define PCI_EXT_CAP_VER(header)		((header >> 16) & 0xf)
  32.419 +#define PCI_EXT_CAP_NEXT(header)	((header >> 20) & 0xffc)
  32.420 +
  32.421 +#define PCI_EXT_CAP_ID_ERR	1
  32.422 +#define PCI_EXT_CAP_ID_VC	2
  32.423 +#define PCI_EXT_CAP_ID_DSN	3
  32.424 +#define PCI_EXT_CAP_ID_PWR	4
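Extended capabilities start at config offset 0x100 and chain through the
header fields the macros above extract. The 0xcf8/0xcfc mechanism in
pci-direct.h only reaches the first 256 bytes of config space, so the sketch
below assumes a hypothetical read_pci_config_ext() backed by memory-mapped
configuration access:

    /* Return the offset of extended capability 'cap_id', or 0. */
    static int find_ext_cap(u8 bus, u8 slot, u8 func, u16 cap_id)
    {
        int pos = 0x100;

        do {
            u32 header = read_pci_config_ext(bus, slot, func, pos);

            if ( header == 0 || header == 0xffffffff )
                return 0;
            if ( PCI_EXT_CAP_ID(header) == cap_id )
                return pos;
            pos = PCI_EXT_CAP_NEXT(header);
        } while ( pos >= 0x100 );

        return 0;
    }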
  32.425 +
  32.426 +/* Advanced Error Reporting */
  32.427 +#define PCI_ERR_UNCOR_STATUS	4	/* Uncorrectable Error Status */
  32.428 +#define  PCI_ERR_UNC_TRAIN	0x00000001	/* Training */
  32.429 +#define  PCI_ERR_UNC_DLP	0x00000010	/* Data Link Protocol */
  32.430 +#define  PCI_ERR_UNC_POISON_TLP	0x00001000	/* Poisoned TLP */
  32.431 +#define  PCI_ERR_UNC_FCP	0x00002000	/* Flow Control Protocol */
  32.432 +#define  PCI_ERR_UNC_COMP_TIME	0x00004000	/* Completion Timeout */
  32.433 +#define  PCI_ERR_UNC_COMP_ABORT	0x00008000	/* Completer Abort */
  32.434 +#define  PCI_ERR_UNC_UNX_COMP	0x00010000	/* Unexpected Completion */
  32.435 +#define  PCI_ERR_UNC_RX_OVER	0x00020000	/* Receiver Overflow */
  32.436 +#define  PCI_ERR_UNC_MALF_TLP	0x00040000	/* Malformed TLP */
  32.437 +#define  PCI_ERR_UNC_ECRC	0x00080000	/* ECRC Error Status */
  32.438 +#define  PCI_ERR_UNC_UNSUP	0x00100000	/* Unsupported Request */
  32.439 +#define PCI_ERR_UNCOR_MASK	8	/* Uncorrectable Error Mask */
  32.440 +	/* Same bits as above */
  32.441 +#define PCI_ERR_UNCOR_SEVER	12	/* Uncorrectable Error Severity */
  32.442 +	/* Same bits as above */
  32.443 +#define PCI_ERR_COR_STATUS	16	/* Correctable Error Status */
  32.444 +#define  PCI_ERR_COR_RCVR	0x00000001	/* Receiver Error Status */
  32.445 +#define  PCI_ERR_COR_BAD_TLP	0x00000040	/* Bad TLP Status */
  32.446 +#define  PCI_ERR_COR_BAD_DLLP	0x00000080	/* Bad DLLP Status */
  32.447 +#define  PCI_ERR_COR_REP_ROLL	0x00000100	/* REPLAY_NUM Rollover */
  32.448 +#define  PCI_ERR_COR_REP_TIMER	0x00001000	/* Replay Timer Timeout */
  32.449 +#define PCI_ERR_COR_MASK	20	/* Correctable Error Mask */
  32.450 +	/* Same bits as above */
  32.451 +#define PCI_ERR_CAP		24	/* Advanced Error Capabilities */
  32.452 +#define  PCI_ERR_CAP_FEP(x)	((x) & 31)	/* First Error Pointer */
  32.453 +#define  PCI_ERR_CAP_ECRC_GENC	0x00000020	/* ECRC Generation Capable */
  32.454 +#define  PCI_ERR_CAP_ECRC_GENE	0x00000040	/* ECRC Generation Enable */
  32.455 +#define  PCI_ERR_CAP_ECRC_CHKC	0x00000080	/* ECRC Check Capable */
  32.456 +#define  PCI_ERR_CAP_ECRC_CHKE	0x00000100	/* ECRC Check Enable */
  32.457 +#define PCI_ERR_HEADER_LOG	28	/* Header Log Register (16 bytes) */
  32.458 +#define PCI_ERR_ROOT_COMMAND	44	/* Root Error Command */
  32.459 +/* Correctable Err Reporting Enable */
  32.460 +#define PCI_ERR_ROOT_CMD_COR_EN		0x00000001
  32.461 +/* Non-fatal Err Reporting Enable */
  32.462 +#define PCI_ERR_ROOT_CMD_NONFATAL_EN	0x00000002
  32.463 +/* Fatal Err Reporting Enable */
  32.464 +#define PCI_ERR_ROOT_CMD_FATAL_EN	0x00000004
  32.465 +#define PCI_ERR_ROOT_STATUS	48
  32.466 +#define PCI_ERR_ROOT_COR_RCV		0x00000001	/* ERR_COR Received */
  32.467 +/* Multi ERR_COR Received */
  32.468 +#define PCI_ERR_ROOT_MULTI_COR_RCV	0x00000002
   32.469 +/* ERR_FATAL/NONFATAL Received */
  32.470 +#define PCI_ERR_ROOT_UNCOR_RCV		0x00000004
   32.471 +/* Multi ERR_FATAL/NONFATAL Received */
  32.472 +#define PCI_ERR_ROOT_MULTI_UNCOR_RCV	0x00000008
  32.473 +#define PCI_ERR_ROOT_FIRST_FATAL	0x00000010	/* First Fatal */
  32.474 +#define PCI_ERR_ROOT_NONFATAL_RCV	0x00000020	/* Non-Fatal Received */
  32.475 +#define PCI_ERR_ROOT_FATAL_RCV		0x00000040	/* Fatal Received */
  32.476 +#define PCI_ERR_ROOT_COR_SRC	52
  32.477 +#define PCI_ERR_ROOT_SRC	54
  32.478 +
  32.479 +/* Virtual Channel */
  32.480 +#define PCI_VC_PORT_REG1	4
  32.481 +#define PCI_VC_PORT_REG2	8
  32.482 +#define PCI_VC_PORT_CTRL	12
  32.483 +#define PCI_VC_PORT_STATUS	14
  32.484 +#define PCI_VC_RES_CAP		16
  32.485 +#define PCI_VC_RES_CTRL		20
  32.486 +#define PCI_VC_RES_STATUS	26
  32.487 +
  32.488 +/* Power Budgeting */
  32.489 +#define PCI_PWR_DSR		4	/* Data Select Register */
  32.490 +#define PCI_PWR_DATA		8	/* Data Register */
  32.491 +#define  PCI_PWR_DATA_BASE(x)	((x) & 0xff)	    /* Base Power */
  32.492 +#define  PCI_PWR_DATA_SCALE(x)	(((x) >> 8) & 3)    /* Data Scale */
  32.493 +#define  PCI_PWR_DATA_PM_SUB(x)	(((x) >> 10) & 7)   /* PM Sub State */
  32.494 +#define  PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */
  32.495 +#define  PCI_PWR_DATA_TYPE(x)	(((x) >> 15) & 7)   /* Type */
  32.496 +#define  PCI_PWR_DATA_RAIL(x)	(((x) >> 18) & 7)   /* Power Rail */
  32.497 +#define PCI_PWR_CAP		12	/* Capability */
  32.498 +#define  PCI_PWR_CAP_BUDGET(x)	((x) & 1)	/* Included in system budget */
  32.499 +
  32.500 +/*
  32.501 + * Hypertransport sub capability types
  32.502 + *
   32.503 + * Unfortunately there are both 3-bit and 5-bit capability types defined
   32.504 + * in the HT spec; catering for that is a little messy. You probably don't
   32.505 + * want to use these directly; just use pci_find_ht_capability() and it
  32.506 + * will do the right thing for you.
  32.507 + */
  32.508 +#define HT_3BIT_CAP_MASK	0xE0
  32.509 +#define HT_CAPTYPE_SLAVE	0x00	/* Slave/Primary link configuration */
  32.510 +#define HT_CAPTYPE_HOST		0x20	/* Host/Secondary link configuration */
  32.511 +
  32.512 +#define HT_5BIT_CAP_MASK	0xF8
  32.513 +#define HT_CAPTYPE_IRQ		0x80	/* IRQ Configuration */
  32.514 +#define HT_CAPTYPE_REMAPPING_40	0xA0	/* 40 bit address remapping */
  32.515 +#define HT_CAPTYPE_REMAPPING_64 0xA2	/* 64 bit address remapping */
  32.516 +#define HT_CAPTYPE_UNITID_CLUMP	0x90	/* Unit ID clumping */
  32.517 +#define HT_CAPTYPE_EXTCONF	0x98	/* Extended Configuration Space Access */
  32.518 +#define HT_CAPTYPE_MSI_MAPPING	0xA8	/* MSI Mapping Capability */
  32.519 +#define  HT_MSI_FLAGS		0x02		/* Offset to flags */
  32.520 +#define  HT_MSI_FLAGS_ENABLE	0x1		/* Mapping enable */
  32.521 +#define  HT_MSI_FLAGS_FIXED	0x2		/* Fixed mapping only */
  32.522 +#define  HT_MSI_FIXED_ADDR	0x00000000FEE00000ULL	/* Fixed addr */
  32.523 +#define  HT_MSI_ADDR_LO		0x04		/* Offset to low addr bits */
  32.524 +#define  HT_MSI_ADDR_LO_MASK	0xFFF00000	/* Low address bit mask */
  32.525 +#define  HT_MSI_ADDR_HI		0x08		/* Offset to high addr bits */
  32.526 +#define HT_CAPTYPE_DIRECT_ROUTE	0xB0	/* Direct routing configuration */
  32.527 +#define HT_CAPTYPE_VCSET	0xB8	/* Virtual Channel configuration */
  32.528 +#define HT_CAPTYPE_ERROR_RETRY	0xC0	/* Retry on error configuration */
  32.529 +#define HT_CAPTYPE_GEN3		0xD0	/* Generation 3 hypertransport configuration */
   32.530 +#define HT_CAPTYPE_PM		0xE0	/* Hypertransport power management configuration */
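[A minimal sketch of the masking the comment above alludes to, using only the constants defined here: the slave and host types are 3-bit encodings (bits 7:5 of the capability byte), while every other type is a 5-bit encoding (bits 7:3), so the mask must be chosen per type being tested:

    static inline int ht_cap_type_matches(u8 cap_byte, u8 wanted)
    {
        /* SLAVE/HOST are 3-bit types; all other HT_CAPTYPE_* are 5-bit. */
        u8 mask = (wanted == HT_CAPTYPE_SLAVE || wanted == HT_CAPTYPE_HOST)
                  ? HT_3BIT_CAP_MASK : HT_5BIT_CAP_MASK;

        return (cap_byte & mask) == wanted;
    }
]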
  32.531 +
  32.532 +
  32.533 +#endif /* LINUX_PCI_REGS_H */
    33.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    33.2 +++ b/xen/drivers/passthrough/vtd/Makefile	Thu Feb 21 15:06:37 2008 +0000
    33.3 @@ -0,0 +1,6 @@
    33.4 +obj-y += iommu.o
    33.5 +obj-y += dmar.o
    33.6 +obj-y += utils.o
    33.7 +obj-y += io.o
    33.8 +obj-y += qinval.o
    33.9 +obj-y += intremap.o
    34.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    34.2 +++ b/xen/drivers/passthrough/vtd/dmar.c	Thu Feb 21 15:06:37 2008 +0000
    34.3 @@ -0,0 +1,625 @@
    34.4 +/*
    34.5 + * Copyright (c) 2006, Intel Corporation.
    34.6 + *
    34.7 + * This program is free software; you can redistribute it and/or modify it
    34.8 + * under the terms and conditions of the GNU General Public License,
    34.9 + * version 2, as published by the Free Software Foundation.
   34.10 + *
   34.11 + * This program is distributed in the hope it will be useful, but WITHOUT
   34.12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   34.13 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   34.14 + * more details.
   34.15 + *
   34.16 + * You should have received a copy of the GNU General Public License along with
   34.17 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   34.18 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   34.19 + *
   34.20 + * Copyright (C) Ashok Raj <ashok.raj@intel.com>
   34.21 + * Copyright (C) Shaohua Li <shaohua.li@intel.com>
   34.22 + * Copyright (C) Allen Kay <allen.m.kay@intel.com> - adapted to xen
   34.23 + */
   34.24 +
   34.25 +#include <xen/init.h>
   34.26 +#include <xen/bitmap.h>
   34.27 +#include <xen/kernel.h>
   34.28 +#include <xen/acpi.h>
   34.29 +#include <xen/mm.h>
   34.30 +#include <xen/xmalloc.h>
   34.31 +#include <asm/string.h>
   34.32 +#include "dmar.h"
   34.33 +#include "../pci-direct.h"
   34.34 +#include "../pci_regs.h"
   34.35 +
   34.36 +int vtd_enabled;
   34.37 +boolean_param("vtd", vtd_enabled);
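[boolean_param() wires vtd_enabled to a "vtd" option on the Xen boot command line, so VT-d support can be toggled without a rebuild; an illustrative grub stanza:

    kernel /boot/xen.gz vtd=1 ...
]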
   34.38 +
   34.39 +#undef PREFIX
   34.40 +#define PREFIX VTDPREFIX "ACPI DMAR:"
   34.41 +#define DEBUG
   34.42 +
   34.43 +#define MIN_SCOPE_LEN (sizeof(struct acpi_pci_path) + \
   34.44 +                       sizeof(struct acpi_dev_scope))
   34.45 +
   34.46 +LIST_HEAD(acpi_drhd_units);
   34.47 +LIST_HEAD(acpi_rmrr_units);
   34.48 +LIST_HEAD(acpi_atsr_units);
   34.49 +
   34.50 +u8 dmar_host_address_width;
   34.51 +
   34.52 +static int __init acpi_register_drhd_unit(struct acpi_drhd_unit *drhd)
   34.53 +{
   34.54 +    /*
    34.55 +     * Add the INCLUDE_ALL unit at the tail, so that a scan of the
    34.56 +     * list finds it last.
   34.57 +     */
   34.58 +    if ( drhd->include_all )
   34.59 +        list_add_tail(&drhd->list, &acpi_drhd_units);
   34.60 +    else
   34.61 +        list_add(&drhd->list, &acpi_drhd_units);
   34.62 +    return 0;
   34.63 +}
   34.64 +
   34.65 +static int __init acpi_register_rmrr_unit(struct acpi_rmrr_unit *rmrr)
   34.66 +{
   34.67 +    list_add(&rmrr->list, &acpi_rmrr_units);
   34.68 +    return 0;
   34.69 +}
   34.70 +
   34.71 +static int acpi_ioapic_device_match(
   34.72 +    struct list_head *ioapic_list, unsigned int apic_id)
   34.73 +{
   34.74 +    struct acpi_ioapic_unit *ioapic;
   34.75 +    list_for_each_entry( ioapic, ioapic_list, list ) {
   34.76 +        if (ioapic->apic_id == apic_id)
   34.77 +            return 1;
   34.78 +    }
   34.79 +    return 0;
   34.80 +}
   34.81 +
   34.82 +struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id)
   34.83 +{
   34.84 +    struct acpi_drhd_unit *drhd;
   34.85 +    list_for_each_entry( drhd, &acpi_drhd_units, list ) {
   34.86 +        if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) {
   34.87 +            dprintk(XENLOG_INFO VTDPREFIX,
   34.88 +                    "ioapic_to_drhd: drhd->address = %lx\n",
   34.89 +                    drhd->address);
   34.90 +            return drhd;
   34.91 +        }
   34.92 +    }
   34.93 +    return NULL;
   34.94 +}
   34.95 +
   34.96 +struct iommu * ioapic_to_iommu(unsigned int apic_id)
   34.97 +{
   34.98 +    struct acpi_drhd_unit *drhd;
   34.99 +
  34.100 +    list_for_each_entry( drhd, &acpi_drhd_units, list ) {
  34.101 +        if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) {
  34.102 +            dprintk(XENLOG_INFO VTDPREFIX,
  34.103 +                    "ioapic_to_iommu: drhd->address = %lx\n",
  34.104 +                    drhd->address);
  34.105 +            return drhd->iommu;
  34.106 +        }
  34.107 +    }
   34.108 +    dprintk(XENLOG_INFO VTDPREFIX, "ioapic_to_iommu: no drhd found\n");
  34.109 +    return NULL;
  34.110 +}
  34.111 +
  34.112 +static int acpi_pci_device_match(struct pci_dev *devices, int cnt,
  34.113 +                                 struct pci_dev *dev)
  34.114 +{
  34.115 +    int i;
  34.116 +
  34.117 +    for ( i = 0; i < cnt; i++ )
  34.118 +    {
  34.119 +        if ( (dev->bus == devices->bus) &&
  34.120 +             (dev->devfn == devices->devfn) )
  34.121 +            return 1;
  34.122 +        devices++;
  34.123 +    }
  34.124 +    return 0;
  34.125 +}
  34.126 +
  34.127 +static int __init acpi_register_atsr_unit(struct acpi_atsr_unit *atsr)
  34.128 +{
  34.129 +    /*
   34.130 +     * Add the ALL_PORTS unit at the tail, so that a scan of the
   34.131 +     * list finds it last.
  34.132 +     */
  34.133 +    if ( atsr->all_ports )
  34.134 +        list_add_tail(&atsr->list, &acpi_atsr_units);
  34.135 +    else
  34.136 +        list_add(&atsr->list, &acpi_atsr_units);
  34.137 +    return 0;
  34.138 +}
  34.139 +
  34.140 +struct acpi_drhd_unit * acpi_find_matched_drhd_unit(struct pci_dev *dev)
  34.141 +{
  34.142 +    struct acpi_drhd_unit *drhd;
  34.143 +    struct acpi_drhd_unit *include_all_drhd;
  34.144 +
  34.145 +    include_all_drhd = NULL;
  34.146 +    list_for_each_entry ( drhd, &acpi_drhd_units, list )
  34.147 +    {
  34.148 +        if ( drhd->include_all )
  34.149 +        {
  34.150 +            include_all_drhd = drhd;
  34.151 +            continue;
  34.152 +        }
  34.153 +
  34.154 +        if ( acpi_pci_device_match(drhd->devices,
  34.155 +                                   drhd->devices_cnt, dev) )
  34.156 +        {
  34.157 +            dprintk(XENLOG_INFO VTDPREFIX, 
  34.158 +                    "acpi_find_matched_drhd_unit: drhd->address = %lx\n",
  34.159 +                    drhd->address);
  34.160 +            return drhd;
  34.161 +        }
  34.162 +    }
  34.163 +
  34.164 +    if ( include_all_drhd )
  34.165 +    {
  34.166 +        dprintk(XENLOG_INFO VTDPREFIX, 
  34.167 +                "acpi_find_matched_drhd_unit:include_all_drhd->addr = %lx\n",
  34.168 +                include_all_drhd->address);
  34.169 +        return include_all_drhd;
  34.170 +    }
  34.171 +
  34.172 +    return NULL;
  34.173 +}
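[Because acpi_register_drhd_unit() puts INCLUDE_ALL units at the tail of the list, the loop above always prefers an explicit device scope and only falls back to the catch-all unit. A minimal caller sketch, assuming only the fields this file already uses (drhd->iommu as returned via ioapic_to_iommu()):

    static struct iommu *iommu_for_device(struct pci_dev *dev)
    {
        struct acpi_drhd_unit *drhd = acpi_find_matched_drhd_unit(dev);

        /* NULL when no DRHD scope (not even an INCLUDE_ALL unit) covers dev. */
        return drhd ? drhd->iommu : NULL;
    }
]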
  34.174 +
  34.175 +struct acpi_rmrr_unit * acpi_find_matched_rmrr_unit(struct pci_dev *dev)
  34.176 +{
  34.177 +    struct acpi_rmrr_unit *rmrr;
  34.178 +
  34.179 +    list_for_each_entry ( rmrr, &acpi_rmrr_units, list )
  34.180 +    {
  34.181 +        if ( acpi_pci_device_match(rmrr->devices,
  34.182 +                                   rmrr->devices_cnt, dev) )
  34.183 +            return rmrr;
  34.184 +    }
  34.185 +
  34.186 +    return NULL;
  34.187 +}
  34.188 +
  34.189 +struct acpi_atsr_unit * acpi_find_matched_atsr_unit(struct pci_dev *dev)
  34.190 +{
  34.191 +    struct acpi_atsr_unit *atsru;
  34.192 +    struct acpi_atsr_unit *all_ports_atsru;
  34.193 +
  34.194 +    all_ports_atsru = NULL;
  34.195 +    list_for_each_entry ( atsru, &acpi_atsr_units, list )
  34.196 +    {
  34.197 +        if ( atsru->all_ports )
  34.198 +            all_ports_atsru = atsru;
  34.199 +        if ( acpi_pci_device_match(atsru->devices,
  34.200 +                                   atsru->devices_cnt, dev) )
  34.201 +            return atsru;
  34.202 +    }
  34.203 +
  34.204 +    if ( all_ports_atsru )
  34.205 +    {
  34.206 +        dprintk(XENLOG_INFO VTDPREFIX,
  34.207 +                "acpi_find_matched_atsr_unit: all_ports_atsru\n");
   34.208 +        return all_ports_atsru;
  34.209 +    }
  34.210 +
  34.211 +    return NULL;
  34.212 +}
  34.213 +
  34.214 +static int scope_device_count(void *start, void *end)
  34.215 +{
  34.216 +    struct acpi_dev_scope *scope;
  34.217 +    u8 bus, sub_bus, sec_bus;
  34.218 +    struct acpi_pci_path *path;
  34.219 +    int depth, count = 0;
  34.220 +    u8 dev, func;
  34.221 +    u32 l;
  34.222 +
  34.223 +    while ( start < end )
  34.224 +    {
  34.225 +        scope = start;
  34.226 +        if ( (scope->length < MIN_SCOPE_LEN) ||
  34.227 +             (scope->dev_type >= ACPI_DEV_ENTRY_COUNT) )
  34.228 +        {
  34.229 +            dprintk(XENLOG_WARNING VTDPREFIX, "Invalid device scope\n");
  34.230 +            return -EINVAL;
  34.231 +        }
  34.232 +
  34.233 +        path = (struct acpi_pci_path *)(scope + 1);
  34.234 +        bus = scope->start_bus;
  34.235 +        depth = (scope->length - sizeof(struct acpi_dev_scope))
   34.236 +                / sizeof(struct acpi_pci_path);
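[        /*
         * All but the last path element are P2P bridges: read each
         * bridge's secondary bus number to learn the bus on which the
         * next (dev, fn) element lives, leaving 'path' at the final
         * device when the loop ends.
         */
]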
  34.237 +        while ( --depth )
  34.238 +        {
  34.239 +            bus = read_pci_config_byte(
  34.240 +                bus, path->dev, path->fn, PCI_SECONDARY_BUS);
  34.241 +            path++;
  34.242 +        }
  34.243 +
  34.244 +        if ( scope->dev_type == ACPI_DEV_ENDPOINT )
  34.245 +        {
  34.246 +            dprintk(XENLOG_INFO VTDPREFIX,
  34.247 +                    "found endpoint: bdf = %x:%x:%x\n",
  34.248 +                    bus, path->dev, path->fn);
  34.249 +            count++;
  34.250 +        }
  34.251 +        else if ( scope->dev_type == ACPI_DEV_P2PBRIDGE )
  34.252 +        {
  34.253 +            dprintk(XENLOG_INFO VTDPREFIX,
  34.254 +                    "found bridge: bdf = %x:%x:%x\n",
   34.255 +                    bus, path->dev, path->fn);