ia64/xen-unstable

changeset 15957:844e507d56b8

Add AMD IOMMU support into hypervisor
Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir@xensource.com>
date Fri Sep 21 17:15:47 2007 +0100 (2007-09-21)
parents a956ef58b012
children ca1360094654
files xen/arch/x86/hvm/svm/Makefile xen/arch/x86/hvm/svm/amd_iommu/Makefile xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-init.c xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c xen/arch/x86/hvm/svm/amd_iommu/pci-direct.h xen/arch/x86/hvm/svm/amd_iommu/pci_regs.h xen/arch/x86/setup.c xen/include/asm-x86/amd-iommu.h xen/include/asm-x86/fixmap.h xen/include/asm-x86/hvm/iommu.h xen/include/asm-x86/hvm/svm/amd-iommu-defs.h xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/Makefile	Fri Sep 21 17:10:00 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/Makefile	Fri Sep 21 17:15:47 2007 +0100
     1.3 @@ -1,6 +1,8 @@
     1.4  subdir-$(x86_32) += x86_32
     1.5  subdir-$(x86_64) += x86_64
     1.6  
     1.7 +subdir-y += amd_iommu
     1.8 +
     1.9  obj-y += asid.o
    1.10  obj-y += emulate.o
    1.11  obj-y += intr.o
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/xen/arch/x86/hvm/svm/amd_iommu/Makefile	Fri Sep 21 17:15:47 2007 +0100
     2.3 @@ -0,0 +1,4 @@
     2.4 +obj-y += amd-iommu-detect.o
     2.5 +obj-y += amd-iommu-init.o
     2.6 +obj-y += amd-iommu-map.o
     2.7 +obj-y += pci-amd-iommu.o
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c	Fri Sep 21 17:15:47 2007 +0100
     3.3 @@ -0,0 +1,211 @@
     3.4 +/*
     3.5 + * Copyright (C) 2007 Advanced Micro Devices, Inc.
     3.6 + * Author: Leo Duran <leo.duran@amd.com>
     3.7 + * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
     3.8 + *
     3.9 + * This program is free software; you can redistribute it and/or modify
    3.10 + * it under the terms of the GNU General Public License as published by
    3.11 + * the Free Software Foundation; either version 2 of the License, or
    3.12 + * (at your option) any later version.
    3.13 + *
    3.14 + * This program is distributed in the hope that it will be useful,
    3.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    3.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    3.17 + * GNU General Public License for more details.
    3.18 + *
    3.19 + * You should have received a copy of the GNU General Public License
    3.20 + * along with this program; if not, write to the Free Software
    3.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
    3.22 + */
    3.23 +
    3.24 +#include <asm/iommu.h>
    3.25 +#include <asm/amd-iommu.h>
    3.26 +#include <asm/hvm/svm/amd-iommu-proto.h>
    3.27 +#include "pci-direct.h"
    3.28 +#include "pci_regs.h"
    3.29 +
    3.30 +static int __init valid_bridge_bus_config(int bus, int dev, int func,
    3.31 +            int *sec_bus, int *sub_bus)
    3.32 +{
    3.33 +    int pri_bus;
    3.34 +
    3.35 +    pri_bus = read_pci_config_byte(bus, dev, func, PCI_PRIMARY_BUS);
    3.36 +    *sec_bus = read_pci_config_byte(bus, dev, func, PCI_SECONDARY_BUS);
    3.37 +    *sub_bus = read_pci_config_byte(bus, dev, func, PCI_SUBORDINATE_BUS);
    3.38 +
    3.39 +    return ( pri_bus == bus && *sec_bus > bus && *sub_bus >= *sec_bus );
    3.40 +}
    3.41 +
    3.42 +int __init get_iommu_last_downstream_bus(struct amd_iommu *iommu)
    3.43 +{
    3.44 +    int bus, dev, func;
    3.45 +    int devfn, hdr_type;
    3.46 +    int sec_bus, sub_bus;
    3.47 +    int multi_func;
    3.48 +
    3.49 +    bus = iommu->last_downstream_bus = iommu->root_bus;
    3.50 +    iommu->downstream_bus_present[bus] = 1;
    3.51 +    dev = PCI_SLOT(iommu->first_devfn);
    3.52 +    multi_func = PCI_FUNC(iommu->first_devfn) > 0;
    3.53 +    for ( devfn = iommu->first_devfn; devfn <= iommu->last_devfn; ++devfn ) {
    3.54 +        /* skipping to next device#? */
    3.55 +        if ( dev != PCI_SLOT(devfn) ) {
    3.56 +            dev = PCI_SLOT(devfn);
    3.57 +            multi_func = 0;
    3.58 +        }
    3.59 +        func = PCI_FUNC(devfn);
    3.60 + 
    3.61 +        if ( !VALID_PCI_VENDOR_ID(
    3.62 +            read_pci_config_16(bus, dev, func, PCI_VENDOR_ID)) )
    3.63 +            continue;
    3.64 +
    3.65 +        hdr_type = read_pci_config_byte(bus, dev, func,
    3.66 +                PCI_HEADER_TYPE);
    3.67 +        if ( func == 0 )
    3.68 +            multi_func = IS_PCI_MULTI_FUNCTION(hdr_type);
    3.69 +
    3.70 +        if ( (func == 0 || multi_func) &&
    3.71 +            IS_PCI_TYPE1_HEADER(hdr_type) ) {
    3.72 +            if (!valid_bridge_bus_config(bus, dev, func,
    3.73 +                &sec_bus, &sub_bus))
    3.74 +                return -ENODEV;
    3.75 +
    3.76 +            if ( sub_bus > iommu->last_downstream_bus )
    3.77 +                iommu->last_downstream_bus = sub_bus;
    3.78 +            do {
    3.79 +                iommu->downstream_bus_present[sec_bus] = 1;
    3.80 +            } while ( sec_bus++ < sub_bus );
    3.81 +        }
    3.82 +    }
    3.83 +
    3.84 +    return 0;
    3.85 +}
    3.86 +
    3.87 +int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
    3.88 +            struct amd_iommu *iommu)
    3.89 +{
    3.90 +    u32 cap_header, cap_range;
    3.91 +    u64 mmio_bar;
    3.92 +
    3.93 +    /* remove it when BIOS available */
    3.94 +    write_pci_config(bus, dev, func,
    3.95 +        cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, 0x00000000);
    3.96 +    write_pci_config(bus, dev, func,
    3.97 +        cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, 0x40000001);
    3.98 +    /* remove it when BIOS available */
    3.99 +
   3.100 +    mmio_bar = (u64)read_pci_config(bus, dev, func,
   3.101 +             cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
   3.102 +    mmio_bar |= read_pci_config(bus, dev, func,
   3.103 +            cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET) &
   3.104 +            PCI_CAP_MMIO_BAR_LOW_MASK;
   3.105 +    iommu->mmio_base_phys = (unsigned long)mmio_bar;
   3.106 +
   3.107 +    if ( (mmio_bar == 0) || ( (mmio_bar & 0x3FFF) != 0 ) ) {
   3.108 +        dprintk(XENLOG_ERR ,
   3.109 +            "AMD IOMMU: Invalid MMIO_BAR = 0x%lx\n", mmio_bar);
   3.110 +        return -ENODEV;
   3.111 +    }
   3.112 +
   3.113 +    cap_header = read_pci_config(bus, dev, func, cap_ptr);
   3.114 +    iommu->revision = get_field_from_reg_u32(cap_header,
   3.115 +                  PCI_CAP_REV_MASK, PCI_CAP_REV_SHIFT);
   3.116 +    iommu->iotlb_support = get_field_from_reg_u32(cap_header,
   3.117 +                PCI_CAP_IOTLB_MASK, PCI_CAP_IOTLB_SHIFT);
   3.118 +    iommu->ht_tunnel_support = get_field_from_reg_u32(cap_header,
   3.119 +                    PCI_CAP_HT_TUNNEL_MASK,
   3.120 +                    PCI_CAP_HT_TUNNEL_SHIFT);
   3.121 +    iommu->not_present_cached = get_field_from_reg_u32(cap_header,
   3.122 +                    PCI_CAP_NP_CACHE_MASK,
   3.123 +                    PCI_CAP_NP_CACHE_SHIFT);
   3.124 +
   3.125 +    cap_range = read_pci_config(bus, dev, func,
   3.126 +            cap_ptr + PCI_CAP_RANGE_OFFSET);
   3.127 +    iommu->root_bus = get_field_from_reg_u32(cap_range,
   3.128 +                PCI_CAP_BUS_NUMBER_MASK,
   3.129 +                PCI_CAP_BUS_NUMBER_SHIFT);
   3.130 +    iommu->first_devfn = get_field_from_reg_u32(cap_range,
   3.131 +                PCI_CAP_FIRST_DEVICE_MASK,
   3.132 +                PCI_CAP_FIRST_DEVICE_SHIFT);
   3.133 +    iommu->last_devfn = get_field_from_reg_u32(cap_range,
   3.134 +                PCI_CAP_LAST_DEVICE_MASK,
   3.135 +                PCI_CAP_LAST_DEVICE_SHIFT);
   3.136 +
   3.137 +    return 0;
   3.138 +}
   3.139 +
   3.140 +static int __init scan_caps_for_iommu(int bus, int dev, int func,
   3.141 +            iommu_detect_callback_ptr_t iommu_detect_callback)
   3.142 +{
   3.143 +    int cap_ptr, cap_id, cap_type;
   3.144 +    u32 cap_header;
   3.145 +    int count, error = 0;
   3.146 +
   3.147 +    count = 0;
   3.148 +    cap_ptr = read_pci_config_byte(bus, dev, func,
   3.149 +            PCI_CAPABILITY_LIST);
   3.150 +    while ( cap_ptr >= PCI_MIN_CAP_OFFSET &&
   3.151 +        count < PCI_MAX_CAP_BLOCKS && !error ) {
   3.152 +        cap_ptr &= PCI_CAP_PTR_MASK;
   3.153 +        cap_header = read_pci_config(bus, dev, func, cap_ptr);
   3.154 +        cap_id = get_field_from_reg_u32(cap_header,
   3.155 +                PCI_CAP_ID_MASK, PCI_CAP_ID_SHIFT);
   3.156 +
   3.157 +        if ( cap_id == PCI_CAP_ID_SECURE_DEVICE ) {
   3.158 +            cap_type = get_field_from_reg_u32(cap_header,
   3.159 +                    PCI_CAP_TYPE_MASK, PCI_CAP_TYPE_SHIFT);
   3.160 +            if ( cap_type == PCI_CAP_TYPE_IOMMU ) {
   3.161 +                error = iommu_detect_callback(
   3.162 +                        bus, dev, func, cap_ptr);
   3.163 +            }
   3.164 +        }
   3.165 +
   3.166 +        cap_ptr = get_field_from_reg_u32(cap_header,
   3.167 +                PCI_CAP_NEXT_PTR_MASK, PCI_CAP_NEXT_PTR_SHIFT);
   3.168 +        ++count;    }
   3.169 +
   3.170 +    return error;
   3.171 +}
   3.172 +
   3.173 +static int __init scan_functions_for_iommu(int bus, int dev,
   3.174 +            iommu_detect_callback_ptr_t iommu_detect_callback)
   3.175 +{
   3.176 +    int func, hdr_type;
   3.177 +    int count, error = 0;
   3.178 +
   3.179 +    func = 0;
   3.180 +    count = 1;
   3.181 +    while ( VALID_PCI_VENDOR_ID(read_pci_config_16(bus, dev, func,
   3.182 +            PCI_VENDOR_ID)) && !error && func < count ) {
   3.183 +        hdr_type = read_pci_config_byte(bus, dev, func,
   3.184 +                PCI_HEADER_TYPE);
   3.185 +
   3.186 +        if ( func == 0 && IS_PCI_MULTI_FUNCTION(hdr_type) )
   3.187 +            count = PCI_MAX_FUNC_COUNT;
   3.188 +
   3.189 +        if ( IS_PCI_TYPE0_HEADER(hdr_type) ||
   3.190 +            IS_PCI_TYPE1_HEADER(hdr_type) ) {
   3.191 +            error =  scan_caps_for_iommu(bus, dev, func,
   3.192 +                    iommu_detect_callback);
   3.193 +        }
   3.194 +        ++func;
   3.195 +    }
   3.196 +
   3.197 +    return error;
   3.198 +}
   3.199 +
   3.200 +
   3.201 +int __init scan_for_iommu(iommu_detect_callback_ptr_t iommu_detect_callback)
   3.202 +{
   3.203 +    int bus, dev, error = 0;
   3.204 +
   3.205 +    for ( bus = 0; bus < PCI_MAX_BUS_COUNT && !error; ++bus ) {
   3.206 +        for ( dev = 0; dev < PCI_MAX_DEV_COUNT && !error; ++dev ) {
   3.207 +            error =  scan_functions_for_iommu(bus, dev,
   3.208 +                  iommu_detect_callback);
   3.209 +        }
   3.210 +    }
   3.211 +
   3.212 +    return error;
   3.213 +}
   3.214 +
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-init.c	Fri Sep 21 17:15:47 2007 +0100
     4.3 @@ -0,0 +1,145 @@
     4.4 +/*
     4.5 + * Copyright (C) 2007 Advanced Micro Devices, Inc.
     4.6 + * Author: Leo Duran <leo.duran@amd.com>
     4.7 + * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
     4.8 + *
     4.9 + * This program is free software; you can redistribute it and/or modify
    4.10 + * it under the terms of the GNU General Public License as published by
    4.11 + * the Free Software Foundation; either version 2 of the License, or
    4.12 + * (at your option) any later version.
    4.13 + *
    4.14 + * This program is distributed in the hope that it will be useful,
    4.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    4.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    4.17 + * GNU General Public License for more details.
    4.18 + *
    4.19 + * You should have received a copy of the GNU General Public License
    4.20 + * along with this program; if not, write to the Free Software
    4.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
    4.22 + */
    4.23 +
    4.24 +#include <asm/amd-iommu.h>
    4.25 +#include <asm/hvm/svm/amd-iommu-proto.h>
    4.26 +#include <asm-x86/fixmap.h>
    4.27 +#include "pci-direct.h"
    4.28 +#include "pci_regs.h"
    4.29 +
    4.30 +extern int nr_amd_iommus;
    4.31 +
    4.32 +int __init map_iommu_mmio_region(struct amd_iommu *iommu)
    4.33 +{
    4.34 +    unsigned long mfn;
    4.35 +
    4.36 +    if ( nr_amd_iommus > MAX_AMD_IOMMUS ) {
    4.37 +        gdprintk(XENLOG_ERR,
    4.38 +            "IOMMU: nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
    4.39 +        return -ENOMEM;
    4.40 +    }
    4.41 +
    4.42 +    iommu->mmio_base = (void *) fix_to_virt(FIX_IOMMU_MMIO_BASE_0 +
    4.43 +                       nr_amd_iommus * MMIO_PAGES_PER_IOMMU);
    4.44 +    mfn = (unsigned long)iommu->mmio_base_phys >> PAGE_SHIFT;
    4.45 +    map_pages_to_xen((unsigned long)iommu->mmio_base, mfn,
    4.46 +                    MMIO_PAGES_PER_IOMMU, PAGE_HYPERVISOR_NOCACHE);
    4.47 +
    4.48 +    memset((u8*)iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH);
    4.49 +
    4.50 +    return 0;
    4.51 +}
    4.52 +
    4.53 +void __init unmap_iommu_mmio_region(struct amd_iommu *iommu)
    4.54 +{
    4.55 +    if ( iommu->mmio_base ) {
    4.56 +        iounmap(iommu->mmio_base);
    4.57 +        iommu->mmio_base = NULL;
    4.58 +    }
    4.59 +}
    4.60 +
    4.61 +void __init register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
    4.62 +{
    4.63 +    u64 addr_64, addr_lo, addr_hi;
    4.64 +    u32 entry;
    4.65 +
    4.66 +    addr_64 = (u64)virt_to_maddr(iommu->dev_table.buffer);
    4.67 +    addr_lo = addr_64 & DMA_32BIT_MASK;
    4.68 +    addr_hi = addr_64 >> 32;
    4.69 +
    4.70 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
    4.71 +        IOMMU_DEV_TABLE_BASE_LOW_MASK,
    4.72 +        IOMMU_DEV_TABLE_BASE_LOW_SHIFT, &entry);
    4.73 +    set_field_in_reg_u32((iommu->dev_table.alloc_size / PAGE_SIZE) - 1,
    4.74 +        entry, IOMMU_DEV_TABLE_SIZE_MASK,
    4.75 +        IOMMU_DEV_TABLE_SIZE_SHIFT, &entry);
    4.76 +    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_LOW_OFFSET);
    4.77 +
    4.78 +    set_field_in_reg_u32((u32)addr_hi, 0,
    4.79 +        IOMMU_DEV_TABLE_BASE_HIGH_MASK,
    4.80 +        IOMMU_DEV_TABLE_BASE_HIGH_SHIFT, &entry);
    4.81 +    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_HIGH_OFFSET);
    4.82 +}
    4.83 +
    4.84 +void __init register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
    4.85 +{
    4.86 +    u64 addr_64, addr_lo, addr_hi;
    4.87 +    u32 power_of2_entries;
    4.88 +    u32 entry;
    4.89 +
    4.90 +    addr_64 = (u64)virt_to_maddr(iommu->cmd_buffer.buffer);
    4.91 +    addr_lo = addr_64 & DMA_32BIT_MASK;
    4.92 +    addr_hi = addr_64 >> 32;
    4.93 +
    4.94 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
    4.95 +        IOMMU_CMD_BUFFER_BASE_LOW_MASK,
    4.96 +        IOMMU_CMD_BUFFER_BASE_LOW_SHIFT, &entry);
    4.97 +    writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);
    4.98 +
    4.99 +    power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
   4.100 +        IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;
   4.101 +
   4.102 +    set_field_in_reg_u32((u32)addr_hi, 0,
   4.103 +        IOMMU_CMD_BUFFER_BASE_HIGH_MASK,
   4.104 +        IOMMU_CMD_BUFFER_BASE_HIGH_SHIFT, &entry);
   4.105 +    set_field_in_reg_u32(power_of2_entries, entry,
   4.106 +        IOMMU_CMD_BUFFER_LENGTH_MASK,
   4.107 +        IOMMU_CMD_BUFFER_LENGTH_SHIFT, &entry);
   4.108 +    writel(entry, iommu->mmio_base+IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET);
   4.109 +}
   4.110 +
   4.111 +static void __init set_iommu_translation_control(struct amd_iommu *iommu,
   4.112 +            int enable)
   4.113 +{
   4.114 +    u32 entry;
   4.115 +
   4.116 +    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   4.117 +    set_field_in_reg_u32(iommu->ht_tunnel_support ? IOMMU_CONTROL_ENABLED :
   4.118 +        IOMMU_CONTROL_ENABLED, entry,
   4.119 +        IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
   4.120 +        IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
   4.121 +    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
   4.122 +        IOMMU_CONTROL_ENABLED, entry,
   4.123 +        IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
   4.124 +        IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
   4.125 +    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   4.126 +}
   4.127 +
   4.128 +static void __init set_iommu_command_buffer_control(struct amd_iommu *iommu,
   4.129 +            int enable)
   4.130 +{
   4.131 +    u32 entry;
   4.132 +
   4.133 +    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   4.134 +    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
   4.135 +        IOMMU_CONTROL_ENABLED, entry,
   4.136 +        IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
   4.137 +        IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);
   4.138 +    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
   4.139 +}
   4.140 +
   4.141 +void __init enable_iommu(struct amd_iommu *iommu)
   4.142 +{
   4.143 +    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
   4.144 +    set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
   4.145 +    printk("AMD IOMMU %d: Enabled\n", nr_amd_iommus);
   4.146 +}
   4.147 +
   4.148 +
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c	Fri Sep 21 17:15:47 2007 +0100
     5.3 @@ -0,0 +1,419 @@
     5.4 +/*
     5.5 + * Copyright (C) 2007 Advanced Micro Devices, Inc.
     5.6 + * Author: Leo Duran <leo.duran@amd.com>
     5.7 + * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
     5.8 + *
     5.9 + * This program is free software; you can redistribute it and/or modify
    5.10 + * it under the terms of the GNU General Public License as published by
    5.11 + * the Free Software Foundation; either version 2 of the License, or
    5.12 + * (at your option) any later version.
    5.13 + *
    5.14 + * This program is distributed in the hope that it will be useful,
    5.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    5.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    5.17 + * GNU General Public License for more details.
    5.18 + *
    5.19 + * You should have received a copy of the GNU General Public License
    5.20 + * along with this program; if not, write to the Free Software
    5.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
    5.22 + */
    5.23 +
    5.24 +#include <asm/hvm/iommu.h>
    5.25 +#include <asm/amd-iommu.h>
    5.26 +#include <asm/hvm/svm/amd-iommu-proto.h>
    5.27 +#include <xen/sched.h>
    5.28 +
    5.29 +extern long amd_iommu_poll_comp_wait;
    5.30 +
    5.31 +static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
    5.32 +{
    5.33 +    u32 tail, head, *cmd_buffer;
    5.34 +    int i;
    5.35 +
    5.36 +    BUG_ON( !iommu || !cmd );
    5.37 +
    5.38 +    tail = iommu->cmd_buffer_tail;
    5.39 +    if ( ++tail == iommu->cmd_buffer.entries ) {
    5.40 +        tail = 0;
    5.41 +    }
    5.42 +    head = get_field_from_reg_u32(
    5.43 +            readl(iommu->mmio_base+IOMMU_CMD_BUFFER_HEAD_OFFSET),
    5.44 +            IOMMU_CMD_BUFFER_HEAD_MASK,
    5.45 +            IOMMU_CMD_BUFFER_HEAD_SHIFT);
    5.46 +    if ( head != tail ) {
    5.47 +        cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer +
    5.48 +            (iommu->cmd_buffer_tail * IOMMU_CMD_BUFFER_ENTRY_SIZE));
    5.49 +        for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; ++i ) {
    5.50 +            cmd_buffer[i] = cmd[i];
    5.51 +        }
    5.52 +
    5.53 +        iommu->cmd_buffer_tail = tail;
    5.54 +        return 1;
    5.55 +    }
    5.56 +
    5.57 +    return 0;
    5.58 +}
    5.59 +
    5.60 +static void commit_iommu_command_buffer(struct amd_iommu *iommu)
    5.61 +{
    5.62 +    u32 tail;
    5.63 +
    5.64 +    BUG_ON( !iommu );
    5.65 +
    5.66 +    set_field_in_reg_u32(iommu->cmd_buffer_tail, 0,
    5.67 +        IOMMU_CMD_BUFFER_TAIL_MASK,
    5.68 +        IOMMU_CMD_BUFFER_TAIL_SHIFT, &tail);
    5.69 +    writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET);
    5.70 +}
    5.71 +
    5.72 +int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
    5.73 +{
    5.74 +    BUG_ON( !iommu || !cmd );
    5.75 +
    5.76 +    if ( queue_iommu_command(iommu, cmd) ) {
    5.77 +        commit_iommu_command_buffer(iommu);
    5.78 +        return 1;
    5.79 +    }
    5.80 +    return 0;
    5.81 +}
    5.82 +
    5.83 +static void invalidate_iommu_page(struct amd_iommu *iommu,
    5.84 +            u64 io_addr, u16 domain_id)
    5.85 +{
    5.86 +    u64 addr_lo, addr_hi;
    5.87 +    u32 cmd[4], entry;
    5.88 +
    5.89 +    addr_lo = io_addr & DMA_32BIT_MASK;
    5.90 +    addr_hi = io_addr >> 32;
    5.91 +
    5.92 +    set_field_in_reg_u32(domain_id, 0,
    5.93 +        IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
    5.94 +        IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
    5.95 +    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
    5.96 +        IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, &entry);
    5.97 +    cmd[1] = entry;
    5.98 +
    5.99 +    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, 0,
   5.100 +        IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
   5.101 +        IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
   5.102 +    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
   5.103 +        IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
   5.104 +        IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
   5.105 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
   5.106 +        IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
   5.107 +        IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
   5.108 +    cmd[2] = entry;
   5.109 +
   5.110 +    set_field_in_reg_u32((u32)addr_hi, 0,
   5.111 +        IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
   5.112 +        IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
   5.113 +    cmd[3] = entry;
   5.114 +
   5.115 +    cmd[0] = 0;
   5.116 +    send_iommu_command(iommu, cmd);
   5.117 +}
   5.118 +
   5.119 +static void flush_command_buffer(struct amd_iommu *iommu)
   5.120 +{
   5.121 +    u32 cmd[4], status;
   5.122 +    int loop_count, comp_wait;
   5.123 +
   5.124 +    /* clear 'ComWaitInt' in status register (WIC) */
   5.125 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
   5.126 +        IOMMU_STATUS_COMP_WAIT_INT_MASK,
   5.127 +        IOMMU_STATUS_COMP_WAIT_INT_SHIFT, &status);
   5.128 +    writel(status, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
   5.129 +
   5.130 +    /* send an empty COMPLETION_WAIT command to flush command buffer */
   5.131 +    cmd[3] = cmd[2] = 0;
   5.132 +    set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0,
   5.133 +        IOMMU_CMD_OPCODE_MASK,
   5.134 +        IOMMU_CMD_OPCODE_SHIFT, &cmd[1]);
   5.135 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
   5.136 +        IOMMU_COMP_WAIT_I_FLAG_MASK,
   5.137 +        IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]);
   5.138 +    send_iommu_command(iommu, cmd);
   5.139 +
    5.140 +    /* wait for 'ComWaitInt' to signal completion? */
   5.141 +    if ( amd_iommu_poll_comp_wait ) {
   5.142 +        loop_count = amd_iommu_poll_comp_wait;
   5.143 +        do {
   5.144 +            status = readl(iommu->mmio_base +
   5.145 +                    IOMMU_STATUS_MMIO_OFFSET);
   5.146 +            comp_wait = get_field_from_reg_u32(status,
   5.147 +                    IOMMU_STATUS_COMP_WAIT_INT_MASK,
   5.148 +                    IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
   5.149 +            --loop_count;
   5.150 +        } while ( loop_count && !comp_wait );
   5.151 +
   5.152 +        if ( comp_wait ) {
   5.153 +            /* clear 'ComWaitInt' in status register (WIC) */
   5.154 +            status &= IOMMU_STATUS_COMP_WAIT_INT_MASK;
   5.155 +            writel(status, iommu->mmio_base +
   5.156 +                IOMMU_STATUS_MMIO_OFFSET);
   5.157 +        } else
   5.158 +            dprintk(XENLOG_WARNING, "AMD IOMMU: %s(): Warning:"
   5.159 +                " ComWaitInt bit did not assert!\n",
   5.160 +                 __FUNCTION__);
   5.161 +    }
   5.162 +}
   5.163 +
   5.164 +static void clear_page_table_entry_present(u32 *pte)
   5.165 +{
   5.166 +    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, pte[0],
   5.167 +        IOMMU_PTE_PRESENT_MASK,
   5.168 +        IOMMU_PTE_PRESENT_SHIFT, &pte[0]);
   5.169 +}
   5.170 +
   5.171 +static void set_page_table_entry_present(u32 *pte, u64 page_addr,
   5.172 +                int iw, int ir)
   5.173 +{
   5.174 +    u64 addr_lo, addr_hi;
   5.175 +    u32 entry;
   5.176 +
   5.177 +    addr_lo = page_addr & DMA_32BIT_MASK;
   5.178 +    addr_hi = page_addr >> 32;
   5.179 +
   5.180 +    set_field_in_reg_u32((u32)addr_hi, 0,
   5.181 +        IOMMU_PTE_ADDR_HIGH_MASK,
   5.182 +        IOMMU_PTE_ADDR_HIGH_SHIFT, &entry);
   5.183 +    set_field_in_reg_u32(iw ? IOMMU_CONTROL_ENABLED :
   5.184 +        IOMMU_CONTROL_DISABLED, entry,
   5.185 +        IOMMU_PTE_IO_WRITE_PERMISSION_MASK,
   5.186 +        IOMMU_PTE_IO_WRITE_PERMISSION_SHIFT, &entry);
   5.187 +    set_field_in_reg_u32(ir ? IOMMU_CONTROL_ENABLED :
   5.188 +        IOMMU_CONTROL_DISABLED, entry,
   5.189 +        IOMMU_PTE_IO_READ_PERMISSION_MASK,
   5.190 +        IOMMU_PTE_IO_READ_PERMISSION_SHIFT, &entry);
   5.191 +    pte[1] = entry;
   5.192 +
   5.193 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
   5.194 +        IOMMU_PTE_ADDR_LOW_MASK,
   5.195 +        IOMMU_PTE_ADDR_LOW_SHIFT, &entry);
   5.196 +    set_field_in_reg_u32(IOMMU_PAGING_MODE_LEVEL_0, entry,
   5.197 +        IOMMU_PTE_NEXT_LEVEL_MASK,
   5.198 +        IOMMU_PTE_NEXT_LEVEL_SHIFT, &entry);
   5.199 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   5.200 +        IOMMU_PTE_PRESENT_MASK,
   5.201 +        IOMMU_PTE_PRESENT_SHIFT, &entry);
   5.202 +    pte[0] = entry;
   5.203 +}
   5.204 +
   5.205 +
   5.206 +static void amd_iommu_set_page_directory_entry(u32 *pde, 
   5.207 +            u64 next_ptr, u8 next_level)
   5.208 +{
   5.209 +    u64 addr_lo, addr_hi;
   5.210 +    u32 entry;
   5.211 +
   5.212 +    addr_lo = next_ptr & DMA_32BIT_MASK;
   5.213 +    addr_hi = next_ptr >> 32;
   5.214 +
   5.215 +    /* enable read/write permissions,which will be enforced at the PTE */
   5.216 +    set_field_in_reg_u32((u32)addr_hi, 0,
   5.217 +        IOMMU_PDE_ADDR_HIGH_MASK, IOMMU_PDE_ADDR_HIGH_SHIFT, &entry);
   5.218 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   5.219 +        IOMMU_PDE_IO_WRITE_PERMISSION_MASK,
   5.220 +        IOMMU_PDE_IO_WRITE_PERMISSION_SHIFT, &entry);
   5.221 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   5.222 +        IOMMU_PDE_IO_READ_PERMISSION_MASK,
   5.223 +        IOMMU_PDE_IO_READ_PERMISSION_SHIFT, &entry);
   5.224 +    pde[1] = entry;
   5.225 +
   5.226 +    /* mark next level as 'present' */
   5.227 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
   5.228 +        IOMMU_PDE_ADDR_LOW_MASK, IOMMU_PDE_ADDR_LOW_SHIFT, &entry);
   5.229 +    set_field_in_reg_u32(next_level, entry,
   5.230 +        IOMMU_PDE_NEXT_LEVEL_MASK,
   5.231 +        IOMMU_PDE_NEXT_LEVEL_SHIFT, &entry);
   5.232 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   5.233 +        IOMMU_PDE_PRESENT_MASK,
   5.234 +        IOMMU_PDE_PRESENT_SHIFT, &entry);
   5.235 +    pde[0] = entry;
   5.236 +}
   5.237 +
   5.238 +void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u16 domain_id,
   5.239 +                u8 paging_mode)
   5.240 +{
   5.241 +    u64 addr_hi, addr_lo;
   5.242 +    u32 entry;
   5.243 +
   5.244 +    dte[6] = dte[5] = dte[4] = 0;
   5.245 +
   5.246 +    set_field_in_reg_u32(IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED, 0,
   5.247 +        IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
   5.248 +        IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT, &entry);
   5.249 +    dte[3] = entry;
   5.250 +
   5.251 +    set_field_in_reg_u32(domain_id, 0,
   5.252 +        IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
   5.253 +        IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry);
   5.254 +    dte[2] = entry;
   5.255 +
   5.256 +    addr_lo = root_ptr & DMA_32BIT_MASK;
   5.257 +    addr_hi = root_ptr >> 32;
   5.258 +    set_field_in_reg_u32((u32)addr_hi, 0,
   5.259 +        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
   5.260 +        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT, &entry);
   5.261 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   5.262 +        IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK,
   5.263 +        IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_SHIFT, &entry);
   5.264 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   5.265 +        IOMMU_DEV_TABLE_IO_READ_PERMISSION_MASK,
   5.266 +        IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT, &entry);
   5.267 +    dte[1] = entry;
   5.268 +
   5.269 +    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
   5.270 +        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
   5.271 +        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT, &entry);
   5.272 +    set_field_in_reg_u32(paging_mode, entry,
   5.273 +        IOMMU_DEV_TABLE_PAGING_MODE_MASK,
   5.274 +        IOMMU_DEV_TABLE_PAGING_MODE_SHIFT, &entry);
   5.275 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   5.276 +        IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
   5.277 +        IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry);
   5.278 +    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
   5.279 +        IOMMU_DEV_TABLE_VALID_MASK,
   5.280 +        IOMMU_DEV_TABLE_VALID_SHIFT, &entry);
   5.281 +    dte[0] = entry;
   5.282 +}
   5.283 +
   5.284 +static void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry)
   5.285 +{
   5.286 +    u64 addr_lo, addr_hi, ptr;
   5.287 +
   5.288 +    addr_lo = get_field_from_reg_u32(entry[0],
   5.289 +            IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
   5.290 +            IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT);
   5.291 +
   5.292 +    addr_hi = get_field_from_reg_u32(entry[1],
   5.293 +            IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
   5.294 +            IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT);
   5.295 +
   5.296 +    ptr = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
   5.297 +    return ptr ? maddr_to_virt((unsigned long)ptr) : NULL;
   5.298 +}
   5.299 +
   5.300 +static int amd_iommu_is_pte_present(u32 *entry)
   5.301 +{
   5.302 +    return (get_field_from_reg_u32(entry[0],
   5.303 +            IOMMU_PDE_PRESENT_MASK,
   5.304 +            IOMMU_PDE_PRESENT_SHIFT));
   5.305 +}
   5.306 +
/*
 * Walk the IO page tables from 'table' (rooted at 'level') down to the
 * level-1 entry covering io_pfn, allocating missing intermediate tables
 * on demand. Returns a pointer to the level-1 PTE, or NULL if an
 * intermediate table could not be allocated.
 * NOTE(review): callers appear to serialise via hd->mapping_lock, yet the
 * "*(u64*)(pde) == 0" re-check suggests concurrent populate was a concern
 * — confirm the intended locking contract.
 */
static void *get_pte_from_page_tables(void *table, int level,
        unsigned long io_pfn)
{
    unsigned long offset;
    void *pde = 0;

    BUG_ON( !table );

    while ( level > 0 )
    {
        void *next_table = 0;
        unsigned long next_ptr;
        /* Index of the entry covering io_pfn in the current-level table
         * (PTE_PER_TABLE_MASK is the inverted mask, hence the ~). */
        offset = io_pfn >> ((PTE_PER_TABLE_SHIFT *
            (level - IOMMU_PAGING_MODE_LEVEL_1)));
        offset &= ~PTE_PER_TABLE_MASK;
        pde = table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);

        /* At the bottom level this entry is the PTE we return. */
        if ( level == 1 )
            break;
        if ( !pde )
           return NULL;
        if ( !amd_iommu_is_pte_present(pde) ) {
            next_table = alloc_xenheap_page();
            if ( next_table == NULL )
                return NULL;
            memset(next_table, 0, PAGE_SIZE);
            if ( *(u64*)(pde) == 0 ) {
                /* Entry still empty: install our freshly zeroed table. */
                next_ptr = (u64)virt_to_maddr(next_table);
                amd_iommu_set_page_directory_entry((u32 *)pde,
                    next_ptr, level - 1);
            } else
                /* Entry became non-zero meanwhile: drop our page. */
                free_xenheap_page(next_table);
        }
        table = amd_iommu_get_vptr_from_page_table_entry(pde);
        level--;
    }

    return pde;
}
   5.346 +
   5.347 +int amd_iommu_map_page(struct domain *d, unsigned long gfn,
   5.348 +        unsigned long mfn)
   5.349 +{
   5.350 +    void *pte;
   5.351 +    unsigned long flags;
   5.352 +    u64 maddr;
   5.353 +    struct hvm_iommu *hd = domain_hvm_iommu(d);
   5.354 +    int iw, ir;
   5.355 +
   5.356 +    BUG_ON( !hd->root_table );
   5.357 +
   5.358 +    maddr = (u64)(mfn << PAGE_SHIFT);
   5.359 +
   5.360 +    iw = IOMMU_IO_WRITE_ENABLED;
   5.361 +    ir = IOMMU_IO_READ_ENABLED;
   5.362 +
   5.363 +    spin_lock_irqsave(&hd->mapping_lock, flags);
   5.364 +
   5.365 +    pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
   5.366 +
   5.367 +    if ( pte != 0 ) {
   5.368 +        set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
   5.369 +        spin_unlock_irqrestore(&hd->mapping_lock, flags);
   5.370 +        return 0;
   5.371 +    } else {
   5.372 +        dprintk(XENLOG_ERR,
   5.373 +            "%s() AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n",
   5.374 +            __FUNCTION__, gfn);
   5.375 +        spin_unlock_irqrestore(&hd->mapping_lock, flags);
   5.376 +        return -EIO;
   5.377 +    }
   5.378 +}
   5.379 +
   5.380 +int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
   5.381 +{
   5.382 +    void *pte;
   5.383 +    unsigned long flags;
   5.384 +    u64 io_addr = gfn;
   5.385 +    int requestor_id;
   5.386 +    struct amd_iommu *iommu;
   5.387 +    struct hvm_iommu *hd = domain_hvm_iommu(d);
   5.388 +
   5.389 +    BUG_ON( !hd->root_table );
   5.390 +
   5.391 +    requestor_id = hd->domain_id;
   5.392 +    io_addr = (u64)(gfn << PAGE_SHIFT);
   5.393 +
   5.394 +    spin_lock_irqsave(&hd->mapping_lock, flags);
   5.395 +
   5.396 +    pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
   5.397 +
   5.398 +    if ( pte != 0 ) {
   5.399 +        /* mark PTE as 'page not present' */
   5.400 +        clear_page_table_entry_present((u32 *)pte);
   5.401 +        spin_unlock_irqrestore(&hd->mapping_lock, flags);
   5.402 +
   5.403 +        /* send INVALIDATE_IOMMU_PAGES command */
   5.404 +        for_each_amd_iommu(iommu) {
   5.405 +
   5.406 +            spin_lock_irqsave(&iommu->lock, flags);
   5.407 +
   5.408 +            invalidate_iommu_page(iommu, io_addr, requestor_id);
   5.409 +            flush_command_buffer(iommu);
   5.410 +
   5.411 +            spin_unlock_irqrestore(&iommu->lock, flags);
   5.412 +        }
   5.413 +
   5.414 +        return 0;
   5.415 +    } else {
   5.416 +        dprintk(XENLOG_ERR,
   5.417 +            "%s() AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", 
   5.418 +            __FUNCTION__, gfn);
   5.419 +        spin_unlock_irqrestore(&hd->mapping_lock, flags);
   5.420 +        return -EIO;
   5.421 +    }
   5.422 +}
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c	Fri Sep 21 17:15:47 2007 +0100
     6.3 @@ -0,0 +1,389 @@
     6.4 +/*
     6.5 + * Copyright (C) 2007 Advanced Micro Devices, Inc.
     6.6 + * Author: Leo Duran <leo.duran@amd.com>
     6.7 + * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
     6.8 + *
     6.9 + * This program is free software; you can redistribute it and/or modify
    6.10 + * it under the terms of the GNU General Public License as published by
    6.11 + * the Free Software Foundation; either version 2 of the License, or
    6.12 + * (at your option) any later version.
    6.13 + *
    6.14 + * This program is distributed in the hope that it will be useful,
    6.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    6.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    6.17 + * GNU General Public License for more details.
    6.18 + *
    6.19 + * You should have received a copy of the GNU General Public License
    6.20 + * along with this program; if not, write to the Free Software
    6.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
    6.22 + */
    6.23 +
    6.24 +#include <asm/amd-iommu.h>
    6.25 +#include <asm/hvm/svm/amd-iommu-proto.h>
    6.26 +#include <xen/sched.h>
    6.27 +#include <asm/mm.h>
    6.28 +#include "pci-direct.h"
    6.29 +#include "pci_regs.h"
    6.30 +
    6.31 +struct list_head amd_iommu_head;
    6.32 +long amd_iommu_poll_comp_wait = COMPLETION_WAIT_DEFAULT_POLLING_COUNT;
    6.33 +static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
    6.34 +int nr_amd_iommus = 0;
    6.35 +
    6.36 +/* will set if amd-iommu HW is found */
    6.37 +int amd_iommu_enabled = 0;
    6.38 +
    6.39 +static int enable_amd_iommu = 0;
    6.40 +boolean_param("enable_amd_iommu", enable_amd_iommu);
    6.41 +
    6.42 +static void deallocate_domain_page_tables(struct hvm_iommu *hd)
    6.43 +{
    6.44 +    if ( hd->root_table )
    6.45 +        free_xenheap_page(hd->root_table);
    6.46 +}
    6.47 +
    6.48 +static void deallocate_domain_resources(struct hvm_iommu *hd)
    6.49 +{
    6.50 +    deallocate_domain_page_tables(hd);
    6.51 +}
    6.52 +
    6.53 +static void __init init_cleanup(void)
    6.54 +{
    6.55 +    struct amd_iommu *iommu;
    6.56 +
    6.57 +    dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__);
    6.58 +
    6.59 +    for_each_amd_iommu(iommu) {
    6.60 +        unmap_iommu_mmio_region(iommu);
    6.61 +    }
    6.62 +}
    6.63 +
    6.64 +static void __init deallocate_iommu_table_struct(
    6.65 +            struct table_struct *table)
    6.66 +{
    6.67 +    if (table->buffer) {
    6.68 +        free_xenheap_pages(table->buffer,
    6.69 +            get_order_from_bytes(table->alloc_size));
    6.70 +        table->buffer = NULL;
    6.71 +    }
    6.72 +}
    6.73 +
    6.74 +static void __init deallocate_iommu_resources(struct amd_iommu *iommu)
    6.75 +{
    6.76 +    deallocate_iommu_table_struct(&iommu->dev_table);
    6.77 +    deallocate_iommu_table_struct(&iommu->cmd_buffer);;
    6.78 +}
    6.79 +
    6.80 +static void __init detect_cleanup(void)
    6.81 +{
    6.82 +    struct amd_iommu *iommu;
    6.83 +
    6.84 +    dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__);
    6.85 +
    6.86 +    for_each_amd_iommu(iommu) {
    6.87 +        list_del(&iommu->list);
    6.88 +        deallocate_iommu_resources(iommu);
    6.89 +        xfree(iommu);
    6.90 +    }
    6.91 +}
    6.92 +
    6.93 +static int requestor_id_from_bdf(int bdf)
    6.94 +{
    6.95 +    /* HACK - HACK */
    6.96 +    /* account for possible 'aliasing' by parent device */
    6.97 +   return bdf;
    6.98 +}
    6.99 +
   6.100 +static int __init allocate_iommu_table_struct(struct table_struct *table,
   6.101 +            const char *name)
   6.102 +{
   6.103 +    table->buffer = (void *) alloc_xenheap_pages(
   6.104 +        get_order_from_bytes(table->alloc_size));
   6.105 +
   6.106 +    if ( !table->buffer ) {
   6.107 +        dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating %s\n", name);
   6.108 +        return -ENOMEM;
   6.109 +    }
   6.110 +    memset(table->buffer, 0, table->alloc_size);
   6.111 +
   6.112 +    return 0;
   6.113 +}
   6.114 +
   6.115 +static int __init allocate_iommu_resources(struct amd_iommu *iommu)
   6.116 +{
   6.117 +    /* allocate 'device table' on a 4K boundary */
   6.118 +    iommu->dev_table.alloc_size =
   6.119 +        PAGE_ALIGN(((iommu->last_downstream_bus + 1) *
   6.120 +        IOMMU_DEV_TABLE_ENTRIES_PER_BUS) *
   6.121 +        IOMMU_DEV_TABLE_ENTRY_SIZE);
   6.122 +    iommu->dev_table.entries =
   6.123 +        iommu->dev_table.alloc_size / IOMMU_DEV_TABLE_ENTRY_SIZE;
   6.124 +
   6.125 +    if (allocate_iommu_table_struct(&iommu->dev_table,
   6.126 +            "Device Table") != 0)
   6.127 +        goto error_out;
   6.128 +
   6.129 +    /* allocate 'command buffer' in power of 2 increments of 4K */
   6.130 +    iommu->cmd_buffer_tail = 0;
   6.131 +    iommu->cmd_buffer.alloc_size =
   6.132 +        PAGE_SIZE << get_order_from_bytes(
   6.133 +        PAGE_ALIGN(amd_iommu_cmd_buffer_entries *
   6.134 +        IOMMU_CMD_BUFFER_ENTRY_SIZE));
   6.135 +
   6.136 +   iommu->cmd_buffer.entries =
   6.137 +        iommu->cmd_buffer.alloc_size / IOMMU_CMD_BUFFER_ENTRY_SIZE;
   6.138 +
   6.139 +    if ( allocate_iommu_table_struct(&iommu->cmd_buffer,
   6.140 +            "Command Buffer") != 0 )
   6.141 +        goto error_out;
   6.142 +
   6.143 +    return 0;
   6.144 +
   6.145 +error_out:
   6.146 +    deallocate_iommu_resources(iommu);
   6.147 +    return -ENOMEM;
   6.148 +}
   6.149 +
   6.150 +int iommu_detect_callback(u8 bus, u8 dev, u8 func, u8 cap_ptr)
   6.151 +{
   6.152 +    struct amd_iommu *iommu;
   6.153 +
   6.154 +    iommu = (struct amd_iommu *) xmalloc(struct amd_iommu);
   6.155 +    if ( !iommu ) {
   6.156 +        dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating amd_iommu\n");
   6.157 +        return -ENOMEM;
   6.158 +    }
   6.159 +    memset(iommu, 0, sizeof(struct amd_iommu));
   6.160 +    spin_lock_init(&iommu->lock);
   6.161 +
   6.162 +    /* get capability and topology information */
   6.163 +    if ( get_iommu_capabilities(bus, dev, func, cap_ptr, iommu) != 0 )
   6.164 +        goto error_out;
   6.165 +    if ( get_iommu_last_downstream_bus(iommu) != 0 )
   6.166 +        goto error_out;
   6.167 +
   6.168 +    list_add_tail(&iommu->list, &amd_iommu_head);
   6.169 +
   6.170 +    /* allocate resources for this IOMMU */
   6.171 +    if (allocate_iommu_resources(iommu) != 0)
   6.172 +        goto error_out;
   6.173 +
   6.174 +    return 0;
   6.175 +
   6.176 +error_out:
   6.177 +    xfree(iommu);
   6.178 +    return -ENODEV;
   6.179 +}
   6.180 +
   6.181 +static int __init amd_iommu_init(void)
   6.182 +{
   6.183 +    struct amd_iommu *iommu;
   6.184 +    unsigned long flags;
   6.185 +
   6.186 +    for_each_amd_iommu(iommu) {
   6.187 +        spin_lock_irqsave(&iommu->lock, flags);
   6.188 +
   6.189 +        /* register IOMMU data strucures in MMIO space */
   6.190 +        if (map_iommu_mmio_region(iommu) != 0)
   6.191 +            goto error_out;
   6.192 +        register_iommu_dev_table_in_mmio_space(iommu);
   6.193 +        register_iommu_cmd_buffer_in_mmio_space(iommu);
   6.194 +
   6.195 +        /* enable IOMMU translation services */
   6.196 +        enable_iommu(iommu);
   6.197 +        nr_amd_iommus++;
   6.198 +
   6.199 +        spin_unlock_irqrestore(&iommu->lock, flags);
   6.200 +    }
   6.201 +
   6.202 +    amd_iommu_enabled = 1;
   6.203 +
   6.204 +    return 0;
   6.205 +
   6.206 +error_out:
   6.207 +    init_cleanup();
   6.208 +    return -ENODEV;
   6.209 +}
   6.210 +
   6.211 +struct amd_iommu *find_iommu_for_device(int bus, int devfn)
   6.212 +{
   6.213 +    struct amd_iommu *iommu;
   6.214 +
   6.215 +    for_each_amd_iommu(iommu) {
   6.216 +        if ( bus == iommu->root_bus ) {
   6.217 +            if ( devfn >= iommu->first_devfn &&
   6.218 +                devfn <= iommu->last_devfn )
   6.219 +                return iommu;
   6.220 +        }
   6.221 +        else if ( bus <= iommu->last_downstream_bus ) {
   6.222 +            if ( iommu->downstream_bus_present[bus] )
   6.223 +                return iommu;
   6.224 +        }
   6.225 +    }
   6.226 +
   6.227 +    return NULL;
   6.228 +}
   6.229 +
   6.230 +void amd_iommu_setup_domain_device(struct domain *domain,
   6.231 +        struct amd_iommu *iommu, int requestor_id)
   6.232 +{
   6.233 +    void *dte;
   6.234 +    u64 root_ptr;
   6.235 +    unsigned long flags;
   6.236 +    struct hvm_iommu *hd = domain_hvm_iommu(domain);
   6.237 +
   6.238 +    BUG_ON( !hd->root_table||!hd->paging_mode );
   6.239 +
   6.240 +    root_ptr = (u64)virt_to_maddr(hd->root_table);
   6.241 +    dte = iommu->dev_table.buffer +
   6.242 +        (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
   6.243 +
   6.244 +    spin_lock_irqsave(&iommu->lock, flags); 
   6.245 +
   6.246 +    amd_iommu_set_dev_table_entry((u32 *)dte,
   6.247 +        root_ptr, hd->domain_id, hd->paging_mode);
   6.248 +
   6.249 +    dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
   6.250 +        "root_ptr:%lx, domain_id:%d, paging_mode:%d\n",
   6.251 +        requestor_id, root_ptr, hd->domain_id, hd->paging_mode);
   6.252 +
   6.253 +    spin_unlock_irqrestore(&iommu->lock, flags);
   6.254 +}
   6.255 +
   6.256 +void __init amd_iommu_setup_dom0_devices(void)
   6.257 +{
   6.258 +    struct hvm_iommu *hd = domain_hvm_iommu(dom0);
   6.259 +    struct amd_iommu *iommu;
   6.260 +    struct pci_dev *pdev;
   6.261 +    int bus, dev, func;
   6.262 +    u32 l;
   6.263 +    int req_id, bdf;
   6.264 +
   6.265 +    for ( bus = 0; bus < 256; bus++ ) {
   6.266 +        for ( dev = 0; dev < 32; dev++ ) {
   6.267 +            for ( func = 0; func < 8; func++ ) {
   6.268 +                l = read_pci_config(bus, dev, func, PCI_VENDOR_ID);
   6.269 +                /* some broken boards return 0 or ~0 if a slot is empty: */
   6.270 +                if ( l == 0xffffffff || l == 0x00000000 ||
   6.271 +                    l == 0x0000ffff || l == 0xffff0000 )
   6.272 +                    continue;
   6.273 +
   6.274 +                pdev = xmalloc(struct pci_dev);
   6.275 +                pdev->bus = bus;
   6.276 +                pdev->devfn = PCI_DEVFN(dev, func);
   6.277 +                list_add_tail(&pdev->list, &hd->pdev_list);
   6.278 +
   6.279 +                bdf = (bus << 8) | pdev->devfn;
   6.280 +                req_id = requestor_id_from_bdf(bdf);
   6.281 +                iommu = find_iommu_for_device(bus, pdev->devfn);
   6.282 +
   6.283 +                if ( iommu )
   6.284 +                    amd_iommu_setup_domain_device(dom0, iommu, req_id);
   6.285 +            }
   6.286 +        }
   6.287 +    }
   6.288 +}
   6.289 +
   6.290 +int amd_iommu_detect(void)
   6.291 +{
   6.292 +    unsigned long i;
   6.293 +
   6.294 +    if ( !enable_amd_iommu ) {
   6.295 +        printk("AMD IOMMU: Disabled\n");
   6.296 +        return 0;
   6.297 +    }
   6.298 +
   6.299 +    INIT_LIST_HEAD(&amd_iommu_head);
   6.300 +
   6.301 +    if ( scan_for_iommu(iommu_detect_callback) != 0 ) {
   6.302 +        dprintk(XENLOG_ERR, "AMD IOMMU: Error detection\n");
   6.303 +        goto error_out;
   6.304 +    }
   6.305 +
   6.306 +    if ( !iommu_found() ) {
   6.307 +        printk("AMD IOMMU: Not found!\n");
   6.308 +        return 0;
   6.309 +    }
   6.310 +
   6.311 +    if ( amd_iommu_init() != 0 ) {
   6.312 +        dprintk(XENLOG_ERR, "AMD IOMMU: Error initialization\n");
   6.313 +        goto error_out;
   6.314 +    }
   6.315 +
   6.316 +    if ( amd_iommu_domain_init(dom0) != 0 )
   6.317 +        goto error_out;
   6.318 +
   6.319 +    /* setup 1:1 page table for dom0 */
   6.320 +    for ( i = 0; i < max_page; i++ )
   6.321 +        amd_iommu_map_page(dom0, i, i);
   6.322 +
   6.323 +    amd_iommu_setup_dom0_devices();
   6.324 +    return 0;
   6.325 +
   6.326 +error_out:
   6.327 +     detect_cleanup();
   6.328 +     return -ENODEV;
   6.329 +
   6.330 +}
   6.331 +
   6.332 +static int allocate_domain_resources(struct hvm_iommu *hd)
   6.333 +{
   6.334 +    /* allocate root table */
   6.335 +    hd->root_table = (void *)alloc_xenheap_page();
   6.336 +    if ( !hd->root_table )
   6.337 +        return -ENOMEM;
   6.338 +    memset((u8*)hd->root_table, 0, PAGE_SIZE);
   6.339 +
   6.340 +    return 0;
   6.341 +}
   6.342 +
   6.343 +static int get_paging_mode(unsigned long entries)
   6.344 +{
   6.345 +    int level = 1;
   6.346 +
   6.347 +    BUG_ON ( !max_page );
   6.348 +
   6.349 +    if ( entries > max_page )
   6.350 +        entries = max_page;
   6.351 +
   6.352 +    while ( entries > PTE_PER_TABLE_SIZE ) {
   6.353 +        entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT;
   6.354 +        ++level;
   6.355 +        if ( level > 6 )
   6.356 +            return -ENOMEM;
   6.357 +    }
   6.358 +
   6.359 +    dprintk(XENLOG_INFO, "AMD IOMMU: paging mode = %d\n", level);
   6.360 +
   6.361 +    return level;
   6.362 +}
   6.363 +
   6.364 +int amd_iommu_domain_init(struct domain *domain)
   6.365 +{
   6.366 +    struct hvm_iommu *hd = domain_hvm_iommu(domain);
   6.367 +
   6.368 +    spin_lock_init(&hd->mapping_lock);
   6.369 +    spin_lock_init(&hd->iommu_list_lock);
   6.370 +    INIT_LIST_HEAD(&hd->pdev_list);
   6.371 +
   6.372 +    /* allocate page directroy */
   6.373 +    if ( allocate_domain_resources(hd) != 0 ) {
   6.374 +        dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__);
   6.375 +        goto error_out;
   6.376 +    }
   6.377 +
   6.378 +    if ( is_hvm_domain(domain) )
   6.379 +        hd->paging_mode = IOMMU_PAGE_TABLE_LEVEL_4;
   6.380 +    else
   6.381 +        hd->paging_mode = get_paging_mode(max_page);
   6.382 +
   6.383 +    hd->domain_id = domain->domain_id;
   6.384 +
   6.385 +    return 0;
   6.386 +
   6.387 +error_out:
   6.388 +    deallocate_domain_resources(hd);
   6.389 +    return -ENOMEM;
   6.390 +}
   6.391 +
   6.392 +
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen/arch/x86/hvm/svm/amd_iommu/pci-direct.h	Fri Sep 21 17:15:47 2007 +0100
     7.3 @@ -0,0 +1,48 @@
     7.4 +#ifndef ASM_PCI_DIRECT_H
     7.5 +#define ASM_PCI_DIRECT_H 1
     7.6 +
     7.7 +#include <xen/types.h>
     7.8 +#include <asm/io.h>
     7.9 +
    7.10 +/* Direct PCI access. This is used for PCI accesses in early boot before
    7.11 +   the PCI subsystem works. */ 
    7.12 +
    7.13 +#define PDprintk(x...)
    7.14 +
    7.15 +static inline u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset)
    7.16 +{
    7.17 +    u32 v; 
    7.18 +    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
    7.19 +    v = inl(0xcfc); 
    7.20 +    if (v != 0xffffffff)
    7.21 +        PDprintk("%x reading 4 from %x: %x\n", slot, offset, v);
    7.22 +    return v;
    7.23 +}
    7.24 +
    7.25 +static inline u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset)
    7.26 +{
    7.27 +    u8 v; 
    7.28 +    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
    7.29 +    v = inb(0xcfc + (offset&3)); 
    7.30 +    PDprintk("%x reading 1 from %x: %x\n", slot, offset, v);
    7.31 +    return v;
    7.32 +}
    7.33 +
    7.34 +static inline u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset)
    7.35 +{
    7.36 +    u16 v; 
    7.37 +    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
    7.38 +    v = inw(0xcfc + (offset&2)); 
    7.39 +    PDprintk("%x reading 2 from %x: %x\n", slot, offset, v);
    7.40 +    return v;
    7.41 +}
    7.42 +
    7.43 +static inline void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
    7.44 +                    u32 val)
    7.45 +{
    7.46 +    PDprintk("%x writing to %x: %x\n", slot, offset, val); 
    7.47 +    outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
    7.48 +    outl(val, 0xcfc); 
    7.49 +}
    7.50 +
    7.51 +#endif
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/xen/arch/x86/hvm/svm/amd_iommu/pci_regs.h	Fri Sep 21 17:15:47 2007 +0100
     8.3 @@ -0,0 +1,513 @@
     8.4 +/*
     8.5 + *	pci_regs.h
     8.6 + *
     8.7 + *	PCI standard defines
     8.8 + *	Copyright 1994, Drew Eckhardt
     8.9 + *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
    8.10 + *
    8.11 + *	For more information, please consult the following manuals (look at
    8.12 + *	http://www.pcisig.com/ for how to get them):
    8.13 + *
    8.14 + *	PCI BIOS Specification
    8.15 + *	PCI Local Bus Specification
    8.16 + *	PCI to PCI Bridge Specification
    8.17 + *	PCI System Design Guide
    8.18 + *
    8.19 + * 	For hypertransport information, please consult the following manuals
    8.20 + * 	from http://www.hypertransport.org
    8.21 + *
    8.22 + *	The Hypertransport I/O Link Specification
    8.23 + */
    8.24 +
    8.25 +#ifndef LINUX_PCI_REGS_H
    8.26 +#define LINUX_PCI_REGS_H
    8.27 +
    8.28 +/*
    8.29 + * Under PCI, each device has 256 bytes of configuration address space,
    8.30 + * of which the first 64 bytes are standardized as follows:
    8.31 + */
    8.32 +#define PCI_VENDOR_ID		0x00	/* 16 bits */
    8.33 +#define PCI_DEVICE_ID		0x02	/* 16 bits */
    8.34 +#define PCI_COMMAND		0x04	/* 16 bits */
    8.35 +#define  PCI_COMMAND_IO		0x1	/* Enable response in I/O space */
    8.36 +#define  PCI_COMMAND_MEMORY	0x2	/* Enable response in Memory space */
    8.37 +#define  PCI_COMMAND_MASTER	0x4	/* Enable bus mastering */
    8.38 +#define  PCI_COMMAND_SPECIAL	0x8	/* Enable response to special cycles */
    8.39 +#define  PCI_COMMAND_INVALIDATE	0x10	/* Use memory write and invalidate */
    8.40 +#define  PCI_COMMAND_VGA_PALETTE 0x20	/* Enable palette snooping */
    8.41 +#define  PCI_COMMAND_PARITY	0x40	/* Enable parity checking */
    8.42 +#define  PCI_COMMAND_WAIT 	0x80	/* Enable address/data stepping */
    8.43 +#define  PCI_COMMAND_SERR	0x100	/* Enable SERR */
    8.44 +#define  PCI_COMMAND_FAST_BACK	0x200	/* Enable back-to-back writes */
    8.45 +#define  PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
    8.46 +
    8.47 +#define PCI_STATUS		0x06	/* 16 bits */
    8.48 +#define  PCI_STATUS_CAP_LIST	0x10	/* Support Capability List */
    8.49 +#define  PCI_STATUS_66MHZ	0x20	/* Support 66 Mhz PCI 2.1 bus */
    8.50 +#define  PCI_STATUS_UDF		0x40	/* Support User Definable Features [obsolete] */
    8.51 +#define  PCI_STATUS_FAST_BACK	0x80	/* Accept fast-back to back */
    8.52 +#define  PCI_STATUS_PARITY	0x100	/* Detected parity error */
    8.53 +#define  PCI_STATUS_DEVSEL_MASK	0x600	/* DEVSEL timing */
    8.54 +#define  PCI_STATUS_DEVSEL_FAST		0x000
    8.55 +#define  PCI_STATUS_DEVSEL_MEDIUM	0x200
    8.56 +#define  PCI_STATUS_DEVSEL_SLOW		0x400
    8.57 +#define  PCI_STATUS_SIG_TARGET_ABORT	0x800 /* Set on target abort */
    8.58 +#define  PCI_STATUS_REC_TARGET_ABORT	0x1000 /* Master ack of " */
    8.59 +#define  PCI_STATUS_REC_MASTER_ABORT	0x2000 /* Set on master abort */
    8.60 +#define  PCI_STATUS_SIG_SYSTEM_ERROR	0x4000 /* Set when we drive SERR */
    8.61 +#define  PCI_STATUS_DETECTED_PARITY	0x8000 /* Set on parity error */
    8.62 +
    8.63 +#define PCI_CLASS_REVISION	0x08	/* High 24 bits are class, low 8 revision */
    8.64 +#define PCI_REVISION_ID		0x08	/* Revision ID */
    8.65 +#define PCI_CLASS_PROG		0x09	/* Reg. Level Programming Interface */
    8.66 +#define PCI_CLASS_DEVICE	0x0a	/* Device class */
    8.67 +
    8.68 +#define PCI_CACHE_LINE_SIZE	0x0c	/* 8 bits */
    8.69 +#define PCI_LATENCY_TIMER	0x0d	/* 8 bits */
    8.70 +#define PCI_HEADER_TYPE		0x0e	/* 8 bits */
    8.71 +#define  PCI_HEADER_TYPE_NORMAL		0
    8.72 +#define  PCI_HEADER_TYPE_BRIDGE		1
    8.73 +#define  PCI_HEADER_TYPE_CARDBUS	2
    8.74 +
    8.75 +#define PCI_BIST		0x0f	/* 8 bits */
    8.76 +#define  PCI_BIST_CODE_MASK	0x0f	/* Return result */
    8.77 +#define  PCI_BIST_START		0x40	/* 1 to start BIST, 2 secs or less */
    8.78 +#define  PCI_BIST_CAPABLE	0x80	/* 1 if BIST capable */
    8.79 +
    8.80 +/*
    8.81 + * Base addresses specify locations in memory or I/O space.
    8.82 + * Decoded size can be determined by writing a value of
    8.83 + * 0xffffffff to the register, and reading it back.  Only
    8.84 + * 1 bits are decoded.
    8.85 + */
    8.86 +#define PCI_BASE_ADDRESS_0	0x10	/* 32 bits */
    8.87 +#define PCI_BASE_ADDRESS_1	0x14	/* 32 bits [htype 0,1 only] */
    8.88 +#define PCI_BASE_ADDRESS_2	0x18	/* 32 bits [htype 0 only] */
    8.89 +#define PCI_BASE_ADDRESS_3	0x1c	/* 32 bits */
    8.90 +#define PCI_BASE_ADDRESS_4	0x20	/* 32 bits */
    8.91 +#define PCI_BASE_ADDRESS_5	0x24	/* 32 bits */
    8.92 +#define  PCI_BASE_ADDRESS_SPACE		0x01	/* 0 = memory, 1 = I/O */
    8.93 +#define  PCI_BASE_ADDRESS_SPACE_IO	0x01
    8.94 +#define  PCI_BASE_ADDRESS_SPACE_MEMORY	0x00
    8.95 +#define  PCI_BASE_ADDRESS_MEM_TYPE_MASK	0x06
    8.96 +#define  PCI_BASE_ADDRESS_MEM_TYPE_32	0x00	/* 32 bit address */
    8.97 +#define  PCI_BASE_ADDRESS_MEM_TYPE_1M	0x02	/* Below 1M [obsolete] */
    8.98 +#define  PCI_BASE_ADDRESS_MEM_TYPE_64	0x04	/* 64 bit address */
    8.99 +#define  PCI_BASE_ADDRESS_MEM_PREFETCH	0x08	/* prefetchable? */
   8.100 +#define  PCI_BASE_ADDRESS_MEM_MASK	(~0x0fUL)
   8.101 +#define  PCI_BASE_ADDRESS_IO_MASK	(~0x03UL)
   8.102 +/* bit 1 is reserved if address_space = 1 */
   8.103 +
   8.104 +/* Header type 0 (normal devices) */
   8.105 +#define PCI_CARDBUS_CIS		0x28
   8.106 +#define PCI_SUBSYSTEM_VENDOR_ID	0x2c
   8.107 +#define PCI_SUBSYSTEM_ID	0x2e
   8.108 +#define PCI_ROM_ADDRESS		0x30	/* Bits 31..11 are address, 10..1 reserved */
   8.109 +#define  PCI_ROM_ADDRESS_ENABLE	0x01
   8.110 +#define PCI_ROM_ADDRESS_MASK	(~0x7ffUL)
   8.111 +
   8.112 +#define PCI_CAPABILITY_LIST	0x34	/* Offset of first capability list entry */
   8.113 +
   8.114 +/* 0x35-0x3b are reserved */
   8.115 +#define PCI_INTERRUPT_LINE	0x3c	/* 8 bits */
   8.116 +#define PCI_INTERRUPT_PIN	0x3d	/* 8 bits */
   8.117 +#define PCI_MIN_GNT		0x3e	/* 8 bits */
   8.118 +#define PCI_MAX_LAT		0x3f	/* 8 bits */
   8.119 +
   8.120 +/* Header type 1 (PCI-to-PCI bridges) */
   8.121 +#define PCI_PRIMARY_BUS		0x18	/* Primary bus number */
   8.122 +#define PCI_SECONDARY_BUS	0x19	/* Secondary bus number */
   8.123 +#define PCI_SUBORDINATE_BUS	0x1a	/* Highest bus number behind the bridge */
   8.124 +#define PCI_SEC_LATENCY_TIMER	0x1b	/* Latency timer for secondary interface */
   8.125 +#define PCI_IO_BASE		0x1c	/* I/O range behind the bridge */
   8.126 +#define PCI_IO_LIMIT		0x1d
   8.127 +#define  PCI_IO_RANGE_TYPE_MASK	0x0fUL	/* I/O bridging type */
   8.128 +#define  PCI_IO_RANGE_TYPE_16	0x00
   8.129 +#define  PCI_IO_RANGE_TYPE_32	0x01
   8.130 +#define  PCI_IO_RANGE_MASK	(~0x0fUL)
   8.131 +#define PCI_SEC_STATUS		0x1e	/* Secondary status register, only bit 14 used */
   8.132 +#define PCI_MEMORY_BASE		0x20	/* Memory range behind */
   8.133 +#define PCI_MEMORY_LIMIT	0x22
   8.134 +#define  PCI_MEMORY_RANGE_TYPE_MASK 0x0fUL
   8.135 +#define  PCI_MEMORY_RANGE_MASK	(~0x0fUL)
   8.136 +#define PCI_PREF_MEMORY_BASE	0x24	/* Prefetchable memory range behind */
   8.137 +#define PCI_PREF_MEMORY_LIMIT	0x26
   8.138 +#define  PCI_PREF_RANGE_TYPE_MASK 0x0fUL
   8.139 +#define  PCI_PREF_RANGE_TYPE_32	0x00
   8.140 +#define  PCI_PREF_RANGE_TYPE_64	0x01
   8.141 +#define  PCI_PREF_RANGE_MASK	(~0x0fUL)
   8.142 +#define PCI_PREF_BASE_UPPER32	0x28	/* Upper half of prefetchable memory range */
   8.143 +#define PCI_PREF_LIMIT_UPPER32	0x2c
   8.144 +#define PCI_IO_BASE_UPPER16	0x30	/* Upper half of I/O addresses */
   8.145 +#define PCI_IO_LIMIT_UPPER16	0x32
   8.146 +/* 0x34 same as for htype 0 */
   8.147 +/* 0x35-0x3b is reserved */
   8.148 +#define PCI_ROM_ADDRESS1	0x38	/* Same as PCI_ROM_ADDRESS, but for htype 1 */
   8.149 +/* 0x3c-0x3d are same as for htype 0 */
   8.150 +#define PCI_BRIDGE_CONTROL	0x3e
   8.151 +#define  PCI_BRIDGE_CTL_PARITY	0x01	/* Enable parity detection on secondary interface */
   8.152 +#define  PCI_BRIDGE_CTL_SERR	0x02	/* The same for SERR forwarding */
   8.153 +#define  PCI_BRIDGE_CTL_NO_ISA	0x04	/* Disable bridging of ISA ports */
   8.154 +#define  PCI_BRIDGE_CTL_VGA	0x08	/* Forward VGA addresses */
   8.155 +#define  PCI_BRIDGE_CTL_MASTER_ABORT	0x20  /* Report master aborts */
   8.156 +#define  PCI_BRIDGE_CTL_BUS_RESET	0x40	/* Secondary bus reset */
   8.157 +#define  PCI_BRIDGE_CTL_FAST_BACK	0x80	/* Fast Back2Back enabled on secondary interface */
   8.158 +
   8.159 +/* Header type 2 (CardBus bridges) */
   8.160 +#define PCI_CB_CAPABILITY_LIST	0x14
   8.161 +/* 0x15 reserved */
   8.162 +#define PCI_CB_SEC_STATUS	0x16	/* Secondary status */
   8.163 +#define PCI_CB_PRIMARY_BUS	0x18	/* PCI bus number */
   8.164 +#define PCI_CB_CARD_BUS		0x19	/* CardBus bus number */
   8.165 +#define PCI_CB_SUBORDINATE_BUS	0x1a	/* Subordinate bus number */
   8.166 +#define PCI_CB_LATENCY_TIMER	0x1b	/* CardBus latency timer */
   8.167 +#define PCI_CB_MEMORY_BASE_0	0x1c
   8.168 +#define PCI_CB_MEMORY_LIMIT_0	0x20
   8.169 +#define PCI_CB_MEMORY_BASE_1	0x24
   8.170 +#define PCI_CB_MEMORY_LIMIT_1	0x28
   8.171 +#define PCI_CB_IO_BASE_0	0x2c
   8.172 +#define PCI_CB_IO_BASE_0_HI	0x2e
   8.173 +#define PCI_CB_IO_LIMIT_0	0x30
   8.174 +#define PCI_CB_IO_LIMIT_0_HI	0x32
   8.175 +#define PCI_CB_IO_BASE_1	0x34
   8.176 +#define PCI_CB_IO_BASE_1_HI	0x36
   8.177 +#define PCI_CB_IO_LIMIT_1	0x38
   8.178 +#define PCI_CB_IO_LIMIT_1_HI	0x3a
   8.179 +#define  PCI_CB_IO_RANGE_MASK	(~0x03UL)
   8.180 +/* 0x3c-0x3d are same as for htype 0 */
   8.181 +#define PCI_CB_BRIDGE_CONTROL	0x3e
   8.182 +#define  PCI_CB_BRIDGE_CTL_PARITY	0x01	/* Similar to standard bridge control register */
   8.183 +#define  PCI_CB_BRIDGE_CTL_SERR		0x02
   8.184 +#define  PCI_CB_BRIDGE_CTL_ISA		0x04
   8.185 +#define  PCI_CB_BRIDGE_CTL_VGA		0x08
   8.186 +#define  PCI_CB_BRIDGE_CTL_MASTER_ABORT	0x20
   8.187 +#define  PCI_CB_BRIDGE_CTL_CB_RESET	0x40	/* CardBus reset */
   8.188 +#define  PCI_CB_BRIDGE_CTL_16BIT_INT	0x80	/* Enable interrupt for 16-bit cards */
   8.189 +#define  PCI_CB_BRIDGE_CTL_PREFETCH_MEM0 0x100	/* Prefetch enable for both memory regions */
   8.190 +#define  PCI_CB_BRIDGE_CTL_PREFETCH_MEM1 0x200
   8.191 +#define  PCI_CB_BRIDGE_CTL_POST_WRITES	0x400
   8.192 +#define PCI_CB_SUBSYSTEM_VENDOR_ID	0x40
   8.193 +#define PCI_CB_SUBSYSTEM_ID		0x42
   8.194 +#define PCI_CB_LEGACY_MODE_BASE		0x44	/* 16-bit PC Card legacy mode base address (ExCa) */
   8.195 +/* 0x48-0x7f reserved */
   8.196 +
   8.197 +/* Capability lists */
   8.198 +
   8.199 +#define PCI_CAP_LIST_ID		0	/* Capability ID */
   8.200 +#define  PCI_CAP_ID_PM		0x01	/* Power Management */
   8.201 +#define  PCI_CAP_ID_AGP		0x02	/* Accelerated Graphics Port */
   8.202 +#define  PCI_CAP_ID_VPD		0x03	/* Vital Product Data */
   8.203 +#define  PCI_CAP_ID_SLOTID	0x04	/* Slot Identification */
   8.204 +#define  PCI_CAP_ID_MSI		0x05	/* Message Signalled Interrupts */
   8.205 +#define  PCI_CAP_ID_CHSWP	0x06	/* CompactPCI HotSwap */
   8.206 +#define  PCI_CAP_ID_PCIX	0x07	/* PCI-X */
   8.207 +#define  PCI_CAP_ID_HT		0x08	/* HyperTransport */
   8.208 +#define  PCI_CAP_ID_VNDR	0x09	/* Vendor specific capability */
   8.209 +#define  PCI_CAP_ID_SHPC 	0x0C	/* PCI Standard Hot-Plug Controller */
   8.210 +#define  PCI_CAP_ID_EXP 	0x10	/* PCI Express */
   8.211 +#define  PCI_CAP_ID_MSIX	0x11	/* MSI-X */
   8.212 +#define PCI_CAP_LIST_NEXT	1	/* Next capability in the list */
   8.213 +#define PCI_CAP_FLAGS		2	/* Capability defined flags (16 bits) */
   8.214 +#define PCI_CAP_SIZEOF		4
   8.215 +
   8.216 +/* Power Management Registers */
   8.217 +
   8.218 +#define PCI_PM_PMC		2	/* PM Capabilities Register */
   8.219 +#define  PCI_PM_CAP_VER_MASK	0x0007	/* Version */
   8.220 +#define  PCI_PM_CAP_PME_CLOCK	0x0008	/* PME clock required */
   8.221 +#define  PCI_PM_CAP_RESERVED    0x0010  /* Reserved field */
   8.222 +#define  PCI_PM_CAP_DSI		0x0020	/* Device specific initialization */
   8.223 +#define  PCI_PM_CAP_AUX_POWER	0x01C0	/* Auxilliary power support mask */
   8.224 +#define  PCI_PM_CAP_D1		0x0200	/* D1 power state support */
   8.225 +#define  PCI_PM_CAP_D2		0x0400	/* D2 power state support */
   8.226 +#define  PCI_PM_CAP_PME		0x0800	/* PME pin supported */
   8.227 +#define  PCI_PM_CAP_PME_MASK	0xF800	/* PME Mask of all supported states */
   8.228 +#define  PCI_PM_CAP_PME_D0	0x0800	/* PME# from D0 */
   8.229 +#define  PCI_PM_CAP_PME_D1	0x1000	/* PME# from D1 */
   8.230 +#define  PCI_PM_CAP_PME_D2	0x2000	/* PME# from D2 */
   8.231 +#define  PCI_PM_CAP_PME_D3	0x4000	/* PME# from D3 (hot) */
   8.232 +#define  PCI_PM_CAP_PME_D3cold	0x8000	/* PME# from D3 (cold) */
   8.233 +#define PCI_PM_CTRL		4	/* PM control and status register */
   8.234 +#define  PCI_PM_CTRL_STATE_MASK	0x0003	/* Current power state (D0 to D3) */
   8.235 +#define  PCI_PM_CTRL_NO_SOFT_RESET	0x0004	/* No reset for D3hot->D0 */
   8.236 +#define  PCI_PM_CTRL_PME_ENABLE	0x0100	/* PME pin enable */
   8.237 +#define  PCI_PM_CTRL_DATA_SEL_MASK	0x1e00	/* Data select (??) */
   8.238 +#define  PCI_PM_CTRL_DATA_SCALE_MASK	0x6000	/* Data scale (??) */
   8.239 +#define  PCI_PM_CTRL_PME_STATUS	0x8000	/* PME pin status */
   8.240 +#define PCI_PM_PPB_EXTENSIONS	6	/* PPB support extensions (??) */
   8.241 +#define  PCI_PM_PPB_B2_B3	0x40	/* Stop clock when in D3hot (??) */
   8.242 +#define  PCI_PM_BPCC_ENABLE	0x80	/* Bus power/clock control enable (??) */
   8.243 +#define PCI_PM_DATA_REGISTER	7	/* (??) */
   8.244 +#define PCI_PM_SIZEOF		8
   8.245 +
   8.246 +/* AGP registers */
   8.247 +
   8.248 +#define PCI_AGP_VERSION		2	/* BCD version number */
   8.249 +#define PCI_AGP_RFU		3	/* Rest of capability flags */
   8.250 +#define PCI_AGP_STATUS		4	/* Status register */
   8.251 +#define  PCI_AGP_STATUS_RQ_MASK	0xff000000	/* Maximum number of requests - 1 */
   8.252 +#define  PCI_AGP_STATUS_SBA	0x0200	/* Sideband addressing supported */
   8.253 +#define  PCI_AGP_STATUS_64BIT	0x0020	/* 64-bit addressing supported */
   8.254 +#define  PCI_AGP_STATUS_FW	0x0010	/* FW transfers supported */
   8.255 +#define  PCI_AGP_STATUS_RATE4	0x0004	/* 4x transfer rate supported */
   8.256 +#define  PCI_AGP_STATUS_RATE2	0x0002	/* 2x transfer rate supported */
   8.257 +#define  PCI_AGP_STATUS_RATE1	0x0001	/* 1x transfer rate supported */
   8.258 +#define PCI_AGP_COMMAND		8	/* Control register */
   8.259 +#define  PCI_AGP_COMMAND_RQ_MASK 0xff000000  /* Master: Maximum number of requests */
   8.260 +#define  PCI_AGP_COMMAND_SBA	0x0200	/* Sideband addressing enabled */
   8.261 +#define  PCI_AGP_COMMAND_AGP	0x0100	/* Allow processing of AGP transactions */
   8.262 +#define  PCI_AGP_COMMAND_64BIT	0x0020 	/* Allow processing of 64-bit addresses */
   8.263 +#define  PCI_AGP_COMMAND_FW	0x0010 	/* Force FW transfers */
   8.264 +#define  PCI_AGP_COMMAND_RATE4	0x0004	/* Use 4x rate */
   8.265 +#define  PCI_AGP_COMMAND_RATE2	0x0002	/* Use 2x rate */
   8.266 +#define  PCI_AGP_COMMAND_RATE1	0x0001	/* Use 1x rate */
   8.267 +#define PCI_AGP_SIZEOF		12
   8.268 +
   8.269 +/* Vital Product Data */
   8.270 +
   8.271 +#define PCI_VPD_ADDR		2	/* Address to access (15 bits!) */
   8.272 +#define  PCI_VPD_ADDR_MASK	0x7fff	/* Address mask */
   8.273 +#define  PCI_VPD_ADDR_F		0x8000	/* Write 0, 1 indicates completion */
   8.274 +#define PCI_VPD_DATA		4	/* 32-bits of data returned here */
   8.275 +
   8.276 +/* Slot Identification */
   8.277 +
   8.278 +#define PCI_SID_ESR		2	/* Expansion Slot Register */
   8.279 +#define  PCI_SID_ESR_NSLOTS	0x1f	/* Number of expansion slots available */
   8.280 +#define  PCI_SID_ESR_FIC	0x20	/* First In Chassis Flag */
   8.281 +#define PCI_SID_CHASSIS_NR	3	/* Chassis Number */
   8.282 +
   8.283 +/* Message Signalled Interrupts registers */
   8.284 +
   8.285 +#define PCI_MSI_FLAGS		2	/* Various flags */
   8.286 +#define  PCI_MSI_FLAGS_64BIT	0x80	/* 64-bit addresses allowed */
   8.287 +#define  PCI_MSI_FLAGS_QSIZE	0x70	/* Message queue size configured */
   8.288 +#define  PCI_MSI_FLAGS_QMASK	0x0e	/* Maximum queue size available */
   8.289 +#define  PCI_MSI_FLAGS_ENABLE	0x01	/* MSI feature enabled */
   8.290 +#define  PCI_MSI_FLAGS_MASKBIT	0x100	/* 64-bit mask bits allowed */
   8.291 +#define PCI_MSI_RFU		3	/* Rest of capability flags */
   8.292 +#define PCI_MSI_ADDRESS_LO	4	/* Lower 32 bits */
   8.293 +#define PCI_MSI_ADDRESS_HI	8	/* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
   8.294 +#define PCI_MSI_DATA_32		8	/* 16 bits of data for 32-bit devices */
   8.295 +#define PCI_MSI_DATA_64		12	/* 16 bits of data for 64-bit devices */
   8.296 +#define PCI_MSI_MASK_BIT	16	/* Mask bits register */
   8.297 +
   8.298 +/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
   8.299 +#define PCI_MSIX_FLAGS		2
   8.300 +#define  PCI_MSIX_FLAGS_QSIZE	0x7FF
   8.301 +#define  PCI_MSIX_FLAGS_ENABLE	(1 << 15)
   8.302 +#define  PCI_MSIX_FLAGS_MASKALL	(1 << 14)
   8.303 +#define PCI_MSIX_FLAGS_BIRMASK	(7 << 0)
   8.304 +#define PCI_MSIX_FLAGS_BITMASK	(1 << 0)
   8.305 +
   8.306 +/* CompactPCI Hotswap Register */
   8.307 +
   8.308 +#define PCI_CHSWP_CSR		2	/* Control and Status Register */
   8.309 +#define  PCI_CHSWP_DHA		0x01	/* Device Hiding Arm */
   8.310 +#define  PCI_CHSWP_EIM		0x02	/* ENUM# Signal Mask */
   8.311 +#define  PCI_CHSWP_PIE		0x04	/* Pending Insert or Extract */
   8.312 +#define  PCI_CHSWP_LOO		0x08	/* LED On / Off */
   8.313 +#define  PCI_CHSWP_PI		0x30	/* Programming Interface */
   8.314 +#define  PCI_CHSWP_EXT		0x40	/* ENUM# status - extraction */
   8.315 +#define  PCI_CHSWP_INS		0x80	/* ENUM# status - insertion */
   8.316 +
   8.317 +/* PCI-X registers */
   8.318 +
   8.319 +#define PCI_X_CMD		2	/* Modes & Features */
   8.320 +#define  PCI_X_CMD_DPERR_E	0x0001	/* Data Parity Error Recovery Enable */
   8.321 +#define  PCI_X_CMD_ERO		0x0002	/* Enable Relaxed Ordering */
   8.322 +#define  PCI_X_CMD_MAX_READ	0x000c	/* Max Memory Read Byte Count */
   8.323 +#define  PCI_X_CMD_MAX_SPLIT	0x0070	/* Max Outstanding Split Transactions */
   8.324 +#define  PCI_X_CMD_VERSION(x) 	(((x) >> 12) & 3) /* Version */
   8.325 +#define PCI_X_STATUS		4	/* PCI-X capabilities */
   8.326 +#define  PCI_X_STATUS_DEVFN	0x000000ff	/* A copy of devfn */
   8.327 +#define  PCI_X_STATUS_BUS	0x0000ff00	/* A copy of bus nr */
   8.328 +#define  PCI_X_STATUS_64BIT	0x00010000	/* 64-bit device */
   8.329 +#define  PCI_X_STATUS_133MHZ	0x00020000	/* 133 MHz capable */
   8.330 +#define  PCI_X_STATUS_SPL_DISC	0x00040000	/* Split Completion Discarded */
   8.331 +#define  PCI_X_STATUS_UNX_SPL	0x00080000	/* Unexpected Split Completion */
   8.332 +#define  PCI_X_STATUS_COMPLEX	0x00100000	/* Device Complexity */
   8.333 +#define  PCI_X_STATUS_MAX_READ	0x00600000	/* Designed Max Memory Read Count */
   8.334 +#define  PCI_X_STATUS_MAX_SPLIT	0x03800000	/* Designed Max Outstanding Split Transactions */
   8.335 +#define  PCI_X_STATUS_MAX_CUM	0x1c000000	/* Designed Max Cumulative Read Size */
   8.336 +#define  PCI_X_STATUS_SPL_ERR	0x20000000	/* Rcvd Split Completion Error Msg */
   8.337 +#define  PCI_X_STATUS_266MHZ	0x40000000	/* 266 MHz capable */
   8.338 +#define  PCI_X_STATUS_533MHZ	0x80000000	/* 533 MHz capable */
   8.339 +
   8.340 +/* PCI Express capability registers */
   8.341 +
   8.342 +#define PCI_EXP_FLAGS		2	/* Capabilities register */
   8.343 +#define PCI_EXP_FLAGS_VERS	0x000f	/* Capability version */
   8.344 +#define PCI_EXP_FLAGS_TYPE	0x00f0	/* Device/Port type */
   8.345 +#define  PCI_EXP_TYPE_ENDPOINT	0x0	/* Express Endpoint */
   8.346 +#define  PCI_EXP_TYPE_LEG_END	0x1	/* Legacy Endpoint */
   8.347 +#define  PCI_EXP_TYPE_ROOT_PORT 0x4	/* Root Port */
   8.348 +#define  PCI_EXP_TYPE_UPSTREAM	0x5	/* Upstream Port */
   8.349 +#define  PCI_EXP_TYPE_DOWNSTREAM 0x6	/* Downstream Port */
   8.350 +#define  PCI_EXP_TYPE_PCI_BRIDGE 0x7	/* PCI/PCI-X Bridge */
   8.351 +#define PCI_EXP_FLAGS_SLOT	0x0100	/* Slot implemented */
   8.352 +#define PCI_EXP_FLAGS_IRQ	0x3e00	/* Interrupt message number */
   8.353 +#define PCI_EXP_DEVCAP		4	/* Device capabilities */
   8.354 +#define  PCI_EXP_DEVCAP_PAYLOAD	0x07	/* Max_Payload_Size */
   8.355 +#define  PCI_EXP_DEVCAP_PHANTOM	0x18	/* Phantom functions */
   8.356 +#define  PCI_EXP_DEVCAP_EXT_TAG	0x20	/* Extended tags */
   8.357 +#define  PCI_EXP_DEVCAP_L0S	0x1c0	/* L0s Acceptable Latency */
   8.358 +#define  PCI_EXP_DEVCAP_L1	0xe00	/* L1 Acceptable Latency */
   8.359 +#define  PCI_EXP_DEVCAP_ATN_BUT	0x1000	/* Attention Button Present */
   8.360 +#define  PCI_EXP_DEVCAP_ATN_IND	0x2000	/* Attention Indicator Present */
   8.361 +#define  PCI_EXP_DEVCAP_PWR_IND	0x4000	/* Power Indicator Present */
   8.362 +#define  PCI_EXP_DEVCAP_PWR_VAL	0x3fc0000 /* Slot Power Limit Value */
   8.363 +#define  PCI_EXP_DEVCAP_PWR_SCL	0xc000000 /* Slot Power Limit Scale */
   8.364 +#define PCI_EXP_DEVCTL		8	/* Device Control */
   8.365 +#define  PCI_EXP_DEVCTL_CERE	0x0001	/* Correctable Error Reporting En. */
   8.366 +#define  PCI_EXP_DEVCTL_NFERE	0x0002	/* Non-Fatal Error Reporting Enable */
   8.367 +#define  PCI_EXP_DEVCTL_FERE	0x0004	/* Fatal Error Reporting Enable */
   8.368 +#define  PCI_EXP_DEVCTL_URRE	0x0008	/* Unsupported Request Reporting En. */
   8.369 +#define  PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
   8.370 +#define  PCI_EXP_DEVCTL_PAYLOAD	0x00e0	/* Max_Payload_Size */
   8.371 +#define  PCI_EXP_DEVCTL_EXT_TAG	0x0100	/* Extended Tag Field Enable */
   8.372 +#define  PCI_EXP_DEVCTL_PHANTOM	0x0200	/* Phantom Functions Enable */
   8.373 +#define  PCI_EXP_DEVCTL_AUX_PME	0x0400	/* Auxiliary Power PM Enable */
   8.374 +#define  PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800  /* Enable No Snoop */
   8.375 +#define  PCI_EXP_DEVCTL_READRQ	0x7000	/* Max_Read_Request_Size */
   8.376 +#define PCI_EXP_DEVSTA		10	/* Device Status */
   8.377 +#define  PCI_EXP_DEVSTA_CED	0x01	/* Correctable Error Detected */
   8.378 +#define  PCI_EXP_DEVSTA_NFED	0x02	/* Non-Fatal Error Detected */
   8.379 +#define  PCI_EXP_DEVSTA_FED	0x04	/* Fatal Error Detected */
   8.380 +#define  PCI_EXP_DEVSTA_URD	0x08	/* Unsupported Request Detected */
   8.381 +#define  PCI_EXP_DEVSTA_AUXPD	0x10	/* AUX Power Detected */
   8.382 +#define  PCI_EXP_DEVSTA_TRPND	0x20	/* Transactions Pending */
   8.383 +#define PCI_EXP_LNKCAP		12	/* Link Capabilities */
   8.384 +#define PCI_EXP_LNKCTL		16	/* Link Control */
   8.385 +#define  PCI_EXP_LNKCTL_CLKREQ_EN 0x100	/* Enable clkreq */
   8.386 +#define PCI_EXP_LNKSTA		18	/* Link Status */
   8.387 +#define PCI_EXP_SLTCAP		20	/* Slot Capabilities */
   8.388 +#define PCI_EXP_SLTCTL		24	/* Slot Control */
   8.389 +#define PCI_EXP_SLTSTA		26	/* Slot Status */
   8.390 +#define PCI_EXP_RTCTL		28	/* Root Control */
   8.391 +#define  PCI_EXP_RTCTL_SECEE	0x01	/* System Error on Correctable Error */
   8.392 +#define  PCI_EXP_RTCTL_SENFEE	0x02	/* System Error on Non-Fatal Error */
   8.393 +#define  PCI_EXP_RTCTL_SEFEE	0x04	/* System Error on Fatal Error */
   8.394 +#define  PCI_EXP_RTCTL_PMEIE	0x08	/* PME Interrupt Enable */
   8.395 +#define  PCI_EXP_RTCTL_CRSSVE	0x10	/* CRS Software Visibility Enable */
   8.396 +#define PCI_EXP_RTCAP		30	/* Root Capabilities */
   8.397 +#define PCI_EXP_RTSTA		32	/* Root Status */
   8.398 +
   8.399 +/* Extended Capabilities (PCI-X 2.0 and Express) */
   8.400 +#define PCI_EXT_CAP_ID(header)		(header & 0x0000ffff)
   8.401 +#define PCI_EXT_CAP_VER(header)		((header >> 16) & 0xf)
   8.402 +#define PCI_EXT_CAP_NEXT(header)	((header >> 20) & 0xffc)
   8.403 +
   8.404 +#define PCI_EXT_CAP_ID_ERR	1
   8.405 +#define PCI_EXT_CAP_ID_VC	2
   8.406 +#define PCI_EXT_CAP_ID_DSN	3
   8.407 +#define PCI_EXT_CAP_ID_PWR	4
   8.408 +
   8.409 +/* Advanced Error Reporting */
   8.410 +#define PCI_ERR_UNCOR_STATUS	4	/* Uncorrectable Error Status */
   8.411 +#define  PCI_ERR_UNC_TRAIN	0x00000001	/* Training */
   8.412 +#define  PCI_ERR_UNC_DLP	0x00000010	/* Data Link Protocol */
   8.413 +#define  PCI_ERR_UNC_POISON_TLP	0x00001000	/* Poisoned TLP */
   8.414 +#define  PCI_ERR_UNC_FCP	0x00002000	/* Flow Control Protocol */
   8.415 +#define  PCI_ERR_UNC_COMP_TIME	0x00004000	/* Completion Timeout */
   8.416 +#define  PCI_ERR_UNC_COMP_ABORT	0x00008000	/* Completer Abort */
   8.417 +#define  PCI_ERR_UNC_UNX_COMP	0x00010000	/* Unexpected Completion */
   8.418 +#define  PCI_ERR_UNC_RX_OVER	0x00020000	/* Receiver Overflow */
   8.419 +#define  PCI_ERR_UNC_MALF_TLP	0x00040000	/* Malformed TLP */
   8.420 +#define  PCI_ERR_UNC_ECRC	0x00080000	/* ECRC Error Status */
   8.421 +#define  PCI_ERR_UNC_UNSUP	0x00100000	/* Unsupported Request */
   8.422 +#define PCI_ERR_UNCOR_MASK	8	/* Uncorrectable Error Mask */
   8.423 +	/* Same bits as above */
   8.424 +#define PCI_ERR_UNCOR_SEVER	12	/* Uncorrectable Error Severity */
   8.425 +	/* Same bits as above */
   8.426 +#define PCI_ERR_COR_STATUS	16	/* Correctable Error Status */
   8.427 +#define  PCI_ERR_COR_RCVR	0x00000001	/* Receiver Error Status */
   8.428 +#define  PCI_ERR_COR_BAD_TLP	0x00000040	/* Bad TLP Status */
   8.429 +#define  PCI_ERR_COR_BAD_DLLP	0x00000080	/* Bad DLLP Status */
   8.430 +#define  PCI_ERR_COR_REP_ROLL	0x00000100	/* REPLAY_NUM Rollover */
   8.431 +#define  PCI_ERR_COR_REP_TIMER	0x00001000	/* Replay Timer Timeout */
   8.432 +#define PCI_ERR_COR_MASK	20	/* Correctable Error Mask */
   8.433 +	/* Same bits as above */
   8.434 +#define PCI_ERR_CAP		24	/* Advanced Error Capabilities */
   8.435 +#define  PCI_ERR_CAP_FEP(x)	((x) & 31)	/* First Error Pointer */
   8.436 +#define  PCI_ERR_CAP_ECRC_GENC	0x00000020	/* ECRC Generation Capable */
   8.437 +#define  PCI_ERR_CAP_ECRC_GENE	0x00000040	/* ECRC Generation Enable */
   8.438 +#define  PCI_ERR_CAP_ECRC_CHKC	0x00000080	/* ECRC Check Capable */
   8.439 +#define  PCI_ERR_CAP_ECRC_CHKE	0x00000100	/* ECRC Check Enable */
   8.440 +#define PCI_ERR_HEADER_LOG	28	/* Header Log Register (16 bytes) */
   8.441 +#define PCI_ERR_ROOT_COMMAND	44	/* Root Error Command */
   8.442 +/* Correctable Err Reporting Enable */
   8.443 +#define PCI_ERR_ROOT_CMD_COR_EN		0x00000001
   8.444 +/* Non-fatal Err Reporting Enable */
   8.445 +#define PCI_ERR_ROOT_CMD_NONFATAL_EN	0x00000002
   8.446 +/* Fatal Err Reporting Enable */
   8.447 +#define PCI_ERR_ROOT_CMD_FATAL_EN	0x00000004
   8.448 +#define PCI_ERR_ROOT_STATUS	48
   8.449 +#define PCI_ERR_ROOT_COR_RCV		0x00000001	/* ERR_COR Received */
   8.450 +/* Multi ERR_COR Received */
   8.451 +#define PCI_ERR_ROOT_MULTI_COR_RCV	0x00000002
   8.452 +/* ERR_FATAL/NONFATAL Received */
   8.453 +#define PCI_ERR_ROOT_UNCOR_RCV		0x00000004
   8.454 +/* Multi ERR_FATAL/NONFATAL Received */
   8.455 +#define PCI_ERR_ROOT_MULTI_UNCOR_RCV	0x00000008
   8.456 +#define PCI_ERR_ROOT_FIRST_FATAL	0x00000010	/* First Fatal */
   8.457 +#define PCI_ERR_ROOT_NONFATAL_RCV	0x00000020	/* Non-Fatal Received */
   8.458 +#define PCI_ERR_ROOT_FATAL_RCV		0x00000040	/* Fatal Received */
   8.459 +#define PCI_ERR_ROOT_COR_SRC	52
   8.460 +#define PCI_ERR_ROOT_SRC	54
   8.461 +
   8.462 +/* Virtual Channel */
   8.463 +#define PCI_VC_PORT_REG1	4
   8.464 +#define PCI_VC_PORT_REG2	8
   8.465 +#define PCI_VC_PORT_CTRL	12
   8.466 +#define PCI_VC_PORT_STATUS	14
   8.467 +#define PCI_VC_RES_CAP		16
   8.468 +#define PCI_VC_RES_CTRL		20
   8.469 +#define PCI_VC_RES_STATUS	26
   8.470 +
   8.471 +/* Power Budgeting */
   8.472 +#define PCI_PWR_DSR		4	/* Data Select Register */
   8.473 +#define PCI_PWR_DATA		8	/* Data Register */
   8.474 +#define  PCI_PWR_DATA_BASE(x)	((x) & 0xff)	    /* Base Power */
   8.475 +#define  PCI_PWR_DATA_SCALE(x)	(((x) >> 8) & 3)    /* Data Scale */
   8.476 +#define  PCI_PWR_DATA_PM_SUB(x)	(((x) >> 10) & 7)   /* PM Sub State */
   8.477 +#define  PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */
   8.478 +#define  PCI_PWR_DATA_TYPE(x)	(((x) >> 15) & 7)   /* Type */
   8.479 +#define  PCI_PWR_DATA_RAIL(x)	(((x) >> 18) & 7)   /* Power Rail */
   8.480 +#define PCI_PWR_CAP		12	/* Capability */
   8.481 +#define  PCI_PWR_CAP_BUDGET(x)	((x) & 1)	/* Included in system budget */
   8.482 +
   8.483 +/*
   8.484 + * Hypertransport sub capability types
   8.485 + *
   8.486 + * Unfortunately there are both 3 bit and 5 bit capability types defined
   8.487 + * in the HT spec, catering for that is a little messy. You probably don't
   8.488 + * want to use these directly, just use pci_find_ht_capability() and it
   8.489 + * will do the right thing for you.
   8.490 + */
   8.491 +#define HT_3BIT_CAP_MASK	0xE0
   8.492 +#define HT_CAPTYPE_SLAVE	0x00	/* Slave/Primary link configuration */
   8.493 +#define HT_CAPTYPE_HOST		0x20	/* Host/Secondary link configuration */
   8.494 +
   8.495 +#define HT_5BIT_CAP_MASK	0xF8
   8.496 +#define HT_CAPTYPE_IRQ		0x80	/* IRQ Configuration */
   8.497 +#define HT_CAPTYPE_REMAPPING_40	0xA0	/* 40 bit address remapping */
   8.498 +#define HT_CAPTYPE_REMAPPING_64 0xA2	/* 64 bit address remapping */
   8.499 +#define HT_CAPTYPE_UNITID_CLUMP	0x90	/* Unit ID clumping */
   8.500 +#define HT_CAPTYPE_EXTCONF	0x98	/* Extended Configuration Space Access */
   8.501 +#define HT_CAPTYPE_MSI_MAPPING	0xA8	/* MSI Mapping Capability */
   8.502 +#define  HT_MSI_FLAGS		0x02		/* Offset to flags */
   8.503 +#define  HT_MSI_FLAGS_ENABLE	0x1		/* Mapping enable */
   8.504 +#define  HT_MSI_FLAGS_FIXED	0x2		/* Fixed mapping only */
   8.505 +#define  HT_MSI_FIXED_ADDR	0x00000000FEE00000ULL	/* Fixed addr */
   8.506 +#define  HT_MSI_ADDR_LO		0x04		/* Offset to low addr bits */
   8.507 +#define  HT_MSI_ADDR_LO_MASK	0xFFF00000	/* Low address bit mask */
   8.508 +#define  HT_MSI_ADDR_HI		0x08		/* Offset to high addr bits */
   8.509 +#define HT_CAPTYPE_DIRECT_ROUTE	0xB0	/* Direct routing configuration */
   8.510 +#define HT_CAPTYPE_VCSET	0xB8	/* Virtual Channel configuration */
   8.511 +#define HT_CAPTYPE_ERROR_RETRY	0xC0	/* Retry on error configuration */
   8.512 +#define HT_CAPTYPE_GEN3		0xD0	/* Generation 3 hypertransport configuration */
   8.513 +#define HT_CAPTYPE_PM		0xE0	/* Hypertransport powermanagement configuration */
   8.514 +
   8.515 +
   8.516 +#endif /* LINUX_PCI_REGS_H */
     9.1 --- a/xen/arch/x86/setup.c	Fri Sep 21 17:10:00 2007 +0100
     9.2 +++ b/xen/arch/x86/setup.c	Fri Sep 21 17:15:47 2007 +0100
     9.3 @@ -1040,6 +1040,8 @@ void __init __start_xen(unsigned long mb
     9.4  
     9.5      iommu_setup();
     9.6  
     9.7 +    amd_iommu_detect();
     9.8 +
     9.9      /*
    9.10       * We're going to setup domain0 using the module(s) that we stashed safely
    9.11       * above our heap. The second module, if present, is an initrd ramdisk.
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen/include/asm-x86/amd-iommu.h	Fri Sep 21 17:15:47 2007 +0100
    10.3 @@ -0,0 +1,70 @@
    10.4 +/*
    10.5 + * Copyright (C) 2007 Advanced Micro Devices, Inc.
    10.6 + * Author: Leo Duran <leo.duran@amd.com>
    10.7 + * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
    10.8 + *
    10.9 + * This program is free software; you can redistribute it and/or modify
   10.10 + * it under the terms of the GNU General Public License as published by
   10.11 + * the Free Software Foundation; either version 2 of the License, or
   10.12 + * (at your option) any later version.
   10.13 + *
   10.14 + * This program is distributed in the hope that it will be useful,
   10.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   10.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   10.17 + * GNU General Public License for more details.
   10.18 + *
   10.19 + * You should have received a copy of the GNU General Public License
   10.20 + * along with this program; if not, write to the Free Software
   10.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
   10.22 + */
   10.23 +#ifndef _ASM_X86_64_AMD_IOMMU_H
   10.24 +#define _ASM_X86_64_AMD_IOMMU_H
   10.25 +
   10.26 +#include <xen/init.h>
   10.27 +#include <xen/types.h>
   10.28 +#include <xen/spinlock.h>
   10.29 +#include <xen/mm.h>
   10.30 +#include <asm/hvm/svm/amd-iommu-defs.h>
   10.31 +
   10.32 +#define iommu_found()           (!list_empty(&amd_iommu_head))
   10.33 +
   10.34 +extern int amd_iommu_enabled;
   10.35 +extern struct list_head amd_iommu_head;
   10.36 +
   10.37 +extern int __init amd_iommu_detect(void);
   10.38 +
   10.39 +struct table_struct {
   10.40 +    void *buffer;
   10.41 +    unsigned long entries;
   10.42 +    unsigned long alloc_size;
   10.43 +};
   10.44 +
   10.45 +struct amd_iommu {
   10.46 +    struct list_head list;
   10.47 +    spinlock_t lock; /* protect iommu */
   10.48 +
   10.49 +    int iotlb_support;
   10.50 +    int ht_tunnel_support;
   10.51 +    int not_present_cached;
   10.52 +    u8  revision;
   10.53 +
   10.54 +    u8  root_bus;
   10.55 +    u8  first_devfn;
   10.56 +    u8  last_devfn;
   10.57 +
   10.58 +    int last_downstream_bus;
   10.59 +    int downstream_bus_present[PCI_MAX_BUS_COUNT];
   10.60 +
   10.61 +    void *mmio_base;
   10.62 +    unsigned long mmio_base_phys;
   10.63 +
   10.64 +    struct table_struct dev_table;
   10.65 +    struct table_struct cmd_buffer;
   10.66 +    u32 cmd_buffer_tail;
   10.67 +
   10.68 +    int exclusion_enabled;
   10.69 +    unsigned long exclusion_base;
   10.70 +    unsigned long exclusion_limit;
   10.71 +};
   10.72 +
   10.73 +#endif /* _ASM_X86_64_AMD_IOMMU_H */
    11.1 --- a/xen/include/asm-x86/fixmap.h	Fri Sep 21 17:10:00 2007 +0100
    11.2 +++ b/xen/include/asm-x86/fixmap.h	Fri Sep 21 17:15:47 2007 +0100
    11.3 @@ -18,6 +18,7 @@
    11.4  #include <asm/page.h>
    11.5  #include <xen/kexec.h>
    11.6  #include <asm/iommu.h>
    11.7 +#include <asm/amd-iommu.h>
    11.8  
    11.9  /*
   11.10   * Here we define all the compile-time 'special' virtual
   11.11 @@ -43,6 +44,8 @@ enum fixed_addresses {
   11.12        + ((KEXEC_XEN_NO_PAGES >> 1) * KEXEC_IMAGE_NR) - 1,
   11.13      FIX_IOMMU_REGS_BASE_0,
   11.14      FIX_IOMMU_REGS_END = FIX_IOMMU_REGS_BASE_0 + MAX_IOMMUS-1,
   11.15 +    FIX_IOMMU_MMIO_BASE_0,
   11.16 +    FIX_IOMMU_MMIO_END = FIX_IOMMU_MMIO_BASE_0 + IOMMU_PAGES -1,
   11.17      __end_of_fixed_addresses
   11.18  };
   11.19  
    12.1 --- a/xen/include/asm-x86/hvm/iommu.h	Fri Sep 21 17:10:00 2007 +0100
    12.2 +++ b/xen/include/asm-x86/hvm/iommu.h	Fri Sep 21 17:15:47 2007 +0100
    12.3 @@ -42,6 +42,11 @@ struct hvm_iommu {
    12.4      spinlock_t mapping_lock;       /* io page table lock */
    12.5      int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
    12.6      struct list_head g2m_ioport_list;  /* guest to machine ioport mapping */
    12.7 +
    12.8 +    /* amd iommu support */
    12.9 +    int domain_id;
   12.10 +    int paging_mode;
   12.11 +    void *root_table;
   12.12  };
   12.13  
   12.14  #endif // __ASM_X86_HVM_IOMMU_H__
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h	Fri Sep 21 17:15:47 2007 +0100
    13.3 @@ -0,0 +1,419 @@
    13.4 +/*
    13.5 + * Copyright (C) 2007 Advanced Micro Devices, Inc.
    13.6 + * Author: Leo Duran <leo.duran@amd.com>
    13.7 + * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
    13.8 + *
    13.9 + * This program is free software; you can redistribute it and/or modify
   13.10 + * it under the terms of the GNU General Public License as published by
   13.11 + * the Free Software Foundation; either version 2 of the License, or
   13.12 + * (at your option) any later version.
   13.13 + *
   13.14 + * This program is distributed in the hope that it will be useful,
   13.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   13.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   13.17 + * GNU General Public License for more details.
   13.18 + *
   13.19 + * You should have received a copy of the GNU General Public License
   13.20 + * along with this program; if not, write to the Free Software
   13.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
   13.22 + */
   13.23 +
   13.24 +#ifndef _ASM_X86_64_AMD_IOMMU_DEFS_H
   13.25 +#define _ASM_X86_64_AMD_IOMMU_DEFS_H
   13.26 +
   13.27 +/* Reserve some non-mapped pages to handle error conditions.
   13.28 + * 'bad_dma_address' will point to these reserved pages, and
   13.29 + * the mapping functions will return 'bad_dma_address' if there
   13.30 + * are not enough page table entries available.
   13.31 + */
   13.32 +#define IOMMU_RESERVED_BASE_ADDR	0
   13.33 +#define IOMMU_RESERVED_PAGES		32
   13.34 +
   13.35 +/* IOMMU ComWaitInt polling after issuing a COMPLETION_WAIT command */
   13.36 +#define COMPLETION_WAIT_DEFAULT_POLLING_COUNT	10
   13.37 +
   13.38 +/* IOMMU Command Buffer entries: in power of 2 increments, minimum of 256 */
   13.39 +#define IOMMU_CMD_BUFFER_DEFAULT_ENTRIES	512
   13.40 +
   13.41 +#define BITMAP_ENTRIES_PER_BYTE		8
   13.42 +
   13.43 +#define PTE_PER_TABLE_SHIFT		9
   13.44 +#define PTE_PER_TABLE_SIZE		(1 << PTE_PER_TABLE_SHIFT)
   13.45 +#define PTE_PER_TABLE_MASK		(~(PTE_PER_TABLE_SIZE - 1))
   13.46 +#define PTE_PER_TABLE_ALIGN(entries) 	\
   13.47 +	(((entries) + PTE_PER_TABLE_SIZE - 1) & PTE_PER_TABLE_MASK)
   13.48 +#define PTE_PER_TABLE_ALLOC(entries)	\
   13.49 +	PAGE_SIZE * (PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT)
   13.50 +
   13.51 +/* 0-based aperture order (represents virtual address space for DMA mappings) */
   13.52 +#define APERTURE_ORDER_FOR_32B_APERTURE		0
   13.53 +#define APERTURE_ORDER_FOR_64MB_APERTURE	1
   13.54 +#define APERTURE_ORDER_FOR_128MB_APERTURE	2
   13.55 +#define APERTURE_ORDER_FOR_256MB_APERTURE	3
   13.56 +#define APERTURE_ORDER_FOR_512MB_APERTURE	4
   13.57 +#define APERTURE_ORDER_FOR_1GB_APERTURE		5
   13.58 +#define APERTURE_ORDER_FOR_MAX_APERTURE		APERTURE_ORDER_FOR_1GB_APERTURE
   13.59 +
   13.60 +/* The minimum 32MB aperture requires 2**13 level-1 page table entries */
   13.61 +#define SHIFT_FOR_MIN_APERTURE		13
   13.62 +#define PAGES_FROM_APERTURE_ORDER(order)	\
   13.63 +	((1 << (order)) << SHIFT_FOR_MIN_APERTURE)
   13.64 +#define ORDER_FROM_APERTURE_PAGES(pages)	\
   13.65 +	get_order(((pages) * PAGE_SIZE) >> SHIFT_FOR_MIN_APERTURE)
   13.66 +
   13.67 +/*
   13.68 + * PCI config-space
   13.69 + */
   13.70 +#define VALID_PCI_VENDOR_ID(id)		(((id) != 0) && ((id) != 0xFFFF))
   13.71 +#define IS_PCI_MULTI_FUNCTION(hdr)	((hdr) & 0x80)
   13.72 +#define IS_PCI_TYPE0_HEADER(hdr)	(((hdr) & 0x7f) == 0)
   13.73 +#define IS_PCI_TYPE1_HEADER(hdr)	(((hdr) & 0x7f) == 1)
   13.74 +
   13.75 +#define PCI_MAX_BUS_COUNT	256
   13.76 +#define PCI_MAX_DEV_COUNT	32
   13.77 +#define PCI_MAX_FUNC_COUNT	8
   13.78 +#define PCI_MIN_DEVFN		0
   13.79 +#define PCI_MAX_DEVFN		0xFF
   13.80 +
   13.81 +/*
   13.82 + * Capability blocks are 4-byte aligned, and must start at >= offset 0x40,
   13.83 + * for a max of 48 possible cap_blocks (256 - 0x40 = 192; 192 / 4 = 48)
   13.84 + * The lower 2 bits of each pointer are reserved, and must be masked off.
   13.85 + */
   13.86 +#define PCI_MIN_CAP_OFFSET	0x40
   13.87 +#define PCI_MAX_CAP_BLOCKS	48
   13.88 +#define PCI_CAP_PTR_MASK	0xFC
   13.89 +
   13.90 +/* IOMMU Capability */
   13.91 +#define PCI_CAP_ID_MASK		0x000000FF
   13.92 +#define PCI_CAP_ID_SHIFT	0
   13.93 +#define PCI_CAP_NEXT_PTR_MASK	0x0000FF00
   13.94 +#define PCI_CAP_NEXT_PTR_SHIFT	8
   13.95 +#define PCI_CAP_TYPE_MASK	0x00070000
   13.96 +#define PCI_CAP_TYPE_SHIFT	16
   13.97 +#define PCI_CAP_REV_MASK	0x00F80000
   13.98 +#define PCI_CAP_REV_SHIFT	19
   13.99 +#define PCI_CAP_IOTLB_MASK	0x01000000
  13.100 +#define PCI_CAP_IOTLB_SHIFT	24
  13.101 +#define PCI_CAP_HT_TUNNEL_MASK	0x02000000
  13.102 +#define PCI_CAP_HT_TUNNEL_SHIFT	25
  13.103 +#define PCI_CAP_NP_CACHE_MASK	0x04000000
  13.104 +#define PCI_CAP_NP_CACHE_SHIFT	26
  13.105 +#define PCI_CAP_RESET_MASK	0x80000000
  13.106 +#define PCI_CAP_RESET_SHIFT	31
  13.107 +
  13.108 +#define PCI_CAP_ID_SECURE_DEVICE	0x0F
  13.109 +#define PCI_CAP_TYPE_IOMMU		0x3
  13.110 +
  13.111 +#define PCI_CAP_MMIO_BAR_LOW_OFFSET	0x04
  13.112 +#define PCI_CAP_MMIO_BAR_HIGH_OFFSET	0x08
  13.113 +#define PCI_CAP_MMIO_BAR_LOW_MASK	0xFFFFC000
  13.114 +#define IOMMU_MMIO_REGION_LENGTH	0x4000
  13.115 +
  13.116 +#define PCI_CAP_RANGE_OFFSET		0x0C
  13.117 +#define PCI_CAP_BUS_NUMBER_MASK		0x0000FF00
  13.118 +#define PCI_CAP_BUS_NUMBER_SHIFT	8
  13.119 +#define PCI_CAP_FIRST_DEVICE_MASK	0x00FF0000
  13.120 +#define PCI_CAP_FIRST_DEVICE_SHIFT	16
  13.121 +#define PCI_CAP_LAST_DEVICE_MASK	0xFF000000
  13.122 +#define PCI_CAP_LAST_DEVICE_SHIFT	24
  13.123 +
  13.124 +/* Device Table */
  13.125 +#define IOMMU_DEV_TABLE_BASE_LOW_OFFSET		0x00
  13.126 +#define IOMMU_DEV_TABLE_BASE_HIGH_OFFSET	0x04
  13.127 +#define IOMMU_DEV_TABLE_BASE_LOW_MASK		0xFFFFF000
  13.128 +#define IOMMU_DEV_TABLE_BASE_LOW_SHIFT		12
  13.129 +#define IOMMU_DEV_TABLE_BASE_HIGH_MASK		0x000FFFFF
  13.130 +#define IOMMU_DEV_TABLE_BASE_HIGH_SHIFT		0
  13.131 +#define IOMMU_DEV_TABLE_SIZE_MASK		0x000001FF
  13.132 +#define IOMMU_DEV_TABLE_SIZE_SHIFT		0
  13.133 +
  13.134 +#define IOMMU_DEV_TABLE_ENTRIES_PER_BUS		256
  13.135 +#define IOMMU_DEV_TABLE_ENTRY_SIZE		32
  13.136 +#define IOMMU_DEV_TABLE_U32_PER_ENTRY		(IOMMU_DEV_TABLE_ENTRY_SIZE / 4)
  13.137 +
  13.138 +#define IOMMU_DEV_TABLE_SYS_MGT_DMA_ABORTED	0x0
  13.139 +#define IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED	0x1
  13.140 +#define IOMMU_DEV_TABLE_SYS_MGT_INT_FORWARDED	0x2
  13.141 +#define IOMMU_DEV_TABLE_SYS_MGT_DMA_FORWARDED	0x3
  13.142 +
  13.143 +#define IOMMU_DEV_TABLE_IO_CONTROL_ABORTED	0x0
  13.144 +#define IOMMU_DEV_TABLE_IO_CONTROL_FORWARDED	0x1
  13.145 +#define IOMMU_DEV_TABLE_IO_CONTROL_TRANSLATED	0x2
  13.146 +
  13.147 +#define IOMMU_DEV_TABLE_INT_CONTROL_ABORTED	0x0
  13.148 +#define IOMMU_DEV_TABLE_INT_CONTROL_FORWARDED	0x1
  13.149 +#define IOMMU_DEV_TABLE_INT_CONTROL_TRANSLATED	0x2
  13.150 +
  13.151 +/* DeviceTable Entry[31:0] */
  13.152 +#define IOMMU_DEV_TABLE_VALID_MASK			0x00000001
  13.153 +#define IOMMU_DEV_TABLE_VALID_SHIFT			0
  13.154 +#define IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK		0x00000002
  13.155 +#define IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT		1
  13.156 +#define IOMMU_DEV_TABLE_PAGING_MODE_MASK		0x00000E00
  13.157 +#define IOMMU_DEV_TABLE_PAGING_MODE_SHIFT		9
  13.158 +#define IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK		0xFFFFF000
  13.159 +#define IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT	12
  13.160 +
  13.161 +/* DeviceTable Entry[63:32] */
  13.162 +#define IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK	0x000FFFFF
  13.163 +#define IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT	0
  13.164 +#define IOMMU_DEV_TABLE_IO_READ_PERMISSION_MASK		0x20000000
  13.165 +#define IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT	29
  13.166 +#define IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK	0x40000000
  13.167 +#define IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_SHIFT	30
  13.168 +
  13.169 +/* DeviceTable Entry[95:64] */
  13.170 +#define IOMMU_DEV_TABLE_DOMAIN_ID_MASK	0x0000FFFF
  13.171 +#define IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT	0
  13.172 +
  13.173 +/* DeviceTable Entry[127:96] */
  13.174 +#define IOMMU_DEV_TABLE_IOTLB_SUPPORT_MASK		0x00000001
  13.175 +#define IOMMU_DEV_TABLE_IOTLB_SUPPORT_SHIFT		0
  13.176 +#define IOMMU_DEV_TABLE_SUPRESS_LOGGED_PAGES_MASK	0x00000002
  13.177 +#define IOMMU_DEV_TABLE_SUPRESS_LOGGED_PAGES_SHIFT	1
  13.178 +#define IOMMU_DEV_TABLE_SUPRESS_ALL_PAGES_MASK		0x00000004
  13.179 +#define IOMMU_DEV_TABLE_SUPRESS_ALL_PAGES_SHIFT		2
  13.180 +#define IOMMU_DEV_TABLE_IO_CONTROL_MASK			0x00000018
  13.181 +#define IOMMU_DEV_TABLE_IO_CONTROL_SHIFT		3
  13.182 +#define IOMMU_DEV_TABLE_IOTLB_CACHE_HINT_MASK		0x00000020
  13.183 +#define IOMMU_DEV_TABLE_IOTLB_CACHE_HINT_SHIFT		5
  13.184 +#define IOMMU_DEV_TABLE_SNOOP_DISABLE_MASK		0x00000040
  13.185 +#define IOMMU_DEV_TABLE_SNOOP_DISABLE_SHIFT		6
  13.186 +#define IOMMU_DEV_TABLE_ALLOW_EXCLUSION_MASK		0x00000080
  13.187 +#define IOMMU_DEV_TABLE_ALLOW_EXCLUSION_SHIFT		7
  13.188 +#define IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK		0x00000300
  13.189 +#define IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT	8
  13.190 +
  13.191 +/* DeviceTable Entry[159:128] */
  13.192 +#define IOMMU_DEV_TABLE_INT_VALID_MASK			0x00000001
  13.193 +#define IOMMU_DEV_TABLE_INT_VALID_SHIFT			0
  13.194 +#define IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK		0x0000001E
  13.195 +#define IOMMU_DEV_TABLE_INT_TABLE_LENGTH_SHIFT		1
  13.196 +#define IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_MASK		0xFFFFFFC0
  13.197 +#define IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_SHIFT		6
  13.198 +
  13.199 +/* DeviceTable Entry[191:160] */
  13.200 +#define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK		0x000FFFFF
  13.201 +#define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT	0
  13.202 +#define IOMMU_DEV_TABLE_INIT_PASSTHRU_MASK		0x01000000
  13.203 +#define IOMMU_DEV_TABLE_INIT_PASSTHRU_SHIFT		24
  13.204 +#define IOMMU_DEV_TABLE_EINT_PASSTHRU_MASK		0x02000000
  13.205 +#define IOMMU_DEV_TABLE_EINT_PASSTHRU_SHIFT		25
  13.206 +#define IOMMU_DEV_TABLE_NMI_PASSTHRU_MASK		0x04000000
  13.207 +#define IOMMU_DEV_TABLE_NMI_PASSTHRU_SHIFT		26
  13.208 +#define IOMMU_DEV_TABLE_INT_CONTROL_MASK		0x30000000
  13.209 +#define IOMMU_DEV_TABLE_INT_CONTROL_SHIFT		28
  13.210 +#define IOMMU_DEV_TABLE_LINT0_ENABLE_MASK		0x40000000
  13.211 +#define IOMMU_DEV_TABLE_LINT0_ENABLE_SHIFT		30
  13.212 +#define IOMMU_DEV_TABLE_LINT1_ENABLE_MASK		0x80000000
  13.213 +#define IOMMU_DEV_TABLE_LINT1_ENABLE_SHIFT		31
  13.214 +
  13.215 +/* Command Buffer */
  13.216 +#define IOMMU_CMD_BUFFER_BASE_LOW_OFFSET	0x08
  13.217 +#define IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET	0x0C
  13.218 +#define IOMMU_CMD_BUFFER_HEAD_OFFSET		0x2000
  13.219 +#define IOMMU_CMD_BUFFER_TAIL_OFFSET		0x2008
  13.220 +#define IOMMU_CMD_BUFFER_BASE_LOW_MASK		0xFFFFF000
  13.221 +#define IOMMU_CMD_BUFFER_BASE_LOW_SHIFT		12
  13.222 +#define IOMMU_CMD_BUFFER_BASE_HIGH_MASK		0x000FFFFF
  13.223 +#define IOMMU_CMD_BUFFER_BASE_HIGH_SHIFT	0
  13.224 +#define IOMMU_CMD_BUFFER_LENGTH_MASK		0x0F000000
  13.225 +#define IOMMU_CMD_BUFFER_LENGTH_SHIFT		24
  13.226 +#define IOMMU_CMD_BUFFER_HEAD_MASK		0x0007FFF0
  13.227 +#define IOMMU_CMD_BUFFER_HEAD_SHIFT		4
  13.228 +#define IOMMU_CMD_BUFFER_TAIL_MASK		0x0007FFF0
  13.229 +#define IOMMU_CMD_BUFFER_TAIL_SHIFT		4
  13.230 +
  13.231 +#define IOMMU_CMD_BUFFER_ENTRY_SIZE			16
  13.232 +#define IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE	8
  13.233 +#define IOMMU_CMD_BUFFER_U32_PER_ENTRY 	(IOMMU_CMD_BUFFER_ENTRY_SIZE / 4)
  13.234 +
  13.235 +#define IOMMU_CMD_OPCODE_MASK			0xF0000000
  13.236 +#define IOMMU_CMD_OPCODE_SHIFT			28
  13.237 +#define IOMMU_CMD_COMPLETION_WAIT		0x1
  13.238 +#define IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY	0x2
  13.239 +#define IOMMU_CMD_INVALIDATE_IOMMU_PAGES	0x3
  13.240 +#define IOMMU_CMD_INVALIDATE_IOTLB_PAGES	0x4
  13.241 +#define IOMMU_CMD_INVALIDATE_INT_TABLE		0x5
  13.242 +
  13.243 +/* COMPLETION_WAIT command */
  13.244 +#define IOMMU_COMP_WAIT_DATA_BUFFER_SIZE	8
  13.245 +#define IOMMU_COMP_WAIT_DATA_BUFFER_ALIGNMENT	8
  13.246 +#define IOMMU_COMP_WAIT_S_FLAG_MASK		0x00000001
  13.247 +#define IOMMU_COMP_WAIT_S_FLAG_SHIFT		0
  13.248 +#define IOMMU_COMP_WAIT_I_FLAG_MASK		0x00000002
  13.249 +#define IOMMU_COMP_WAIT_I_FLAG_SHIFT		1
  13.250 +#define IOMMU_COMP_WAIT_F_FLAG_MASK		0x00000004
  13.251 +#define IOMMU_COMP_WAIT_F_FLAG_SHIFT		2
  13.252 +#define IOMMU_COMP_WAIT_ADDR_LOW_MASK		0xFFFFFFF8
  13.253 +#define IOMMU_COMP_WAIT_ADDR_LOW_SHIFT		3
  13.254 +#define IOMMU_COMP_WAIT_ADDR_HIGH_MASK		0x000FFFFF
  13.255 +#define IOMMU_COMP_WAIT_ADDR_HIGH_SHIFT		0
  13.256 +
  13.257 +/* INVALIDATE_IOMMU_PAGES command */
  13.258 +#define IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK	0x0000FFFF
  13.259 +#define IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT	0
  13.260 +#define IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK	0x00000001
  13.261 +#define IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT	0
  13.262 +#define IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK	0x00000002
  13.263 +#define IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT	1
  13.264 +#define IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK	0xFFFFF000
  13.265 +#define IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT	12
  13.266 +#define IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK	0xFFFFFFFF
  13.267 +#define IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT	0
  13.268 +
  13.269 +/* Event Log */
  13.270 +#define IOMMU_EVENT_LOG_BASE_LOW_OFFSET		0x10
  13.271 +#define IOMMU_EVENT_LOG_BASE_HIGH_OFFSET	0x14
  13.272 +#define IOMMU_EVENT_LOG_HEAD_OFFSET		0x2010
  13.273 +#define IOMMU_EVENT_LOG_TAIL_OFFSET		0x2018
  13.274 +#define IOMMU_EVENT_LOG_BASE_LOW_MASK		0xFFFFF000
  13.275 +#define IOMMU_EVENT_LOG_BASE_LOW_SHIFT		12
  13.276 +#define IOMMU_EVENT_LOG_BASE_HIGH_MASK		0x000FFFFF
  13.277 +#define IOMMU_EVENT_LOG_BASE_HIGH_SHIFT		0
  13.278 +#define IOMMU_EVENT_LOG_LENGTH_MASK		0x0F000000
  13.279 +#define IOMMU_EVENT_LOG_LENGTH_SHIFT		24
  13.280 +#define IOMMU_EVENT_LOG_HEAD_MASK		0x0007FFF0
  13.281 +#define IOMMU_EVENT_LOG_HEAD_SHIFT		4
  13.282 +#define IOMMU_EVENT_LOG_TAIL_MASK		0x0007FFF0
  13.283 +#define IOMMU_EVENT_LOG_TAIL_SHIFT		4
  13.284 +
  13.285 +#define IOMMU_EVENT_LOG_ENTRY_SIZE 			16
  13.286 +#define IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE	8
  13.287 +#define IOMMU_EVENT_LOG_U32_PER_ENTRY	(IOMMU_EVENT_LOG_ENTRY_SIZE / 4)
  13.288 +
  13.289 +#define IOMMU_EVENT_CODE_MASK			0xF0000000
  13.290 +#define IOMMU_EVENT_CODE_SHIFT			28
  13.291 +#define IOMMU_EVENT_ILLEGAL_DEV_TABLE_ENTRY	0x1
  13.292 +#define IOMMU_EVENT_IO_PAGE_FALT		0x2
  13.293 +#define IOMMU_EVENT_DEV_TABLE_HW_ERROR		0x3
  13.294 +#define IOMMU_EVENT_PAGE_TABLE_HW_ERROR		0x4
  13.295 +#define IOMMU_EVENT_ILLEGAL_COMMAND_ERROR	0x5
  13.296 +#define IOMMU_EVENT_COMMAND_HW_ERROR		0x6
  13.297 +#define IOMMU_EVENT_IOTLB_INV_TIMEOUT		0x7
  13.298 +#define IOMMU_EVENT_INVALID_DEV_REQUEST		0x8
  13.299 +
  13.300 +/* Control Register */
  13.301 +#define IOMMU_CONTROL_MMIO_OFFSET			0x18
  13.302 +#define IOMMU_CONTROL_TRANSLATION_ENABLE_MASK		0x00000001
  13.303 +#define IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT		0
  13.304 +#define IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK	0x00000002
  13.305 +#define IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT	1
  13.306 +#define IOMMU_CONTROL_EVENT_LOG_ENABLE_MASK		0x00000004
  13.307 +#define IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT		2
  13.308 +#define IOMMU_CONTROL_EVENT_LOG_INT_MASK		0x00000008
  13.309 +#define IOMMU_CONTROL_EVENT_LOG_INT_SHIFT		3
  13.310 +#define IOMMU_CONTROL_COMP_WAIT_INT_MASK		0x00000010
  13.311 +#define IOMMU_CONTROL_COMP_WAIT_INT_SHIFT		4
  13.312 +#define IOMMU_CONTROL_TRANSLATION_CHECK_DISABLE_MASK	0x00000020
  13.313 +#define IOMMU_CONTROL_TRANSLATION_CHECK_DISABLE_SHIFT	5
  13.314 +#define IOMMU_CONTROL_INVALIDATION_TIMEOUT_MASK		0x000000C0
  13.315 +#define IOMMU_CONTROL_INVALIDATION_TIMEOUT_SHIFT	6
  13.316 +#define IOMMU_CONTROL_PASS_POSTED_WRITE_MASK		0x00000100
  13.317 +#define IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT		8
  13.318 +#define IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_MASK	0x00000200
  13.319 +#define IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT	9
  13.320 +#define IOMMU_CONTROL_COHERENT_MASK			0x00000400
  13.321 +#define IOMMU_CONTROL_COHERENT_SHIFT			10
  13.322 +#define IOMMU_CONTROL_ISOCHRONOUS_MASK			0x00000800
  13.323 +#define IOMMU_CONTROL_ISOCHRONOUS_SHIFT			11
  13.324 +#define IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK	0x00001000
  13.325 +#define IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT	12
  13.326 +#define IOMMU_CONTROL_RESTART_MASK			0x80000000
  13.327 +#define IOMMU_CONTROL_RESTART_SHIFT			31
  13.328 +
  13.329 +/* Exclusion Register */
  13.330 +#define IOMMU_EXCLUSION_BASE_LOW_OFFSET		0x20
  13.331 +#define IOMMU_EXCLUSION_BASE_HIGH_OFFSET	0x24
  13.332 +#define IOMMU_EXCLUSION_LIMIT_LOW_OFFSET	0x28
  13.333 +#define IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET	0x2C
  13.334 +#define IOMMU_EXCLUSION_BASE_LOW_MASK		0xFFFFF000
  13.335 +#define IOMMU_EXCLUSION_BASE_LOW_SHIFT		12
  13.336 +#define IOMMU_EXCLUSION_BASE_HIGH_MASK		0xFFFFFFFF
  13.337 +#define IOMMU_EXCLUSION_BASE_HIGH_SHIFT		0
  13.338 +#define IOMMU_EXCLUSION_RANGE_ENABLE_MASK	0x00000001
  13.339 +#define IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT	0
  13.340 +#define IOMMU_EXCLUSION_ALLOW_ALL_MASK		0x00000002
  13.341 +#define IOMMU_EXCLUSION_ALLOW_ALL_SHIFT		1
  13.342 +#define IOMMU_EXCLUSION_LIMIT_LOW_MASK		0xFFFFF000
  13.343 +#define IOMMU_EXCLUSION_LIMIT_LOW_SHIFT		12
  13.344 +#define IOMMU_EXCLUSION_LIMIT_HIGH_MASK		0xFFFFFFFF
  13.345 +#define IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT	0
  13.346 +
   13.347 +/* Status Register */
  13.348 +#define IOMMU_STATUS_MMIO_OFFSET		0x2020
  13.349 +#define IOMMU_STATUS_EVENT_OVERFLOW_MASK	0x00000001
  13.350 +#define IOMMU_STATUS_EVENT_OVERFLOW_SHIFT	0
  13.351 +#define IOMMU_STATUS_EVENT_LOG_INT_MASK		0x00000002
  13.352 +#define IOMMU_STATUS_EVENT_LOG_INT_SHIFT	1
  13.353 +#define IOMMU_STATUS_COMP_WAIT_INT_MASK		0x00000004
  13.354 +#define IOMMU_STATUS_COMP_WAIT_INT_SHIFT	2
  13.355 +#define IOMMU_STATUS_EVENT_LOG_RUN_MASK		0x00000008
  13.356 +#define IOMMU_STATUS_EVENT_LOG_RUN_SHIFT	3
  13.357 +#define IOMMU_STATUS_CMD_BUFFER_RUN_MASK	0x00000010
  13.358 +#define IOMMU_STATUS_CMD_BUFFER_RUN_SHIFT	4
  13.359 +
  13.360 +/* I/O Page Table */
  13.361 +#define IOMMU_PAGE_TABLE_ENTRY_SIZE	8
  13.362 +#define IOMMU_PAGE_TABLE_U32_PER_ENTRY	(IOMMU_PAGE_TABLE_ENTRY_SIZE / 4)
  13.363 +#define IOMMU_PAGE_TABLE_ALIGNMENT	4096
  13.364 +
  13.365 +#define IOMMU_PTE_PRESENT_MASK			0x00000001
  13.366 +#define IOMMU_PTE_PRESENT_SHIFT			0
  13.367 +#define IOMMU_PTE_NEXT_LEVEL_MASK		0x00000E00
  13.368 +#define IOMMU_PTE_NEXT_LEVEL_SHIFT		9
  13.369 +#define IOMMU_PTE_ADDR_LOW_MASK			0xFFFFF000
  13.370 +#define IOMMU_PTE_ADDR_LOW_SHIFT		12
  13.371 +#define IOMMU_PTE_ADDR_HIGH_MASK		0x000FFFFF
  13.372 +#define IOMMU_PTE_ADDR_HIGH_SHIFT		0
  13.373 +#define IOMMU_PTE_U_MASK			0x08000000
  13.374 +#define IOMMU_PTE_U_SHIFT			7
  13.375 +#define IOMMU_PTE_FC_MASK			0x10000000
  13.376 +#define IOMMU_PTE_FC_SHIFT			28
  13.377 +#define IOMMU_PTE_IO_READ_PERMISSION_MASK	0x20000000
  13.378 +#define IOMMU_PTE_IO_READ_PERMISSION_SHIFT	29
  13.379 +#define IOMMU_PTE_IO_WRITE_PERMISSION_MASK	0x40000000
  13.380 +#define IOMMU_PTE_IO_WRITE_PERMISSION_SHIFT	30
  13.381 +
  13.382 +/* I/O Page Directory */
  13.383 +#define IOMMU_PAGE_DIRECTORY_ENTRY_SIZE		8
  13.384 +#define IOMMU_PAGE_DIRECTORY_ALIGNMENT		4096
  13.385 +#define IOMMU_PDE_PRESENT_MASK			0x00000001
  13.386 +#define IOMMU_PDE_PRESENT_SHIFT			0
  13.387 +#define IOMMU_PDE_NEXT_LEVEL_MASK		0x00000E00
  13.388 +#define IOMMU_PDE_NEXT_LEVEL_SHIFT		9
  13.389 +#define IOMMU_PDE_ADDR_LOW_MASK			0xFFFFF000
  13.390 +#define IOMMU_PDE_ADDR_LOW_SHIFT		12
  13.391 +#define IOMMU_PDE_ADDR_HIGH_MASK		0x000FFFFF
  13.392 +#define IOMMU_PDE_ADDR_HIGH_SHIFT		0
  13.393 +#define IOMMU_PDE_IO_READ_PERMISSION_MASK	0x20000000
  13.394 +#define IOMMU_PDE_IO_READ_PERMISSION_SHIFT	29
  13.395 +#define IOMMU_PDE_IO_WRITE_PERMISSION_MASK	0x40000000
  13.396 +#define IOMMU_PDE_IO_WRITE_PERMISSION_SHIFT	30
  13.397 +
  13.398 +/* Paging modes */
  13.399 +#define IOMMU_PAGING_MODE_DISABLED	0x0
  13.400 +#define IOMMU_PAGING_MODE_LEVEL_0	0x0
  13.401 +#define IOMMU_PAGING_MODE_LEVEL_1	0x1
  13.402 +#define IOMMU_PAGING_MODE_LEVEL_2	0x2
  13.403 +#define IOMMU_PAGING_MODE_LEVEL_3	0x3
  13.404 +#define IOMMU_PAGING_MODE_LEVEL_4	0x4
  13.405 +#define IOMMU_PAGING_MODE_LEVEL_5	0x5
  13.406 +#define IOMMU_PAGING_MODE_LEVEL_6	0x6
  13.407 +#define IOMMU_PAGING_MODE_LEVEL_7	0x7
  13.408 +
  13.409 +/* Flags */
  13.410 +#define IOMMU_CONTROL_DISABLED	0
  13.411 +#define IOMMU_CONTROL_ENABLED	1
  13.412 +
  13.413 +#define MMIO_PAGES_PER_IOMMU        (IOMMU_MMIO_REGION_LENGTH / PAGE_SIZE_4K)
  13.414 +#define IOMMU_PAGES                 (MMIO_PAGES_PER_IOMMU * MAX_AMD_IOMMUS)
  13.415 +#define DEFAULT_DOMAIN_ADDRESS_WIDTH    48
  13.416 +#define MAX_AMD_IOMMUS                  32
  13.417 +#define IOMMU_PAGE_TABLE_LEVEL_3        3
  13.418 +#define IOMMU_PAGE_TABLE_LEVEL_4        4
  13.419 +#define IOMMU_IO_WRITE_ENABLED          1
  13.420 +#define IOMMU_IO_READ_ENABLED           1
  13.421 +
  13.422 +#endif /* _ASM_X86_64_AMD_IOMMU_DEFS_H */
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Sep 21 17:15:47 2007 +0100
    14.3 @@ -0,0 +1,88 @@
    14.4 +/*
    14.5 + * Copyright (C) 2007 Advanced Micro Devices, Inc.
    14.6 + * Author: Leo Duran <leo.duran@amd.com>
    14.7 + * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
    14.8 + *
    14.9 + * This program is free software; you can redistribute it and/or modify
   14.10 + * it under the terms of the GNU General Public License as published by
   14.11 + * the Free Software Foundation; either version 2 of the License, or
   14.12 + * (at your option) any later version.
   14.13 + *
   14.14 + * This program is distributed in the hope that it will be useful,
   14.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   14.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   14.17 + * GNU General Public License for more details.
   14.18 + *
   14.19 + * You should have received a copy of the GNU General Public License
   14.20 + * along with this program; if not, write to the Free Software
   14.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
   14.22 + */
   14.23 +
   14.24 +#ifndef _ASM_X86_64_AMD_IOMMU_PROTO_H
   14.25 +#define _ASM_X86_64_AMD_IOMMU_PROTO_H
   14.26 +
   14.27 +#include <asm/amd-iommu.h>
   14.28 +
   14.29 +#define for_each_amd_iommu(amd_iommu) \
   14.30 +    list_for_each_entry(amd_iommu, \
   14.31 +        &amd_iommu_head, list)
   14.32 +
   14.33 +#define DMA_32BIT_MASK  0x00000000ffffffffULL
   14.34 +#define PAGE_ALIGN(addr)    (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
   14.35 +#define PAGE_SHIFT_4K                   (12)
   14.36 +#define PAGE_SIZE_4K                    (1UL << PAGE_SHIFT_4K)
   14.37 +#define PAGE_MASK_4K                    (((u64)-1) << PAGE_SHIFT_4K)
   14.38 +
   14.39 +typedef int (*iommu_detect_callback_ptr_t)(u8 bus, u8 dev, u8 func, u8 cap_ptr);
   14.40 +
   14.41 +/* amd-iommu-detect functions */
   14.42 +int __init scan_for_iommu(iommu_detect_callback_ptr_t iommu_detect_callback);
   14.43 +int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
   14.44 +           struct amd_iommu *iommu);
   14.45 +int __init get_iommu_last_downstream_bus(struct amd_iommu *iommu);
   14.46 +
   14.47 +/* amd-iommu-init functions */
   14.48 +int __init map_iommu_mmio_region(struct amd_iommu *iommu);
   14.49 +void __init unmap_iommu_mmio_region(struct amd_iommu *iommu);
   14.50 +void __init register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu);
   14.51 +void __init register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu);
   14.52 +void __init enable_iommu(struct amd_iommu *iommu);
   14.53 +
   14.54 +/* mapping functions */
   14.55 +int amd_iommu_map_page(struct domain *d, unsigned long gfn,
   14.56 +        unsigned long mfn);
   14.57 +int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
   14.58 +
   14.59 +/* device table functions */
   14.60 +void amd_iommu_set_dev_table_entry(u32 *dte,
   14.61 +        u64 root_ptr, u16 domain_id, u8 paging_mode);
   14.62 +
   14.63 +/* send cmd to iommu */
   14.64 +int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
   14.65 +
    14.66 +/* iommu domain functions */
   14.67 +int amd_iommu_domain_init(struct domain *domain);
   14.68 +void amd_iommu_setup_domain_device(struct domain *domain,
   14.69 +    struct amd_iommu *iommu, int requestor_id);
   14.70 +
   14.71 +/* find iommu for bdf */
   14.72 +struct amd_iommu *find_iommu_for_device(int bus, int devfn);
   14.73 +
   14.74 +static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
   14.75 +{
   14.76 +    u32 field;
   14.77 +    field = (reg_value & mask) >> shift;
   14.78 +    return field;
   14.79 +}
   14.80 +
   14.81 +static inline u32 set_field_in_reg_u32(u32 field, u32 reg_value,
   14.82 +        u32 mask, u32 shift, u32 *reg)
   14.83 +{
   14.84 +    reg_value &= ~mask;
   14.85 +    reg_value |= (field << shift) & mask;
   14.86 +    if (reg)
   14.87 +        *reg = reg_value;
   14.88 +    return reg_value;
   14.89 +}
   14.90 +
   14.91 +#endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */