{
u8 bus, devfn;
struct acpi_drhd_unit *drhd;
- struct acpi_drhd_unit *found = NULL, *include_all = NULL;
+ struct acpi_drhd_unit *include_all = NULL;
int i;
if (pdev->info.is_extfn) {
return drhd;
if ( test_bit(bus, drhd->scope.buses) )
- found = drhd;
+ return drhd;
if ( drhd->include_all )
include_all = drhd;
}
-
- return found ? found : include_all;
+ return include_all;
}
struct acpi_atsr_unit * acpi_find_matched_atsr_unit(u8 bus, u8 devfn)
{
struct acpi_atsr_unit *atsr;
- struct acpi_atsr_unit *found = NULL, *include_all = NULL;
- int i;
+ struct acpi_atsr_unit *all_ports = NULL;
list_for_each_entry ( atsr, &acpi_atsr_units, list )
{
- for (i = 0; i < atsr->scope.devices_cnt; i++)
- if ( atsr->scope.devices[i] == PCI_BDF2(bus, devfn) )
- return atsr;
-
if ( test_bit(bus, atsr->scope.buses) )
- found = atsr;
+ return atsr;
if ( atsr->all_ports )
- include_all = atsr;
+ all_ports = atsr;
}
-
- return found ? found : include_all;
+ return all_ports;
}
/*
return -EINVAL;
}
- if ( scope->dev_type == ACPI_DEV_ENDPOINT ||
+ if ( scope->dev_type == ACPI_DEV_P2PBRIDGE ||
+ scope->dev_type == ACPI_DEV_ENDPOINT ||
scope->dev_type == ACPI_DEV_IOAPIC ||
scope->dev_type == ACPI_DEV_MSI_HPET )
count++;
"found bridge: bdf = %x:%x.%x sec = %x sub = %x\n",
bus, path->dev, path->fn, sec_bus, sub_bus);
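+            /* Cover the bus the bridge itself lives on as well as the
+             * secondary buses behind it. */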
+            dmar_scope_add_buses(scope, acpi_scope->start_bus,
+                                 acpi_scope->start_bus);
dmar_scope_add_buses(scope, sec_bus, sub_bus);
break;
case ACPI_DEV_MSI_HPET:
dprintk(XENLOG_INFO VTDPREFIX, "found MSI HPET: bdf = %x:%x.%x\n",
bus, path->dev, path->fn);
- scope->devices[didx++] = PCI_BDF(bus, path->dev, path->fn);
break;
case ACPI_DEV_ENDPOINT:
dprintk(XENLOG_INFO VTDPREFIX, "found endpoint: bdf = %x:%x.%x\n",
bus, path->dev, path->fn);
- scope->devices[didx++] = PCI_BDF(bus, path->dev, path->fn);
break;
case ACPI_DEV_IOAPIC:
list_add(&acpi_ioapic_unit->list, &drhd->ioapic_list);
}
- scope->devices[didx++] = PCI_BDF(bus, path->dev, path->fn);
break;
}
-
+ scope->devices[didx++] = PCI_BDF(bus, path->dev, path->fn);
start += acpi_scope->length;
}
#include "dmar.h"
+extern int qinval_enabled;
+extern int ats_enabled;
extern struct qi_ctrl *qi_ctrl;
extern struct ir_ctrl *ir_ctrl;
struct iommu * ioapic_to_iommu(unsigned int apic_id);
struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id);
void clear_fault_bits(struct iommu *iommu);
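+/* Issue a Device-IOTLB invalidate descriptor; max_invs_pend is the
+ * invalidation queue depth reported by the target ATS device. */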
+int qinval_device_iotlb(struct iommu *iommu,
+ u32 max_invs_pend, u16 sid, u16 size, u64 addr);
+struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu);
#endif // _VTD_EXTERN_H_
obj-y += vtd.o
+obj-y += ats.o
--- /dev/null
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Allen Kay <allen.m.kay@intel.com>
+ */
+
+#include <xen/sched.h>
+#include <xen/iommu.h>
+#include <xen/time.h>
+#include <xen/pci.h>
+#include <xen/pci_regs.h>
+#include <asm/msi.h>
+#include "../iommu.h"
+#include "../dmar.h"
+#include "../vtd.h"
+#include "../extern.h"
+
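+/* Stub ATS support: every entry point reports "no ATS device", so the
+ * common VT-d code links and behaves as if ATS were absent. */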
+int ats_enabled = 0;
+
+struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu)
+{
+ return NULL;
+}
+
+/*
+ * BUGBUG: return 0 until pcimmcfg is checked in.
+ */
+int pci_find_ext_capability(int seg, int bus, int devfn, int cap)
+{
+ return 0;
+}
+
+int ats_device(int seg, int bus, int devfn)
+{
+ return 0;
+}
+
+int enable_ats_device(int seg, int bus, int devfn)
+{
+ return 0;
+}
+
+static int device_in_domain(struct iommu *iommu,
+ struct pci_ats_dev *pdev, u16 did)
+{
+ return 0;
+}
+
+int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
+ u64 addr, unsigned int size_order, u64 type)
+{
+ return 0;
+}
static int flush_context_reg(
void *_iommu,
u16 did, u16 source_id, u8 function_mask, u64 type,
- int non_present_entry_flush)
+ int flush_non_present_entry)
{
struct iommu *iommu = (struct iommu *) _iommu;
u64 val = 0;
* entry, we flush entries of domain 0 (the domain id is used to cache
* any non-present entries)
*/
- if ( non_present_entry_flush )
+ if ( flush_non_present_entry )
{
if ( !cap_caching_mode(iommu->cap) )
return 1;
}
static int inline iommu_flush_context_global(
- struct iommu *iommu, int non_present_entry_flush)
+ struct iommu *iommu, int flush_non_present_entry)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
return flush->context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
- non_present_entry_flush);
+ flush_non_present_entry);
}
static int inline iommu_flush_context_domain(
- struct iommu *iommu, u16 did, int non_present_entry_flush)
+ struct iommu *iommu, u16 did, int flush_non_present_entry)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
return flush->context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
- non_present_entry_flush);
+ flush_non_present_entry);
}
static int inline iommu_flush_context_device(
struct iommu *iommu, u16 did, u16 source_id,
- u8 function_mask, int non_present_entry_flush)
+ u8 function_mask, int flush_non_present_entry)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
return flush->context(iommu, did, source_id, function_mask,
DMA_CCMD_DEVICE_INVL,
- non_present_entry_flush);
+ flush_non_present_entry);
}
/* return value determine if we need a write buffer flush */
static int flush_iotlb_reg(void *_iommu, u16 did,
- u64 addr, unsigned int size_order, u64 type,
- int non_present_entry_flush)
+ u64 addr, unsigned int size_order, u64 type,
+ int flush_non_present_entry, int flush_dev_iotlb)
{
struct iommu *iommu = (struct iommu *) _iommu;
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
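+    /* Device-IOTLB invalidation is only possible via queued invalidation;
+     * this register-based path takes flush_dev_iotlb only so that all
+     * flush hooks share one signature. */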
* entry, we flush entries of domain 0 (the domain id is used to cache
* any non-present entries)
*/
- if ( non_present_entry_flush )
+ if ( flush_non_present_entry )
{
if ( !cap_caching_mode(iommu->cap) )
return 1;
}
static int inline iommu_flush_iotlb_global(struct iommu *iommu,
- int non_present_entry_flush)
+ int flush_non_present_entry, int flush_dev_iotlb)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
return flush->iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
- non_present_entry_flush);
+ flush_non_present_entry, flush_dev_iotlb);
}
static int inline iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
- int non_present_entry_flush)
+ int flush_non_present_entry, int flush_dev_iotlb)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
return flush->iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
- non_present_entry_flush);
+ flush_non_present_entry, flush_dev_iotlb);
}
static int inline get_alignment(u64 base, unsigned int size)
}
static int inline iommu_flush_iotlb_psi(
- struct iommu *iommu, u16 did,
- u64 addr, unsigned int pages, int non_present_entry_flush)
+ struct iommu *iommu, u16 did, u64 addr, unsigned int pages,
+ int flush_non_present_entry, int flush_dev_iotlb)
{
unsigned int align;
struct iommu_flush *flush = iommu_get_flush(iommu);
/* Fallback to domain selective flush if no PSI support */
if ( !cap_pgsel_inv(iommu->cap) )
- return iommu_flush_iotlb_dsi(iommu, did,
- non_present_entry_flush);
+        return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
+                                     flush_dev_iotlb);
/*
* PSI requires page size is 2 ^ x, and the base address is naturally
align = get_alignment(addr >> PAGE_SHIFT_4K, pages);
/* Fallback to domain selective flush if size is too big */
if ( align > cap_max_amask_val(iommu->cap) )
- return iommu_flush_iotlb_dsi(iommu, did,
- non_present_entry_flush);
+        return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
+                                     flush_dev_iotlb);
addr >>= PAGE_SHIFT_4K + align;
addr <<= PAGE_SHIFT_4K + align;
- return flush->iotlb(iommu, did, addr, align,
- DMA_TLB_PSI_FLUSH, non_present_entry_flush);
+ return flush->iotlb(iommu, did, addr, align, DMA_TLB_PSI_FLUSH,
+ flush_non_present_entry, flush_dev_iotlb);
}
void iommu_flush_all(void)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
+ int flush_dev_iotlb;
flush_all_cache();
for_each_drhd_unit ( drhd )
{
iommu = drhd->iommu;
iommu_flush_context_global(iommu, 0);
- iommu_flush_iotlb_global(iommu, 0);
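+        /* Also flush device IOTLBs if an ATS device sits behind this IOMMU. */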
+ flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
+ iommu_flush_iotlb_global(iommu, 0, flush_dev_iotlb);
}
}
struct iommu *iommu;
struct dma_pte *page = NULL, *pte = NULL;
u64 pg_maddr;
+ int flush_dev_iotlb;
spin_lock(&hd->mapping_lock);
/* get last level pte */
{
iommu = drhd->iommu;
if ( test_bit(iommu->index, &hd->iommu_bitmap) )
+ {
+ flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
- addr, 1, 0))
+ addr, 1, 0, flush_dev_iotlb) )
iommu_flush_write_buffer(iommu);
+ }
}
unmap_vtd_domain_page(page);
iommu->cap = dmar_readq(iommu->reg, DMAR_CAP_REG);
iommu->ecap = dmar_readq(iommu->reg, DMAR_ECAP_REG);
+ gdprintk(XENLOG_INFO VTDPREFIX,
+ "drhd->address = %"PRIx64"\n", drhd->address);
+ gdprintk(XENLOG_INFO VTDPREFIX, "iommu->reg = %p\n", iommu->reg);
+
/* Calculate number of pagetable levels: between 2 and 4. */
sagaw = cap_sagaw(iommu->cap);
for ( agaw = level_to_agaw(4); agaw >= 0; agaw-- )
}
context_set_address_root(*context, pgd_maddr);
- context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
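+    /* ATS devices issue translation requests, which are only honoured when
+     * the context entry uses the dev-IOTLB translation type. */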
+ if ( ats_enabled && ecap_dev_iotlb(iommu->ecap) )
+ context_set_translation_type(*context, CONTEXT_TT_DEV_IOTLB);
+ else
+ context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
+
spin_unlock(&hd->mapping_lock);
}
DMA_CCMD_MASK_NOBIT, 1) )
iommu_flush_write_buffer(iommu);
else
- iommu_flush_iotlb_dsi(iommu, 0, 1);
+ {
+ int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
+ iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
+ }
set_bit(iommu->index, &hd->iommu_bitmap);
DMA_CCMD_MASK_NOBIT, 0) )
iommu_flush_write_buffer(iommu);
else
- iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
+ {
+ int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
+        iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0,
+                              flush_dev_iotlb);
+ }
spin_unlock(&iommu->lock);
unmap_vtd_domain_page(context_entries);
struct dma_pte *page = NULL, *pte = NULL;
u64 pg_maddr;
int pte_present;
+ int flush_dev_iotlb;
drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
iommu = drhd->iommu;
if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
continue;
+ flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
(paddr_t)gfn << PAGE_SHIFT_4K, 1,
- !pte_present) )
+ !pte_present, flush_dev_iotlb) )
iommu_flush_write_buffer(iommu);
}
pdev->domain = d;
list_add(&pdev->domain_list, &d->arch.pdev_list);
domain_context_mapping(d, pdev->bus, pdev->devfn);
+#if defined(NOT_YET)
+ if ( ats_device(0, pdev->bus, pdev->devfn) )
+ enable_ats_device(0, pdev->bus, pdev->devfn);
+#endif
}
}
}
int (*context)(void *iommu, u16 did, u16 source_id,
u8 function_mask, u64 type, int non_present_entry_flush);
int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order,
- u64 type, int non_present_entry_flush);
+ u64 type, int flush_non_present_entry, int flush_dev_iotlb);
};
struct intel_iommu {
#include "vtd.h"
#include "extern.h"
+int qinval_enabled;
+
static void print_qi_regs(struct iommu *iommu)
{
u64 val;
static int flush_context_qi(
void *_iommu, u16 did, u16 sid, u8 fm, u64 type,
- int non_present_entry_flush)
+ int flush_non_present_entry)
{
int ret = 0;
struct iommu *iommu = (struct iommu *)_iommu;
* entry, we flush entries of domain 0 (the domain id is used to cache
* any non-present entries)
*/
- if ( non_present_entry_flush )
+ if ( flush_non_present_entry )
{
if ( !cap_caching_mode(iommu->cap) )
return 1;
static int flush_iotlb_qi(
void *_iommu, u16 did,
u64 addr, unsigned int size_order, u64 type,
- int non_present_entry_flush)
+ int flush_non_present_entry, int flush_dev_iotlb)
{
u8 dr = 0, dw = 0;
int ret = 0;
* entry, we flush entries of domain 0 (the domain id is used to cache
* any non-present entries)
*/
- if ( non_present_entry_flush )
+ if ( flush_non_present_entry )
{
if ( !cap_caching_mode(iommu->cap) )
return 1;
ret = queue_invalidate_iotlb(iommu,
(type >> DMA_TLB_FLUSH_GRANU_OFFSET), dr,
dw, did, (u8)size_order, 0, addr);
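+        /* Chain the device-IOTLB invalidation behind the IOTLB invalidation
+         * once ATS support is complete (kept under NOT_YET for now). */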
+#if defined(NOT_YET)
+ if ( flush_dev_iotlb )
+ ret |= dev_invalidate_iotlb(iommu, did, addr, size_order, type);
+#endif
ret |= invalidate_sync(iommu);
}
return ret;
cpu_relax();
}
+ qinval_enabled = 1;
return 0;
}
obj-y += vtd.o
+obj-y += ats.o
--- /dev/null
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Allen Kay <allen.m.kay@intel.com>
+ */
+
+#include <xen/sched.h>
+#include <xen/iommu.h>
+#include <xen/time.h>
+#include <xen/pci.h>
+#include <xen/pci_regs.h>
+#include <asm/msi.h>
+#include "../iommu.h"
+#include "../dmar.h"
+#include "../vtd.h"
+#include "../extern.h"
+
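+/* DRHD units that have at least one ATS-capable device behind them. */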
+LIST_HEAD(ats_dev_drhd_units);
+
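+/* PCI Express ATS extended capability: the ATS capability register
+ * (offset 4) reports the device invalidation queue depth in its low bits,
+ * and the ATS control register (offset 6) carries the enable bit (bit 15). */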
+#define ATS_REG_CAP 4
+#define ATS_REG_CTL 6
+#define ATS_QUEUE_DEPTH_MASK 0x1F    /* invalidate queue depth is a 5-bit field */
+#define ATS_ENABLE (1<<15)
+
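+/* Book-keeping for a device that has had ATS enabled; kept on ats_devices. */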
+struct pci_ats_dev {
+ struct list_head list;
+ u8 bus;
+ u8 devfn;
+ u16 ats_queue_depth; /* ATS device invalidation queue depth */
+ spinlock_t lock;
+};
+static LIST_HEAD(ats_devices);
+
+static void parse_ats_param(char *s);
+custom_param("ats", parse_ats_param);
+
+int ats_enabled = 1;
+
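+/* Handle the "ats=" boot option: a comma-separated list of boolean tokens;
+ * the last recognised token wins. */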
+static void parse_ats_param(char *s)
+{
+ char *ss;
+
+ do {
+ ss = strchr(s, ',');
+ if ( ss )
+ *ss = '\0';
+
+ if ( !strcmp(s, "off") || !strcmp(s, "no") || !strcmp(s, "false") ||
+ !strcmp(s, "0") || !strcmp(s, "disable") )
+ ats_enabled = 0;
+
+ if ( !strcmp(s, "on") || !strcmp(s, "yes") || !strcmp(s, "true") ||
+ !strcmp(s, "1") || !strcmp(s, "enable") )
+ ats_enabled = 1;
+
+ s = ss + 1;
+ } while ( ss );
+}
+
+struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu)
+{
+ struct acpi_drhd_unit *drhd;
+ list_for_each_entry ( drhd, &ats_dev_drhd_units, list )
+ {
+ if ( drhd->iommu == iommu )
+ return drhd;
+ }
+ return NULL;
+}
+
+/*
+ * BUGBUG: return 0 until pcimmcfg is checked in.
+ */
+int pci_find_ext_capability(int seg, int bus, int devfn, int cap)
+{
+ return 0;
+}
+
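+/* Return the ATS extended capability offset if the device at bus/devfn can
+ * use ATS (queued invalidation enabled, IOMMU supports device IOTLBs, and
+ * an ATSR unit covers the device), or 0 otherwise.  The device's DRHD is
+ * recorded on ats_dev_drhd_units for later device-IOTLB flushing. */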
+int ats_device(int seg, int bus, int devfn)
+{
+ struct acpi_drhd_unit *drhd, *ats_drhd, *new_drhd;
+ struct pci_dev *pdev;
+ int pos = 0;
+
+ if ( !ats_enabled )
+ return 0;
+
+ if ( !qinval_enabled )
+ return 0;
+
+    pdev = pci_get_pdev(bus, devfn);
+    if ( !pdev )
+        return 0;
+
+    drhd = acpi_find_matched_drhd_unit(pdev);
+    if ( !drhd || !ecap_dev_iotlb(drhd->iommu->ecap) )
+        return 0;
+
+ if ( !acpi_find_matched_atsr_unit(bus, devfn) )
+ return 0;
+
+ ats_drhd = find_ats_dev_drhd(drhd->iommu);
+ pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
+
+ if ( pos && (ats_drhd == NULL) )
+ {
+        new_drhd = xmalloc(struct acpi_drhd_unit);
+        if ( !new_drhd )
+            return 0;
+        memcpy(new_drhd, drhd, sizeof(struct acpi_drhd_unit));
+        list_add_tail(&new_drhd->list, &ats_dev_drhd_units);
+ }
+ return pos;
+}
+
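+/* Read the device's invalidation queue depth, set the ATS enable bit in its
+ * control register, and record it on ats_devices.  Returns the capability
+ * offset, or 0 if the device has no ATS capability. */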
+int enable_ats_device(int seg, int bus, int devfn)
+{
+ struct pci_ats_dev *pdev;
+ u32 value;
+ u16 queue_depth;
+ int pos;
+
+ pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
+
+ if ( !pos )
+ {
+        dprintk(XENLOG_ERR VTDPREFIX, "ats capability not found %x:%x.%x\n",
+                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+        return 0;
+    }
+    else
+        dprintk(XENLOG_INFO VTDPREFIX, "ats capability found %x:%x.%x\n",
+                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+ /* BUGBUG: add back seg when multi-seg platform support is enabled */
+ value = pci_conf_read16(bus, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), pos + ATS_REG_CAP);
+ queue_depth = value & ATS_QUEUE_DEPTH_MASK;
+
+ /* BUGBUG: add back seg when multi-seg platform support is enabled */
+    value = pci_conf_read16(bus, PCI_SLOT(devfn),
+                            PCI_FUNC(devfn), pos + ATS_REG_CTL);
+    value |= ATS_ENABLE;
+
+    /* BUGBUG: add back seg when multi-seg platform support is enabled */
+    pci_conf_write16(bus, PCI_SLOT(devfn),
+                     PCI_FUNC(devfn), pos + ATS_REG_CTL, value);
+
+ if ( acpi_find_matched_atsr_unit(bus, devfn) )
+ {
+ pdev = xmalloc(struct pci_ats_dev);
+ pdev->bus = bus;
+ pdev->devfn = devfn;
+ pdev->ats_queue_depth = queue_depth;
+ list_add(&(pdev->list), &ats_devices);
+ }
+ return pos;
+}
+
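+/* Check whether the device is mapped by this IOMMU into domain 'did' with
+ * the dev-IOTLB translation type, i.e. whether its cached translations may
+ * need invalidating for that domain. */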
+static int device_in_domain(struct iommu *iommu, struct pci_ats_dev *pdev,
+                            u16 did)
+{
+ struct root_entry *root_entry = NULL;
+ struct context_entry *ctxt_entry = NULL;
+ int tt, found = 0;
+
+ root_entry = (struct root_entry *) map_vtd_domain_page(iommu->root_maddr);
+ if ( !root_entry || !root_present(root_entry[pdev->bus]) )
+ goto out;
+
+ ctxt_entry = (struct context_entry *)
+ map_vtd_domain_page(root_entry[pdev->bus].val);
+
+ if ( ctxt_entry == NULL )
+ goto out;
+
+ if ( context_domain_id(ctxt_entry[pdev->devfn]) != did )
+ goto out;
+
+ tt = context_translation_type(ctxt_entry[pdev->devfn]);
+ if ( tt != CONTEXT_TT_DEV_IOTLB )
+ goto out;
+
+ found = 1;
+out:
+ if ( root_entry )
+ unmap_vtd_domain_page(root_entry);
+
+ if ( ctxt_entry )
+ unmap_vtd_domain_page(ctxt_entry);
+
+ if ( found )
+ return 1;
+
+ return 0;
+}
+
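+/* Issue device-IOTLB invalidations matching the requested flush granularity
+ * to the ATS devices covered by this IOMMU. */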
+int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
+ u64 addr, unsigned int size_order, u64 type)
+{
+ struct pci_ats_dev *pdev;
+ int sbit, ret = 0;
+ u16 sid;
+
+ if ( !ecap_dev_iotlb(iommu->ecap) )
+ return ret;
+
+ list_for_each_entry( pdev, &ats_devices, list )
+ {
+ sid = (pdev->bus << 8) | pdev->devfn;
+
+ switch ( type ) {
+ case DMA_TLB_DSI_FLUSH:
+ if ( !device_in_domain(iommu, pdev, did) )
+ break;
+ /* fall through if DSI condition met */
+ case DMA_TLB_GLOBAL_FLUSH:
+ /* invalidate all translations: sbit=1,bit_63=0,bit[62:12]=1 */
+ sbit = 1;
+            addr = (~0ULL << PAGE_SHIFT_4K) & 0x7FFFFFFFFFFFFFFFULL;
+ ret |= qinval_device_iotlb(iommu, pdev->ats_queue_depth,
+ sid, sbit, addr);
+ break;
+ case DMA_TLB_PSI_FLUSH:
+ if ( !device_in_domain(iommu, pdev, did) )
+ break;
+
+ /* if size <= 4K, set sbit = 0, else set sbit = 1 */
+ sbit = size_order ? 1 : 0;
+
+            /* clear lower bits */
+            addr &= ~0ULL << (PAGE_SHIFT + size_order);
+
+            /* if sbit == 1, zero out size_order bit and set lower bits to 1 */
+            if ( sbit )
+                addr &= ~(1ULL << (PAGE_SHIFT + size_order));
+
+ ret |= qinval_device_iotlb(iommu, pdev->ats_queue_depth,
+ sid, sbit, addr);
+ break;
+ default:
+ dprintk(XENLOG_WARNING VTDPREFIX, "invalid vt-d flush type\n");
+ break;
+ }
+ }
+ return ret;
+}
#define PCI_EXT_CAP_ID_VC 2
#define PCI_EXT_CAP_ID_DSN 3
#define PCI_EXT_CAP_ID_PWR 4
+#define PCI_EXT_CAP_ID_ARI 0xE
+#define PCI_EXT_CAP_ID_ATS 0xF
+#define PCI_EXT_CAP_ID_IOV 0x10
/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */