return -ENOMEM;
}
memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
- tasklet_init(&hvm_irq_dpci->dirq_tasklet,
+ tasklet_init(&hvm_irq_dpci->dirq_tasklet,
hvm_dirq_assist, (unsigned long)d);
hvm_irq_dpci->mirq = xmalloc_array(struct hvm_mirq_dpci_mapping,
d->nr_pirqs);
        memset(hvm_irq_dpci->mirq, 0,
               d->nr_pirqs * sizeof(*hvm_irq_dpci->mirq));
bitmap_zero(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
bitmap_zero(hvm_irq_dpci->mapping, d->nr_pirqs);
- memset(hvm_irq_dpci->hvm_timer, 0,
+ memset(hvm_irq_dpci->hvm_timer, 0,
nr_irqs * sizeof(*hvm_irq_dpci->hvm_timer));
for ( int i = 0; i < d->nr_pirqs; i++ ) {
INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
spin_unlock(&d->event_lock);
return -EBUSY;
}
-
+
/* if pirq is already mapped as vmsi, update the guest data/addr */
if ( hvm_irq_dpci->mirq[pirq].gmsi.gvec != pt_irq_bind->u.msi.gvec ||
hvm_irq_dpci->mirq[pirq].gmsi.gflags != pt_irq_bind->u.msi.gflags) {
int enable_ats_device(int seg, int bus, int devfn)
{
return 0;
-}
+}
int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
u64 addr, unsigned int size_order, u64 type)
/* Follow MSI setting */
if (x2apic_enabled)
msg.address_hi = dest & 0xFFFFFF00;
- msg.address_lo = (MSI_ADDRESS_HEADER << (MSI_ADDRESS_HEADER_SHIFT + 8));
+ msg.address_lo = (MSI_ADDRESS_HEADER << (MSI_ADDRESS_HEADER_SHIFT + 8));
msg.address_lo |= INT_DEST_MODE ? MSI_ADDR_DESTMODE_LOGIC:
MSI_ADDR_DESTMODE_PHYS;
msg.address_lo |= (INT_DELIVERY_MODE != dest_LowestPrio) ?
irq_desc[irq].handler = &dma_msi_type;
irq_to_iommu[irq] = iommu;
-#ifdef CONFIG_X86
+#ifdef CONFIG_X86
ret = request_irq(irq, iommu_page_fault, 0, "dmar", iommu);
#else
ret = request_irq_vector(irq, iommu_page_fault, 0, "dmar", iommu);
}
root_entry = (struct root_entry *)map_vtd_domain_page(iommu->root_maddr);
-
+
printk(" root_entry = %p\n", root_entry);
printk(" root_entry[%x] = %"PRIx64"\n", bus, root_entry[bus].val);
if ( !root_present(root_entry[bus]) )
trigger : 1, /* 0: edge, 1: level */
mask : 1, /* 0: enabled, 1: disabled */
__reserved_2 : 15;
-
+
union {
struct { __u32
__reserved_1 : 24,
list_add(&(pdev->list), &ats_devices);
return pos;
-}
+}
static int device_in_domain(struct iommu *iommu, struct pci_ats_dev *pdev, u16 did)
{
addr &= ~0 << (PAGE_SHIFT + size_order);
/* if size <= 4K, set sbit = 0, else set sbit = 1 */
- sbit = size_order ? 1 : 0;
-
+ sbit = size_order ? 1 : 0;
+
/* clear lower bits */
addr &= (~0 << (PAGE_SHIFT + size_order));
break;
default:
dprintk(XENLOG_WARNING VTDPREFIX, "invalid vt-d flush type\n");
- break;
+ break;
}
}
return ret;
for ( i = 0; i < max_pfn; i++ )
{
/*
- * Set up 1:1 mapping for dom0. Default to use only conventional RAM
- * areas and let RMRRs include needed reserved regions. When set, the
+ * Set up 1:1 mapping for dom0. Default to use only conventional RAM
+ * areas and let RMRRs include needed reserved regions. When set, the
* inclusive mapping maps in everything below 4GB except unusable
- * ranges.
+ * ranges.
*/
if ( !page_is_ram_type(i, RAM_TYPE_CONVENTIONAL) &&
(!iommu_inclusive_mapping ||
extern int amd_iov_detect(void);
static inline const struct iommu_ops *iommu_get_ops(void)
-{
+{
switch ( boot_cpu_data.x86_vendor )
{
case X86_VENDOR_INTEL:
int device_assigned(u8 bus, u8 devfn);
int assign_device(struct domain *d, u8 bus, u8 devfn);
int deassign_device(struct domain *d, u8 bus, u8 devfn);
-int iommu_get_device_group(struct domain *d, u8 bus, u8 devfn,
+int iommu_get_device_group(struct domain *d, u8 bus, u8 devfn,
XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs);
int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
int iommu_unmap_page(struct domain *d, unsigned long gfn);