#include <public/hvm/hvm_info_table.h>
#include <public/hvm/hvm_vcpu.h>
+#include "../x86_64/mmconfig.h"
+
/*
* Have the TSS cover the ISA port range, which makes it
* - 104 bytes base structure
return 0;
}
+/*
+ * Register vPCI ECAM (MMCFG) trap handlers for every MMCFG area
+ * discovered from the MCFG ACPI table, so Dom0 config-space accesses
+ * through ECAM get intercepted.
+ *
+ * Returns 0 on success, or the first error from
+ * register_vpci_ecam_handler.
+ */
+int __init pvh_setup_ecam(struct domain *d)
+{
+    unsigned int i;
+    int rc;
+
+    for ( i = 0; i < pci_mmcfg_config_num; i++ )
+    {
+        /*
+         * Each bus decodes 1MB (1 << 20) of ECAM space.  NOTE(review):
+         * this sizes the window as if it always starts at bus 0; if
+         * start_bus_number can be non-zero, confirm the base/size pair
+         * still covers exactly the intended bus range.
+         */
+        size_t size = (pci_mmcfg_config[i].end_bus_number + 1) << 20;
+
+        rc = register_vpci_ecam_handler(d, pci_mmcfg_config[i].address, size,
+                                       pci_mmcfg_config[i].pci_segment);
+        if ( rc )
+            return rc;
+    }
+
+    return 0;
+}
+
int __init dom0_construct_pvh(struct domain *d, const module_t *image,
unsigned long image_headroom,
module_t *initrd,
return rc;
}
+ rc = pvh_setup_ecam(d);
+ if ( rc )
+ {
+ printk("Failed to setup Dom0 PCI ECAM areas: %d\n", rc);
+ return rc;
+ }
+
panic("Building a PVHv2 Dom0 is not yet supported.");
return 0;
}
handler->ops = &vpci_portio_ops;
}
+/* Handlers to trap PCI ECAM config accesses. */
+/*
+ * Look up the registered ECAM window of @d containing @addr, or NULL
+ * if the address falls inside none of them.
+ */
+static struct hvm_ecam *vpci_ecam_find(struct domain *d, unsigned long addr)
+{
+    struct hvm_ecam *region;
+
+    list_for_each_entry ( region, &d->arch.hvm_domain.ecam_regions, next )
+    {
+        if ( addr >= region->addr && addr < region->addr + region->size )
+            return region;
+    }
+
+    return NULL;
+}
+
+/*
+ * Split a window-relative ECAM offset into its components.
+ * ECAM layout: bits [27:20] bus, [19:12] devfn, [11:0] register
+ * offset within a function's 4K config space.
+ */
+static void vpci_ecam_decode_addr(unsigned long addr, unsigned int *bus,
+                                  unsigned int *devfn, unsigned int *reg)
+{
+    *reg = addr & 0xfff;
+    *devfn = (addr >> 12) & 0xff;
+    *bus = (addr >> 20) & 0xff;
+}
+
+/* Accept callback: claim the access iff it hits a registered ECAM window. */
+static int vpci_ecam_accept(struct vcpu *v, unsigned long addr)
+{
+    return vpci_ecam_find(v->domain, addr) != NULL;
+}
+
+/*
+ * MMIO read handler for ECAM accesses: decode the window-relative
+ * offset into bus/devfn/register and forward to the vPCI read
+ * dispatcher.
+ *
+ * Returns X86EMUL_OKAY on success, X86EMUL_UNHANDLEABLE on a malformed
+ * access or a vPCI failure.
+ */
+static int vpci_ecam_read(struct vcpu *v, unsigned long addr,
+                          unsigned int len, unsigned long *data)
+{
+    struct domain *d = v->domain;
+    struct hvm_ecam *ecam = vpci_ecam_find(d, addr);
+    unsigned int bus, devfn, reg;
+    uint32_t data32;
+    int rc;
+
+    /* The accept handler guarantees a matching window exists. */
+    ASSERT(ecam);
+
+    vpci_ecam_decode_addr(addr - ecam->addr, &bus, &devfn, &reg);
+
+    /* Reject misaligned/oversized accesses and offsets at the 4K edge. */
+    if ( vpci_access_check(reg, len) || reg >= 0xfff )
+        return X86EMUL_UNHANDLEABLE;
+
+    rc = xen_vpci_read(ecam->segment, bus, devfn, reg, len, &data32);
+    if ( !rc )
+        *data = data32;
+
+    return rc ? X86EMUL_UNHANDLEABLE : X86EMUL_OKAY;
+}
+
+/*
+ * MMIO write handler for ECAM accesses: decode the window-relative
+ * offset into bus/devfn/register and forward to the vPCI write
+ * dispatcher.
+ *
+ * Returns X86EMUL_OKAY on success, X86EMUL_UNHANDLEABLE on a malformed
+ * access or a vPCI failure.
+ */
+static int vpci_ecam_write(struct vcpu *v, unsigned long addr,
+                           unsigned int len, unsigned long data)
+{
+    struct domain *d = v->domain;
+    struct hvm_ecam *ecam = vpci_ecam_find(d, addr);
+    unsigned int bus, devfn, reg;
+    int rc;
+
+    /* The accept handler guarantees a matching window exists. */
+    ASSERT(ecam);
+
+    vpci_ecam_decode_addr(addr - ecam->addr, &bus, &devfn, &reg);
+
+    /* Reject misaligned/oversized accesses and offsets at the 4K edge. */
+    if ( vpci_access_check(reg, len) || reg >= 0xfff )
+        return X86EMUL_UNHANDLEABLE;
+
+    rc = xen_vpci_write(ecam->segment, bus, devfn, reg, len, data);
+
+    return rc ? X86EMUL_UNHANDLEABLE : X86EMUL_OKAY;
+}
+
+/* Dispatch table hooking the ECAM handlers into the MMIO emulation path. */
+static const struct hvm_mmio_ops vpci_ecam_ops = {
+    .check = vpci_ecam_accept,
+    .read = vpci_ecam_read,
+    .write = vpci_ecam_write,
+};
+
+/*
+ * Register a vPCI ECAM window of @size bytes at @addr in PCI segment
+ * @seg for domain @d.  The first registration also installs the MMIO
+ * handler for the domain.
+ *
+ * The caller must be registering for the hardware domain while it is
+ * still paused (build time): the region list is manipulated without
+ * locking.
+ *
+ * Returns 0 on success, -EEXIST if the window overlaps an already
+ * registered one, -ENOMEM on allocation failure.
+ */
+int register_vpci_ecam_handler(struct domain *d, paddr_t addr, size_t size,
+                               unsigned int seg)
+{
+    struct hvm_ecam *ecam;
+
+    ASSERT(is_hardware_domain(d));
+    ASSERT(atomic_read(&d->pause_count));
+
+    /*
+     * Reject any overlap with an existing window.  Checking only
+     * whether the new base falls inside one (as a plain find would)
+     * misses a new window that straddles or fully contains an existing
+     * region, which would make later lookups ambiguous.
+     */
+    list_for_each_entry ( ecam, &d->arch.hvm_domain.ecam_regions, next )
+        if ( addr < ecam->addr + ecam->size && ecam->addr < addr + size )
+            return -EEXIST;
+
+    ecam = xzalloc(struct hvm_ecam);
+    if ( !ecam )
+        return -ENOMEM;
+
+    /* Install the per-domain MMIO handler once, on first registration. */
+    if ( list_empty(&d->arch.hvm_domain.ecam_regions) )
+        register_mmio_handler(d, &vpci_ecam_ops);
+
+    ecam->addr = addr;
+    ecam->segment = seg;
+    ecam->size = size;
+    list_add(&ecam->next, &d->arch.hvm_domain.ecam_regions);
+
+    return 0;
+}
+
/*
* Local variables:
* mode: C