return 0;
}
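+/*
+ * 'pcie' marks accesses arriving through an MMCFG (ECAM) window; for those
+ * the hvm_pt_pci_config_access_check() call below is skipped.
+ */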
-static int hvm_pt_pci_read_config(struct hvm_pt_device *d, uint32_t addr,
- uint32_t *data, int len)
+int hvm_pt_pci_read_config(struct hvm_pt_device *d, uint32_t addr,
+ uint32_t *data, int len, bool pcie)
{
uint32_t val = 0;
struct hvm_pt_reg_group *reg_grp_entry = NULL;
unsigned int func = PCI_FUNC(d->pdev->devfn);
/* Sanity checks. */
- if ( hvm_pt_pci_config_access_check(d, addr, len) )
+ if ( !pcie && hvm_pt_pci_config_access_check(d, addr, len) )
return X86EMUL_UNHANDLEABLE;
/* Find register group entry. */
return X86EMUL_OKAY;
}
-static int hvm_pt_pci_write_config(struct hvm_pt_device *d, uint32_t addr,
- uint32_t val, int len)
+int hvm_pt_pci_write_config(struct hvm_pt_device *d, uint32_t addr,
+ uint32_t val, int len, bool pcie)
{
int index = 0;
struct hvm_pt_reg_group *reg_grp_entry = NULL;
unsigned int func = PCI_FUNC(d->pdev->devfn);
/* Sanity checks. */
- if ( hvm_pt_pci_config_access_check(d, addr, len) )
+ if ( !pcie && hvm_pt_pci_config_access_check(d, addr, len) )
return X86EMUL_UNHANDLEABLE;
/* Find register group entry. */
if ( dev != NULL )
{
reg = (currd->arch.pci_cf8 & 0xfc) | addr;
- rc = hvm_pt_pci_read_config(dev, reg, &data32, size);
+ rc = hvm_pt_pci_read_config(dev, reg, &data32, size, false);
if ( rc == X86EMUL_OKAY )
{
read_unlock(&currd->arch.hvm_domain.pt_lock);
if ( dev != NULL )
{
reg = (currd->arch.pci_cf8 & 0xfc) | addr;
- rc = hvm_pt_pci_write_config(dev, reg, data32, size);
+ rc = hvm_pt_pci_write_config(dev, reg, data32, size, false);
if ( rc == X86EMUL_OKAY )
{
read_unlock(&currd->arch.hvm_domain.pt_lock);
* Architecture-dependent PCI access functions.
*/
+#include <xen/acpi.h>
#include <xen/spinlock.h>
#include <xen/pci.h>
#include <asm/io.h>
#include <xsm/xsm.h>
+#include "x86_64/mmconfig.h"
+
static DEFINE_SPINLOCK(pci_config_lock);
uint32_t pci_conf_read(uint32_t cf8, uint8_t offset, uint8_t bytes)
return rc;
}
+
+/* Handlers to trap PCIe config accesses. */
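+/* Find the MCFG entry whose MMCFG window contains addr. */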
+static struct acpi_mcfg_allocation *pcie_find_mmcfg(unsigned long addr)
+{
+ int i;
+
+ for ( i = 0; i < pci_mmcfg_config_num; i++ )
+ {
+ unsigned long start, end;
+
+ start = pci_mmcfg_config[i].address;
+ end = pci_mmcfg_config[i].address +
+ ((pci_mmcfg_config[i].end_bus_number + 1) << 20);
+ if ( addr >= start && addr < end )
+ return &pci_mmcfg_config[i];
+ }
+
+ return NULL;
+}
+
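+/*
+ * Look up a passed-through device of the current domain by SBDF.  Callers
+ * must hold the domain's pt_lock.
+ */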
+static struct hvm_pt_device *hw_pcie_get_device(unsigned int seg,
+ unsigned int bus,
+ unsigned int slot,
+ unsigned int func)
+{
+ struct hvm_pt_device *dev;
+ struct domain *d = current->domain;
+
+ list_for_each_entry( dev, &d->arch.hvm_domain.pt_devices, entries )
+ {
+ if ( dev->pdev->seg != seg || dev->pdev->bus != bus ||
+ dev->pdev->devfn != PCI_DEVFN(slot, func) )
+ continue;
+
+ return dev;
+ }
+
+ return NULL;
+}
+
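+/*
+ * Decode an offset into an MMCFG window: bits 27:20 are the bus, bits 19:15
+ * the slot, bits 14:12 the function and bits 11:0 the config space register.
+ */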
+static void pcie_decode_addr(unsigned long addr, unsigned int *bus,
+ unsigned int *slot, unsigned int *func,
+ unsigned int *reg)
+{
+ *bus = (addr >> 20) & 0xff;
+ *slot = (addr >> 15) & 0x1f;
+ *func = (addr >> 12) & 0x7;
+ *reg = addr & 0xfff;
+}
+
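+/* Check whether addr falls inside any MMCFG window. */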
+static int pcie_range(struct vcpu *v, unsigned long addr)
+{
+ return pcie_find_mmcfg(addr) != NULL;
+}
+
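+/*
+ * MMCFG read handler: accesses to devices with an emulated config space are
+ * handled there, anything else is forwarded to the hardware.
+ */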
+static int pcie_read(struct vcpu *v, unsigned long addr,
+ unsigned int len, unsigned long *pval)
+{
+ struct acpi_mcfg_allocation *mmcfg = pcie_find_mmcfg(addr);
+ struct domain *d = v->domain;
+ unsigned int seg, bus, slot, func, reg;
+ struct hvm_pt_device *dev;
+ uint32_t val;
+ int rc;
+
+ ASSERT(mmcfg != NULL);
+
+ if ( len > 4 || len == 3 )
+ return X86EMUL_UNHANDLEABLE;
+
+ addr -= mmcfg->address;
+ seg = mmcfg->pci_segment;
+ pcie_decode_addr(addr, &bus, &slot, &func, &reg);
+
+ read_lock(&d->arch.hvm_domain.pt_lock);
+ dev = hw_pcie_get_device(seg, bus, slot, func);
+ if ( dev != NULL )
+ {
+ rc = hvm_pt_pci_read_config(dev, reg, &val, len, true);
+ if ( rc == X86EMUL_OKAY )
+ {
+ read_unlock(&d->arch.hvm_domain.pt_lock);
+ goto out;
+ }
+ }
+ read_unlock(&d->arch.hvm_domain.pt_lock);
+
+ /* Not handled by the emulated config space: access the hardware directly. */
+ switch ( len )
+ {
+ case 1:
+ val = pci_conf_read8(seg, bus, slot, func, reg);
+ break;
+ case 2:
+ val = pci_conf_read16(seg, bus, slot, func, reg);
+ break;
+ case 4:
+ val = pci_conf_read32(seg, bus, slot, func, reg);
+ break;
+ }
+
+ out:
+ *pval = val;
+ return X86EMUL_OKAY;
+}
+
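+/*
+ * MMCFG write handler: accesses to devices with an emulated config space are
+ * handled there, anything else is forwarded to the hardware.
+ */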
+static int pcie_write(struct vcpu *v, unsigned long addr,
+ unsigned int len, unsigned long val)
+{
+ struct acpi_mcfg_allocation *mmcfg = pcie_find_mmcfg(addr);
+ struct domain *d = v->domain;
+ unsigned int seg, bus, slot, func, reg;
+ struct hvm_pt_device *dev;
+ int rc;
+
+ ASSERT(mmcfg != NULL);
+
+ if ( len > 4 || len == 3 )
+ return X86EMUL_UNHANDLEABLE;
+
+ addr -= mmcfg->address;
+ seg = mmcfg->pci_segment;
+ pcie_decode_addr(addr, &bus, &slot, &func, &reg);
+
+ read_lock(&d->arch.hvm_domain.pt_lock);
+ dev = hw_pcie_get_device(seg, bus, slot, func);
+ if ( dev != NULL )
+ {
+ rc = hvm_pt_pci_write_config(dev, reg, val, len, true);
+ if ( rc == X86EMUL_OKAY )
+ {
+ read_unlock(&d->arch.hvm_domain.pt_lock);
+ return rc;
+ }
+ }
+ read_unlock(&d->arch.hvm_domain.pt_lock);
+
+ /* Not handled by the emulated config space: access the hardware directly. */
+ switch ( len )
+ {
+ case 1:
+ pci_conf_write8(seg, bus, slot, func, reg, val);
+ break;
+ case 2:
+ pci_conf_write16(seg, bus, slot, func, reg, val);
+ break;
+ case 4:
+ pci_conf_write32(seg, bus, slot, func, reg, val);
+ break;
+ }
+
+ return X86EMUL_OKAY;
+}
+
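+/* MMIO ops used to intercept accesses to the MMCFG windows. */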
+const struct hvm_mmio_ops hvm_pt_pcie_mmio_ops = {
+ .check = pcie_range,
+ .read = pcie_read,
+ .write = pcie_write
+};