    spin_unlock_irqrestore(&pci_config_lock, flags);
}
+
+int pci_conf_write_intercept(unsigned int seg, unsigned int bdf,
+                             unsigned int reg, unsigned int size,
+                             uint32_t *data)
+{
+    struct pci_dev *pdev;
+    int rc = 0;
+
+    /*
+     * Avoid expensive operations when no hook is going to do anything
+     * for the access anyway.
+     */
+    if ( reg < 64 || reg >= 256 )
+        return 0;
+
+    spin_lock(&pcidevs_lock);
+
+    pdev = pci_get_pdev(seg, PCI_BUS(bdf), PCI_DEVFN2(bdf));
+    if ( pdev )
+        rc = pci_msi_conf_write_intercept(pdev, reg, size, data);
+
+    spin_unlock(&pcidevs_lock);
+
+    return rc;
+}
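(Aside, not part of the patch: the hook body is not in this hunk, so the stand-alone sketch below only illustrates the return convention the caller above relies on: a negative return vetoes the access, a non-negative return lets it proceed with whatever adjustment the hook made to *data. The hook name, register numbers and mask are invented; this is not Xen's pci_msi_conf_write_intercept().)

/* Hypothetical hook body, NOT Xen's pci_msi_conf_write_intercept(). */
#include <stdint.h>
#include <stdio.h>

struct pci_dev;                    /* opaque in this sketch */

static int demo_conf_write_hook(struct pci_dev *pdev, unsigned int reg,
                                unsigned int size, uint32_t *data)
{
    (void)pdev;
    (void)size;

    if ( reg == 0xa0 )             /* made-up protected register */
        return -1;                 /* negative: caller drops the write */

    if ( reg == 0x44 )             /* made-up register with r/o low byte */
    {
        *data &= ~0xffu;           /* adjust the value the caller will write */
        return 1;
    }

    return 0;                      /* non-negative: write goes through */
}

int main(void)
{
    uint32_t v = 0x12345678;

    printf("reg 0xa0: rc=%d\n", demo_conf_write_hook(NULL, 0xa0, 4, &v));
    printf("reg 0x44: rc=%d data=%#x\n",
           demo_conf_write_hook(NULL, 0x44, 4, &v), (unsigned int)v);
    return 0;
}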
    return ioports_access_permitted(d, port, port + bytes - 1);
}
-static bool_t pci_cfg_ok(struct domain *currd, bool_t write,
-                         unsigned int start, unsigned int size)
+static bool_t pci_cfg_ok(struct domain *currd, unsigned int start,
+                         unsigned int size, uint32_t *write)
{
    uint32_t machine_bdf;
        start |= CF8_ADDR_HI(currd->arch.pci_cf8);
    }
-    return !xsm_pci_config_permission(XSM_HOOK, currd, machine_bdf,
-                                      start, start + size - 1, write);
+    if ( xsm_pci_config_permission(XSM_HOOK, currd, machine_bdf,
+                                   start, start + size - 1, !!write) != 0 )
+        return 0;
+
+    return !write ||
+           pci_conf_write_intercept(0, machine_bdf, start, size, write) >= 0;
}
uint32_t guest_io_read(unsigned int port, unsigned int bytes,
            size = min(bytes, 4 - (port & 3));
            if ( size == 3 )
                size = 2;
-            if ( pci_cfg_ok(currd, 0, port & 3, size) )
+            if ( pci_cfg_ok(currd, port & 3, size, NULL) )
                sub_data = pci_conf_read(currd->arch.pci_cf8, port & 3, size);
        }
            size = min(bytes, 4 - (port & 3));
            if ( size == 3 )
                size = 2;
-            if ( pci_cfg_ok(currd, 1, port & 3, size) )
+            if ( pci_cfg_ok(currd, port & 3, size, &data) )
                pci_conf_write(currd->arch.pci_cf8, port & 3, size, data);
        }
    vmask_t used_vectors;
};
+int pci_conf_write_intercept(unsigned int seg, unsigned int bdf,
+                             unsigned int reg, unsigned int size,
+                             uint32_t *data);
+int pci_msi_conf_write_intercept(struct pci_dev *, unsigned int reg,
+                                 unsigned int size, uint32_t *data);
+
#endif /* __X86_PCI_H__ */
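The consumer side can be sketched in the same spirit: in the reworked pci_cfg_ok() above, a NULL data pointer marks a read (the intercept is never consulted), while a non-NULL pointer marks a write that the intercept may veto by returning a negative value. The model below only mirrors that contract; it is not Xen code, and all names and the protected register number are invented for illustration.

/* Toy model of the reworked pci_cfg_ok() / intercept interaction. */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for pci_conf_write_intercept(); register 0x70 is made up. */
static int intercept_model(unsigned int reg, unsigned int size,
                           uint32_t *data)
{
    (void)size;
    (void)data;

    if ( reg < 64 || reg >= 256 )      /* same fast-path filter as the patch */
        return 0;

    return reg == 0x70 ? -1 : 0;       /* pretend 0x70 is protected */
}

/* Mirrors the final return statement of the reworked pci_cfg_ok(). */
static int cfg_ok_model(unsigned int reg, unsigned int size, uint32_t *write)
{
    return !write || intercept_model(reg, size, write) >= 0;
}

int main(void)
{
    uint32_t val = 0xdeadbeef;

    printf("read  0x70: %s\n", cfg_ok_model(0x70, 4, NULL) ? "ok" : "blocked");
    printf("write 0x70: %s\n", cfg_ok_model(0x70, 4, &val) ? "ok" : "blocked");
    printf("write 0x40: %s\n", cfg_ok_model(0x40, 4, &val) ? "ok" : "blocked");
    return 0;
}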