#include "qemu-xen.h"
#include "qemu-aio.h"
#include "xen_backend.h"
+#include "pci.h"
#include <xen/hvm/params.h>
#include <sys/mman.h>
#endif /* defined(MAPCACHE) */
+/*
+ * Handle of the IOREQ server this QEMU instance registers with the
+ * hypervisor (created in xen_init_fv via xc_hvm_create_ioreq_server).
+ * All map/unmap and enable/disable calls below operate on this server.
+ */
+static ioservid_t ioservid;
+
+/*
+ * Enable the IOREQ server: ask the hypervisor to start forwarding this
+ * domain's intercepted I/O requests to QEMU (state = 1).
+ */
+void xen_enable_io(void)
+{
+ xc_hvm_set_ioreq_server_state(xc_handle, domid, ioservid, 1);
+}
+
+/*
+ * Disable the IOREQ server (state = 0) so no further I/O requests are
+ * delivered to QEMU; used around save/restore (see the savevm path).
+ */
+void xen_disable_io(void)
+{
+ xc_hvm_set_ioreq_server_state(xc_handle, domid, ioservid, 0);
+}
static void xen_init_fv(ram_addr_t ram_size, int vga_ram_size,
const char *boot_device,
const char *initrd_filename, const char *cpu_model,
const char *direct_pci)
{
- unsigned long ioreq_pfn;
+ extern xen_pfn_t ioreq_pfn;
+ extern xen_pfn_t bufioreq_pfn;
+ extern evtchn_port_t bufioreq_evtchn;
extern void *shared_page;
extern void *buffered_io_page;
#ifdef __ia64__
}
#endif
-#ifdef CONFIG_STUBDOM /* the hvmop is not supported on older hypervisors */
- xc_set_hvm_param(xc_handle, domid, HVM_PARAM_DM_DOMAIN, DOMID_SELF);
-#endif
- xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
+ if (xc_hvm_create_ioreq_server(xc_handle, domid,
+ HVM_IOREQSRV_BUFIOREQ_ATOMIC,
+ &ioservid)) {
+ fprintf(logfile, "failed to create ioreq server: error %d\n",
+ errno);
+ exit(-1);
+ }
+
+ if (xc_hvm_get_ioreq_server_info(xc_handle, domid, ioservid,
+ &ioreq_pfn, &bufioreq_pfn,
+ &bufioreq_evtchn)) {
+ fprintf(logfile, "failed to get ioreq server info: error %d\n",
+ errno);
+ exit(-1);
+ }
+
fprintf(logfile, "shared page at pfn %lx\n", ioreq_pfn);
shared_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
PROT_READ|PROT_WRITE, ioreq_pfn);
exit(-1);
}
- xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
- fprintf(logfile, "buffered io page at pfn %lx\n", ioreq_pfn);
+ fprintf(logfile, "buffered io page at pfn %lx\n", bufioreq_pfn);
buffered_io_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
- PROT_READ|PROT_WRITE, ioreq_pfn);
+ PROT_READ|PROT_WRITE,
+ bufioreq_pfn);
if (buffered_io_page == NULL) {
fprintf(logfile, "map buffered IO page returned error %d\n", errno);
exit(-1);
}
+ xen_enable_io();
+
#if defined(__ia64__)
xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFPIOREQ_PFN, &ioreq_pfn);
fprintf(logfile, "buffered pio page at pfn %lx\n", ioreq_pfn);
pc_machine.init(ram_size, vga_ram_size, boot_device,
kernel_filename, kernel_cmdline, initrd_filename,
cpu_model, direct_pci);
+
+ xc_hvm_map_io_range_to_ioreq_server(xc_handle, domid, ioservid,
+ 0, 0, 65536);
+}
+
+/*
+ * Register the guest-physical MMIO range [start_addr, start_addr + size - 1]
+ * with our IOREQ server; type argument 1 selects a memory (MMIO) range,
+ * as opposed to the port-I/O range (type 0) registered in xen_init_fv.
+ *
+ * NOTE(review): size == 0 would make end_addr wrap below start_addr —
+ * callers in the mmio registration path appear to pass non-zero sizes,
+ * but confirm no zero-size registration can reach here.
+ */
+void map_mmio_range(target_phys_addr_t start_addr, ram_addr_t size)
+{
+ ram_addr_t end_addr = start_addr + size - 1;
+
+ xc_hvm_map_io_range_to_ioreq_server(xc_handle, domid, ioservid,
+ 1, start_addr, end_addr);
+}
+
+/*
+ * Remove a previously registered MMIO range (type 1 = memory) from our
+ * IOREQ server. Must be called with the same start/size pair that was
+ * passed to map_mmio_range, since the range is recomputed identically.
+ *
+ * NOTE(review): as with map_mmio_range, size == 0 would wrap end_addr.
+ */
+void unmap_mmio_range(target_phys_addr_t start_addr, ram_addr_t size)
+{
+ ram_addr_t end_addr = start_addr + size - 1;
+
+ xc_hvm_unmap_io_range_from_ioreq_server(xc_handle, domid, ioservid,
+ 1, start_addr, end_addr);
+}
+
+/*
+ * Claim PCI config-space accesses for the device at @devfn on segment 0,
+ * bus 0 (the two literal 0 arguments) so they are routed to this IOREQ
+ * server; they arrive as IOREQ_TYPE_PCI_CONFIG requests and are replayed
+ * through the legacy cf8/cfc mechanism by the ioreq dispatch code.
+ */
+void map_pci_dev(int devfn)
+{
+ xc_hvm_map_pcidev_to_ioreq_server(xc_handle, domid, ioservid, 0, 0,
+ PCI_SLOT(devfn), PCI_FUNC(devfn));
+}
+
+/*
+ * Release the PCI config-space claim for the device at @devfn
+ * (segment 0, bus 0) established by map_pci_dev.
+ */
+void unmap_pci_dev(int devfn)
+{
+ xc_hvm_unmap_pcidev_from_ioreq_server(xc_handle, domid, ioservid, 0, 0,
+ PCI_SLOT(devfn), PCI_FUNC(devfn));
}
QEMUMachine xenfv_machine = {
for (i = 0; i < mmio_cnt; i++) {
if(mmio[i].start == start_addr) {
+ unmap_mmio_range(start_addr, mmio[i].size);
mmio[i].io_index = phys_offset;
mmio[i].size = size;
+ map_mmio_range(start_addr, size);
return;
}
}
mmio[mmio_cnt].io_index = phys_offset;
mmio[mmio_cnt].start = start_addr;
mmio[mmio_cnt++].size = size;
+ map_mmio_range(start_addr, size);
}
static int get_free_io_mem_idx(void)
int io_index = io_table_address >> IO_MEM_SHIFT;
for (i = 0; i < mmio_cnt; i++) {
- if (mmio[i].io_index == io_index) {
- mmio[i].start = mmio[i].size = 0;
- break;
+ if (mmio[i].io_index == io_index) {
+ unmap_mmio_range(mmio[i].start, mmio[i].size);
+ mmio[i].start = mmio[i].size = 0;
+ break;
}
}
long time_offset = 0;
+xen_pfn_t ioreq_pfn;
+xen_pfn_t bufioreq_pfn;
+evtchn_port_t bufioreq_evtchn;
+
shared_iopage_t *shared_page = NULL;
#define BUFFER_IO_MAX_DELAY 100
CPUX86State *env;
static int inited;
int i, rc;
- unsigned long bufioreq_evtchn;
env = qemu_mallocz(sizeof(CPUX86State));
if (!env)
}
ioreq_local_port[i] = rc;
}
- rc = xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_EVTCHN,
- &bufioreq_evtchn);
- if (rc < 0) {
- fprintf(logfile, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN error=%d\n",
- errno);
- return NULL;
- }
rc = xenevtchn_bind_interdomain(xce_handle, domid, (uint32_t)bufioreq_evtchn);
if (rc == -1) {
fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
case IOREQ_TYPE_INVALIDATE:
qemu_invalidate_map_cache();
break;
+ case IOREQ_TYPE_PCI_CONFIG: {
+ uint32_t sbdf = req->addr >> 32;
+ uint32_t val;
+
+ /*
+ * QEMU doesn't support MMCFG, so replay the Config cycle as if it has
+ * been issued via the legacy cf8/cfc mechanism.
+ */
+
+ /* Fake out to 0xcf8 */
+ val = (1u << 31) |
+ ((req->addr & 0x0f00) << 16) |
+ ((sbdf & 0xffff) << 8) |
+ (req->addr & 0xfc);
+ do_outp(env, 0xcf8, 4, val);
+
+ /* Now fake I/O to 0xcfc */
+ req->addr = 0xcfc | (req->addr & 0x03);
+ cpu_ioreq_pio(env, req);
+ break;
+ }
default:
hw_error("Invalid ioreq type 0x%x\n", req->type);
}
/* Save the device state */
asprintf(&qemu_file, "/var/lib/xen/qemu-save.%d", domid);
+ xen_disable_io();
do_savevm(qemu_file);
free(qemu_file);
xenstore_process_event(NULL);
}
+ xen_enable_io();
xenstore_record_dm_state("running");
}