xenbits.xensource.com Git - qemu-xen-traditional.git/commitdiff
qemu-trad: stop using the default IOREQ server
author     Paul Durrant <paul.durrant@citrix.com>
           Wed, 15 Aug 2018 16:38:18 +0000 (17:38 +0100)
committer  Ian Jackson <Ian.Jackson@eu.citrix.com>
           Thu, 23 Aug 2018 13:41:32 +0000 (14:41 +0100)
Because qemu-trad uses the legacy HVM param mechanism to hook into Xen,
Xen has to maintain the notion of a 'default' IOREQ server, which is where
all I/O goes if no other device model claims it. Maintaining this code in
Xen has a cost, and it would be good if the project no longer had to pay
it.

This patch makes the minimal changes necessary for qemu-trad to use the
IOREQ server API to hook into Xen. This means the default IOREQ server
will no longer be in use and thus no longer needs to be maintained.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
hw/pci.c
hw/xen_machine_fv.c
i386-dm/exec-dm.c
i386-dm/helper2.c
qemu-xen.h
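
For orientation before the diff itself, the sketch below summarises the
registration flow the patch introduces, using the same libxenctrl calls that
appear in hw/xen_machine_fv.c. The helper name and its reduced error handling
are illustrative only, and it assumes xc_handle is an open xc_interface
handle for the target HVM domain.

    /* Illustrative sketch of the IOREQ-server registration flow added by
     * this patch; error handling is reduced to early returns. */
    #include <xenctrl.h>

    static int register_ioreq_server(xc_interface *xc_handle, domid_t domid,
                                     ioservid_t *ioservid,
                                     xen_pfn_t *ioreq_pfn,
                                     xen_pfn_t *bufioreq_pfn,
                                     evtchn_port_t *bufioreq_evtchn)
    {
        /* Create a new (non-default) IOREQ server with an atomic
         * buffered-ioreq page. */
        if (xc_hvm_create_ioreq_server(xc_handle, domid,
                                       HVM_IOREQSRV_BUFIOREQ_ATOMIC, ioservid))
            return -1;

        /* Ask Xen where the synchronous and buffered ioreq pages live and
         * which event channel signals buffered requests; xen_init_fv() then
         * maps the pages with xc_map_foreign_range(). */
        if (xc_hvm_get_ioreq_server_info(xc_handle, domid, *ioservid,
                                         ioreq_pfn, bufioreq_pfn,
                                         bufioreq_evtchn))
            return -1;

        /* Claim the whole legacy port-I/O space (range type 0), as
         * xen_init_fv() does once machine init has completed. */
        xc_hvm_map_io_range_to_ioreq_server(xc_handle, domid, *ioservid,
                                            0, 0, 65536);

        /* Flip the server to the enabled state so Xen starts forwarding
         * guest I/O to it (cf. xen_enable_io()/xen_disable_io()). */
        return xc_hvm_set_ioreq_server_state(xc_handle, domid, *ioservid, 1);
    }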

index c4232856f13dcbb1414c9dbe269c8f5280ec663a..68e5805fbdb5b22bf5b13e04123af2b685e93777 100644
--- a/hw/pci.c
+++ b/hw/pci.c
@@ -263,6 +263,8 @@ PCIDevice *pci_register_device(PCIBus *bus, const char *name,
     pci_dev->config_write = config_write;
     bus->devices[devfn] = pci_dev;
     pci_dev->irq = qemu_allocate_irqs(pci_set_irq, pci_dev, 4);
+
+    map_pci_dev(pci_dev->devfn);
     return pci_dev;
 }
 
@@ -305,6 +307,7 @@ int pci_unregister_device(PCIDevice *pci_dev)
 {
     int ret = 0;
 
+    unmap_pci_dev(pci_dev->devfn);
     if (pci_dev->unregister)
         ret = pci_dev->unregister(pci_dev);
     if (ret)
index b385d6a55d27a007b13c5a5c54583eedb837716c..f0989fad4c493058a8646a640d0c29b367554b63 100644
--- a/hw/xen_machine_fv.c
+++ b/hw/xen_machine_fv.c
@@ -30,6 +30,7 @@
 #include "qemu-xen.h"
 #include "qemu-aio.h"
 #include "xen_backend.h"
+#include "pci.h"
 
 #include <xen/hvm/params.h>
 #include <sys/mman.h>
@@ -270,6 +271,17 @@ void qemu_invalidate_entry(uint8_t *buffer) {};
 
 #endif /* defined(MAPCACHE) */
 
+static ioservid_t ioservid;
+
+void xen_enable_io(void)
+{
+    xc_hvm_set_ioreq_server_state(xc_handle, domid, ioservid, 1);
+}
+
+void xen_disable_io(void)
+{
+    xc_hvm_set_ioreq_server_state(xc_handle, domid, ioservid, 0);
+}
 
 static void xen_init_fv(ram_addr_t ram_size, int vga_ram_size,
                        const char *boot_device,
@@ -277,7 +289,9 @@ static void xen_init_fv(ram_addr_t ram_size, int vga_ram_size,
                         const char *initrd_filename, const char *cpu_model,
                         const char *direct_pci)
 {
-    unsigned long ioreq_pfn;
+    extern xen_pfn_t ioreq_pfn;
+    extern xen_pfn_t bufioreq_pfn;
+    extern evtchn_port_t bufioreq_evtchn;
     extern void *shared_page;
     extern void *buffered_io_page;
 #ifdef __ia64__
@@ -295,10 +309,22 @@ static void xen_init_fv(ram_addr_t ram_size, int vga_ram_size,
     }
 #endif
 
-#ifdef CONFIG_STUBDOM /* the hvmop is not supported on older hypervisors */
-    xc_set_hvm_param(xc_handle, domid, HVM_PARAM_DM_DOMAIN, DOMID_SELF);
-#endif
-    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
+    if (xc_hvm_create_ioreq_server(xc_handle, domid,
+                                   HVM_IOREQSRV_BUFIOREQ_ATOMIC,
+                                   &ioservid)) {
+        fprintf(logfile, "failed to create ioreq server: error %d\n",
+                errno);
+        exit(-1);
+    }
+
+    if (xc_hvm_get_ioreq_server_info(xc_handle, domid, ioservid,
+                                     &ioreq_pfn, &bufioreq_pfn,
+                                     &bufioreq_evtchn)) {
+        fprintf(logfile, "failed to get ioreq server info: error %d\n",
+                errno);
+        exit(-1);
+    }
+
     fprintf(logfile, "shared page at pfn %lx\n", ioreq_pfn);
     shared_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
                                        PROT_READ|PROT_WRITE, ioreq_pfn);
@@ -307,15 +333,17 @@ static void xen_init_fv(ram_addr_t ram_size, int vga_ram_size,
         exit(-1);
     }
 
-    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
-    fprintf(logfile, "buffered io page at pfn %lx\n", ioreq_pfn);
+    fprintf(logfile, "buffered io page at pfn %lx\n", bufioreq_pfn);
     buffered_io_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
-                                            PROT_READ|PROT_WRITE, ioreq_pfn);
+                                            PROT_READ|PROT_WRITE,
+                                            bufioreq_pfn);
     if (buffered_io_page == NULL) {
         fprintf(logfile, "map buffered IO page returned error %d\n", errno);
         exit(-1);
     }
 
+    xen_enable_io();
+
 #if defined(__ia64__)
     xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFPIOREQ_PFN, &ioreq_pfn);
     fprintf(logfile, "buffered pio page at pfn %lx\n", ioreq_pfn);
@@ -377,6 +405,37 @@ static void xen_init_fv(ram_addr_t ram_size, int vga_ram_size,
     pc_machine.init(ram_size, vga_ram_size, boot_device,
                    kernel_filename, kernel_cmdline, initrd_filename,
                    cpu_model, direct_pci);
+
+    xc_hvm_map_io_range_to_ioreq_server(xc_handle, domid, ioservid,
+                                        0, 0, 65536);
+}
+
+void map_mmio_range(target_phys_addr_t start_addr, ram_addr_t size)
+{
+    ram_addr_t end_addr = start_addr + size - 1;
+
+    xc_hvm_map_io_range_to_ioreq_server(xc_handle, domid, ioservid,
+                                        1, start_addr, end_addr);
+}
+
+void unmap_mmio_range(target_phys_addr_t start_addr, ram_addr_t size)
+{
+    ram_addr_t end_addr = start_addr + size - 1;
+
+    xc_hvm_unmap_io_range_from_ioreq_server(xc_handle, domid, ioservid,
+                                            1, start_addr, end_addr);
+}
+
+void map_pci_dev(int devfn)
+{
+    xc_hvm_map_pcidev_to_ioreq_server(xc_handle, domid, ioservid, 0, 0,
+                                      PCI_SLOT(devfn), PCI_FUNC(devfn));
+}
+
+void unmap_pci_dev(int devfn)
+{
+    xc_hvm_unmap_pcidev_from_ioreq_server(xc_handle, domid, ioservid, 0, 0,
+                                          PCI_SLOT(devfn), PCI_FUNC(devfn));
 }
 
 QEMUMachine xenfv_machine = {
index 96274d9d7461dd79bd6167eb35ec1f513c5e6678..efdd77ebfd33b5db6bb1837dcc0e799e6b040171 100644
--- a/i386-dm/exec-dm.c
+++ b/i386-dm/exec-dm.c
@@ -294,8 +294,10 @@ void cpu_register_physical_memory(target_phys_addr_t start_addr,
 
     for (i = 0; i < mmio_cnt; i++) { 
         if(mmio[i].start == start_addr) {
+            unmap_mmio_range(start_addr, mmio[i].size);
             mmio[i].io_index = phys_offset;
             mmio[i].size = size;
+            map_mmio_range(start_addr, size);
             return;
         }
     }
@@ -308,6 +310,7 @@ void cpu_register_physical_memory(target_phys_addr_t start_addr,
     mmio[mmio_cnt].io_index = phys_offset;
     mmio[mmio_cnt].start = start_addr;
     mmio[mmio_cnt++].size = size;
+    map_mmio_range(start_addr, size);
 }
 
 static int get_free_io_mem_idx(void)
@@ -360,9 +363,10 @@ void cpu_unregister_io_memory(int io_table_address)
     int io_index = io_table_address >> IO_MEM_SHIFT;
 
     for (i = 0; i < mmio_cnt; i++) {
-       if (mmio[i].io_index == io_index) {
-          mmio[i].start = mmio[i].size = 0;
-          break;
+        if (mmio[i].io_index == io_index) {
+            unmap_mmio_range(mmio[i].start, mmio[i].size);
+            mmio[i].start = mmio[i].size = 0;
+            break;
        }
     }
 
index 78093fefd7a7aa9651968643505146793f4d9278..995d63f64cb647fc19efb50aa49500366093e144 100644
--- a/i386-dm/helper2.c
+++ b/i386-dm/helper2.c
@@ -98,6 +98,10 @@ int domid_backend = 0;
 
 long time_offset = 0;
 
+xen_pfn_t ioreq_pfn;
+xen_pfn_t bufioreq_pfn;
+evtchn_port_t bufioreq_evtchn;
+
 shared_iopage_t *shared_page = NULL;
 
 #define BUFFER_IO_MAX_DELAY  100
@@ -120,7 +124,6 @@ CPUX86State *cpu_x86_init(const char *cpu_model)
     CPUX86State *env;
     static int inited;
     int i, rc;
-    unsigned long bufioreq_evtchn;
 
     env = qemu_mallocz(sizeof(CPUX86State));
     if (!env)
@@ -158,13 +161,6 @@ CPUX86State *cpu_x86_init(const char *cpu_model)
             }
             ioreq_local_port[i] = rc;
         }
-        rc = xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_EVTCHN,
-                &bufioreq_evtchn);
-        if (rc < 0) {
-            fprintf(logfile, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN error=%d\n",
-                    errno);
-            return NULL;
-        }
         rc = xenevtchn_bind_interdomain(xce_handle, domid, (uint32_t)bufioreq_evtchn);
         if (rc == -1) {
             fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
@@ -489,6 +485,27 @@ static void __handle_ioreq(CPUState *env, ioreq_t *req)
     case IOREQ_TYPE_INVALIDATE:
         qemu_invalidate_map_cache();
         break;
+    case IOREQ_TYPE_PCI_CONFIG: {
+        uint32_t sbdf = req->addr >> 32;
+        uint32_t val;
+
+        /*
+         * QEMU doesn't support MMCFG, so replay the Config cycle as if it has
+         * been issued via the legacy cf8/cfc mechanism.
+         */
+
+        /* Fake out to 0xcf8 */
+        val = (1u << 31) |
+            ((req->addr & 0x0f00) << 16) |
+            ((sbdf & 0xffff) << 8) |
+            (req->addr & 0xfc);
+        do_outp(env, 0xcf8, 4, val);
+
+        /* Now fake I/O to 0xcfc */
+        req->addr = 0xcfc | (req->addr & 0x03);
+        cpu_ioreq_pio(env, req);
+        break;
+    }
     default:
         hw_error("Invalid ioreq type 0x%x\n", req->type);
     }
@@ -645,6 +662,7 @@ int main_loop(void)
 
         /* Save the device state */
         asprintf(&qemu_file, "/var/lib/xen/qemu-save.%d", domid);
+        xen_disable_io();
         do_savevm(qemu_file);
         free(qemu_file);
 
@@ -658,6 +676,7 @@ int main_loop(void)
                 xenstore_process_event(NULL);
         }
 
+        xen_enable_io();
         xenstore_record_dm_state("running");
     }
 
index 059866874a0a40b6d8a2a644e8d7b170c6b0e094..862d3998d1a46ed2d27c71614c9a5e9f743a8d18 100644
--- a/qemu-xen.h
+++ b/qemu-xen.h
@@ -138,4 +138,12 @@ int has_tpm_device_danger(void);
 static void vga_dirty_log_start(void *s) { }
 static void vga_dirty_log_stop(void *s) { }
 
+void xen_enable_io(void);
+void xen_disable_io(void);
+
+void map_mmio_range(target_phys_addr_t start_addr, ram_addr_t size);
+void unmap_mmio_range(target_phys_addr_t start_addr, ram_addr_t size);
+void map_pci_dev(int devfn);
+void unmap_pci_dev(int devfn);
+
 #endif /*QEMU_XEN_H*/
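
To illustrate the IOREQ_TYPE_PCI_CONFIG replay added to __handle_ioreq() in
i386-dm/helper2.c above, the snippet below rebuilds the 0xcf8 address word
the same way the new case does; the helper name and the sample
bus/device/function values are purely illustrative.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Rebuild the 0xcf8 address word as the IOREQ_TYPE_PCI_CONFIG case does:
     * enable bit | extended-register bits | bus/devfn | dword-aligned
     * register offset. */
    static uint32_t pci_config_to_cf8(uint64_t addr)
    {
        uint32_t sbdf = addr >> 32;

        return (1u << 31) |
               ((addr & 0x0f00) << 16) |
               ((sbdf & 0xffff) << 8) |
               (addr & 0xfc);
    }

    int main(void)
    {
        /* Example: bus 0, device 3, function 0 (devfn 0x18), register 0x04. */
        uint64_t addr = ((uint64_t)0x0018 << 32) | 0x04;

        /* Prints 0x80001804; the data phase is then replayed as port I/O to
         * 0xcfc | (addr & 3). */
        printf("0x%08" PRIx32 "\n", pci_config_to_cf8(addr));
        return 0;
    }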