x86/HVM: differentiate IO/mem resources tracked by ioreq server
author Shuai Ruan <shuai.ruan@linux.intel.com>
Fri, 29 Jan 2016 16:49:11 +0000 (17:49 +0100)
committer Jan Beulich <jbeulich@suse.com>
Fri, 29 Jan 2016 16:49:11 +0000 (17:49 +0100)
Currently in the ioreq server, guest write-protected ram pages are
tracked in the same rangeset as device mmio resources. Yet unlike
device mmio, which can come in big chunks, the guest write-
protected pages may be discrete ranges of 4K bytes each. This
patch uses a separate rangeset for the guest ram pages.
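
As a purely illustrative aside, a device-model sketch of the contrast
(register_example_ranges() is a hypothetical helper, and the BAR
address and gfn values are made up; the two libxc calls are the
existing mmio one and the wp-mem one added by this patch):

    #include <xenctrl.h>

    /* Illustrative only: one large mmio chunk vs. discrete wp ram pages. */
    static int register_example_ranges(xc_interface *xch, domid_t domid,
                                       ioservid_t id)
    {
        xen_pfn_t gfns[] = { 0x1234, 0x2345, 0x3456 }; /* discrete 4K pages */
        unsigned int i;
        int rc;

        /* Device mmio: one big chunk, expressed as byte addresses. */
        rc = xc_hvm_map_io_range_to_ioreq_server(xch, domid, id,
                                                 1 /* is_mmio */,
                                                 0xf0000000, 0xf0ffffff);
        if ( rc < 0 )
            return rc;

        /* Guest write-protected ram: registered page by page, as gfns. */
        for ( i = 0; i < sizeof(gfns) / sizeof(gfns[0]); i++ )
        {
            rc = xc_hvm_map_wp_mem_range_to_ioreq_server(xch, domid, id,
                                                         gfns[i], gfns[i]);
            if ( rc < 0 )
                return rc;
        }

        return 0;
    }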

To differentiate between the write-protected memory ranges and the
mmio ranges when selecting an ioreq server, the p2m type is
retrieved by calling get_page_from_gfn(). We do not need to worry
about the p2m type changing during the ioreq selection process.
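
That selection logic can be read as the following condensed sketch
(hvm_ioreq_range_type() is a hypothetical helper that merely combines
the two hvm_select_ioreq_server() hunks further down in this patch):

    /* Sketch only: pick the rangeset type for an accepted memory access. */
    static uint32_t hvm_ioreq_range_type(struct domain *d, const ioreq_t *p)
    {
        uint32_t type = HVMOP_IO_RANGE_MEMORY;
        p2m_type_t p2mt;
        struct page_info *ram_page = get_page_from_gfn(d, PFN_DOWN(p->addr),
                                                       &p2mt, 0);

        /* Writes hitting a write-protected page (p2m_mmio_write_dm) are
         * matched against the new gfn-based wp-mem rangeset instead of
         * the byte-address based mmio one. */
        if ( p2mt == p2m_mmio_write_dm )
            type = HVMOP_IO_RANGE_WP_MEM;

        if ( ram_page )
            put_page(ram_page);

        return type;
    }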

Note: Previously, a new hypercall or subop was suggested to map
write-protected pages into the ioreq server. However, it turned out
that the handler of this new hypercall would be almost the same as
that of the existing pair - HVMOP_[un]map_io_range_to_ioreq_server -
and there's already a type parameter in this hypercall. So no new
hypercall is defined; only a new type is introduced.
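
For context, a hypothetical usage sketch from the device-model side
(wp_track_page() is a made-up helper; the assumption that the page is
first marked HVMMEM_mmio_write_dm via xc_hvm_set_mem_type(), as
write-protect-to-device-model backends do, is not prescribed by this
patch):

    #include <xenctrl.h>

    /* Sketch: write-protect one populated guest page and route its write
     * faults to the given ioreq server via the new wp-mem range type. */
    static int wp_track_page(xc_interface *xch, domid_t domid, ioservid_t id,
                             xen_pfn_t gfn)
    {
        /* Writes to the page now exit to the device model. */
        int rc = xc_hvm_set_mem_type(xch, domid, HVMMEM_mmio_write_dm, gfn, 1);

        if ( rc )
            return rc;

        /* Claim the page for this ioreq server; wp-mem ranges are gfn-based. */
        return xc_hvm_map_wp_mem_range_to_ioreq_server(xch, domid, id,
                                                       gfn, gfn);
    }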

Signed-off-by: Shuai Ruan <shuai.ruan@linux.intel.com>
Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
tools/libxc/include/xenctrl.h
tools/libxc/xc_domain.c
xen/arch/x86/hvm/hvm.c
xen/include/asm-x86/hvm/domain.h
xen/include/public/hvm/hvm_op.h

index 1d656ac7a53165dbef6ef6602d0102d692a963b4..1a5f4ec94033a6d1ef84566d2ecdffa3a417d4ae 100644 (file)
@@ -1714,6 +1714,37 @@ int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch,
                                             int is_mmio,
                                             uint64_t start,
                                             uint64_t end);
+/**
+ * This function registers a range of write-protected memory for emulation.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domid the domain id to be serviced
+ * @parm id the IOREQ Server id.
+ * @parm start start of range
+ * @parm end end of range (inclusive).
+ * @return 0 on success, -1 on failure.
+ */
+int xc_hvm_map_wp_mem_range_to_ioreq_server(xc_interface *xch,
+                                            domid_t domid,
+                                            ioservid_t id,
+                                            xen_pfn_t start,
+                                            xen_pfn_t end);
+
+/**
+ * This function deregisters a range of write-protected memory for emulation.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domid the domain id to be serviced
+ * @parm id the IOREQ Server id.
+ * @parm start start of range
+ * @parm end end of range (inclusive).
+ * @return 0 on success, -1 on failure.
+ */
+int xc_hvm_unmap_wp_mem_range_from_ioreq_server(xc_interface *xch,
+                                                domid_t domid,
+                                                ioservid_t id,
+                                                xen_pfn_t start,
+                                                xen_pfn_t end);
 
 /**
  * This function registers a PCI device for config space emulation.
index 921113d8edb959f16a3fb5bcc4f49af19c06087d..e21b602bd17be47008cbe1395b598836c607ba03 100644 (file)
@@ -1523,6 +1523,61 @@ int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch, domid_t domid,
     return rc;
 }
 
+int xc_hvm_map_wp_mem_range_to_ioreq_server(xc_interface *xch,
+                                            domid_t domid,
+                                            ioservid_t id,
+                                            xen_pfn_t start,
+                                            xen_pfn_t end)
+{
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->domid = domid;
+    arg->id = id;
+    arg->type = HVMOP_IO_RANGE_WP_MEM;
+    arg->start = start;
+    arg->end = end;
+
+    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
+                  HVMOP_map_io_range_to_ioreq_server,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(xch, arg);
+    return rc;
+}
+
+int xc_hvm_unmap_wp_mem_range_from_ioreq_server(xc_interface *xch,
+                                                domid_t domid,
+                                                ioservid_t id,
+                                                xen_pfn_t start,
+                                                xen_pfn_t end)
+{
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->domid = domid;
+    arg->id = id;
+    arg->type = HVMOP_IO_RANGE_WP_MEM;
+    arg->start = start;
+    arg->end = end;
+
+    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
+                  HVMOP_unmap_io_range_from_ioreq_server,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(xch, arg);
+    return rc;
+
+}
+
 int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
                                       ioservid_t id, uint16_t segment,
                                       uint8_t bus, uint8_t device,
index 674feeab7edc4e1333121426a543de1414cafcfb..74c2a828bb7178dcb50dc31084959f9a733d2d2a 100644 (file)
@@ -932,6 +932,9 @@ static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
         rangeset_destroy(s->range[i]);
 }
 
+const char *const io_range_name[NR_IO_RANGE_TYPES] =
+                                {"port", "mmio", "pci", "wp-mem"};
+
 static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s, 
                                             bool_t is_default)
 {
@@ -946,10 +949,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
         char *name;
 
         rc = asprintf(&name, "ioreq_server %d %s", s->id,
-                      (i == HVMOP_IO_RANGE_PORT) ? "port" :
-                      (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
-                      (i == HVMOP_IO_RANGE_PCI) ? "pci" :
-                      "");
+                      (i < NR_IO_RANGE_TYPES) ? io_range_name[i] : "");
         if ( rc )
             goto fail;
 
@@ -1267,6 +1267,7 @@ static int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
             case HVMOP_IO_RANGE_PORT:
             case HVMOP_IO_RANGE_MEMORY:
             case HVMOP_IO_RANGE_PCI:
+            case HVMOP_IO_RANGE_WP_MEM:
                 r = s->range[type];
                 break;
 
@@ -1318,6 +1319,7 @@ static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
             case HVMOP_IO_RANGE_PORT:
             case HVMOP_IO_RANGE_MEMORY:
             case HVMOP_IO_RANGE_PCI:
+            case HVMOP_IO_RANGE_WP_MEM:
                 r = s->range[type];
                 break;
 
@@ -2601,6 +2603,18 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
         type = (p->type == IOREQ_TYPE_PIO) ?
                 HVMOP_IO_RANGE_PORT : HVMOP_IO_RANGE_MEMORY;
         addr = p->addr;
+        if ( type == HVMOP_IO_RANGE_MEMORY )
+        {
+            p2m_type_t p2mt;
+            struct page_info *ram_page;
+
+            ram_page = get_page_from_gfn(d, PFN_DOWN(p->addr), &p2mt, 0);
+            if ( p2mt == p2m_mmio_write_dm )
+                type = HVMOP_IO_RANGE_WP_MEM;
+
+            if ( ram_page )
+                put_page(ram_page);
+        }
     }
 
     list_for_each_entry ( s,
@@ -2641,6 +2655,11 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                 return s;
             }
 
+            break;
+        case HVMOP_IO_RANGE_WP_MEM:
+            if ( rangeset_contains_singleton(r, PFN_DOWN(addr)) )
+                return s;
+
             break;
         }
     }
index a8cc2ad4d3f2977f76f68f5eb5f25b793e563422..1e139737ccafd3e282ecaa1453d4df9c9c29bb5e 100644 (file)
@@ -48,7 +48,7 @@ struct hvm_ioreq_vcpu {
     bool_t           pending;
 };
 
-#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
+#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_WP_MEM + 1)
 #define MAX_NR_IO_RANGES  256
 
 struct hvm_ioreq_server {
index 16061858f784897e255d45f0e2e9d3fb245f18d1..c0b1e30f0650f1a52d56d372f18195c99a8d5b53 100644 (file)
@@ -333,6 +333,7 @@ struct xen_hvm_io_range {
 # define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
 # define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
 # define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
+# define HVMOP_IO_RANGE_WP_MEM 3 /* Write-protected ram range */
     uint64_aligned_t start, end; /* IN - inclusive start and end of range */
 };
 typedef struct xen_hvm_io_range xen_hvm_io_range_t;