xen: extend XEN_DOMCTL_memory_mapping to handle cacheability
author    Stefano Stabellini <sstabellini@kernel.org>
          Fri, 4 Jan 2019 20:47:02 +0000 (12:47 -0800)
committer Stefano Stabellini <sstabellini@xilinx.com>
          Wed, 18 Dec 2019 19:16:19 +0000 (11:16 -0800)
Reuse the existing padding field to pass cacheability information about
the memory mapping: specifically, whether the memory should be mapped as
normal memory or as device memory (the latter is what we have today).

Add a cacheability parameter to map_mmio_regions. 0 means device
memory, which is what we have today.

On ARM, map device memory as p2m_mmio_direct_dev (as is already done
today) and normal memory as p2m_ram_rw.

On x86, return an error if the requested cacheability is not device
memory.
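
For illustration, a toolstack-side caller could request a normal-memory
mapping by filling the domctl as below. This is a sketch only: domid,
gfn, mfn and nr are placeholder variables, and the hypercall plumbing
is omitted; only the field names come from the public header changed by
this patch.

    /* Sketch (not part of this patch): ask Xen to map nr pages of
     * machine memory into the guest as normal (cacheable) memory. */
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_memory_mapping,
        .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
        .domain = domid,
        .u.memory_mapping = {
            .first_gfn    = gfn,                  /* guest frame number */
            .first_mfn    = mfn,                  /* machine frame number */
            .nr_mfns      = nr,                   /* number of pages */
            .add_mapping  = DPCI_ADD_MAPPING,     /* add, not remove */
            .cache_policy = CACHEABILITY_MEMORY,  /* normal memory */
        },
    };
    /* ... issue the domctl hypercall here ... */

With this patch, such a request yields a p2m_ram_rw mapping on ARM and
fails with -ENOSYS on x86.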

Signed-off-by: Stefano Stabellini <stefanos@xilinx.com>
12 files changed:
xen/arch/arm/gic-v2.c
xen/arch/arm/p2m.c
xen/arch/arm/platforms/exynos5.c
xen/arch/arm/platforms/omap5.c
xen/arch/arm/vgic-v2.c
xen/arch/arm/vgic/vgic-v2.c
xen/arch/x86/hvm/dom0_build.c
xen/arch/x86/mm/p2m.c
xen/common/domctl.c
xen/drivers/vpci/header.c
xen/include/public/domctl.h
xen/include/xen/p2m-common.h

diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c
index 256988c665b76fe74ff682c7de45e28ccd5a1066..cfcb8338a62b4fe3973146084f00b558b5f35c6b 100644
@@ -701,7 +701,8 @@ static int gicv2_map_hwdown_extra_mappings(struct domain *d)
 
         ret = map_mmio_regions(d, gaddr_to_gfn(v2m_data->addr),
                                PFN_UP(v2m_data->size),
-                               maddr_to_mfn(v2m_data->addr));
+                               maddr_to_mfn(v2m_data->addr),
+                               CACHEABILITY_DEVMEM);
         if ( ret )
         {
             printk(XENLOG_ERR "GICv2: Map v2m frame to d%d failed.\n",
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index ce59f2b503b29f1e830421b7492de9cd902b88be..034329b74f27fc33f1bd41ed73815ff2f4bc3ac8 100644
@@ -1336,9 +1336,24 @@ int unmap_regions_p2mt(struct domain *d,
 int map_mmio_regions(struct domain *d,
                      gfn_t start_gfn,
                      unsigned long nr,
-                     mfn_t mfn)
+                     mfn_t mfn,
+                     uint32_t cache_policy)
 {
-    return p2m_insert_mapping(d, start_gfn, nr, mfn, p2m_mmio_direct_dev);
+    p2m_type_t t;
+
+    switch ( cache_policy )
+    {
+    case CACHEABILITY_MEMORY:
+        t = p2m_ram_rw;
+        break;
+    case CACHEABILITY_DEVMEM:
+        t = p2m_mmio_direct_dev;
+        break;
+    default:
+        return -ENOSYS;
+    }
+
+    return p2m_insert_mapping(d, start_gfn, nr, mfn, t);
 }
 
 int unmap_mmio_regions(struct domain *d,
diff --git a/xen/arch/arm/platforms/exynos5.c b/xen/arch/arm/platforms/exynos5.c
index 65605070928785b9e7ac2487a02e7af684ace961..3af5fd3478889bf67cea8b4a61e46ec3acea8aae 100644
@@ -83,11 +83,11 @@ static int exynos5250_specific_mapping(struct domain *d)
 {
     /* Map the chip ID */
     map_mmio_regions(d, gaddr_to_gfn(EXYNOS5_PA_CHIPID), 1,
-                     maddr_to_mfn(EXYNOS5_PA_CHIPID));
+                     maddr_to_mfn(EXYNOS5_PA_CHIPID), CACHEABILITY_DEVMEM);
 
     /* Map the PWM region */
     map_mmio_regions(d, gaddr_to_gfn(EXYNOS5_PA_TIMER), 2,
-                     maddr_to_mfn(EXYNOS5_PA_TIMER));
+                     maddr_to_mfn(EXYNOS5_PA_TIMER), CACHEABILITY_DEVMEM);
 
     return 0;
 }
diff --git a/xen/arch/arm/platforms/omap5.c b/xen/arch/arm/platforms/omap5.c
index aee24e4d28b3083fe0083261935e41f91db25819..4899332c237273303aeded5145f31260c0be35e1 100644
@@ -99,19 +99,19 @@ static int omap5_specific_mapping(struct domain *d)
 {
     /* Map the PRM module */
     map_mmio_regions(d, gaddr_to_gfn(OMAP5_PRM_BASE), 2,
-                     maddr_to_mfn(OMAP5_PRM_BASE));
+                     maddr_to_mfn(OMAP5_PRM_BASE), CACHEABILITY_DEVMEM);
 
     /* Map the PRM_MPU */
     map_mmio_regions(d, gaddr_to_gfn(OMAP5_PRCM_MPU_BASE), 1,
-                     maddr_to_mfn(OMAP5_PRCM_MPU_BASE));
+                     maddr_to_mfn(OMAP5_PRCM_MPU_BASE), CACHEABILITY_DEVMEM);
 
     /* Map the Wakeup Gen */
     map_mmio_regions(d, gaddr_to_gfn(OMAP5_WKUPGEN_BASE), 1,
-                     maddr_to_mfn(OMAP5_WKUPGEN_BASE));
+                     maddr_to_mfn(OMAP5_WKUPGEN_BASE), CACHEABILITY_DEVMEM);
 
     /* Map the on-chip SRAM */
     map_mmio_regions(d, gaddr_to_gfn(OMAP5_SRAM_PA), 32,
-                     maddr_to_mfn(OMAP5_SRAM_PA));
+                     maddr_to_mfn(OMAP5_SRAM_PA), CACHEABILITY_DEVMEM);
 
     return 0;
 }
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 64b141fea58602a504f83ad955b8dde69b98e220..9f087c46deabde9bf3fde39bb53b9b1cf7367920 100644
@@ -691,7 +691,7 @@ static int vgic_v2_domain_init(struct domain *d)
      * region of the guest.
      */
     ret = map_mmio_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
-                           maddr_to_mfn(vbase));
+                           maddr_to_mfn(vbase), CACHEABILITY_DEVMEM);
     if ( ret )
         return ret;
 
diff --git a/xen/arch/arm/vgic/vgic-v2.c b/xen/arch/arm/vgic/vgic-v2.c
index b5ba4ace872beffad17f92365a32928b174d65f9..bb305a2f068ce80896e4950c046d59234a03e937 100644
@@ -309,7 +309,7 @@ int vgic_v2_map_resources(struct domain *d)
      * region of the guest.
      */
     ret = map_mmio_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
-                           maddr_to_mfn(vbase));
+                           maddr_to_mfn(vbase), CACHEABILITY_DEVMEM);
     if ( ret )
     {
         gdprintk(XENLOG_ERR, "Unable to remap VGIC CPU to VCPU\n");
diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 831325150b36c7791eba07221e246b23566c7e64..561d339a93e5252514fa4776f42aa917f6b47463 100644
@@ -79,8 +79,11 @@ static int __init modify_identity_mmio(struct domain *d, unsigned long pfn,
 
     for ( ; ; )
     {
-        rc = map ?   map_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn))
-                 : unmap_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn));
+        if ( map )
+            rc = map_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn),
+                                  CACHEABILITY_DEVMEM);
+        else
+            rc = unmap_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn));
         if ( rc == 0 )
             break;
         if ( rc < 0 )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index ba126f790a13ca49c027e98e40ba75a487a75502..6aa9fbd02c343cbc7549a9dca8e69220d3c6a533 100644
@@ -2285,12 +2285,16 @@ static unsigned int mmio_order(const struct domain *d,
 int map_mmio_regions(struct domain *d,
                      gfn_t start_gfn,
                      unsigned long nr,
-                     mfn_t mfn)
+                     mfn_t mfn,
+                     uint32_t cache_policy)
 {
     int ret = 0;
     unsigned long i;
     unsigned int iter, order;
 
+    if ( cache_policy != CACHEABILITY_DEVMEM )
+        return -ENOSYS;
+
     if ( !paging_mode_translate(d) )
         return 0;
 
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 03d022603979c1e6771362b64d392b6248460de8..c6ce493b74522162f8be913c144a8902041d8afd 100644
@@ -926,6 +926,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         unsigned long nr_mfns = op->u.memory_mapping.nr_mfns;
         unsigned long mfn_end = mfn + nr_mfns - 1;
         int add = op->u.memory_mapping.add_mapping;
+        uint32_t cache_policy = op->u.memory_mapping.cache_policy;
 
         ret = -EINVAL;
         if ( mfn_end < mfn || /* wrap? */
@@ -952,10 +953,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         if ( add )
         {
             printk(XENLOG_G_DEBUG
-                   "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
-                   d->domain_id, gfn, mfn, nr_mfns);
+                   "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx cache=%u\n",
+                   d->domain_id, gfn, mfn, nr_mfns, cache_policy);
 
-            ret = map_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn));
+            ret = map_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn),
+                                   cache_policy);
             if ( ret < 0 )
                 printk(XENLOG_G_WARNING
                        "memory_map:fail: dom%d gfn=%lx mfn=%lx nr=%lx ret:%ld\n",
diff --git a/xen/drivers/vpci/header.c b/xen/drivers/vpci/header.c
index 3c794f486d653329e59df68e8ebdbc5642f49aaa..ba772a4a3579ba6d910a850fe143b632ccddea33 100644
@@ -52,7 +52,8 @@ static int map_range(unsigned long s, unsigned long e, void *data,
          * - {un}map_mmio_regions doesn't support preemption.
          */
 
-        rc = map->map ? map_mmio_regions(map->d, _gfn(s), size, _mfn(s))
+        rc = map->map ? map_mmio_regions(map->d, _gfn(s), size, _mfn(s),
+                                         CACHEABILITY_DEVMEM)
                       : unmap_mmio_regions(map->d, _gfn(s), size, _mfn(s));
         if ( rc == 0 )
         {
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index e313da499f6ab9255315120afa8298d82ce8f3f5..c44492fef96d7862408f7846a39389fcb7c9a9e9 100644
@@ -592,12 +592,14 @@ struct xen_domctl_bind_pt_irq {
 */
 #define DPCI_ADD_MAPPING         1
 #define DPCI_REMOVE_MAPPING      0
+#define CACHEABILITY_DEVMEM      0 /* device memory, the default */
+#define CACHEABILITY_MEMORY      1 /* normal memory */
 struct xen_domctl_memory_mapping {
     uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */
     uint64_aligned_t first_mfn; /* first page (machine page) in range */
     uint64_aligned_t nr_mfns;   /* number of pages in range (>0) */
     uint32_t add_mapping;       /* add or remove mapping */
-    uint32_t padding;           /* padding for 64-bit aligned structure */
+    uint32_t cache_policy;      /* cacheability of the memory mapping */
 };
 
 
diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
index 58031a6ea83d65984ec8521ce71af8f7b8b22fc3..1c945a1e0ca49411372abea811d9050d508b05c8 100644
@@ -14,7 +14,8 @@ guest_physmap_remove_page(struct domain *d, gfn_t gfn, mfn_t mfn,
 int map_mmio_regions(struct domain *d,
                      gfn_t start_gfn,
                      unsigned long nr,
-                     mfn_t mfn);
+                     mfn_t mfn,
+                     uint32_t cache_policy);
 int unmap_mmio_regions(struct domain *d,
                        gfn_t start_gfn,
                        unsigned long nr,