xen: extend XEN_DOMCTL_memory_mapping to handle cacheability
author    Stefano Stabellini <sstabellini@kernel.org>
          Tue, 26 Feb 2019 23:05:52 +0000 (15:05 -0800)
committer Stefano Stabellini <sstabellini@kernel.org>
          Tue, 26 Feb 2019 23:05:52 +0000 (15:05 -0800)
Reuse the existing padding field to pass cacheability information about
the memory mapping: specifically, whether the memory should be mapped as
normal memory or as device memory (the only behavior available today).
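
As an illustration, a toolstack consumer could fill in the extended
structure like this. This is a minimal caller-side sketch, assuming the
public domctl header is available; map_normal_memory() and do_domctl()
are hypothetical names standing in for whichever hypercall wrapper the
caller uses:

    /* Sketch: map nr pages of normal (cacheable) memory into a guest
     * via XEN_DOMCTL_memory_mapping, using the new cache_policy field. */
    int map_normal_memory(domid_t domid, uint64_t gfn, uint64_t mfn,
                          uint64_t nr)
    {
        struct xen_domctl domctl = {
            .cmd = XEN_DOMCTL_memory_mapping,
            .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
            .domain = domid,
            .u.memory_mapping = {
                .first_gfn = gfn,
                .first_mfn = mfn,
                .nr_mfns = nr,
                .add_mapping = DPCI_ADD_MAPPING,
                .cache_policy = CACHEABILITY_MEMORY, /* reused padding field */
            },
        };

        return do_domctl(&domctl); /* assumed hypercall-issuing wrapper */
    }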

Add a cacheability parameter to map_mmio_regions; 0 means device
memory, which preserves today's behavior.
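
For in-hypervisor callers the conversion is mechanical: every existing
call site requests device memory explicitly, as in this sketch (addr
and nr_pages are placeholders):

    /* Preserve today's behavior: request a device-memory mapping. */
    ret = map_mmio_regions(d, gaddr_to_gfn(addr), nr_pages,
                           maddr_to_mfn(addr), CACHEABILITY_DEVMEM);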

On ARM, map device memory as p2m_mmio_direct_dev (as is already done
today) and normal memory as p2m_ram_rw.

On x86, return an error if the requested cacheability is anything other
than device memory.

Signed-off-by: Stefano Stabellini <stefanos@xilinx.com>
CC: JBeulich@suse.com
CC: andrew.cooper3@citrix.com
12 files changed:
xen/arch/arm/gic-v2.c
xen/arch/arm/p2m.c
xen/arch/arm/platforms/exynos5.c
xen/arch/arm/platforms/omap5.c
xen/arch/arm/vgic-v2.c
xen/arch/arm/vgic/vgic-v2.c
xen/arch/x86/hvm/dom0_build.c
xen/arch/x86/mm/p2m.c
xen/common/domctl.c
xen/drivers/vpci/header.c
xen/include/public/domctl.h
xen/include/xen/p2m-common.h

diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c
index e7eb01f30aa705d0616b4fb499d1a280c39d65c0..1ea3da21b4ec3ee7bfdec2c0b7c33574427ea738 100644
--- a/xen/arch/arm/gic-v2.c
+++ b/xen/arch/arm/gic-v2.c
@@ -690,7 +690,8 @@ static int gicv2_map_hwdown_extra_mappings(struct domain *d)
 
         ret = map_mmio_regions(d, gaddr_to_gfn(v2m_data->addr),
                                PFN_UP(v2m_data->size),
-                               maddr_to_mfn(v2m_data->addr));
+                               maddr_to_mfn(v2m_data->addr),
+                               CACHEABILITY_DEVMEM);
         if ( ret )
         {
             printk(XENLOG_ERR "GICv2: Map v2m frame to d%d failed.\n",
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 30cfb014984f77cfcb21f0d68cefb9559265b33b..5b8fcc59f5809b40b1262b0e582926fc5fa8abab 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1068,9 +1068,24 @@ int unmap_regions_p2mt(struct domain *d,
 int map_mmio_regions(struct domain *d,
                      gfn_t start_gfn,
                      unsigned long nr,
-                     mfn_t mfn)
+                     mfn_t mfn,
+                     uint32_t cache_policy)
 {
-    return p2m_insert_mapping(d, start_gfn, nr, mfn, p2m_mmio_direct_dev);
+    p2m_type_t t;
+
+    switch ( cache_policy )
+    {
+    case CACHEABILITY_MEMORY:
+        t = p2m_ram_rw;
+        break;
+    case CACHEABILITY_DEVMEM:
+        t = p2m_mmio_direct_dev;
+        break;
+    default:
+        return -ENOSYS;
+    }
+
+    return p2m_insert_mapping(d, start_gfn, nr, mfn, t);
 }
 
 int unmap_mmio_regions(struct domain *d,
diff --git a/xen/arch/arm/platforms/exynos5.c b/xen/arch/arm/platforms/exynos5.c
index 65605070928785b9e7ac2487a02e7af684ace961..3af5fd3478889bf67cea8b4a61e46ec3acea8aae 100644
--- a/xen/arch/arm/platforms/exynos5.c
+++ b/xen/arch/arm/platforms/exynos5.c
@@ -83,11 +83,11 @@ static int exynos5250_specific_mapping(struct domain *d)
 {
     /* Map the chip ID */
     map_mmio_regions(d, gaddr_to_gfn(EXYNOS5_PA_CHIPID), 1,
-                     maddr_to_mfn(EXYNOS5_PA_CHIPID));
+                     maddr_to_mfn(EXYNOS5_PA_CHIPID), CACHEABILITY_DEVMEM);
 
     /* Map the PWM region */
     map_mmio_regions(d, gaddr_to_gfn(EXYNOS5_PA_TIMER), 2,
-                     maddr_to_mfn(EXYNOS5_PA_TIMER));
+                     maddr_to_mfn(EXYNOS5_PA_TIMER), CACHEABILITY_DEVMEM);
 
     return 0;
 }
diff --git a/xen/arch/arm/platforms/omap5.c b/xen/arch/arm/platforms/omap5.c
index aee24e4d28b3083fe0083261935e41f91db25819..4899332c237273303aeded5145f31260c0be35e1 100644
--- a/xen/arch/arm/platforms/omap5.c
+++ b/xen/arch/arm/platforms/omap5.c
@@ -99,19 +99,19 @@ static int omap5_specific_mapping(struct domain *d)
 {
     /* Map the PRM module */
     map_mmio_regions(d, gaddr_to_gfn(OMAP5_PRM_BASE), 2,
-                     maddr_to_mfn(OMAP5_PRM_BASE));
+                     maddr_to_mfn(OMAP5_PRM_BASE), CACHEABILITY_DEVMEM);
 
     /* Map the PRM_MPU */
     map_mmio_regions(d, gaddr_to_gfn(OMAP5_PRCM_MPU_BASE), 1,
-                     maddr_to_mfn(OMAP5_PRCM_MPU_BASE));
+                     maddr_to_mfn(OMAP5_PRCM_MPU_BASE), CACHEABILITY_DEVMEM);
 
     /* Map the Wakeup Gen */
     map_mmio_regions(d, gaddr_to_gfn(OMAP5_WKUPGEN_BASE), 1,
-                     maddr_to_mfn(OMAP5_WKUPGEN_BASE));
+                     maddr_to_mfn(OMAP5_WKUPGEN_BASE), CACHEABILITY_DEVMEM);
 
     /* Map the on-chip SRAM */
     map_mmio_regions(d, gaddr_to_gfn(OMAP5_SRAM_PA), 32,
-                     maddr_to_mfn(OMAP5_SRAM_PA));
+                     maddr_to_mfn(OMAP5_SRAM_PA), CACHEABILITY_DEVMEM);
 
     return 0;
 }
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index bf77899dbafffea122d5663a4c6cd39e8361052c..2c9c15cf2ea059e840b46ef444deb235c5e7a78e 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -691,7 +691,7 @@ static int vgic_v2_domain_init(struct domain *d)
      * region of the guest.
      */
     ret = map_mmio_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
-                           maddr_to_mfn(vbase));
+                           maddr_to_mfn(vbase), CACHEABILITY_DEVMEM);
     if ( ret )
         return ret;
 
diff --git a/xen/arch/arm/vgic/vgic-v2.c b/xen/arch/arm/vgic/vgic-v2.c
index b5ba4ace872beffad17f92365a32928b174d65f9..bb305a2f068ce80896e4950c046d59234a03e937 100644
--- a/xen/arch/arm/vgic/vgic-v2.c
+++ b/xen/arch/arm/vgic/vgic-v2.c
@@ -309,7 +309,7 @@ int vgic_v2_map_resources(struct domain *d)
      * region of the guest.
      */
     ret = map_mmio_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
-                           maddr_to_mfn(vbase));
+                           maddr_to_mfn(vbase), CACHEABILITY_DEVMEM);
     if ( ret )
     {
         gdprintk(XENLOG_ERR, "Unable to remap VGIC CPU to VCPU\n");
diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 3e29cd30b846a7689c791055b18a6c24f2d5ac10..3058560287a083df61559ceeb5a4a6bd352e345b 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -67,8 +67,11 @@ static int __init modify_identity_mmio(struct domain *d, unsigned long pfn,
 
     for ( ; ; )
     {
-        rc = (map ? map_mmio_regions : unmap_mmio_regions)
-             (d, _gfn(pfn), nr_pages, _mfn(pfn));
+        if ( map )
+            rc = map_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn),
+                                  CACHEABILITY_DEVMEM);
+        else
+            rc = unmap_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn));
         if ( rc == 0 )
             break;
         if ( rc < 0 )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 4bdc5e34e053da1928a3ad5db748b5dad5183d95..0bbb2a48feca08ee6bd92fb889aa909ebd152da8 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2110,12 +2110,16 @@ static unsigned int mmio_order(const struct domain *d,
 int map_mmio_regions(struct domain *d,
                      gfn_t start_gfn,
                      unsigned long nr,
-                     mfn_t mfn)
+                     mfn_t mfn,
+                     uint32_t cache_policy)
 {
     int ret = 0;
     unsigned long i;
     unsigned int iter, order;
 
+    if ( cache_policy != CACHEABILITY_DEVMEM )
+        return -ENOSYS;
+
     if ( !paging_mode_translate(d) )
         return 0;
 
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index b2948814aa0d2b60975fffb8ae0eed7344a4239f..f4a7c16ccaddf352671fea7973d6cb96b17dc521 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -935,6 +935,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         unsigned long nr_mfns = op->u.memory_mapping.nr_mfns;
         unsigned long mfn_end = mfn + nr_mfns - 1;
         int add = op->u.memory_mapping.add_mapping;
+        uint32_t cache_policy = op->u.memory_mapping.cache_policy;
 
         ret = -EINVAL;
         if ( mfn_end < mfn || /* wrap? */
@@ -961,10 +962,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         if ( add )
         {
             printk(XENLOG_G_DEBUG
-                   "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
-                   d->domain_id, gfn, mfn, nr_mfns);
+                   "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx cache=%u\n",
+                   d->domain_id, gfn, mfn, nr_mfns, cache_policy);
 
-            ret = map_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn));
+            ret = map_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn),
+                                   cache_policy);
             if ( ret < 0 )
                 printk(XENLOG_G_WARNING
                        "memory_map:fail: dom%d gfn=%lx mfn=%lx nr=%lx ret:%ld\n",
diff --git a/xen/drivers/vpci/header.c b/xen/drivers/vpci/header.c
index 4573ccadf0a83f00ea03f5f061e957efc783276b..f968c2dc1d2162e9747c70547368f4969b6b9fe8 100644
--- a/xen/drivers/vpci/header.c
+++ b/xen/drivers/vpci/header.c
@@ -52,7 +52,8 @@ static int map_range(unsigned long s, unsigned long e, void *data,
          * - {un}map_mmio_regions doesn't support preemption.
          */
 
-        rc = map->map ? map_mmio_regions(map->d, _gfn(s), size, _mfn(s))
+        rc = map->map ? map_mmio_regions(map->d, _gfn(s), size, _mfn(s),
+                                         CACHEABILITY_DEVMEM)
                       : unmap_mmio_regions(map->d, _gfn(s), size, _mfn(s));
         if ( rc == 0 )
         {
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 4a46c28eed59abf9cf365ea3f02b77622db5d599..82704dd069b608a2c3efe2682c97476eed2edbbd 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -571,12 +571,14 @@ struct xen_domctl_bind_pt_irq {
 */
 #define DPCI_ADD_MAPPING         1
 #define DPCI_REMOVE_MAPPING      0
+#define CACHEABILITY_DEVMEM      0 /* device memory, the default */
+#define CACHEABILITY_MEMORY      1 /* normal memory */
 struct xen_domctl_memory_mapping {
     uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */
     uint64_aligned_t first_mfn; /* first page (machine page) in range */
     uint64_aligned_t nr_mfns;   /* number of pages in range (>0) */
     uint32_t add_mapping;       /* add or remove mapping */
-    uint32_t padding;           /* padding for 64-bit aligned structure */
+    uint32_t cache_policy;      /* cacheability of the memory mapping */
 };
 
 
diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
index 58031a6ea83d65984ec8521ce71af8f7b8b22fc3..1c945a1e0ca49411372abea811d9050d508b05c8 100644
--- a/xen/include/xen/p2m-common.h
+++ b/xen/include/xen/p2m-common.h
@@ -14,7 +14,8 @@ guest_physmap_remove_page(struct domain *d, gfn_t gfn, mfn_t mfn,
 int map_mmio_regions(struct domain *d,
                      gfn_t start_gfn,
                      unsigned long nr,
-                     mfn_t mfn);
+                     mfn_t mfn,
+                     uint32_t cache_policy);
 int unmap_mmio_regions(struct domain *d,
                        gfn_t start_gfn,
                        unsigned long nr,