ret = map_mmio_regions(d, gaddr_to_gfn(v2m_data->addr),
PFN_UP(v2m_data->size),
- maddr_to_mfn(v2m_data->addr));
+ maddr_to_mfn(v2m_data->addr),
+ CACHEABILITY_DEVMEM);
if ( ret )
{
printk(XENLOG_ERR "GICv2: Map v2m frame to d%d failed.\n",
int map_mmio_regions(struct domain *d,
gfn_t start_gfn,
unsigned long nr,
- mfn_t mfn)
+ mfn_t mfn,
+ uint32_t cache_policy)
{
- return p2m_insert_mapping(d, start_gfn, nr, mfn, p2m_mmio_direct_dev);
+ p2m_type_t t;
+
+ switch ( cache_policy )
+ {
+ case CACHEABILITY_MEMORY:
+ t = p2m_ram_rw;
+ break;
+ case CACHEABILITY_DEVMEM:
+ t = p2m_mmio_direct_dev;
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return p2m_insert_mapping(d, start_gfn, nr, mfn, t);
}
int unmap_mmio_regions(struct domain *d,
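With the cacheability parameter in place on Arm, CACHEABILITY_MEMORY selects a p2m_ram_rw mapping, CACHEABILITY_DEVMEM keeps the previous p2m_mmio_direct_dev behaviour, and any other value is rejected with -ENOSYS. As a minimal sketch (not part of this patch; the shm_* names are purely illustrative), a caller wanting a cacheable mapping would now do:

    /* Hypothetical caller: map a host memory region into the guest as
     * normal, cacheable memory rather than device memory. */
    ret = map_mmio_regions(d, gaddr_to_gfn(shm_gaddr),
                           PFN_UP(shm_size),
                           maddr_to_mfn(shm_maddr),
                           CACHEABILITY_MEMORY);
    if ( ret )
        printk(XENLOG_ERR "Failed to map cacheable region for d%d\n",
               d->domain_id);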
{
/* Map the chip ID */
map_mmio_regions(d, gaddr_to_gfn(EXYNOS5_PA_CHIPID), 1,
- maddr_to_mfn(EXYNOS5_PA_CHIPID));
+ maddr_to_mfn(EXYNOS5_PA_CHIPID), CACHEABILITY_DEVMEM);
/* Map the PWM region */
map_mmio_regions(d, gaddr_to_gfn(EXYNOS5_PA_TIMER), 2,
- maddr_to_mfn(EXYNOS5_PA_TIMER));
+ maddr_to_mfn(EXYNOS5_PA_TIMER), CACHEABILITY_DEVMEM);
return 0;
}
{
/* Map the PRM module */
map_mmio_regions(d, gaddr_to_gfn(OMAP5_PRM_BASE), 2,
- maddr_to_mfn(OMAP5_PRM_BASE));
+ maddr_to_mfn(OMAP5_PRM_BASE), CACHEABILITY_DEVMEM);
/* Map the PRM_MPU */
map_mmio_regions(d, gaddr_to_gfn(OMAP5_PRCM_MPU_BASE), 1,
- maddr_to_mfn(OMAP5_PRCM_MPU_BASE));
+ maddr_to_mfn(OMAP5_PRCM_MPU_BASE), CACHEABILITY_DEVMEM);
/* Map the Wakeup Gen */
map_mmio_regions(d, gaddr_to_gfn(OMAP5_WKUPGEN_BASE), 1,
- maddr_to_mfn(OMAP5_WKUPGEN_BASE));
+ maddr_to_mfn(OMAP5_WKUPGEN_BASE), CACHEABILITY_DEVMEM);
/* Map the on-chip SRAM */
map_mmio_regions(d, gaddr_to_gfn(OMAP5_SRAM_PA), 32,
- maddr_to_mfn(OMAP5_SRAM_PA));
+ maddr_to_mfn(OMAP5_SRAM_PA), CACHEABILITY_DEVMEM);
return 0;
}
* region of the guest.
*/
ret = map_mmio_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
- maddr_to_mfn(vbase));
+ maddr_to_mfn(vbase), CACHEABILITY_DEVMEM);
if ( ret )
return ret;
* region of the guest.
*/
ret = map_mmio_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
- maddr_to_mfn(vbase));
+ maddr_to_mfn(vbase), CACHEABILITY_DEVMEM);
if ( ret )
{
gdprintk(XENLOG_ERR, "Unable to remap VGIC CPU to VCPU\n");
for ( ; ; )
{
- rc = map ? map_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn))
- : unmap_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn));
+ if ( map )
+ rc = map_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn),
+ CACHEABILITY_DEVMEM);
+ else
+ rc = unmap_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn));
if ( rc == 0 )
break;
if ( rc < 0 )
int map_mmio_regions(struct domain *d,
gfn_t start_gfn,
unsigned long nr,
- mfn_t mfn)
+ mfn_t mfn,
+ uint32_t cache_policy)
{
int ret = 0;
unsigned long i;
unsigned int iter, order;
+ if ( cache_policy != CACHEABILITY_DEVMEM )
+ return -ENOSYS;
+
if ( !paging_mode_translate(d) )
return 0;
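On x86 the new parameter is only validated for now: CACHEABILITY_DEVMEM preserves the existing behaviour, while anything else fails with -ENOSYS before the paging-mode check, so common code can pass the policy through unchanged. A hedged, purely illustrative sketch of how a caller could cope with that restriction (not part of this patch):

    /* Hypothetical fallback: prefer a cacheable mapping, retry with the
     * default device-memory policy if the architecture rejects it. */
    ret = map_mmio_regions(d, start_gfn, nr, mfn, CACHEABILITY_MEMORY);
    if ( ret == -ENOSYS )
        ret = map_mmio_regions(d, start_gfn, nr, mfn, CACHEABILITY_DEVMEM);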
unsigned long nr_mfns = op->u.memory_mapping.nr_mfns;
unsigned long mfn_end = mfn + nr_mfns - 1;
int add = op->u.memory_mapping.add_mapping;
+ uint32_t cache_policy = op->u.memory_mapping.cache_policy;
ret = -EINVAL;
if ( mfn_end < mfn || /* wrap? */
if ( add )
{
printk(XENLOG_G_DEBUG
- "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
- d->domain_id, gfn, mfn, nr_mfns);
+ "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx cache=%u\n",
+ d->domain_id, gfn, mfn, nr_mfns, cache_policy);
- ret = map_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn));
+ ret = map_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn),
+ cache_policy);
if ( ret < 0 )
printk(XENLOG_G_WARNING
"memory_map:fail: dom%d gfn=%lx mfn=%lx nr=%lx ret:%ld\n",
* - {un}map_mmio_regions doesn't support preemption.
*/
- rc = map->map ? map_mmio_regions(map->d, _gfn(s), size, _mfn(s))
+ rc = map->map ? map_mmio_regions(map->d, _gfn(s), size, _mfn(s),
+ CACHEABILITY_DEVMEM)
: unmap_mmio_regions(map->d, _gfn(s), size, _mfn(s));
if ( rc == 0 )
{
*/
#define DPCI_ADD_MAPPING 1
#define DPCI_REMOVE_MAPPING 0
+#define CACHEABILITY_DEVMEM 0 /* device memory, the default */
+#define CACHEABILITY_MEMORY 1 /* normal memory */
struct xen_domctl_memory_mapping {
uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */
uint64_aligned_t first_mfn; /* first page (machine page) in range */
uint64_aligned_t nr_mfns; /* number of pages in range (>0) */
uint32_t add_mapping; /* add or remove mapping */
- uint32_t padding; /* padding for 64-bit aligned structure */
+ uint32_t cache_policy; /* cacheability of the memory mapping */
};
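At the interface level, the former padding field of xen_domctl_memory_mapping is reused to carry the cacheability, so existing callers that zero the structure keep the CACHEABILITY_DEVMEM default. A rough toolstack-side sketch of issuing the domctl with the new field, assuming the hypercall is built by hand via libxc's internal do_domctl() helper (the xch, domid, gfn, mfn and nr_mfns variables are the caller's own):

    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_memory_mapping,
        .domain = domid,
        .u.memory_mapping = {
            .first_gfn    = gfn,
            .first_mfn    = mfn,
            .nr_mfns      = nr_mfns,
            .add_mapping  = DPCI_ADD_MAPPING,
            .cache_policy = CACHEABILITY_MEMORY, /* new field */
        },
    };

    rc = do_domctl(xch, &domctl); /* rejected where only DEVMEM is implemented */

In practice the existing xc_domain_memory_mapping() wrapper would grow a cache_policy argument instead of open-coding the domctl as above.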
int map_mmio_regions(struct domain *d,
gfn_t start_gfn,
unsigned long nr,
- mfn_t mfn);
+ mfn_t mfn,
+ uint32_t cache_policy);
int unmap_mmio_regions(struct domain *d,
gfn_t start_gfn,
unsigned long nr,