* to the mmio handler.
*/
if ( (p2mt == p2m_mmio_dm) ||
- (npfec.write_access && (p2mt == p2m_ram_ro)) )
+ (npfec.write_access && (p2m_is_discard_write(p2mt))) )
{
put_gfn(p2m->domain, gfn);
goto out_put_gfn;
}
- /* Shouldn't happen: Maybe the guest was writing to a r/o grant mapping? */
- if ( npfec.write_access && (p2mt == p2m_grant_map_ro) )
- {
- gdprintk(XENLOG_WARNING,
- "trying to write to read-only grant mapping\n");
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
- rc = 1;
- goto out_put_gfn;
- }
-
/* If we fell through, the vcpu will retry now that access restrictions have
* been removed. It may fault again if the p2m entry type still requires so.
* Otherwise, this is an error condition. */
if ( flags & HVMCOPY_to_guest )
{
- if ( p2mt == p2m_ram_ro )
+ if ( p2m_is_discard_write(p2mt) )
{
static unsigned long lastpage;
if ( xchg(&lastpage, gfn) != gfn )
p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
- if ( p2mt == p2m_ram_ro )
+ if ( p2m_is_discard_write(p2mt) )
{
static unsigned long lastpage;
if ( xchg(&lastpage, gfn) != gfn )
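For reference, here is a minimal standalone sketch (not the Xen implementation; the names and types are stand-ins) of the behaviour the two copy-path hunks above converge on: when the destination frame is a discard-write type, the data is not copied, and a debug message is emitted at most once per distinct gfn via an atomic exchange on a static variable, mirroring the xchg(&lastpage, gfn) pattern visible above.

#include <stdio.h>
#include <string.h>
#include <stdatomic.h>

/* Stand-in classification: in Xen this is p2m_is_discard_write(p2mt). */
enum frame_kind { FRAME_RW, FRAME_RO };

static void copy_to_guest_frame(void *dst, const void *src, size_t count,
                                unsigned long gfn, enum frame_kind kind)
{
    if ( kind == FRAME_RO )
    {
        /*
         * Discard the write.  Log at most once per distinct gfn: the atomic
         * exchange returns the previously stored gfn, so repeated writes to
         * the same page stay quiet.
         */
        static _Atomic unsigned long lastpage;

        if ( atomic_exchange(&lastpage, gfn) != gfn )
            fprintf(stderr,
                    "guest attempted write to read-only page, gfn=%#lx\n",
                    gfn);
        return;
    }

    memcpy(dst, src, count);    /* ordinary copy path */
}

int main(void)
{
    char page[16] = "", data[4] = "abc";

    copy_to_guest_frame(page, data, sizeof(data), 0x1234, FRAME_RO); /* dropped */
    copy_to_guest_frame(page, data, sizeof(data), 0x1234, FRAME_RW); /* copied  */
    printf("%s\n", page);
    return 0;
}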
| p2m_to_mask(p2m_grant_map_ro) \
| p2m_to_mask(p2m_ram_shared) )
+/* Write-discard types, for which write operations should be silently discarded */
+#define P2M_DISCARD_WRITE_TYPES (p2m_to_mask(p2m_ram_ro) \
+ | p2m_to_mask(p2m_grant_map_ro))
+
/* Types that can be subject to bulk transitions. */
#define P2M_CHANGEABLE_TYPES (p2m_to_mask(p2m_ram_rw) \
| p2m_to_mask(p2m_ram_logdirty) )
#define p2m_is_hole(_t) (p2m_to_mask(_t) & P2M_HOLE_TYPES)
#define p2m_is_mmio(_t) (p2m_to_mask(_t) & P2M_MMIO_TYPES)
#define p2m_is_readonly(_t) (p2m_to_mask(_t) & P2M_RO_TYPES)
+#define p2m_is_discard_write(_t) (p2m_to_mask(_t) & P2M_DISCARD_WRITE_TYPES)
#define p2m_is_changeable(_t) (p2m_to_mask(_t) & P2M_CHANGEABLE_TYPES)
#define p2m_is_pod(_t) (p2m_to_mask(_t) & P2M_POD_TYPES)
#define p2m_is_grant(_t) (p2m_to_mask(_t) & P2M_GRANT_TYPES)
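The predicate added above is just a bit test: p2m_to_mask() turns a p2m type into a single bit, P2M_DISCARD_WRITE_TYPES ORs together the bits for p2m_ram_ro and p2m_grant_map_ro, and p2m_is_discard_write() ANDs the type's bit against that mask. A compilable toy version of the same pattern follows (stand-in enum values and _sk-suffixed names, not the real p2m.h definitions):

#include <assert.h>

typedef enum {
    p2m_ram_rw_sk,
    p2m_ram_ro_sk,
    p2m_grant_map_ro_sk,
    p2m_mmio_dm_sk,
} p2m_type_sk;

#define p2m_to_mask_sk(t)          (1UL << (t))

#define P2M_DISCARD_WRITE_SK       (p2m_to_mask_sk(p2m_ram_ro_sk) | \
                                    p2m_to_mask_sk(p2m_grant_map_ro_sk))

/* Class membership is a single AND against the precomputed mask. */
#define p2m_is_discard_write_sk(t) (p2m_to_mask_sk(t) & P2M_DISCARD_WRITE_SK)

int main(void)
{
    /* Writes to either read-only type are classified as "discard". */
    assert(p2m_is_discard_write_sk(p2m_ram_ro_sk));
    assert(p2m_is_discard_write_sk(p2m_grant_map_ro_sk));
    assert(!p2m_is_discard_write_sk(p2m_ram_rw_sk));
    return 0;
}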