* has failed (error case).
* So, at worst, a spurious mapcache invalidation might be sent.
*/
- if ( (p2m->domain == current->domain) &&
- domain_has_ioreq_server(p2m->domain) &&
- p2m_is_ram(entry.p2m.type) )
- p2m->domain->mapcache_invalidate = true;
+ if ( p2m_is_ram(entry.p2m.type) &&
+ domain_has_ioreq_server(p2m->domain) )
+ ioreq_request_mapcache_invalidate(p2m->domain);
#endif
p2m->stats.mappings[level]--;
* Note that sending the invalidation request causes the vCPU to block
* until all the IOREQ servers have acknowledged the invalidation.
*/
- if ( unlikely(curr->domain->mapcache_invalidate) &&
- test_and_clear_bool(curr->domain->mapcache_invalidate) )
+ if ( unlikely(curr->mapcache_invalidate) &&
+ test_and_clear_bool(curr->mapcache_invalidate) )
ioreq_signal_mapcache_invalidate();
#endif
}
static long hvm_memory_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
- const struct vcpu *curr = current;
long rc;
switch ( cmd & MEMOP_CMD_MASK )
return -ENOSYS;
}
- if ( !curr->hcall_compat )
+ if ( !current->hcall_compat )
rc = do_memory_op(cmd, arg);
else
rc = compat_memory_op(cmd, arg);
- if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation )
- curr->domain->mapcache_invalidate = true;
-
return rc;
}
HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%lu -> %lx", eax, regs->rax);
- if ( unlikely(currd->mapcache_invalidate) &&
- test_and_clear_bool(currd->mapcache_invalidate) )
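+ /*
+  * If a mapcache invalidation was requested for this vCPU while the
+  * hypercall was being handled (e.g. guest RAM was removed from the
+  * p2m), send the invalidation request to the IOREQ servers before
+  * returning to the guest.
+  */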
+ if ( unlikely(curr->mapcache_invalidate) )
+ {
+ curr->mapcache_invalidate = false;
ioreq_signal_mapcache_invalidate();
+ }
return curr->hcall_preempted ? HVM_HCALL_preempted : HVM_HCALL_completed;
}
*/
#include <xen/event.h>
+#include <xen/ioreq.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/trace.h>
set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
p2m_pod_cache_add(p2m, page, cur_order);
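+ /*
+  * The page has just been removed from the guest's p2m and returned to
+  * the PoD cache, so request a mapcache invalidation for any emulator
+  * mappings that may still reference it.
+  */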
+ ioreq_request_mapcache_invalidate(d);
+
steal_for_cache = ( p2m->pod.entry_count > p2m->pod.count );
ram -= n;
p2m_pod_cache_add(p2m, mfn_to_page(mfn0), PAGE_ORDER_2M);
p2m->pod.entry_count += SUPERPAGE_PAGES;
+ ioreq_request_mapcache_invalidate(d);
+
ret = SUPERPAGE_PAGES;
out_reset:
/* Add to cache, and account for the new p2m PoD entry */
p2m_pod_cache_add(p2m, mfn_to_page(mfns[i]), PAGE_ORDER_4K);
p2m->pod.entry_count++;
+
+ ioreq_request_mapcache_invalidate(d);
}
}
p2m->pod.entry_count -= pod_count;
BUG_ON(p2m->pod.entry_count < 0);
pod_unlock(p2m);
+
+ ioreq_request_mapcache_invalidate(d);
}
out:
#include <xen/vm_event.h>
#include <xen/event.h>
#include <xen/grant_table.h>
+#include <xen/ioreq.h>
#include <xen/param.h>
#include <public/vm_event.h>
#include <asm/domain.h>
}
}
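+ /*
+  * The GFN range is about to be marked as invalid in the p2m, so any
+  * emulator mappings of it become stale and need to be invalidated.
+  */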
+ ioreq_request_mapcache_invalidate(p2m->domain);
+
return p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, p2m_invalid,
p2m->default_access);
}
ASSERT(mfn_valid(mfn_add(omfn, i)));
set_gpfn_from_mfn(mfn_x(omfn) + i, INVALID_M2P_ENTRY);
}
+
+ ioreq_request_mapcache_invalidate(d);
}
P2M_DEBUG("set %d %lx %lx\n", gfn_p2mt, gfn_l, mfn_x(mfn));
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>
+void ioreq_request_mapcache_invalidate(const struct domain *d)
+{
+ struct vcpu *v = current;
+
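+ /*
+  * When the target is the current vCPU's own domain, only the current
+  * vCPU is flagged: the request will be sent from its hypercall / I/O
+  * completion path. For a foreign domain, all of its vCPUs are flagged,
+  * but only once the domain has been fully created.
+  */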
+ if ( d == v->domain )
+ v->mapcache_invalidate = true;
+ else if ( d->creation_finished )
+ for_each_vcpu ( d, v )
+ v->mapcache_invalidate = true;
+}
+
/* Ask ioemu mapcache to invalidate mappings. */
void ioreq_signal_mapcache_invalidate(void)
{
struct ioreq_server *s;
struct ioreq_vcpu *sv;
enum vio_completion completion;
+ bool res = true;
if ( has_vpci(d) && vpci_process_pending(v) )
{
break;
case VIO_mmio_completion:
- return arch_ioreq_complete_mmio();
+ res = arch_ioreq_complete_mmio();
+ break;
case VIO_pio_completion:
- return handle_pio(vio->req.addr, vio->req.size,
- vio->req.dir);
+ res = handle_pio(vio->req.addr, vio->req.size,
+ vio->req.dir);
+ break;
default:
- return arch_vcpu_ioreq_completion(completion);
+ res = arch_vcpu_ioreq_completion(completion);
+ break;
}
- return true;
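+ /*
+  * If the completion was handled successfully and a mapcache
+  * invalidation is pending for this vCPU, send the request now and
+  * return false, as the vCPU still has to wait for all the IOREQ
+  * servers to acknowledge the invalidation.
+  */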
+ if ( res && unlikely(v->mapcache_invalidate) )
+ {
+ v->mapcache_invalidate = false;
+ ioreq_signal_mapcache_invalidate();
+ res = false;
+ }
+
+ return res;
}
static int ioreq_server_alloc_mfn(struct ioreq_server *s, bool buf)
int ioreq_send(struct ioreq_server *s, ioreq_t *proto_p,
bool buffered);
unsigned int ioreq_broadcast(ioreq_t *p, bool buffered);
+void ioreq_request_mapcache_invalidate(const struct domain *d);
void ioreq_signal_mapcache_invalidate(void);
void ioreq_domain_init(struct domain *d);
bool hcall_compat;
#endif
+#ifdef CONFIG_IOREQ_SERVER
+ /*
+ * Indicates that mapcache invalidation request should be sent to
+ * the device emulator.
+ */
+ bool mapcache_invalidate;
+#endif
+
/* The CPU, if any, which is holding onto this VCPU's state. */
#define VCPU_CPU_CLEAN (~0u)
unsigned int dirty_cpu;
* unpaused for the first time by the system controller.
*/
bool creation_finished;
- /*
- * Indicates that mapcache invalidation request should be sent to
- * the device emulator.
- */
- bool mapcache_invalidate;
/* Which guest this guest has privileges on */
struct domain *target;