{
    register_t hcr;
    struct p2m_domain *p2m = &n->domain->arch.p2m;
+    uint8_t *last_vcpu_ran;

    if ( is_idle_vcpu(n) )
        return;

    WRITE_SYSREG(hcr, HCR_EL2);
    isb();
+
+    last_vcpu_ran = &p2m->last_vcpu_ran[smp_processor_id()];
+
+    /*
+     * Flush the local TLB for the domain to prevent wrong TLB translations
+     * when running multiple vCPUs of the same domain on a single pCPU.
+     */
+    if ( *last_vcpu_ran != INVALID_VCPU_ID && *last_vcpu_ran != n->vcpu_id )
+        flush_tlb_local();
+
+    *last_vcpu_ran = n->vcpu_id;
}
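
The hunk above reads and updates a per-pCPU array that the patch is expected to add to struct p2m_domain. A minimal sketch of the assumed declaration follows; the field name and uint8_t element type are taken from the code above, while the comment, the NR_CPUS bound and its value are assumptions for illustration only.

#include <stdint.h>

#define NR_CPUS 8   /* illustrative value; Xen defines its own NR_CPUS */

struct p2m_domain {
    /* ... existing P2M fields omitted in this sketch ... */

    /*
     * Last vCPU of this domain that ran on each pCPU, or INVALID_VCPU_ID
     * if no vCPU of this domain has run on that pCPU yet.
     */
    uint8_t last_vcpu_ran[NR_CPUS];
};
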
int p2m_init(struct domain *d)
{
    struct p2m_domain *p2m = &d->arch.p2m;
    int rc = 0;
+    unsigned int cpu;

    rwlock_init(&p2m->lock);
    INIT_PAGE_LIST_HEAD(&p2m->pages);

    rc = p2m_alloc_table(d);
+    /*
+     * Make sure that the chosen type is able to store any vCPU ID between
+     * 0 and the maximum number of virtual CPUs supported, as well as
+     * INVALID_VCPU_ID.
+     */
+    BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0]) * 8)) < MAX_VIRT_CPUS);
+    BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0]) * 8)) < INVALID_VCPU_ID);
+
+    for_each_possible_cpu(cpu)
+        p2m->last_vcpu_ran[cpu] = INVALID_VCPU_ID;
+
    return rc;
}
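
To make the combined effect of the two hunks concrete, here is a small standalone C model (not Xen code) of the per-pCPU tracking: every slot starts as INVALID_VCPU_ID, and a local flush is needed only when a different vCPU of the same domain is scheduled on a pCPU. The values of NR_CPUS, MAX_VIRT_CPUS and INVALID_VCPU_ID are assumptions chosen for the example.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS         4      /* illustrative */
#define MAX_VIRT_CPUS   128    /* illustrative; must fit in uint8_t */
#define INVALID_VCPU_ID 0xff   /* illustrative sentinel; also fits in uint8_t */

static uint8_t last_vcpu_ran[NR_CPUS];

/* Mirrors the check done on context switch: flush only when a *different*
 * vCPU of the same domain last ran on this pCPU, then record the new one. */
static int need_local_flush(unsigned int cpu, uint8_t vcpu_id)
{
    int flush = (last_vcpu_ran[cpu] != INVALID_VCPU_ID &&
                 last_vcpu_ran[cpu] != vcpu_id);

    last_vcpu_ran[cpu] = vcpu_id;
    return flush;
}

int main(void)
{
    /* Compile-time analogues of the BUILD_BUG_ON()s above. */
    _Static_assert((1 << (sizeof(last_vcpu_ran[0]) * 8)) >= MAX_VIRT_CPUS,
                   "vCPU IDs must fit in the array element type");
    _Static_assert((1 << (sizeof(last_vcpu_ran[0]) * 8)) >= INVALID_VCPU_ID,
                   "INVALID_VCPU_ID must fit in the array element type");

    /* Initialisation, as done at p2m setup time. */
    for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
        last_vcpu_ran[cpu] = INVALID_VCPU_ID;

    assert(!need_local_flush(0, 0)); /* first vCPU on pCPU 0: no flush */
    assert(!need_local_flush(0, 0)); /* same vCPU resumes: no flush */
    assert(need_local_flush(0, 1));  /* different vCPU of the domain: flush */
    assert(!need_local_flush(1, 1)); /* other pCPU still unused: no flush */

    printf("flush rule behaves as expected\n");
    return 0;
}

Tracking the last vCPU ID per pCPU, rather than flushing unconditionally, keeps the common case of a vCPU resuming on the pCPU it last ran on flush-free.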