                   !cpumask_empty(&dirty_mask)) )
     {
         /* Other cpus call __sync_local_execstate from flush ipi handler. */
-        flush_tlb_mask(&dirty_mask);
+        flush_mask(&dirty_mask, FLUSH_TLB | FLUSH_VCPU_STATE);
     }
 
     if ( prev != next )
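Up to this point flush_tlb_mask() was sufficient here only as a side effect: the flush IPI handler synced lazy execstate unconditionally. With the handler change further down, the sync has to be requested explicitly via FLUSH_VCPU_STATE. For reference, a sketch of the helpers this hunk builds on, assuming the usual flushtlb.h definitions (shown for context, not part of the patch):

    /* Assumed helper definitions from xen/include/asm-x86/flushtlb.h:
     * flush_tlb_mask() is flush_area_mask() with no VA and FLUSH_TLB only. */
    #define flush_mask(mask, flags) flush_area_mask(mask, NULL, flags)
    #define flush_tlb_mask(mask)    flush_mask(mask, FLUSH_TLB)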
         sync_local_execstate();
 
     /* Other cpus call __sync_local_execstate from flush ipi handler. */
-    flush_tlb_mask(v->vcpu_dirty_cpumask);
+    flush_mask(v->vcpu_dirty_cpumask, FLUSH_TLB | FLUSH_VCPU_STATE);
 }
 
 static int relinquish_memory(
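The same substitution in the vCPU state synchronization path: the flush here has double duty, invalidating stale TLB entries and forcing any CPU still holding this vCPU's register state to write it back, and the latter now has to be spelled out. After this change a caller wanting only the state sync could, hypothetically, drop the TLB part entirely:

    /* Hypothetical usage after this change: force remote CPUs to write
     * back lazily held vCPU state, with no TLB invalidation at all. */
    flush_mask(v->vcpu_dirty_cpumask, FLUSH_VCPU_STATE);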
     unsigned int flags = flush_flags;
     ack_APIC_irq();
     perfc_incr(ipis);
-    if ( __sync_local_execstate() )
+    if ( (flags & FLUSH_VCPU_STATE) && __sync_local_execstate() )
         flags &= ~(FLUSH_TLB | FLUSH_TLB_GLOBAL);
-    flush_area_local(flush_va, flags);
+    if ( flags & ~(FLUSH_VCPU_STATE | FLUSH_ORDER_MASK) )
+        flush_area_local(flush_va, flags);
     cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
 }
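The handler thus only pays for __sync_local_execstate() when the sender asked for it, and it skips flush_area_local() when nothing beyond FLUSH_VCPU_STATE and the order bits remains. Dropping FLUSH_TLB and FLUSH_TLB_GLOBAL is safe because a successful sync performs a context switch, and Xen's CR3 reload flushes the TLB, global mappings included. A worked example of the arithmetic, assuming FLUSH_TLB is 0x100 per this header's numbering:

    /* Sender requested a TLB flush plus state sync; the sync succeeded. */
    unsigned int flags = FLUSH_TLB | FLUSH_VCPU_STATE;   /* 0x1100 */
    flags &= ~(FLUSH_TLB | FLUSH_TLB_GLOBAL);            /* 0x1000 */
    /* 0x1000 & ~(FLUSH_VCPU_STATE | FLUSH_ORDER_MASK) == 0, so the
     * flush_area_local() call is skipped entirely. */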
     ASSERT(local_irq_is_enabled());
 
-    if ( cpumask_test_cpu(cpu, mask) )
+    if ( (flags & ~(FLUSH_VCPU_STATE | FLUSH_ORDER_MASK)) &&
+         cpumask_test_cpu(cpu, mask) )
         flags = flush_area_local(va, flags);
 
     if ( (flags & ~FLUSH_ORDER_MASK) &&
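On the local CPU the vCPU state is current by construction, so FLUSH_VCPU_STATE (like the bare order bits) never justifies a local flush_area_local() call. Note the asymmetry with the unchanged remote-side check just below, which masks only FLUSH_ORDER_MASK, so a pure state-sync request still raises the IPI. A hedged sketch of the function's overall shape after the change, with the IPI leg elided:

    /* Sketch of flush_area_mask() after this change; the remote leg's
     * flush_va/flush_flags publication and IPI wait loop are elided. */
    void flush_area_mask(const cpumask_t *mask, const void *va,
                         unsigned int flags)
    {
        unsigned int cpu = smp_processor_id();

        ASSERT(local_irq_is_enabled());

        /* Local leg: skip when only a state sync was requested. */
        if ( (flags & ~(FLUSH_VCPU_STATE | FLUSH_ORDER_MASK)) &&
             cpumask_test_cpu(cpu, mask) )
            flags = flush_area_local(va, flags);

        /* Remote leg: FLUSH_VCPU_STATE alone still sends the IPI. */
        if ( (flags & ~FLUSH_ORDER_MASK) &&
             !cpumask_subset(mask, cpumask_of(cpu)) )
        {
            /* ... send the flush IPI to mask minus cpu and wait ... */
        }
    }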
 #define FLUSH_CACHE      0x400
  /* VA for the flush has a valid mapping */
 #define FLUSH_VA_VALID   0x800
+ /* Flush CPU state */
+#define FLUSH_VCPU_STATE 0x1000
 
  /* Flush local TLBs/caches. */
 unsigned int flush_area_local(const void *va, unsigned int flags);
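The new flag continues the existing numbering above the action bits; the low byte (FLUSH_ORDER_MASK, 0xff) carries the flush order, biased by one. A hypothetical combined request, assuming the standard FLUSH_ORDER() encoding from the same header:

    /* Assumed from the same header: the order occupies the low byte. */
    #define FLUSH_ORDER_MASK 0xff
    #define FLUSH_ORDER(x)   ((x)+1)

    /* Hypothetical usage: flush a single 4k mapping at va on the CPUs
     * in mask, and sync any lazily held vCPU state there as well. */
    flush_area_mask(mask, va, FLUSH_TLB | FLUSH_VCPU_STATE | FLUSH_ORDER(0));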