}
}
-static bool always_flush(void *ctxt, struct vcpu *v)
-{
- return true;
-}
-
static int hvmop_flush_tlb_all(void)
{
if ( !is_hvm_domain(current->domain) )
return -EINVAL;
- return paging_flush_tlb(always_flush, NULL) ? 0 : -ERESTART;
+ return paging_flush_tlb(NULL) ? 0 : -ERESTART;
}
static int hvmop_set_evtchn_upcall_vector(
bitmap_fill(vpmask->mask, HVM_MAX_VCPUS);
}
-static bool vpmask_test(const struct hypercall_vpmask *vpmask,
- unsigned int vp)
-{
- ASSERT(vp < HVM_MAX_VCPUS);
- return test_bit(vp, vpmask->mask);
-}
-
static unsigned int vpmask_first(const struct hypercall_vpmask *vpmask)
{
return find_first_bit(vpmask->mask, HVM_MAX_VCPUS);
#undef NR_VPS_PER_BANK
}
-/*
- * Windows should not issue the hypercalls requiring this callback in the
- * case where vcpu_id would exceed the size of the mask.
- */
-static bool need_flush(void *ctxt, struct vcpu *v)
-{
- struct hypercall_vpmask *vpmask = ctxt;
-
- return vpmask_test(vpmask, v->vcpu_id);
-}
-
union hypercall_input {
uint64_t raw;
struct {
uint64_t flags;
uint64_t vcpu_mask;
} input_params;
+ unsigned long *vcpu_bitmap;
/* These hypercalls should never use the fast-call convention. */
if ( input->fast )
* so err on the safe side.
*/
if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
- vpmask_fill(vpmask);
+ vcpu_bitmap = NULL;
else
{
vpmask_empty(vpmask);
vpmask_set(vpmask, 0, input_params.vcpu_mask);
+ vcpu_bitmap = vpmask->mask;
}
/*
* A false return means that another vcpu is currently trying
* a similar operation, so back off.
*/
- if ( !paging_flush_tlb(need_flush, vpmask) )
+ if ( !paging_flush_tlb(vcpu_bitmap) )
return -ERESTART;
output->rep_complete = input->rep_count;
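For context, vpmask_set() (an existing viridian.c helper, not touched by this patch) is what turns the guest-supplied 64-bit vcpu_mask into bits of the per-pCPU hypercall_vpmask, so that vpmask->mask can be handed straight to paging_flush_tlb(). A rough sketch of its behaviour, assuming it simply walks the mask (the real helper may differ in detail):

    /* Sketch only: set bit (vp + n) for every bit n set in the guest mask. */
    static void vpmask_set_sketch(struct hypercall_vpmask *vpmask,
                                  unsigned int vp, uint64_t mask)
    {
        for ( ; mask; mask >>= 1, vp++ )
            if ( mask & 1 )
            {
                ASSERT(vp < HVM_MAX_VCPUS);
                __set_bit(vp, vpmask->mask);
            }
    }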
uint64_t flags;
struct hv_vpset set;
} input_params;
+ unsigned long *vcpu_bitmap;
/* These hypercalls should never use the fast-call convention. */
if ( input->fast )
sizeof(input_params)) != HVMTRANS_okay )
return -EINVAL;
- if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
- vpmask_fill(vpmask);
+ if ( (input_params.flags & HV_FLUSH_ALL_PROCESSORS) ||
+ input_params.set.format == HV_GENERIC_SET_ALL )
+ vcpu_bitmap = NULL;
else
{
union hypercall_vpset *vpset = &this_cpu(hypercall_vpset);
rc = hv_vpset_to_vpmask(set, vpmask);
if ( rc )
return rc;
+
+ vcpu_bitmap = vpmask->mask;
}
/*
* A false return means that another vcpu is currently trying
* a similar operation, so back off.
*/
- if ( !paging_flush_tlb(need_flush, vpmask) )
+ if ( !paging_flush_tlb(vcpu_bitmap) )
return -ERESTART;
output->rep_complete = input->rep_count;
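The extra HV_GENERIC_SET_ALL check above lets an "all processors" vpset skip the bank decode entirely. For reference, the relevant part of struct hv_vpset as described by the Hyper-V TLFS (the authoritative definition lives in the TLFS header) looks roughly like this:

    struct hv_vpset {
        uint64_t format;            /* HV_GENERIC_SET_SPARSE_4K or HV_GENERIC_SET_ALL */
        uint64_t valid_bank_mask;   /* which 64-vCPU banks follow */
        uint64_t bank_contents[];   /* one 64-bit vCPU mask per valid bank */
    };

With format == HV_GENERIC_SET_ALL the banks carry no information, so the code can pass a NULL bitmap just as it does for HV_FLUSH_ALL_PROCESSORS.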
hvm_update_guest_cr3(v, noflush);
}
-static bool flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
- void *ctxt)
+static bool flush_vcpu(const struct vcpu *v, const unsigned long *vcpu_bitmap)
+{
+ return !vcpu_bitmap || test_bit(v->vcpu_id, vcpu_bitmap);
+}
+
+/* Flush TLB of selected vCPUs. NULL for all. */
+static bool flush_tlb(const unsigned long *vcpu_bitmap)
{
static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
cpumask_t *mask = &this_cpu(flush_cpumask);
{
unsigned int cpu;
- if ( !flush_vcpu(ctxt, v) )
+ if ( !flush_vcpu(v, vcpu_bitmap) )
continue;
hvm_asid_flush_vcpu(v);
}
-/* Fluhs TLB of selected vCPUs. */
-bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
- void *ctxt)
+static bool flush_vcpu(const struct vcpu *v, const unsigned long *vcpu_bitmap)
+{
+ return !vcpu_bitmap || test_bit(v->vcpu_id, vcpu_bitmap);
+}
+
+/* Flush TLB of selected vCPUs. NULL for all. */
+bool shadow_flush_tlb(const unsigned long *vcpu_bitmap)
{
static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
cpumask_t *mask = &this_cpu(flush_cpumask);
/* Pause all other vcpus. */
for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
+ if ( v != current && flush_vcpu(v, vcpu_bitmap) )
vcpu_pause_nosync(v);
/* Now that all VCPUs are signalled to deschedule, we wait... */
for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
+ if ( v != current && flush_vcpu(v, vcpu_bitmap) )
while ( !vcpu_runnable(v) && v->is_running )
cpu_relax();
{
unsigned int cpu;
- if ( !flush_vcpu(ctxt, v) )
+ if ( !flush_vcpu(v, vcpu_bitmap) )
continue;
paging_update_cr3(v, false);
/* Done. */
for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
+ if ( v != current && flush_vcpu(v, vcpu_bitmap) )
vcpu_unpause(v);
return true;
}
/* Flush the TLB of the selected vCPUs. */
-bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
- void *ctxt);
+bool shadow_flush_tlb(const unsigned long *vcpu_bitmap);
#endif /* _XEN_SHADOW_PRIVATE_H */
void (*update_cr3 )(struct vcpu *v, int do_locking,
bool noflush);
void (*update_paging_modes )(struct vcpu *v);
- bool (*flush_tlb )(bool (*flush_vcpu)(void *ctxt,
- struct vcpu *v),
- void *ctxt);
+ bool (*flush_tlb )(const unsigned long *vcpu_bitmap);
unsigned int guest_levels;
return bits;
}
-static inline bool paging_flush_tlb(bool (*flush_vcpu)(void *ctxt,
- struct vcpu *v),
- void *ctxt)
+/* Flush selected vCPUs' TLBs. NULL for all. */
+static inline bool paging_flush_tlb(const unsigned long *vcpu_bitmap)
{
- return paging_get_hostmode(current)->flush_tlb(flush_vcpu, ctxt);
+ return paging_get_hostmode(current)->flush_tlb(vcpu_bitmap);
}
#endif /* XEN_PAGING_H */
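Purely illustrative (not part of the patch): how a caller uses the reworked interface. Only paging_flush_tlb(), DECLARE_BITMAP(), __set_bit() and HVM_MAX_VCPUS are real; the function and its choice of vCPUs are hypothetical.

    static void flush_example(void)
    {
        DECLARE_BITMAP(vcpus, HVM_MAX_VCPUS) = { 0 };

        __set_bit(0, vcpus);
        __set_bit(2, vcpus);

        /*
         * Flush only vCPUs 0 and 2 of the current domain.  A false return
         * means another vCPU is already performing a flush, so back off
         * (the hypercall paths turn this into -ERESTART).
         */
        if ( !paging_flush_tlb(vcpus) )
            return;

        /* A NULL bitmap selects every vCPU. */
        paging_flush_tlb(NULL);
    }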