(vp) < HVM_MAX_VCPUS; \
(vp) = vpmask_next(vpmask, vp))
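+/*
+ * Scratch space into which a guest-supplied hv_vpset can be copied.
+ * valid_bank_mask is a 64-bit quantity, so bank_contents can never
+ * exceed 64 entries.
+ */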
+struct hypercall_vpset {
+    struct hv_vpset set;
+    uint64_t __bank_contents[64];
+};
+
+static DEFINE_PER_CPU(struct hypercall_vpset, hypercall_vpset);
+
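+/* One 64-bit bank of bank_contents for each bit set in valid_bank_mask */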
+static unsigned int hv_vpset_nr_banks(struct hv_vpset *vpset)
+{
+    return hweight64(vpset->valid_bank_mask);
+}
+
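+/*
+ * Convert a guest-supplied hv_vpset into a hypercall_vpmask. 'size' is
+ * the number of bytes of the set that may legitimately be dereferenced,
+ * bounding accesses to bank_contents[]. For SPARSE_4K, each bit set in
+ * valid_bank_mask contributes one 64-bit bank covering 64 consecutive
+ * VP indices, e.g. valid_bank_mask == 0x5 supplies two banks, covering
+ * VPs 0-63 and 128-191.
+ */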
+static int hv_vpset_to_vpmask(struct hv_vpset *set, size_t size,
+                              struct hypercall_vpmask *vpmask)
+{
+    switch ( set->format )
+    {
+    case HV_GENERIC_SET_ALL:
+        vpmask_fill(vpmask);
+        return 0;
+
+    case HV_GENERIC_SET_SPARSE_4K:
+    {
+        uint64_t bank_mask;
+        unsigned int bank = 0, vp = 0;
+
+        vpmask_empty(vpmask);
+        for ( bank_mask = set->valid_bank_mask; bank_mask; bank_mask >>= 1 )
+        {
+            /* Make sure we won't dereference past the end of the array */
+            if ( (void *)(set->bank_contents + bank) >=
+                 (void *)set + size )
+            {
+                ASSERT_UNREACHABLE();
+                return -EINVAL;
+            }
+
+            if ( bank_mask & 1 )
+            {
+                uint64_t mask = set->bank_contents[bank];
+                unsigned int i;
+
+                for ( i = 0; i < 64; i++, vp++ )
+                {
+                    if ( mask & 1 )
+                    {
+                        if ( vp >= HVM_MAX_VCPUS )
+                            return -EINVAL;
+
+                        vpmask_set(vpmask, vp);
+                    }
+
+                    mask >>= 1;
+                }
+
+                bank++;
+            }
+            else
+                vp += 64; /* a bank not present still covers 64 VPs */
+        }
+        return 0;
+    }
+
+    default:
+        break;
+    }
+
+    return -EINVAL;
+}
+
/*
* Windows should not issue the hypercalls requiring this callback in the
* case where vcpu_id would exceed the size of the mask.
return 0;
}
+static int hvcall_flush_ex(union hypercall_input *input,
+                           union hypercall_output *output,
+                           unsigned long input_params_gpa,
+                           unsigned long output_params_gpa)
+{
+    struct hypercall_vpmask *vpmask = &this_cpu(hypercall_vpmask);
+    struct {
+        uint64_t address_space;
+        uint64_t flags;
+        struct hv_vpset set;
+    } input_params;
+
+    /* These hypercalls should never use the fast-call convention. */
+    if ( input->fast )
+        return -EINVAL;
+
+    /* Get input parameters. */
+    if ( hvm_copy_from_guest_phys(&input_params, input_params_gpa,
+                                  sizeof(input_params)) != HVMTRANS_okay )
+        return -EINVAL;
+
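+    /*
+     * HV_FLUSH_ALL_PROCESSORS avoids decoding the vpset entirely;
+     * otherwise the variable size set is copied into per-CPU scratch
+     * space and converted into a vpmask.
+     */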
+    if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
+        vpmask_fill(vpmask);
+    else
+    {
+        struct hypercall_vpset *vpset = &this_cpu(hypercall_vpset);
+        struct hv_vpset *set = &vpset->set;
+        size_t size;
+        int rc;
+
+        *set = input_params.set;
+        if ( set->format == HV_GENERIC_SET_SPARSE_4K )
+        {
+            unsigned long offset = offsetof(typeof(input_params),
+                                            set.bank_contents);
+
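+            /*
+             * The fixed portion of the set was already copied as part
+             * of input_params; only the banks actually present need be
+             * copied from guest memory.
+             */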
+            size = sizeof(*set->bank_contents) * hv_vpset_nr_banks(set);
+            if ( hvm_copy_from_guest_phys(&set->bank_contents,
+                                          input_params_gpa + offset,
+                                          size) != HVMTRANS_okay )
+                return -EINVAL;
+
+            size += sizeof(*set);
+        }
+        else
+            size = sizeof(*set);
+
+        rc = hv_vpset_to_vpmask(set, size, vpmask);
+        if ( rc )
+            return rc;
+    }
+
+    /*
+     * A false return means that another vcpu is currently trying
+     * a similar operation, so back off.
+     */
+    if ( !paging_flush_tlb(need_flush, vpmask) )
+        return -ERESTART;
+
+    output->rep_complete = input->rep_count;
+
+    return 0;
+}
+
static void send_ipi(struct hypercall_vpmask *vpmask, uint8_t vector)
{
struct domain *currd = current->domain;
output_params_gpa);
break;
+    case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
+    case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
+        rc = hvcall_flush_ex(&input, &output, input_params_gpa,
+                             output_params_gpa);
+        break;
+
case HVCALL_SEND_IPI:
rc = hvcall_ipi(&input, &output, input_params_gpa,
output_params_gpa);