if ( rc != 0 )
goto fail6;
- if ( is_viridian_domain(d) )
- {
- rc = viridian_vcpu_init(v);
- if ( rc != 0 )
- goto fail7;
- }
-
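The init/deinit pair removed here managed a dynamically allocated per-vCPU mask; the rest of the patch replaces that with a statically allocated per-pCPU mask, which also sidesteps an ordering problem: is_viridian_domain() is not yet true when vCPUs are created (the toolstack enables the viridian feature set afterwards), so the gated allocation could never have happened for the vCPUs that needed it. The matching unwind label below goes away with the call. For reference, a minimal sketch of the goto-unwind idiom being edited (the step/undo names are hypothetical stand-ins, not Xen functions):

    /* Hypothetical init steps and the undo action for the first. */
    int step_a(struct vcpu *v);
    int step_b(struct vcpu *v);
    void undo_step_a(struct vcpu *v);

    int example_vcpu_init(struct vcpu *v)
    {
        int rc;

        rc = step_a(v);
        if ( rc != 0 )
            goto fail1;

        rc = step_b(v);
        if ( rc != 0 )
            goto fail2;

        return 0;

     fail2:
        undo_step_a(v);          /* unwind in reverse order */
     fail1:
        return rc;
    }

Deleting a step from the middle of such a chain means deleting both the call and its failN: label, while the later labels keep their numbers, which is exactly the shape of the hunks above and below.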
if ( v->vcpu_id == 0 )
{
/* NB. All these really belong in hvm_domain_initialise(). */
return 0;
- fail7:
- hvm_all_ioreq_servers_remove_vcpu(v->domain, v);
fail6:
nestedhvm_vcpu_destroy(v);
fail5:
void hvm_vcpu_destroy(struct vcpu *v)
{
- if ( is_viridian_domain(v->domain) )
- viridian_vcpu_deinit(v);
-
hvm_all_ioreq_servers_remove_vcpu(v->domain, v);
if ( hvm_altp2m_supported() )
    altp2m_vcpu_destroy(v);
return 1;
}
-int viridian_vcpu_init(struct vcpu *v)
-{
- return alloc_cpumask_var(&v->arch.hvm_vcpu.viridian.flush_cpumask) ?
- 0 : -ENOMEM;
-}
-
-void viridian_vcpu_deinit(struct vcpu *v)
-{
- free_cpumask_var(v->arch.hvm_vcpu.viridian.flush_cpumask);
-}
+static DEFINE_PER_CPU(cpumask_t, ipi_cpumask);
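For context: DEFINE_PER_CPU() declares one statically allocated instance of the variable per physical CPU, and this_cpu() evaluates to the instance belonging to the CPU the code is currently running on, so no dynamic allocation and no locking are needed as long as the mask is only live while the hypercall stays on that pCPU. A minimal sketch of the pattern, using Xen's macros but an otherwise made-up function:

    static DEFINE_PER_CPU(cpumask_t, scratch_cpumask);

    static void build_and_consume_mask(void)
    {
        /* This pCPU's private copy; no lock needed while we
         * cannot be preempted with the value still live. */
        cpumask_t *mask = &this_cpu(scratch_cpumask);

        cpumask_clear(mask);
        /* ... populate the mask, then consume it, on this pCPU ... */
    }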
int viridian_hypercall(struct cpu_user_regs *regs)
{
if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
input_params.vcpu_mask = ~0ul;
- pcpu_mask = curr->arch.hvm_vcpu.viridian.flush_cpumask;
+ pcpu_mask = &this_cpu(ipi_cpumask);
cpumask_clear(pcpu_mask);
/*
 * For each specified virtual CPU flush all ASIDs to invalidate
 * TLB entries the next time it is scheduled and then, if it
 * is currently running, add its physical CPU to a mask of
 * those which need to be interrupted to force a flush.
 */
for_each_vcpu ( currd, v )
{
    if ( v->vcpu_id >= (sizeof(input_params.vcpu_mask) * 8) )
        break;

    if ( !(input_params.vcpu_mask & (1ul << v->vcpu_id)) )
        continue;

    hvm_asid_flush_vcpu(v);
- if ( v->is_running )
+ if ( v != curr && v->is_running )
__cpumask_set_cpu(v->processor, pcpu_mask);
}
/*
 * Since ASIDs have now been flushed it just remains to
 * force any CPUs currently running target vCPUs out of non-
 * root mode. It's possible that re-scheduling has taken place
 * so we may unnecessarily IPI some CPUs.
 */
if ( !cpumask_empty(pcpu_mask) )
- flush_tlb_mask(pcpu_mask);
+ smp_send_event_check_mask(pcpu_mask);
+
+ output.rep_complete = input.rep_count;
status = HV_STATUS_SUCCESS;
break;
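Two points about the hunk above. First, smp_send_event_check_mask() raises a dummy IPI whose only effect is to kick the targeted pCPUs out of guest (non-root) mode; the actual invalidation is already guaranteed by hvm_asid_flush_vcpu(), which makes each flushed vCPU pick up a fresh ASID, and hence a clean TLB context, at its next VM entry, so the previous flush_tlb_mask() (a full host TLB flush) was stronger than necessary. Second, these flush hypercalls have "rep" variants, and the caller decides from the reps-completed field of the result value whether to reissue the call, so reporting all input.rep_count repetitions complete is needed for Windows to treat the flush as finished. A sketch of the result layout this implies, with field widths taken from the published Hyper-V TLFS rather than from this file:

    /* Hyper-V hypercall result value: status code in the low 16
     * bits, count of completed repetitions in bits 43:32. */
    union hypercall_output {
        uint64_t raw;
        struct {
            uint16_t result;             /* e.g. HV_STATUS_SUCCESS */
            uint16_t reserved1;
            unsigned int rep_complete:12;
            unsigned int reserved2:20;
        };
    };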
struct viridian_vcpu
{
union viridian_apic_assist apic_assist;
- cpumask_var_t flush_cpumask;
};
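With the per-pCPU mask in place, struct viridian_vcpu is back to plain value state. The removed member was a cpumask_var_t, which on large-NR_CPUS builds is a pointer that must be paired with alloc_cpumask_var()/free_cpumask_var(), and managing that lifecycle was the sole purpose of the deleted viridian_vcpu_init()/viridian_vcpu_deinit(). Roughly the two shapes of the type, with the exact threshold assumed here rather than checked:

    #if NR_CPUS > 2 * BITS_PER_LONG
    typedef cpumask_t *cpumask_var_t;    /* needs alloc/free */
    #else
    typedef cpumask_t cpumask_var_t[1];  /* embedded, no allocation */
    #endif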
union viridian_guest_os_id
void viridian_time_ref_count_freeze(struct domain *d);
void viridian_time_ref_count_thaw(struct domain *d);
-int viridian_vcpu_init(struct vcpu *v);
-void viridian_vcpu_deinit(struct vcpu *v);
-
#endif /* __ASM_X86_HVM_VIRIDIAN_H__ */
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */