Assert an event channel upcall when the upcall vector or callback via is
set and the vCPU has pending events to inject. This fixes a bug that
happened if the guest mapped the vcpu info area using
VCPUOP_register_vcpu_info without having set up the event channel
upcall, and only afterwards set up the upcall vector.
In this scenario the guest would not receive any upcalls, because the
call to VCPUOP_register_vcpu_info would have marked the vCPU as having
pending events, but the vector could not be injected because it was
not yet set up.
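
As an illustration, here is a minimal guest-side sketch of that
inverted ordering, written with Linux-style hypercall wrappers; the
per_cpu_vcpu_info storage and the function name are hypothetical, and
error handling is omitted:

#include <linux/mm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/hvm/hvm_op.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

static struct vcpu_info per_cpu_vcpu_info[NR_CPUS]; /* hypothetical */

static void xen_setup_events_inverted(unsigned int cpu, uint8_t vector)
{
    struct vcpu_register_vcpu_info info = {
        .mfn = virt_to_mfn(&per_cpu_vcpu_info[cpu]),
        .offset = offset_in_page(&per_cpu_vcpu_info[cpu]),
    };
    struct xen_hvm_evtchn_upcall_vector upcall = {
        .vcpu = cpu,
        .vector = vector,
    };

    /* Step 1: map the vcpu info area; Xen may already mark events
     * pending on the vCPU at this point. */
    HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

    /* Step 2: only now register the upcall vector. Before this patch,
     * events flagged pending in step 1 were never injected, because no
     * vector existed when they became pending. */
    HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &upcall);
}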
This has not caused issues so far because all existing consumers first
set up the vector callback and then map the vcpu info page, but there's
no limitation that prevents doing it in the reverse order.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
printk(XENLOG_G_INFO "%pv: upcall vector %02x\n", v, op.vector);
v->arch.hvm_vcpu.evtchn_upcall_vector = op.vector;
+ hvm_assert_evtchn_irq(v);
return 0;
}
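
The hunk below gives the callback via path the same treatment: after
the via is updated, the event channel interrupt is re-asserted on every
online vCPU. Conceptually, the re-assert amounts to the following
sketch; vcpu_info_ptr() and inject_upcall() are hypothetical helper
names, not Xen internals:

/* Conceptual sketch only, with hypothetical helpers. */
static void assert_upcall_if_pending(struct vcpu *v)
{
    struct vcpu_info *vi = vcpu_info_ptr(v);            /* hypothetical */

    /* evtchn_upcall_pending may have been set before any delivery
     * method existed; re-assert so the event is not lost. */
    if ( vi != NULL && vi->evtchn_upcall_pending )
        inject_upcall(v, v->arch.hvm_vcpu.evtchn_upcall_vector); /* hypothetical */
}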
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
     unsigned int gsi=0, pdev=0, pintx=0;
     uint8_t via_type;
+    struct vcpu *v;
 
     via_type = (uint8_t)MASK_EXTR(via, HVM_PARAM_CALLBACK_IRQ_TYPE_MASK) + 1;
     if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
[...]
     spin_unlock(&d->arch.hvm_domain.irq_lock);
 
+    for_each_vcpu ( d, v )
+        if ( is_vcpu_online(v) )
+            hvm_assert_evtchn_irq(v);
+
 #ifndef NDEBUG
     printk(XENLOG_G_INFO "Dom%u callback via changed to ", d->domain_id);
     switch ( via_type )