spin_unlock(&d->arch.hvm_domain.irq_lock);
}
-void hvm_set_callback_irq_level(void)
+static void hvm_set_callback_irq_level(struct vcpu *v)
{
- struct vcpu *v = current;
struct domain *d = v->domain;
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
unsigned int gsi, pdev, pintx, asserted;
- /* Fast lock-free tests. */
- if ( (v->vcpu_id != 0) ||
-      (hvm_irq->callback_via_type == HVMIRQ_callback_none) )
-     return;
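+ /* Callers must pass VCPU0: only its upcall state drives the callback IRQ. */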
+ ASSERT(v->vcpu_id == 0);
spin_lock(&d->arch.hvm_domain.irq_lock);
spin_unlock(&d->arch.hvm_domain.irq_lock);
}
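+/*
+ * Re-evaluate the callback IRQ when it is asserted but VCPU0 no longer
+ * has an event-channel upcall pending, so the line can be de-asserted.
+ */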
+void hvm_maybe_deassert_evtchn_irq(void)
+{
+ struct domain *d = current->domain;
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+
+ if ( hvm_irq->callback_via_asserted &&
+      !vcpu_info(d->vcpu[0], evtchn_upcall_pending) )
+     hvm_set_callback_irq_level(d->vcpu[0]);
+}
+
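+/* Assert the callback IRQ on behalf of a newly pending event-channel upcall. */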
+void hvm_assert_evtchn_irq(struct vcpu *v)
+{
+ if ( v->vcpu_id == 0 )
+     hvm_set_callback_irq_level(v);
+}
+
void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
return -1;
}
-/*
- * TODO: 1. Should not need special treatment of event-channel events.
- * 2. Should take notice of interrupt shadows (or clear them).
- */
int hvm_local_events_need_delivery(struct vcpu *v)
{
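+ /*
+  * Event channels now raise the callback IRQ via hvm_assert_evtchn_irq(),
+  * so cpu_has_pending_irq() already covers them and no separate
+  * evtchn_upcall_pending check is needed here.
+  */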
- int pending;
- pending = (vcpu_info(v, evtchn_upcall_pending) || cpu_has_pending_irq(v));
+ int pending = cpu_has_pending_irq(v);
if ( unlikely(pending) )
    pending = hvm_interrupts_enabled(v);
static inline void vcpu_mark_events_pending(struct vcpu *v)
{
- if ( !test_and_set_bit(0, &vcpu_info(v, evtchn_upcall_pending)) )
+ if ( test_and_set_bit(0, &vcpu_info(v, evtchn_upcall_pending)) )
+     return;
+
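+ /* HVM guests receive event-channel notifications via the callback IRQ. */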
+ if ( is_hvm_vcpu(v) )
+     hvm_assert_evtchn_irq(v);
+ else
    vcpu_kick(v);
}
void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
-void hvm_set_callback_irq_level(void);
+void hvm_maybe_deassert_evtchn_irq(void);
+void hvm_assert_evtchn_irq(struct vcpu *v);
void hvm_set_callback_via(struct domain *d, uint64_t via);
int cpu_get_interrupt(struct vcpu *v, int *type);