hvm_isa_irq_deassert(d, isa_irq);
break;
case 1:
- hvm_isa_irq_assert(d, isa_irq);
+ hvm_isa_irq_assert(d, isa_irq, NULL);
break;
default:
return -EINVAL;
spin_unlock(&d->arch.hvm_domain.irq_lock);
}
-void hvm_isa_irq_assert(
- struct domain *d, unsigned int isa_irq)
+int hvm_isa_irq_assert(struct domain *d, unsigned int isa_irq,
+ int (*get_vector)(const struct domain *d,
+ unsigned int gsi))
{
struct hvm_irq *hvm_irq = hvm_domain_irq(d);
unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
+ int vector = -1;
ASSERT(isa_irq <= 15);
(hvm_irq->gsi_assert_count[gsi]++ == 0) )
assert_irq(d, gsi, isa_irq);
+ if ( get_vector )
+ vector = get_vector(d, gsi);
+
spin_unlock(&d->arch.hvm_domain.irq_lock);
+
+ return vector;
}
void hvm_isa_irq_deassert(
ASSERT(spin_is_locked(&s->lock));
if ( acpi->pm1a_en & acpi->pm1a_sts & SCI_MASK )
- hvm_isa_irq_assert(s->vcpu->domain, SCI_IRQ);
+ hvm_isa_irq_assert(s->vcpu->domain, SCI_IRQ, NULL);
else
hvm_isa_irq_deassert(s->vcpu->domain, SCI_IRQ);
}
s->hw.cmos_data[RTC_REG_C] |= RTC_IRQF;
if ( rtc_mode_is(s, no_ack) )
hvm_isa_irq_deassert(vrtc_domain(s), RTC_IRQ);
- hvm_isa_irq_assert(vrtc_domain(s), RTC_IRQ);
+ hvm_isa_irq_assert(vrtc_domain(s), RTC_IRQ, NULL);
}
/* Called by the VPT code after it's injected a PF interrupt for us.
spin_unlock_irqrestore(&vlapic->esr_lock, flags);
}
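+/*
+ * Check whether interrupt vector @vec is pending in the vIRR or, when
+ * posted interrupt processing is in use, in the (not yet synced) PIR.
+ */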
+bool vlapic_test_irq(const struct vlapic *vlapic, uint8_t vec)
+{
+ if ( unlikely(vec < 16) )
+ return false;
+
+ if ( hvm_funcs.test_pir &&
+ hvm_funcs.test_pir(const_vlapic_vcpu(vlapic), vec) )
+ return true;
+
+ return vlapic_test_vector(vec, &vlapic->regs->data[APIC_IRR]);
+}
+
void vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig)
{
struct vcpu *target = vlapic_vcpu(vlapic);
vlapic_set_vector(i, &vlapic->regs->data[APIC_IRR]);
}
+static bool vmx_test_pir(const struct vcpu *v, uint8_t vec)
+{
+ return pi_test_pir(vec, &v->arch.hvm_vmx.pi_desc);
+}
+
static void vmx_handle_eoi(u8 vector)
{
unsigned long status;
.process_isr = vmx_process_isr,
.deliver_posted_intr = vmx_deliver_posted_intr,
.sync_pir_to_irr = vmx_sync_pir_to_irr,
+ .test_pir = vmx_test_pir,
.handle_eoi = vmx_handle_eoi,
.nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
.enable_msr_interception = vmx_enable_msr_interception,
{
vmx_function_table.deliver_posted_intr = NULL;
vmx_function_table.sync_pir_to_irr = NULL;
+ vmx_function_table.test_pir = NULL;
}
if ( cpu_has_vmx_tsc_scaling )
struct list_head *head = &v->arch.hvm_vcpu.tm_list;
struct periodic_time *pt, *temp, *earliest_pt;
uint64_t max_lag;
- int irq, is_lapic;
+ int irq, is_lapic, pt_vector;
spin_lock(&v->arch.hvm_vcpu.tm_lock);
spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ /*
+ * If the periodic timer interrupt is handled by the LAPIC, its vector in
+ * the IRR is returned and used to set eoi_exit_bitmap for the virtual
+ * interrupt delivery case. Otherwise return -1 to do nothing.
+ */
if ( is_lapic )
+ {
vlapic_set_irq(vcpu_vlapic(v), irq, 0);
+ pt_vector = irq;
+ }
else
{
hvm_isa_irq_deassert(v->domain, irq);
- hvm_isa_irq_assert(v->domain, irq);
+ if ( platform_legacy_irq(irq) && vlapic_accept_pic_intr(v) &&
+ v->domain->arch.hvm_domain.vpic[irq >> 3].int_output )
+ {
+ hvm_isa_irq_assert(v->domain, irq, NULL);
+ pt_vector = -1;
+ }
+ else
+ {
+ pt_vector = hvm_isa_irq_assert(v->domain, irq, vioapic_get_vector);
+ /*
+ * hvm_isa_irq_assert() may not set the corresponding bit in the vIRR
+ * when the mask field of the IOAPIC RTE is set. Check it again.
+ */
+ if ( pt_vector < 0 || !vlapic_test_irq(vcpu_vlapic(v), pt_vector) )
+ pt_vector = -1;
+ }
}
- /*
- * If periodic timer interrut is handled by lapic, its vector in
- * IRR is returned and used to set eoi_exit_bitmap for virtual
- * interrupt delivery case. Otherwise return -1 to do nothing.
- */
- if ( !is_lapic &&
- platform_legacy_irq(irq) && vlapic_accept_pic_intr(v) &&
- (&v->domain->arch.hvm_domain)->vpic[irq >> 3].int_output )
- return -1;
- else
- return pt_irq_vector(earliest_pt, hvm_intsrc_lapic);
+ return pt_vector;
}
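For context, a minimal sketch of how a VMX caller could consume the value now returned by pt_update_irq(). The helper name pt_vector_to_eoi_exit_bitmap() is invented for illustration, and vmx_set_eoi_exit_bitmap() is assumed to be the existing helper that marks a vector for EOI-induced VM exits; this is not part of the patch.

static void pt_vector_to_eoi_exit_bitmap(struct vcpu *v)
{
    /* pt_update_irq() returns -1 when no vector needs an EOI exit. */
    int pt_vector = pt_update_irq(v);

    if ( pt_vector != -1 )
        vmx_set_eoi_exit_bitmap(v, pt_vector);
}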
static struct periodic_time *is_pt_irq(
void (*process_isr)(int isr, struct vcpu *v);
void (*deliver_posted_intr)(struct vcpu *v, u8 vector);
void (*sync_pir_to_irr)(struct vcpu *v);
+ bool (*test_pir)(const struct vcpu *v, uint8_t vector);
void (*handle_eoi)(u8 vector);
/*Walk nested p2m */
void hvm_pci_intx_deassert(struct domain *d, unsigned int device,
unsigned int intx);
-/* Modify state of an ISA device's IRQ wire. */
-void hvm_isa_irq_assert(struct domain *d, unsigned int isa_irq);
+/*
+ * Modify state of an ISA device's IRQ wire. In some cases the caller is
+ * interested in the interrupt vector of the IRQ, but once the irq_lock is
+ * released the vector may be changed by others. The get_vector() callback
+ * allows the vector to be fetched while the irq_lock is still held. In
+ * most cases, simply pass NULL for get_vector.
+ */
+int hvm_isa_irq_assert(struct domain *d, unsigned int isa_irq,
+ int (*get_vector)(const struct domain *d,
+ unsigned int gsi));
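As a usage sketch (not part of the patch), a caller that wants to pulse an ISA IRQ and learn, still under irq_lock, which vector the emulated IO-APIC routed it to might look like the following; the wrapper name is hypothetical, while vioapic_get_vector() is the callback the patch itself passes in pt_update_irq().

static int pulse_isa_irq(struct domain *d, unsigned int isa_irq)
{
    hvm_isa_irq_deassert(d, isa_irq);

    /* Returns a negative value if no vector could be determined. */
    return hvm_isa_irq_assert(d, isa_irq, vioapic_get_vector);
}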
void hvm_isa_irq_deassert(struct domain *d, unsigned int isa_irq);
/* Modify state of GSIs. */
bool_t is_vlapic_lvtpc_enabled(struct vlapic *vlapic);
+bool vlapic_test_irq(const struct vlapic *vlapic, uint8_t vec);
void vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig);
int vlapic_has_pending_irq(struct vcpu *v);
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
-static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
+static inline int pi_test_and_set_pir(uint8_t vector, struct pi_desc *pi_desc)
{
return test_and_set_bit(vector, pi_desc->pir);
}
+static inline int pi_test_pir(uint8_t vector, const struct pi_desc *pi_desc)
+{
+ return test_bit(vector, pi_desc->pir);
+}
+
static inline int pi_test_and_set_on(struct pi_desc *pi_desc)
{
return test_and_set_bit(POSTED_INTR_ON, &pi_desc->control);