d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
d->arch.hvm_domain.io_handler = xzalloc_array(struct hvm_io_handler,
NR_IO_HANDLERS);
+ d->arch.hvm_domain.irq = xzalloc_bytes(hvm_irq_size(NR_HVM_DOMU_IRQS));
+
rc = -ENOMEM;
- if ( !d->arch.hvm_domain.pl_time ||
+ if ( !d->arch.hvm_domain.pl_time || !d->arch.hvm_domain.irq ||
!d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
goto fail1;
+ /* Set the default number of GSIs */
+ hvm_domain_irq(d)->nr_gsis = NR_HVM_DOMU_IRQS;
+
+ BUILD_BUG_ON(NR_HVM_DOMU_IRQS < NR_ISAIRQS);
+ ASSERT(hvm_domain_irq(d)->nr_gsis >= NR_ISAIRQS);
+
/* need link to containing domain */
d->arch.hvm_domain.pl_time->domain = d;
xfree(d->arch.hvm_domain.io_handler);
xfree(d->arch.hvm_domain.params);
xfree(d->arch.hvm_domain.pl_time);
+ xfree(d->arch.hvm_domain.irq);
fail0:
hvm_destroy_cacheattr_region_list(d);
return rc;
xfree(d->arch.hvm_domain.pl_time);
d->arch.hvm_domain.pl_time = NULL;
+
+ xfree(d->arch.hvm_domain.irq);
+ d->arch.hvm_domain.irq = NULL;
}
static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
return;
gsi = hvm_pci_intx_gsi(device, intx);
+ if ( gsi >= hvm_irq->nr_gsis )
+ {
+ ASSERT_UNREACHABLE();
+ return;
+ }
if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
assert_gsi(d, gsi);
return;
gsi = hvm_pci_intx_gsi(device, intx);
+ if ( gsi >= hvm_irq->nr_gsis )
+ {
+ ASSERT_UNREACHABLE();
+ return;
+ }
--hvm_irq->gsi_assert_count[gsi];
link = hvm_pci_intx_link(device, intx);
{
case HVMIRQ_callback_gsi:
gsi = hvm_irq->callback_via.gsi = (uint8_t)via;
- if ( (gsi == 0) || (gsi >= ARRAY_SIZE(hvm_irq->gsi_assert_count)) )
+ if ( (gsi == 0) || (gsi >= hvm_irq->nr_gsis) )
hvm_irq->callback_via_type = HVMIRQ_callback_none;
else if ( hvm_irq->callback_via_asserted &&
(hvm_irq->gsi_assert_count[gsi]++ == 0) )
if ( unlikely(v->mce_pending) )
return hvm_intack_mce;
- if ( (plat->irq.callback_via_type == HVMIRQ_callback_vector)
+ if ( (plat->irq->callback_via_type == HVMIRQ_callback_vector)
&& vcpu_info(v, evtchn_upcall_pending) )
- return hvm_intack_vector(plat->irq.callback_via.vector);
+ return hvm_intack_vector(plat->irq->callback_via.vector);
if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
return hvm_intack_pic(0);
(uint32_t) hvm_irq->isa_irq.pad[0],
hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1],
hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]);
- for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 )
+ for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 )
printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8
" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
i, i+7,
hvm_irq->gsi_assert_count[i+5],
hvm_irq->gsi_assert_count[i+6],
hvm_irq->gsi_assert_count[i+7]);
+ if ( i != hvm_irq->nr_gsis )
+ {
+ printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1);
+ for ( ; i < hvm_irq->nr_gsis; i++)
+ printk(" %2"PRIu8, hvm_irq->gsi_assert_count[i]);
+ printk("\n");
+ }
printk("Link %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
hvm_irq->pci_link_assert_count[0],
hvm_irq->pci_link_assert_count[1],
hvm_irq->pci_link_assert_count[link] = 0;
/* Clear the GSI link assert counts */
- for ( gsi = 0; gsi < VIOAPIC_NUM_PINS; gsi++ )
+ for ( gsi = 0; gsi < hvm_irq->nr_gsis; gsi++ )
hvm_irq->gsi_assert_count[gsi] = 0;
/* Recalculate the counts from the IRQ line state */
#define is_pv_32bit_vcpu(v) (is_pv_32bit_domain((v)->domain))
#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
- d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
+ (d)->arch.hvm_domain.irq->callback_via_type == HVMIRQ_callback_vector)
#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
#define is_domain_direct_mapped(d) ((void)(d), 0)
/* Lock protects access to irq, vpic and vioapic. */
spinlock_t irq_lock;
- struct hvm_irq irq;
+ struct hvm_irq *irq;
struct hvm_hw_vpic vpic[2]; /* 0=master; 1=slave */
struct hvm_vioapic *vioapic;
struct hvm_hw_stdvga stdvga;
/* Number of INTx wires asserting each PCI-ISA link. */
u8 pci_link_assert_count[4];
- /*
- * Number of wires asserting each GSI.
- *
- * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space
- * except ISA IRQ 0, which is connected to GSI 2.
- * PCI links map into this space via the PCI-ISA bridge.
- *
- * GSIs 16+ are used only be PCI devices. The mapping from PCI device to
- * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16
- */
- u8 gsi_assert_count[VIOAPIC_NUM_PINS];
-
/*
* GSIs map onto PIC/IO-APIC in the usual way:
* 0-7: Master 8259 PIC, IO-APIC pins 0-7
u8 round_robin_prev_vcpu;
struct hvm_irq_dpci *dpci;
+
+ /*
+ * Number of wires asserting each GSI.
+ *
+ * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space
+ * except ISA IRQ 0, which is connected to GSI 2.
+ * PCI links map into this space via the PCI-ISA bridge.
+ *
+     * GSIs 16+ are used only by PCI devices. The mapping from PCI device to
+ * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16
+ */
+ unsigned int nr_gsis;
+ u8 gsi_assert_count[];
};
#define hvm_pci_intx_gsi(dev, intx) \
(((((dev)<<2) + ((dev)>>3) + (intx)) & 31) + 16)
#define hvm_pci_intx_link(dev, intx) \
(((dev) + (intx)) & 3)
-#define hvm_domain_irq(d) (&(d)->arch.hvm_domain.irq)
+#define hvm_domain_irq(d) ((d)->arch.hvm_domain.irq)
+#define hvm_irq_size(cnt) offsetof(struct hvm_irq, gsi_assert_count[cnt])
#define hvm_isa_irq_to_gsi(isa_irq) ((isa_irq) ? : 2)