else
d->arch.evtchn_irq = platform_dom0_evtchn_ppi();
+ if ( !vgic_reserve_virq(d, d->arch.evtchn_irq) )
+ BUG();
+
/*
* Virtual UART is only used by linux early printk and decompress code.
* Only use it for the hardware domain because the linux kernel may not
irq = res;
DPRINT("irq %u = %u\n", i, irq);
+ /*
+ * Checking the return of vgic_reserve_virq is not necessary. It
+ * should only fail when the vIRQ is reserved twice, which can
+ * legitimately happen if the IRQ is shared.
+ */
+ vgic_reserve_virq(d, irq);
res = route_irq_to_guest(d, irq, dt_node_name(dev));
if ( res )
{
printk("Additional IRQ %u (%s)\n", irq, what);
+ if ( !vgic_reserve_virq(d, irq) )
+ printk("Failed to reserve vIRQ %u on dom%d\n",
+ irq, d->domain_id);
+
ret = route_irq_to_guest(d, irq, what);
if ( ret )
printk("Failed to route %s to dom%d\n", what, d->domain_id);
d->arch.vgic.handler->domain_init(d);
+ d->arch.vgic.allocated_irqs =
+ xzalloc_array(unsigned long, BITS_TO_LONGS(vgic_num_irqs(d)));
+ if ( !d->arch.vgic.allocated_irqs )
+ return -ENOMEM;
+
+ /* vIRQs 0-15 (SGIs) are always reserved */
+ for ( i = 0; i < NR_GIC_SGI; i++ )
+ set_bit(i, d->arch.vgic.allocated_irqs);
+
return 0;
}
{
xfree(d->arch.vgic.shared_irqs);
xfree(d->arch.vgic.pending_irqs);
+ xfree(d->arch.vgic.allocated_irqs);
}
int vcpu_vgic_init(struct vcpu *v)
return v->domain->arch.vgic.handler->emulate_sysreg(regs, hsr);
}
+bool_t vgic_reserve_virq(struct domain *d, unsigned int virq)
+{
+ if ( virq >= vgic_num_irqs(d) )
+ return 0;
+
+ return !test_and_set_bit(virq, d->arch.vgic.allocated_irqs);
+}
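
The reserve primitive above is the fixed-number path: callers that know
their vIRQ up front, such as the evtchn and timer code in this patch,
reserve it and treat failure as a bug. A minimal sketch of that pattern,
where EXAMPLE_PPI is a hypothetical constant and not part of this patch:

    /* Sketch only: claim a known vIRQ at domain build time. */
    if ( !vgic_reserve_virq(d, EXAMPLE_PPI) )
        BUG();  /* vIRQ already taken: a setup ordering bug */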
+
+int vgic_allocate_virq(struct domain *d, bool_t spi)
+{
+ int first, end;
+ unsigned int virq;
+
+ if ( !spi )
+ {
+ /* We only allocate PPIs here; SGIs are all reserved. */
+ first = NR_GIC_SGI;
+ end = NR_GIC_LOCAL_IRQS;
+ }
+ else
+ {
+ first = NR_GIC_LOCAL_IRQS;
+ end = vgic_num_irqs(d);
+ }
+
+ /*
+ * There is no spinlock protecting allocated_irqs, so a concurrent
+ * caller may race us and test_and_set_bit can fail. If it does,
+ * rerun the search.
+ */
+ do
+ {
+ virq = find_next_zero_bit(d->arch.vgic.allocated_irqs, end, first);
+ if ( virq >= end )
+ return -1;
+ }
+ while ( test_and_set_bit(virq, d->arch.vgic.allocated_irqs) );
+
+ return virq;
+}
+
+void vgic_free_virq(struct domain *d, unsigned int virq)
+{
+ clear_bit(virq, d->arch.vgic.allocated_irqs);
+}
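
The dynamic path pairs vgic_allocate_virq with vgic_free_virq on failure.
A hedged sketch of the intended lifecycle; example_assign_spi and
setup_emulated_device are hypothetical and not part of this patch:

    /* Illustrative only: allocate a free SPI for an emulated device
     * and release it if the device setup fails. */
    static int example_assign_spi(struct domain *d)
    {
        int virq = vgic_allocate_spi(d);
        int ret;

        if ( virq < 0 )
            return -ENOSPC;           /* No free SPI for this domain */

        ret = setup_emulated_device(d, virq); /* hypothetical consumer */
        if ( ret )
            vgic_free_virq(d, virq);  /* Hand the vIRQ back on failure */

        return ret;
    }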
+
/*
* Local variables:
* mode: C
{
d->arch.phys_timer_base.offset = NOW();
d->arch.virt_timer_base.offset = READ_SYSREG64(CNTPCT_EL0);
+
+ /* At this stage vgic_reserve_virq can't fail */
+ if ( is_hardware_domain(d) )
+ {
+ if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_PHYS_SECURE_PPI)) )
+ BUG();
+
+ if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_PHYS_NONSECURE_PPI)) )
+ BUG();
+
+ if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_VIRT_PPI)) )
+ BUG();
+ }
+ else
+ {
+ if ( !vgic_reserve_virq(d, GUEST_TIMER_PHYS_S_PPI) )
+ BUG();
+
+ if ( !vgic_reserve_virq(d, GUEST_TIMER_PHYS_NS_PPI) )
+ BUG();
+
+ if ( !vgic_reserve_virq(d, GUEST_TIMER_VIRT_PPI) )
+ BUG();
+ }
+
return 0;
}
spinlock_t lock;
int ctlr;
int nr_spis; /* Number of SPIs */
+ unsigned long *allocated_irqs; /* Bitmap of vIRQs allocated to this domain */
struct vgic_irq_rank *shared_irqs;
/*
* SPIs are domain global, SGIs and PPIs are per-VCPU and stored in
enum gic_sgi_mode irqmode, int virq,
unsigned long vcpu_mask);
extern void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq);
+
+/* Reserve a specific guest vIRQ */
+extern bool_t vgic_reserve_virq(struct domain *d, unsigned int virq);
+
+/*
+ * Allocate a guest vIRQ
+ * - spi == 0 => allocate a PPI. It will be the same on every vCPU.
+ * - spi == 1 => allocate an SPI.
+ */
+extern int vgic_allocate_virq(struct domain *d, bool_t spi);
+
+static inline int vgic_allocate_ppi(struct domain *d)
+{
+ return vgic_allocate_virq(d, 0 /* ppi */);
+}
+
+static inline int vgic_allocate_spi(struct domain *d)
+{
+ return vgic_allocate_virq(d, 1 /* spi */);
+}
+
+extern void vgic_free_virq(struct domain *d, unsigned int virq);
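
A PPI is banked per vCPU (the same vIRQ number is valid on every vCPU),
while an SPI is a single domain-global interrupt, so callers pick the
helper matching the scope of their event source. A hedged sketch for a
hypothetical per-vCPU source:

    /* Sketch: a per-vCPU event source wants a PPI so each vCPU can
     * receive its own copy under the same vIRQ number. */
    int virq = vgic_allocate_ppi(d);

    if ( virq < 0 )
        return -ENOSPC;  /* All PPIs (vIRQ 16-31) already in use */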
+
#endif /* __ASM_ARM_VGIC_H__ */
/*